": { // AudioSelector
- * // AudioDurationCorrection: "DISABLED" || "AUTO" || "TRACK" || "FRAME",
+ * // AudioDurationCorrection: "DISABLED" || "AUTO" || "TRACK" || "FRAME" || "FORCE",
* // CustomLanguageCode: "STRING_VALUE",
* // DefaultSelection: "DEFAULT" || "NOT_DEFAULT",
* // ExternalAudioFileInput: "STRING_VALUE",
@@ -203,6 +205,7 @@ export interface ListJobsCommandOutput extends ListJobsResponse, __MetadataBeare
* // TerminateCaptions: "END_OF_INPUT" || "DISABLED",
* // },
* // FileSourceSettings: { // FileSourceSettings
+ * // ByteRateLimit: "ENABLED" || "DISABLED",
* // Convert608To708: "UPCONVERT" || "DISABLED",
* // ConvertPaintToPop: "ENABLED" || "DISABLED",
* // Framerate: { // CaptionSourceFramerate
@@ -294,6 +297,13 @@ export interface ListJobsCommandOutput extends ListJobsResponse, __MetadataBeare
* // VideoOverlays: [ // __listOfVideoOverlay
* // { // VideoOverlay
* // EndTimecode: "STRING_VALUE",
+ * // InitialPosition: { // VideoOverlayPosition
+ * // Height: Number("int"),
+ * // Unit: "PIXELS" || "PERCENTAGE",
+ * // Width: Number("int"),
+ * // XPosition: Number("int"),
+ * // YPosition: Number("int"),
+ * // },
* // Input: { // VideoOverlayInput
* // FileInput: "STRING_VALUE",
* // InputClippings: [ // __listOfVideoOverlayInputClipping
@@ -305,7 +315,21 @@ export interface ListJobsCommandOutput extends ListJobsResponse, __MetadataBeare
* // TimecodeSource: "EMBEDDED" || "ZEROBASED" || "SPECIFIEDSTART",
* // TimecodeStart: "STRING_VALUE",
* // },
+ * // Playback: "ONCE" || "REPEAT",
* // StartTimecode: "STRING_VALUE",
+ * // Transitions: [ // __listOfVideoOverlayTransition
+ * // { // VideoOverlayTransition
+ * // EndPosition: {
+ * // Height: Number("int"),
+ * // Unit: "PIXELS" || "PERCENTAGE",
+ * // Width: Number("int"),
+ * // XPosition: Number("int"),
+ * // YPosition: Number("int"),
+ * // },
+ * // EndTimecode: "STRING_VALUE",
+ * // StartTimecode: "STRING_VALUE",
+ * // },
+ * // ],
* // },
* // ],
* // VideoSelector: { // VideoSelector
@@ -970,7 +994,7 @@ export interface ListJobsCommandOutput extends ListJobsResponse, __MetadataBeare
* // TimedMetadataSchemeIdUri: "STRING_VALUE",
* // TimedMetadataValue: "STRING_VALUE",
* // },
- * // Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "WEBM" || "RAW" || "Y4M",
+ * // Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "OGG" || "WEBM" || "RAW" || "Y4M",
* // F4vSettings: { // F4vSettings
* // MoovPlacement: "PROGRESSIVE_DOWNLOAD" || "NORMAL",
* // },
@@ -1196,6 +1220,7 @@ export interface ListJobsCommandOutput extends ListJobsResponse, __MetadataBeare
* // },
* // RateControlMode: "VBR" || "CBR" || "QVBR",
* // RepeatPps: "DISABLED" || "ENABLED",
+ * // SaliencyAwareEncoding: "DISABLED" || "PREFERRED",
* // ScanTypeConversionMode: "INTERLACED" || "INTERLACED_OPTIMIZE",
* // SceneChangeDetect: "DISABLED" || "ENABLED" || "TRANSITION_DETECTION",
* // Slices: Number("int"),
diff --git a/clients/client-mediaconvert/src/commands/ListPresetsCommand.ts b/clients/client-mediaconvert/src/commands/ListPresetsCommand.ts
index 2d2c2d8d60f63..5e6c5ff558d77 100644
--- a/clients/client-mediaconvert/src/commands/ListPresetsCommand.ts
+++ b/clients/client-mediaconvert/src/commands/ListPresetsCommand.ts
@@ -319,7 +319,7 @@ export interface ListPresetsCommandOutput extends ListPresetsResponse, __Metadat
* // TimedMetadataSchemeIdUri: "STRING_VALUE",
* // TimedMetadataValue: "STRING_VALUE",
* // },
- * // Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "WEBM" || "RAW" || "Y4M",
+ * // Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "OGG" || "WEBM" || "RAW" || "Y4M",
* // F4vSettings: { // F4vSettings
* // MoovPlacement: "PROGRESSIVE_DOWNLOAD" || "NORMAL",
* // },
@@ -531,6 +531,7 @@ export interface ListPresetsCommandOutput extends ListPresetsResponse, __Metadat
* // },
* // RateControlMode: "VBR" || "CBR" || "QVBR",
* // RepeatPps: "DISABLED" || "ENABLED",
+ * // SaliencyAwareEncoding: "DISABLED" || "PREFERRED",
* // ScanTypeConversionMode: "INTERLACED" || "INTERLACED_OPTIMIZE",
* // SceneChangeDetect: "DISABLED" || "ENABLED" || "TRANSITION_DETECTION",
* // Slices: Number("int"),
diff --git a/clients/client-mediaconvert/src/commands/ListVersionsCommand.ts b/clients/client-mediaconvert/src/commands/ListVersionsCommand.ts
new file mode 100644
index 0000000000000..985a64dba9c24
--- /dev/null
+++ b/clients/client-mediaconvert/src/commands/ListVersionsCommand.ts
@@ -0,0 +1,105 @@
+// smithy-typescript generated code
+import { getEndpointPlugin } from "@smithy/middleware-endpoint";
+import { getSerdePlugin } from "@smithy/middleware-serde";
+import { Command as $Command } from "@smithy/smithy-client";
+import { MetadataBearer as __MetadataBearer } from "@smithy/types";
+
+import { commonParams } from "../endpoint/EndpointParameters";
+import { MediaConvertClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../MediaConvertClient";
+import { ListVersionsRequest, ListVersionsResponse } from "../models/models_2";
+import { de_ListVersionsCommand, se_ListVersionsCommand } from "../protocols/Aws_restJson1";
+
+/**
+ * @public
+ */
+export type { __MetadataBearer };
+export { $Command };
+/**
+ * @public
+ *
+ * The input for {@link ListVersionsCommand}.
+ */
+export interface ListVersionsCommandInput extends ListVersionsRequest {}
+/**
+ * @public
+ *
+ * The output of {@link ListVersionsCommand}.
+ */
+export interface ListVersionsCommandOutput extends ListVersionsResponse, __MetadataBearer {}
+
+/**
+ * Retrieve a JSON array of all available Job engine versions and the date they expire.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { MediaConvertClient, ListVersionsCommand } from "@aws-sdk/client-mediaconvert"; // ES Modules import
+ * // const { MediaConvertClient, ListVersionsCommand } = require("@aws-sdk/client-mediaconvert"); // CommonJS import
+ * const client = new MediaConvertClient(config);
+ * const input = { // ListVersionsRequest
+ * MaxResults: Number("int"),
+ * NextToken: "STRING_VALUE",
+ * };
+ * const command = new ListVersionsCommand(input);
+ * const response = await client.send(command);
+ * // { // ListVersionsResponse
+ * // NextToken: "STRING_VALUE",
+ * // Versions: [ // __listOfJobEngineVersion
+ * // { // JobEngineVersion
+ * // ExpirationDate: new Date("TIMESTAMP"),
+ * // Version: "STRING_VALUE",
+ * // },
+ * // ],
+ * // };
+ *
+ * ```
+ *
+ * @param ListVersionsCommandInput - {@link ListVersionsCommandInput}
+ * @returns {@link ListVersionsCommandOutput}
+ * @see {@link ListVersionsCommandInput} for command's `input` shape.
+ * @see {@link ListVersionsCommandOutput} for command's `response` shape.
+ * @see {@link MediaConvertClientResolvedConfig | config} for MediaConvertClient's `config` shape.
+ *
+ * @throws {@link BadRequestException} (client fault)
+ * The service can't process your request because of a problem in the request. Please check your request form and syntax.
+ *
+ * @throws {@link ConflictException} (client fault)
+ * The service couldn't complete your request because there is a conflict with the current state of the resource.
+ *
+ * @throws {@link ForbiddenException} (client fault)
+ * You don't have permissions for this action with the credentials you sent.
+ *
+ * @throws {@link InternalServerErrorException} (server fault)
+ * The service encountered an unexpected condition and can't fulfill your request.
+ *
+ * @throws {@link NotFoundException} (client fault)
+ * The resource you requested doesn't exist.
+ *
+ * @throws {@link TooManyRequestsException} (client fault)
+ * Too many requests have been sent in too short of a time. The service limits the rate at which it will accept requests.
+ *
+ * @throws {@link MediaConvertServiceException}
+ * Base exception class for all service exceptions from MediaConvert service.
+ *
+ * @public
+ */
+export class ListVersionsCommand extends $Command
+ .classBuilder<
+ ListVersionsCommandInput,
+ ListVersionsCommandOutput,
+ MediaConvertClientResolvedConfig,
+ ServiceInputTypes,
+ ServiceOutputTypes
+ >()
+ .ep(commonParams)
+ .m(function (this: any, Command: any, cs: any, config: MediaConvertClientResolvedConfig, o: any) {
+ return [
+ getSerdePlugin(config, this.serialize, this.deserialize),
+ getEndpointPlugin(config, Command.getEndpointParameterInstructions()),
+ ];
+ })
+ .s("MediaConvert", "ListVersions", {})
+ .n("MediaConvertClient", "ListVersionsCommand")
+ .f(void 0, void 0)
+ .ser(se_ListVersionsCommand)
+ .de(de_ListVersionsCommand)
+ .build() {}
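For orientation, here is a minimal sketch of paginating the new ListVersions operation by hand. It relies only on the input and output shapes shown in the JSDoc above; the MaxResults value is arbitrary, and the client is assumed to pick up region and credentials from the environment.

```ts
import {
  MediaConvertClient,
  ListVersionsCommand,
  ListVersionsCommandOutput,
} from "@aws-sdk/client-mediaconvert";

// Follow NextToken until it is absent and collect every Job engine version.
async function listAllJobEngineVersions(client: MediaConvertClient) {
  const versions: NonNullable<ListVersionsCommandOutput["Versions"]> = [];
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new ListVersionsCommand({ MaxResults: 20, NextToken: nextToken })
    );
    versions.push(...(page.Versions ?? []));
    nextToken = page.NextToken;
  } while (nextToken);
  return versions;
}

// Usage: const versions = await listAllJobEngineVersions(new MediaConvertClient({}));
```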
diff --git a/clients/client-mediaconvert/src/commands/SearchJobsCommand.ts b/clients/client-mediaconvert/src/commands/SearchJobsCommand.ts
index 23d5869433726..227a05b64b311 100644
--- a/clients/client-mediaconvert/src/commands/SearchJobsCommand.ts
+++ b/clients/client-mediaconvert/src/commands/SearchJobsCommand.ts
@@ -67,6 +67,8 @@ export interface SearchJobsCommandOutput extends SearchJobsResponse, __MetadataB
* // },
* // ],
* // Id: "STRING_VALUE",
+ * // JobEngineVersionRequested: "STRING_VALUE",
+ * // JobEngineVersionUsed: "STRING_VALUE",
* // JobPercentComplete: Number("int"),
* // JobTemplate: "STRING_VALUE",
* // Messages: { // JobMessages
@@ -145,7 +147,7 @@ export interface SearchJobsCommandOutput extends SearchJobsResponse, __MetadataB
* // },
* // AudioSelectors: { // __mapOfAudioSelector
* // "": { // AudioSelector
- * // AudioDurationCorrection: "DISABLED" || "AUTO" || "TRACK" || "FRAME",
+ * // AudioDurationCorrection: "DISABLED" || "AUTO" || "TRACK" || "FRAME" || "FORCE",
* // CustomLanguageCode: "STRING_VALUE",
* // DefaultSelection: "DEFAULT" || "NOT_DEFAULT",
* // ExternalAudioFileInput: "STRING_VALUE",
@@ -204,6 +206,7 @@ export interface SearchJobsCommandOutput extends SearchJobsResponse, __MetadataB
* // TerminateCaptions: "END_OF_INPUT" || "DISABLED",
* // },
* // FileSourceSettings: { // FileSourceSettings
+ * // ByteRateLimit: "ENABLED" || "DISABLED",
* // Convert608To708: "UPCONVERT" || "DISABLED",
* // ConvertPaintToPop: "ENABLED" || "DISABLED",
* // Framerate: { // CaptionSourceFramerate
@@ -295,6 +298,13 @@ export interface SearchJobsCommandOutput extends SearchJobsResponse, __MetadataB
* // VideoOverlays: [ // __listOfVideoOverlay
* // { // VideoOverlay
* // EndTimecode: "STRING_VALUE",
+ * // InitialPosition: { // VideoOverlayPosition
+ * // Height: Number("int"),
+ * // Unit: "PIXELS" || "PERCENTAGE",
+ * // Width: Number("int"),
+ * // XPosition: Number("int"),
+ * // YPosition: Number("int"),
+ * // },
* // Input: { // VideoOverlayInput
* // FileInput: "STRING_VALUE",
* // InputClippings: [ // __listOfVideoOverlayInputClipping
@@ -306,7 +316,21 @@ export interface SearchJobsCommandOutput extends SearchJobsResponse, __MetadataB
* // TimecodeSource: "EMBEDDED" || "ZEROBASED" || "SPECIFIEDSTART",
* // TimecodeStart: "STRING_VALUE",
* // },
+ * // Playback: "ONCE" || "REPEAT",
* // StartTimecode: "STRING_VALUE",
+ * // Transitions: [ // __listOfVideoOverlayTransition
+ * // { // VideoOverlayTransition
+ * // EndPosition: {
+ * // Height: Number("int"),
+ * // Unit: "PIXELS" || "PERCENTAGE",
+ * // Width: Number("int"),
+ * // XPosition: Number("int"),
+ * // YPosition: Number("int"),
+ * // },
+ * // EndTimecode: "STRING_VALUE",
+ * // StartTimecode: "STRING_VALUE",
+ * // },
+ * // ],
* // },
* // ],
* // VideoSelector: { // VideoSelector
@@ -971,7 +995,7 @@ export interface SearchJobsCommandOutput extends SearchJobsResponse, __MetadataB
* // TimedMetadataSchemeIdUri: "STRING_VALUE",
* // TimedMetadataValue: "STRING_VALUE",
* // },
- * // Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "WEBM" || "RAW" || "Y4M",
+ * // Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "OGG" || "WEBM" || "RAW" || "Y4M",
* // F4vSettings: { // F4vSettings
* // MoovPlacement: "PROGRESSIVE_DOWNLOAD" || "NORMAL",
* // },
@@ -1197,6 +1221,7 @@ export interface SearchJobsCommandOutput extends SearchJobsResponse, __MetadataB
* // },
* // RateControlMode: "VBR" || "CBR" || "QVBR",
* // RepeatPps: "DISABLED" || "ENABLED",
+ * // SaliencyAwareEncoding: "DISABLED" || "PREFERRED",
* // ScanTypeConversionMode: "INTERLACED" || "INTERLACED_OPTIMIZE",
* // SceneChangeDetect: "DISABLED" || "ENABLED" || "TRANSITION_DETECTION",
* // Slices: Number("int"),
diff --git a/clients/client-mediaconvert/src/commands/UpdateJobTemplateCommand.ts b/clients/client-mediaconvert/src/commands/UpdateJobTemplateCommand.ts
index 7d86fbe5d3bb1..be8ac11a481a6 100644
--- a/clients/client-mediaconvert/src/commands/UpdateJobTemplateCommand.ts
+++ b/clients/client-mediaconvert/src/commands/UpdateJobTemplateCommand.ts
@@ -95,7 +95,7 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* },
* AudioSelectors: { // __mapOfAudioSelector
* "": { // AudioSelector
- * AudioDurationCorrection: "DISABLED" || "AUTO" || "TRACK" || "FRAME",
+ * AudioDurationCorrection: "DISABLED" || "AUTO" || "TRACK" || "FRAME" || "FORCE",
* CustomLanguageCode: "STRING_VALUE",
* DefaultSelection: "DEFAULT" || "NOT_DEFAULT",
* ExternalAudioFileInput: "STRING_VALUE",
@@ -154,6 +154,7 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* TerminateCaptions: "END_OF_INPUT" || "DISABLED",
* },
* FileSourceSettings: { // FileSourceSettings
+ * ByteRateLimit: "ENABLED" || "DISABLED",
* Convert608To708: "UPCONVERT" || "DISABLED",
* ConvertPaintToPop: "ENABLED" || "DISABLED",
* Framerate: { // CaptionSourceFramerate
@@ -228,6 +229,13 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* VideoOverlays: [ // __listOfVideoOverlay
* { // VideoOverlay
* EndTimecode: "STRING_VALUE",
+ * InitialPosition: { // VideoOverlayPosition
+ * Height: Number("int"),
+ * Unit: "PIXELS" || "PERCENTAGE",
+ * Width: Number("int"),
+ * XPosition: Number("int"),
+ * YPosition: Number("int"),
+ * },
* Input: { // VideoOverlayInput
* FileInput: "STRING_VALUE",
* InputClippings: [ // __listOfVideoOverlayInputClipping
@@ -239,7 +247,21 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* TimecodeSource: "EMBEDDED" || "ZEROBASED" || "SPECIFIEDSTART",
* TimecodeStart: "STRING_VALUE",
* },
+ * Playback: "ONCE" || "REPEAT",
* StartTimecode: "STRING_VALUE",
+ * Transitions: [ // __listOfVideoOverlayTransition
+ * { // VideoOverlayTransition
+ * EndPosition: {
+ * Height: Number("int"),
+ * Unit: "PIXELS" || "PERCENTAGE",
+ * Width: Number("int"),
+ * XPosition: Number("int"),
+ * YPosition: Number("int"),
+ * },
+ * EndTimecode: "STRING_VALUE",
+ * StartTimecode: "STRING_VALUE",
+ * },
+ * ],
* },
* ],
* VideoSelector: { // VideoSelector
@@ -904,7 +926,7 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* TimedMetadataSchemeIdUri: "STRING_VALUE",
* TimedMetadataValue: "STRING_VALUE",
* },
- * Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "WEBM" || "RAW" || "Y4M",
+ * Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "OGG" || "WEBM" || "RAW" || "Y4M",
* F4vSettings: { // F4vSettings
* MoovPlacement: "PROGRESSIVE_DOWNLOAD" || "NORMAL",
* },
@@ -1130,6 +1152,7 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* },
* RateControlMode: "VBR" || "CBR" || "QVBR",
* RepeatPps: "DISABLED" || "ENABLED",
+ * SaliencyAwareEncoding: "DISABLED" || "PREFERRED",
* ScanTypeConversionMode: "INTERLACED" || "INTERLACED_OPTIMIZE",
* SceneChangeDetect: "DISABLED" || "ENABLED" || "TRANSITION_DETECTION",
* Slices: Number("int"),
@@ -1548,7 +1571,7 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* // },
* // AudioSelectors: { // __mapOfAudioSelector
* // "": { // AudioSelector
- * // AudioDurationCorrection: "DISABLED" || "AUTO" || "TRACK" || "FRAME",
+ * // AudioDurationCorrection: "DISABLED" || "AUTO" || "TRACK" || "FRAME" || "FORCE",
* // CustomLanguageCode: "STRING_VALUE",
* // DefaultSelection: "DEFAULT" || "NOT_DEFAULT",
* // ExternalAudioFileInput: "STRING_VALUE",
@@ -1607,6 +1630,7 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* // TerminateCaptions: "END_OF_INPUT" || "DISABLED",
* // },
* // FileSourceSettings: { // FileSourceSettings
+ * // ByteRateLimit: "ENABLED" || "DISABLED",
* // Convert608To708: "UPCONVERT" || "DISABLED",
* // ConvertPaintToPop: "ENABLED" || "DISABLED",
* // Framerate: { // CaptionSourceFramerate
@@ -1681,6 +1705,13 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* // VideoOverlays: [ // __listOfVideoOverlay
* // { // VideoOverlay
* // EndTimecode: "STRING_VALUE",
+ * // InitialPosition: { // VideoOverlayPosition
+ * // Height: Number("int"),
+ * // Unit: "PIXELS" || "PERCENTAGE",
+ * // Width: Number("int"),
+ * // XPosition: Number("int"),
+ * // YPosition: Number("int"),
+ * // },
* // Input: { // VideoOverlayInput
* // FileInput: "STRING_VALUE",
* // InputClippings: [ // __listOfVideoOverlayInputClipping
@@ -1692,7 +1723,21 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* // TimecodeSource: "EMBEDDED" || "ZEROBASED" || "SPECIFIEDSTART",
* // TimecodeStart: "STRING_VALUE",
* // },
+ * // Playback: "ONCE" || "REPEAT",
* // StartTimecode: "STRING_VALUE",
+ * // Transitions: [ // __listOfVideoOverlayTransition
+ * // { // VideoOverlayTransition
+ * // EndPosition: {
+ * // Height: Number("int"),
+ * // Unit: "PIXELS" || "PERCENTAGE",
+ * // Width: Number("int"),
+ * // XPosition: Number("int"),
+ * // YPosition: Number("int"),
+ * // },
+ * // EndTimecode: "STRING_VALUE",
+ * // StartTimecode: "STRING_VALUE",
+ * // },
+ * // ],
* // },
* // ],
* // VideoSelector: { // VideoSelector
@@ -2357,7 +2402,7 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* // TimedMetadataSchemeIdUri: "STRING_VALUE",
* // TimedMetadataValue: "STRING_VALUE",
* // },
- * // Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "WEBM" || "RAW" || "Y4M",
+ * // Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "OGG" || "WEBM" || "RAW" || "Y4M",
* // F4vSettings: { // F4vSettings
* // MoovPlacement: "PROGRESSIVE_DOWNLOAD" || "NORMAL",
* // },
@@ -2583,6 +2628,7 @@ export interface UpdateJobTemplateCommandOutput extends UpdateJobTemplateRespons
* // },
* // RateControlMode: "VBR" || "CBR" || "QVBR",
* // RepeatPps: "DISABLED" || "ENABLED",
+ * // SaliencyAwareEncoding: "DISABLED" || "PREFERRED",
* // ScanTypeConversionMode: "INTERLACED" || "INTERLACED_OPTIMIZE",
* // SceneChangeDetect: "DISABLED" || "ENABLED" || "TRANSITION_DETECTION",
* // Slices: Number("int"),
diff --git a/clients/client-mediaconvert/src/commands/UpdatePresetCommand.ts b/clients/client-mediaconvert/src/commands/UpdatePresetCommand.ts
index bad023efb79f2..d8b9aa0c69aef 100644
--- a/clients/client-mediaconvert/src/commands/UpdatePresetCommand.ts
+++ b/clients/client-mediaconvert/src/commands/UpdatePresetCommand.ts
@@ -304,7 +304,7 @@ export interface UpdatePresetCommandOutput extends UpdatePresetResponse, __Metad
* TimedMetadataSchemeIdUri: "STRING_VALUE",
* TimedMetadataValue: "STRING_VALUE",
* },
- * Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "WEBM" || "RAW" || "Y4M",
+ * Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "OGG" || "WEBM" || "RAW" || "Y4M",
* F4vSettings: { // F4vSettings
* MoovPlacement: "PROGRESSIVE_DOWNLOAD" || "NORMAL",
* },
@@ -516,6 +516,7 @@ export interface UpdatePresetCommandOutput extends UpdatePresetResponse, __Metad
* },
* RateControlMode: "VBR" || "CBR" || "QVBR",
* RepeatPps: "DISABLED" || "ENABLED",
+ * SaliencyAwareEncoding: "DISABLED" || "PREFERRED",
* ScanTypeConversionMode: "INTERLACED" || "INTERLACED_OPTIMIZE",
* SceneChangeDetect: "DISABLED" || "ENABLED" || "TRANSITION_DETECTION",
* Slices: Number("int"),
@@ -1124,7 +1125,7 @@ export interface UpdatePresetCommandOutput extends UpdatePresetResponse, __Metad
* // TimedMetadataSchemeIdUri: "STRING_VALUE",
* // TimedMetadataValue: "STRING_VALUE",
* // },
- * // Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "WEBM" || "RAW" || "Y4M",
+ * // Container: "F4V" || "ISMV" || "M2TS" || "M3U8" || "CMFC" || "MOV" || "MP4" || "MPD" || "MXF" || "OGG" || "WEBM" || "RAW" || "Y4M",
* // F4vSettings: { // F4vSettings
* // MoovPlacement: "PROGRESSIVE_DOWNLOAD" || "NORMAL",
* // },
@@ -1336,6 +1337,7 @@ export interface UpdatePresetCommandOutput extends UpdatePresetResponse, __Metad
* // },
* // RateControlMode: "VBR" || "CBR" || "QVBR",
* // RepeatPps: "DISABLED" || "ENABLED",
+ * // SaliencyAwareEncoding: "DISABLED" || "PREFERRED",
* // ScanTypeConversionMode: "INTERLACED" || "INTERLACED_OPTIMIZE",
* // SceneChangeDetect: "DISABLED" || "ENABLED" || "TRANSITION_DETECTION",
* // Slices: Number("int"),
diff --git a/clients/client-mediaconvert/src/commands/index.ts b/clients/client-mediaconvert/src/commands/index.ts
index 81584bec707d6..c9591da2ad0dc 100644
--- a/clients/client-mediaconvert/src/commands/index.ts
+++ b/clients/client-mediaconvert/src/commands/index.ts
@@ -21,6 +21,7 @@ export * from "./ListJobsCommand";
export * from "./ListPresetsCommand";
export * from "./ListQueuesCommand";
export * from "./ListTagsForResourceCommand";
+export * from "./ListVersionsCommand";
export * from "./PutPolicyCommand";
export * from "./SearchJobsCommand";
export * from "./TagResourceCommand";
diff --git a/clients/client-mediaconvert/src/models/models_0.ts b/clients/client-mediaconvert/src/models/models_0.ts
index 03935cd2b1932..a22ee1779aab2 100644
--- a/clients/client-mediaconvert/src/models/models_0.ts
+++ b/clients/client-mediaconvert/src/models/models_0.ts
@@ -342,7 +342,7 @@ export interface AacSettings {
Bitrate?: number;
/**
- * AAC Profile.
+ * Specify the AAC profile. For the widest player compatibility and where higher bitrates are acceptable: Keep the default profile, LC (AAC-LC). For improved audio performance at lower bitrates: Choose HEV1 or HEV2. HEV1 (AAC-HE v1) adds spectral band replication to improve speech audio at low bitrates. HEV2 (AAC-HE v2) adds parametric stereo, which optimizes for encoding stereo audio at very low bitrates.
* @public
*/
CodecProfile?: AacCodecProfile;
@@ -354,7 +354,7 @@ export interface AacSettings {
CodingMode?: AacCodingMode;
/**
- * Rate Control Mode.
+ * Specify the AAC rate control mode. For a constant bitrate: Choose CBR. Your AAC output bitrate will be equal to the value that you choose for Bitrate. For a variable bitrate: Choose VBR. Your AAC output bitrate will vary according to your audio content and the value that you choose for Bitrate quality.
* @public
*/
RateControlMode?: AacRateControlMode;
@@ -366,7 +366,7 @@ export interface AacSettings {
RawFormat?: AacRawFormat;
/**
- * Specify the Sample rate in Hz. Valid sample rates depend on the Profile and Coding mode that you select. The following list shows valid sample rates for each Profile and Coding mode. * LC Profile, Coding mode 1.0, 2.0, and Receiver Mix: 8000, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 88200, 96000. * LC Profile, Coding mode 5.1: 32000, 44100, 48000, 96000. * HEV1 Profile, Coding mode 1.0 and Receiver Mix: 22050, 24000, 32000, 44100, 48000. * HEV1 Profile, Coding mode 2.0 and 5.1: 32000, 44100, 48000, 96000. * HEV2 Profile, Coding mode 2.0: 22050, 24000, 32000, 44100, 48000.
+ * Specify the AAC sample rate in samples per second (Hz). Valid sample rates depend on the AAC profile and Coding mode that you select. For a list of supported sample rates, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/aac-support.html
* @public
*/
SampleRate?: number;
@@ -378,7 +378,7 @@ export interface AacSettings {
Specification?: AacSpecification;
/**
- * VBR Quality Level - Only used if rate_control_mode is VBR.
+ * Specify the quality of your variable bitrate (VBR) AAC audio. For a list of approximate VBR bitrates, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/aac-support.html#aac_vbr
* @public
*/
VbrQuality?: AacVbrQuality;
@@ -3325,6 +3325,7 @@ export interface AudioSelectorGroup {
export const AudioDurationCorrection = {
AUTO: "AUTO",
DISABLED: "DISABLED",
+ FORCE: "FORCE",
FRAME: "FRAME",
TRACK: "TRACK",
} as const;
@@ -3394,7 +3395,7 @@ export type AudioSelectorType = (typeof AudioSelectorType)[keyof typeof AudioSel
*/
export interface AudioSelector {
/**
- * Apply audio timing corrections to help synchronize audio and video in your output. To apply timing corrections, your input must meet the following requirements: * Container: MP4, or MOV, with an accurate time-to-sample (STTS) table. * Audio track: AAC. Choose from the following audio timing correction settings: * Disabled (Default): Apply no correction. * Auto: Recommended for most inputs. MediaConvert analyzes the audio timing in your input and determines which correction setting to use, if needed. * Track: Adjust the duration of each audio frame by a constant amount to align the audio track length with STTS duration. Track-level correction does not affect pitch, and is recommended for tonal audio content such as music. * Frame: Adjust the duration of each audio frame by a variable amount to align audio frames with STTS timestamps. No corrections are made to already-aligned frames. Frame-level correction may affect the pitch of corrected frames, and is recommended for atonal audio content such as speech or percussion.
+ * Apply audio timing corrections to help synchronize audio and video in your output. To apply timing corrections, your input must meet the following requirements: * Container: MP4, or MOV, with an accurate time-to-sample (STTS) table. * Audio track: AAC. Choose from the following audio timing correction settings: * Disabled (Default): Apply no correction. * Auto: Recommended for most inputs. MediaConvert analyzes the audio timing in your input and determines which correction setting to use, if needed. * Track: Adjust the duration of each audio frame by a constant amount to align the audio track length with STTS duration. Track-level correction does not affect pitch, and is recommended for tonal audio content such as music. * Frame: Adjust the duration of each audio frame by a variable amount to align audio frames with STTS timestamps. No corrections are made to already-aligned frames. Frame-level correction may affect the pitch of corrected frames, and is recommended for atonal audio content such as speech or percussion. * Force: Apply audio duration correction, either Track or Frame depending on your input, regardless of the accuracy of your input's STTS table. Your output audio and video may not be aligned or it may contain audio artifacts.
* @public
*/
AudioDurationCorrection?: AudioDurationCorrection;
@@ -3588,6 +3589,20 @@ export interface EmbeddedSourceSettings {
TerminateCaptions?: EmbeddedTerminateCaptions;
}
+/**
+ * @public
+ * @enum
+ */
+export const CaptionSourceByteRateLimit = {
+ DISABLED: "DISABLED",
+ ENABLED: "ENABLED",
+} as const;
+
+/**
+ * @public
+ */
+export type CaptionSourceByteRateLimit = (typeof CaptionSourceByteRateLimit)[keyof typeof CaptionSourceByteRateLimit];
+
/**
* @public
* @enum
@@ -3654,6 +3669,12 @@ export type FileSourceTimeDeltaUnits = (typeof FileSourceTimeDeltaUnits)[keyof t
* @public
*/
export interface FileSourceSettings {
+ /**
+ * Choose whether to limit the byte rate at which your SCC input captions are inserted into your output. To not limit the caption rate: We recommend that you keep the default value, Disabled. MediaConvert inserts captions in your output according to the byte rates listed in the EIA-608 specification, typically 2 or 3 caption bytes per frame depending on your output frame rate. To limit your output caption rate: Choose Enabled. Choose this option if your downstream systems require a maximum of 2 caption bytes per frame. Note that this setting has no effect when your output frame rate is 30 or 60.
+ * @public
+ */
+ ByteRateLimit?: CaptionSourceByteRateLimit;
+
/**
* Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.
* @public
@@ -4150,6 +4171,56 @@ export interface InputVideoGenerator {
SampleRate?: number;
}
+/**
+ * @public
+ * @enum
+ */
+export const VideoOverlayUnit = {
+ PERCENTAGE: "PERCENTAGE",
+ PIXELS: "PIXELS",
+} as const;
+
+/**
+ * @public
+ */
+export type VideoOverlayUnit = (typeof VideoOverlayUnit)[keyof typeof VideoOverlayUnit];
+
+/**
+ * position of video overlay
+ * @public
+ */
+export interface VideoOverlayPosition {
+ /**
+ * To scale your video overlay to the same height as the base input video: Leave blank. To scale the height of your video overlay to a different height: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 360 and choose Pixels, your video overlay will be rendered with a height of 360. When you enter 50, choose Percentage, and your overlay's source has a height of 1080, your video overlay will be rendered with a height of 540. To scale your overlay to a specific height while automatically maintaining its original aspect ratio, enter a value for Height and leave Width blank.
+ * @public
+ */
+ Height?: number;
+
+ /**
+ * Specify the Unit type to use when you enter a value for X position, Y position, Width, or Height. You can choose Pixels or Percentage. Leave blank to use the default value, Pixels.
+ * @public
+ */
+ Unit?: VideoOverlayUnit;
+
+ /**
+ * To scale your video overlay to the same width as the base input video: Leave blank. To scale the width of your video overlay to a different width: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 640 and choose Pixels, your video overlay will scale to a width of 640 pixels. When you enter 50, choose Percentage, and your overlay's source has a width of 1920, your video overlay will scale to a width of 960. To scale your overlay to a specific width while automatically maintaining its original aspect ratio, enter a value for Width and leave Height blank.
+ * @public
+ */
+ Width?: number;
+
+ /**
+ * To position the left edge of your video overlay along the left edge of the base input video's frame: Keep blank, or enter 0. To position the left edge of your video overlay to the right, relative to the left edge of the base input video's frame: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 10 and choose Pixels, your video overlay will be positioned 10 pixels from the left edge of the base input video's frame. When you enter 10, choose Percentage, and your base input video is 1920x1080, your video overlay will be positioned 192 pixels from the left edge of the base input video's frame.
+ * @public
+ */
+ XPosition?: number;
+
+ /**
+ * To position the top edge of your video overlay along the top edge of the base input video's frame: Keep blank, or enter 0. To position the top edge of your video overlay down, relative to the top edge of the base input video's frame: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 10 and choose Pixels, your video overlay will be positioned 10 pixels from the top edge of the base input video's frame. When you enter 10, choose Percentage, and your underlying video is 1920x1080, your video overlay will be positioned 108 pixels from the top edge of the base input video's frame.
+ * @public
+ */
+ YPosition?: number;
+}
+
/**
* To transcode only portions of your video overlay, include one input clip for each part of your video overlay that you want in your output.
* @public
@@ -4174,7 +4245,8 @@ export interface VideoOverlayInputClipping {
*/
export interface VideoOverlayInput {
/**
- * Specify the input file S3, HTTP, or HTTPS URI for your video overlay. For consistency in color and formatting in your output video image, we recommend that you specify a video with similar characteristics as the underlying input video.
+ * Specify the input file S3, HTTP, or HTTPS URL for your video overlay.
+ * To specify one or more Transitions for your base input video instead: Leave blank.
* @public
*/
FileInput?: string;
@@ -4198,17 +4270,61 @@ export interface VideoOverlayInput {
TimecodeStart?: string;
}
+/**
+ * @public
+ * @enum
+ */
+export const VideoOverlayPlayBackMode = {
+ ONCE: "ONCE",
+ REPEAT: "REPEAT",
+} as const;
+
+/**
+ * @public
+ */
+export type VideoOverlayPlayBackMode = (typeof VideoOverlayPlayBackMode)[keyof typeof VideoOverlayPlayBackMode];
+
+/**
+ * Specify one or more Transitions for your video overlay. Use Transitions to reposition or resize your overlay over time. To use the same position and size for the duration of your video overlay: Leave blank. To specify a Transition: Enter a value for Start timecode, End Timecode, X Position, Y Position, Width, or Height.
+ * @public
+ */
+export interface VideoOverlayTransition {
+ /**
+ * Specify the ending position for this transition, relative to the base input video's frame. Your video overlay will move smoothly to this position, beginning at this transition's Start timecode and ending at this transition's End timecode.
+ * @public
+ */
+ EndPosition?: VideoOverlayPosition;
+
+ /**
+ * Specify the timecode for when this transition ends. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for Timecode source.
+ * @public
+ */
+ EndTimecode?: string;
+
+ /**
+ * Specify the timecode for when this transition begins. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for Timecode source.
+ * @public
+ */
+ StartTimecode?: string;
+}
+
/**
* Overlay one or more videos on top of your input video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/video-overlays.html
* @public
*/
export interface VideoOverlay {
/**
- * Enter the end timecode in the underlying input video for this overlay. Your overlay will be active through this frame. To display your video overlay for the duration of the underlying video: Leave blank. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for the underlying Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your overlay to end ten minutes into the video, enter 01:10:00:00.
+ * Enter the end timecode in the base input video for this overlay. Your overlay will be active through this frame. To display your video overlay for the duration of the base input video: Leave blank. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for the base input video's timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your overlay to end ten minutes into the video, enter 01:10:00:00.
* @public
*/
EndTimecode?: string;
+ /**
+ * Specify the Initial position of your video overlay. To specify the Initial position of your video overlay, including distance from the left or top edge of the base input video's frame, or size: Enter a value for X position, Y position, Width, or Height. To use the full frame of the base input video: Leave blank.
+ * @public
+ */
+ InitialPosition?: VideoOverlayPosition;
+
/**
* Input settings for Video overlay. You can include one or more video overlays in sequence at different times that you specify.
* @public
@@ -4216,10 +4332,22 @@ export interface VideoOverlay {
Input?: VideoOverlayInput;
/**
- * Enter the start timecode in the underlying input video for this overlay. Your overlay will be active starting with this frame. To display your video overlay starting at the beginning of the underlying video: Leave blank. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for the underlying Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your overlay to begin five minutes into the video, enter 01:05:00:00.
+ * Specify whether your video overlay repeats or plays only once. To repeat your video overlay on a loop: Keep the default value, Repeat. Your overlay will repeat for the duration of the base input video. To playback your video overlay only once: Choose Once. With either option, you can end playback at a time that you specify by entering a value for End timecode.
+ * @public
+ */
+ Playback?: VideoOverlayPlayBackMode;
+
+ /**
+ * Enter the start timecode in the base input video for this overlay. Your overlay will be active starting with this frame. To display your video overlay starting at the beginning of the base input video: Leave blank. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for the base input video's timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your overlay to begin five minutes into the video, enter 01:05:00:00.
* @public
*/
StartTimecode?: string;
+
+ /**
+ * Specify one or more transitions for your video overlay. Use Transitions to reposition or resize your overlay over time. To use the same position and size for the duration of your video overlay: Leave blank. To specify a Transition: Enter a value for Start timecode, End Timecode, X Position, Y Position, Width, or Height.
+ * @public
+ */
+ Transitions?: VideoOverlayTransition[];
}
/**
@@ -7386,75 +7514,3 @@ export const CmfcAudioTrackType = {
* @public
*/
export type CmfcAudioTrackType = (typeof CmfcAudioTrackType)[keyof typeof CmfcAudioTrackType];
-
-/**
- * @public
- * @enum
- */
-export const CmfcDescriptiveVideoServiceFlag = {
- DONT_FLAG: "DONT_FLAG",
- FLAG: "FLAG",
-} as const;
-
-/**
- * @public
- */
-export type CmfcDescriptiveVideoServiceFlag =
- (typeof CmfcDescriptiveVideoServiceFlag)[keyof typeof CmfcDescriptiveVideoServiceFlag];
-
-/**
- * @public
- * @enum
- */
-export const CmfcIFrameOnlyManifest = {
- EXCLUDE: "EXCLUDE",
- INCLUDE: "INCLUDE",
-} as const;
-
-/**
- * @public
- */
-export type CmfcIFrameOnlyManifest = (typeof CmfcIFrameOnlyManifest)[keyof typeof CmfcIFrameOnlyManifest];
-
-/**
- * @public
- * @enum
- */
-export const CmfcKlvMetadata = {
- NONE: "NONE",
- PASSTHROUGH: "PASSTHROUGH",
-} as const;
-
-/**
- * @public
- */
-export type CmfcKlvMetadata = (typeof CmfcKlvMetadata)[keyof typeof CmfcKlvMetadata];
-
-/**
- * @public
- * @enum
- */
-export const CmfcManifestMetadataSignaling = {
- DISABLED: "DISABLED",
- ENABLED: "ENABLED",
-} as const;
-
-/**
- * @public
- */
-export type CmfcManifestMetadataSignaling =
- (typeof CmfcManifestMetadataSignaling)[keyof typeof CmfcManifestMetadataSignaling];
-
-/**
- * @public
- * @enum
- */
-export const CmfcScte35Esam = {
- INSERT: "INSERT",
- NONE: "NONE",
-} as const;
-
-/**
- * @public
- */
-export type CmfcScte35Esam = (typeof CmfcScte35Esam)[keyof typeof CmfcScte35Esam];
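To make the new VideoOverlay fields documented above concrete, the following is an illustrative sketch of one overlay that combines InitialPosition, Playback, and a Transition. The S3 URI, timecodes, and pixel values are placeholders, and the type import assumes the model is re-exported from the package root, as is usual for generated clients.

```ts
import type { VideoOverlay } from "@aws-sdk/client-mediaconvert";

// Illustrative values only: a 640-pixel-wide overlay pinned to the top-left corner
// that repeats for the duration of the base input video, then slides to x=1280
// between 00:00:30:00 and 00:00:35:00.
const overlay: VideoOverlay = {
  Input: {
    FileInput: "s3://amzn-s3-demo-bucket/overlay.mov", // placeholder URI
    TimecodeSource: "ZEROBASED",
  },
  InitialPosition: { Unit: "PIXELS", Width: 640, XPosition: 0, YPosition: 0 },
  Playback: "REPEAT",
  Transitions: [
    {
      StartTimecode: "00:00:30:00",
      EndTimecode: "00:00:35:00",
      EndPosition: { Unit: "PIXELS", Width: 640, XPosition: 1280, YPosition: 0 },
    },
  ],
};
```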
diff --git a/clients/client-mediaconvert/src/models/models_1.ts b/clients/client-mediaconvert/src/models/models_1.ts
index f94da7ad2fcea..6d5d82b435c28 100644
--- a/clients/client-mediaconvert/src/models/models_1.ts
+++ b/clients/client-mediaconvert/src/models/models_1.ts
@@ -1,8 +1,4 @@
// smithy-typescript generated code
-import { ExceptionOptionType as __ExceptionOptionType } from "@smithy/smithy-client";
-
-import { MediaConvertServiceException as __BaseException } from "./MediaConvertServiceException";
-
import {
AccelerationSettings,
AccelerationStatus,
@@ -14,11 +10,6 @@ import {
CaptionDescriptionPreset,
CmfcAudioDuration,
CmfcAudioTrackType,
- CmfcDescriptiveVideoServiceFlag,
- CmfcIFrameOnlyManifest,
- CmfcKlvMetadata,
- CmfcManifestMetadataSignaling,
- CmfcScte35Esam,
ColorConversion3DLUTSetting,
EsamSettings,
ExtendedDataServices,
@@ -40,6 +31,78 @@ import {
Rectangle,
} from "./models_0";
+/**
+ * @public
+ * @enum
+ */
+export const CmfcDescriptiveVideoServiceFlag = {
+ DONT_FLAG: "DONT_FLAG",
+ FLAG: "FLAG",
+} as const;
+
+/**
+ * @public
+ */
+export type CmfcDescriptiveVideoServiceFlag =
+ (typeof CmfcDescriptiveVideoServiceFlag)[keyof typeof CmfcDescriptiveVideoServiceFlag];
+
+/**
+ * @public
+ * @enum
+ */
+export const CmfcIFrameOnlyManifest = {
+ EXCLUDE: "EXCLUDE",
+ INCLUDE: "INCLUDE",
+} as const;
+
+/**
+ * @public
+ */
+export type CmfcIFrameOnlyManifest = (typeof CmfcIFrameOnlyManifest)[keyof typeof CmfcIFrameOnlyManifest];
+
+/**
+ * @public
+ * @enum
+ */
+export const CmfcKlvMetadata = {
+ NONE: "NONE",
+ PASSTHROUGH: "PASSTHROUGH",
+} as const;
+
+/**
+ * @public
+ */
+export type CmfcKlvMetadata = (typeof CmfcKlvMetadata)[keyof typeof CmfcKlvMetadata];
+
+/**
+ * @public
+ * @enum
+ */
+export const CmfcManifestMetadataSignaling = {
+ DISABLED: "DISABLED",
+ ENABLED: "ENABLED",
+} as const;
+
+/**
+ * @public
+ */
+export type CmfcManifestMetadataSignaling =
+ (typeof CmfcManifestMetadataSignaling)[keyof typeof CmfcManifestMetadataSignaling];
+
+/**
+ * @public
+ * @enum
+ */
+export const CmfcScte35Esam = {
+ INSERT: "INSERT",
+ NONE: "NONE",
+} as const;
+
+/**
+ * @public
+ */
+export type CmfcScte35Esam = (typeof CmfcScte35Esam)[keyof typeof CmfcScte35Esam];
+
/**
* @public
* @enum
@@ -190,6 +253,7 @@ export const ContainerType = {
MP4: "MP4",
MPD: "MPD",
MXF: "MXF",
+ OGG: "OGG",
RAW: "RAW",
WEBM: "WEBM",
Y4M: "Y4M",
@@ -2539,6 +2603,20 @@ export const H264RepeatPps = {
*/
export type H264RepeatPps = (typeof H264RepeatPps)[keyof typeof H264RepeatPps];
+/**
+ * @public
+ * @enum
+ */
+export const H264SaliencyAwareEncoding = {
+ DISABLED: "DISABLED",
+ PREFERRED: "PREFERRED",
+} as const;
+
+/**
+ * @public
+ */
+export type H264SaliencyAwareEncoding = (typeof H264SaliencyAwareEncoding)[keyof typeof H264SaliencyAwareEncoding];
+
/**
* @public
* @enum
@@ -2800,7 +2878,7 @@ export interface H264Settings {
MaxBitrate?: number;
/**
- * Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs.
+ * Specify the minimum number of frames allowed between two IDR-frames in your output. This includes frames created at the start of a GOP or a scene change. Use Min I-Interval to improve video compression by varying GOP size when two IDR-frames would be created near each other. For example, if a regular cadence-driven IDR-frame would fall within 5 frames of a scene-change IDR-frame, and you set Min I-interval to 5, then the encoder would only write an IDR-frame for the scene-change. In this way, one GOP is shortened or extended. If a cadence-driven IDR-frame would be further than 5 frames from a scene-change IDR-frame, then the encoder leaves all IDR-frames in place. To use an automatically determined interval: We recommend that you keep this value blank. This allows for MediaConvert to use an optimal setting according to the characteristics of your input video, and results in better video compression. To manually specify an interval: Enter a value from 1 to 30. Use when your downstream systems have specific GOP size requirements. To disable GOP size variance: Enter 0. MediaConvert will only create IDR-frames at the start of your output's cadence-driven GOP. Use when your downstream systems require a regular GOP size.
* @public
*/
MinIInterval?: number;
@@ -2859,6 +2937,12 @@ export interface H264Settings {
*/
RepeatPps?: H264RepeatPps;
+ /**
+ * Specify whether to apply Saliency aware encoding to your output. Use to improve the perceptual video quality of your output by allocating more encoding bits to the prominent or noticeable parts of your content. To apply saliency aware encoding, when possible: We recommend that you choose Preferred. The effects of Saliency aware encoding are best seen in lower bitrate outputs. When you choose Preferred, note that Saliency aware encoding will only apply to outputs that are 720p or higher in resolution. To not apply saliency aware encoding, prioritizing encoding speed over perceptual video quality: Choose Disabled.
+ * @public
+ */
+ SaliencyAwareEncoding?: H264SaliencyAwareEncoding;
+
/**
* Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.
* @public
@@ -3486,7 +3570,7 @@ export interface H265Settings {
MaxBitrate?: number;
/**
- * Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs.
+ * Specify the minimum number of frames allowed between two IDR-frames in your output. This includes frames created at the start of a GOP or a scene change. Use Min I-Interval to improve video compression by varying GOP size when two IDR-frames would be created near each other. For example, if a regular cadence-driven IDR-frame would fall within 5 frames of a scene-change IDR-frame, and you set Min I-interval to 5, then the encoder would only write an IDR-frame for the scene-change. In this way, one GOP is shortened or extended. If a cadence-driven IDR-frame would be further than 5 frames from a scene-change IDR-frame, then the encoder leaves all IDR-frames in place. To use an automatically determined interval: We recommend that you keep this value blank. This allows for MediaConvert to use an optimal setting according to the characteristics of your input video, and results in better video compression. To manually specify an interval: Enter a value from 1 to 30. Use when your downstream systems have specific GOP size requirements. To disable GOP size variance: Enter 0. MediaConvert will only create IDR-frames at the start of your output's cadence-driven GOP. Use when your downstream systems require a regular GOP size.
* @public
*/
MinIInterval?: number;
@@ -4009,7 +4093,7 @@ export interface Mpeg2Settings {
MaxBitrate?: number;
/**
- * Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. When you specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs.
+ * Specify the minimum number of frames allowed between two IDR-frames in your output. This includes frames created at the start of a GOP or a scene change. Use Min I-Interval to improve video compression by varying GOP size when two IDR-frames would be created near each other. For example, if a regular cadence-driven IDR-frame would fall within 5 frames of a scene-change IDR-frame, and you set Min I-interval to 5, then the encoder would only write an IDR-frame for the scene-change. In this way, one GOP is shortened or extended. If a cadence-driven IDR-frame would be further than 5 frames from a scene-change IDR-frame, then the encoder leaves all IDR-frames in place. To manually specify an interval: Enter a value from 1 to 30. Use when your downstream systems have specific GOP size requirements. To disable GOP size variance: Enter 0. MediaConvert will only create IDR-frames at the start of your output's cadence-driven GOP. Use when your downstream systems require a regular GOP size.
* @public
*/
MinIInterval?: number;
@@ -6823,6 +6907,18 @@ export interface Job {
*/
Id?: string;
+ /**
+ * The Job engine version that you requested for your job. Valid versions are in a YYYY-MM-DD format.
+ * @public
+ */
+ JobEngineVersionRequested?: string;
+
+ /**
+ * The Job engine version that your job used. Job engine versions are in a YYYY-MM-DD format. When you request an expired version, the response for this property will be empty. Requests to create jobs with an expired version result in a regular job, as if no specific Job engine version was requested. When you request an invalid version, the response for this property will be empty. Requests to create jobs with an invalid version result in a 400 error message, and no job is created.
+ * @public
+ */
+ JobEngineVersionUsed?: string;
+
/**
* An estimate of how far your job has progressed. This estimate is shown as a percentage of the total time from when your job leaves its queue to when your output files appear in your output Amazon S3 bucket. AWS Elemental MediaConvert provides jobPercentComplete in CloudWatch STATUS_UPDATE events and in the response to GetJob and ListJobs requests. The jobPercentComplete estimate is reliable for the following input containers: Quicktime, Transport Stream, MP4, and MXF. For some jobs, the service can't provide information about job progress. In those cases, jobPercentComplete returns a null value.
* @public
@@ -6920,6 +7016,24 @@ export interface Job {
Warnings?: WarningGroup[];
}
+/**
+ * Use Job engine versions to run jobs for your production workflow on one version, while you test and validate the latest version. Job engine versions are in a YYYY-MM-DD format.
+ * @public
+ */
+export interface JobEngineVersion {
+ /**
+ * The date that this Job engine version expires. Requests to create jobs with an expired version result in a regular job, as if no specific Job engine version was requested.
+ * @public
+ */
+ ExpirationDate?: Date;
+
+ /**
+ * Use Job engine versions to run jobs for your production workflow on one version, while you test and validate the latest version. Job engine versions are in a YYYY-MM-DD format.
+ * @public
+ */
+ Version?: string;
+}
+
/**
* JobTemplateSettings contains all the transcode settings saved in the template that will be applied to jobs created from it.
* @public
@@ -7374,129 +7488,3 @@ export interface Queue {
*/
Type?: Type;
}
-
-/**
- * @public
- */
-export interface AssociateCertificateRequest {
- /**
- * The ARN of the ACM certificate that you want to associate with your MediaConvert resource.
- * @public
- */
- Arn: string | undefined;
-}
-
-/**
- * @public
- */
-export interface AssociateCertificateResponse {}
-
-/**
- * The service can't process your request because of a problem in the request. Please check your request form and syntax.
- * @public
- */
-export class BadRequestException extends __BaseException {
- readonly name: "BadRequestException" = "BadRequestException";
- readonly $fault: "client" = "client";
- Message?: string;
- /**
- * @internal
- */
- constructor(opts: __ExceptionOptionType) {
- super({
- name: "BadRequestException",
- $fault: "client",
- ...opts,
- });
- Object.setPrototypeOf(this, BadRequestException.prototype);
- this.Message = opts.Message;
- }
-}
-
-/**
- * The service couldn't complete your request because there is a conflict with the current state of the resource.
- * @public
- */
-export class ConflictException extends __BaseException {
- readonly name: "ConflictException" = "ConflictException";
- readonly $fault: "client" = "client";
- Message?: string;
- /**
- * @internal
- */
- constructor(opts: __ExceptionOptionType) {
- super({
- name: "ConflictException",
- $fault: "client",
- ...opts,
- });
- Object.setPrototypeOf(this, ConflictException.prototype);
- this.Message = opts.Message;
- }
-}
-
-/**
- * You don't have permissions for this action with the credentials you sent.
- * @public
- */
-export class ForbiddenException extends __BaseException {
- readonly name: "ForbiddenException" = "ForbiddenException";
- readonly $fault: "client" = "client";
- Message?: string;
- /**
- * @internal
- */
- constructor(opts: __ExceptionOptionType) {
- super({
- name: "ForbiddenException",
- $fault: "client",
- ...opts,
- });
- Object.setPrototypeOf(this, ForbiddenException.prototype);
- this.Message = opts.Message;
- }
-}
-
-/**
- * The service encountered an unexpected condition and can't fulfill your request.
- * @public
- */
-export class InternalServerErrorException extends __BaseException {
- readonly name: "InternalServerErrorException" = "InternalServerErrorException";
- readonly $fault: "server" = "server";
- Message?: string;
- /**
- * @internal
- */
- constructor(opts: __ExceptionOptionType) {
- super({
- name: "InternalServerErrorException",
- $fault: "server",
- ...opts,
- });
- Object.setPrototypeOf(this, InternalServerErrorException.prototype);
- this.Message = opts.Message;
- }
-}
-
-/**
- * The resource you requested doesn't exist.
- * @public
- */
-export class NotFoundException extends __BaseException {
- readonly name: "NotFoundException" = "NotFoundException";
- readonly $fault: "client" = "client";
- Message?: string;
- /**
- * @internal
- */
- constructor(opts: __ExceptionOptionType) {
- super({
- name: "NotFoundException",
- $fault: "client",
- ...opts,
- });
- Object.setPrototypeOf(this, NotFoundException.prototype);
- this.Message = opts.Message;
- }
-}
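
These exception classes are being relocated (they reappear in models_2.ts below) with their public shape unchanged, so callers continue to catch them by class through the package root. A minimal usage sketch, not part of this change; client configuration and the job ID are illustrative, and credentials/region are assumed to resolve from the environment:

// Sketch: handling NotFoundException from GetJob.
import { GetJobCommand, MediaConvertClient, NotFoundException } from "@aws-sdk/client-mediaconvert";

async function printJobStatus(jobId: string): Promise<void> {
  const client = new MediaConvertClient({});
  try {
    const { Job } = await client.send(new GetJobCommand({ Id: jobId }));
    // JobEngineVersionUsed is one of the new Job fields added in this change.
    console.log(Job?.Status, Job?.JobEngineVersionUsed);
  } catch (err) {
    if (err instanceof NotFoundException) {
      console.warn(`No job found with id ${jobId}: ${err.Message}`);
      return;
    }
    throw err;
  }
}
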
diff --git a/clients/client-mediaconvert/src/models/models_2.ts b/clients/client-mediaconvert/src/models/models_2.ts
index 2ffd478ab1e5f..b1101b9514d03 100644
--- a/clients/client-mediaconvert/src/models/models_2.ts
+++ b/clients/client-mediaconvert/src/models/models_2.ts
@@ -8,6 +8,7 @@ import { AccelerationSettings, BillingTagsSource, Endpoint, HopDestination } fro
import {
Commitment,
Job,
+ JobEngineVersion,
JobSettings,
JobStatus,
JobTemplate,
@@ -22,6 +23,132 @@ import {
StatusUpdateInterval,
} from "./models_1";
+/**
+ * @public
+ */
+export interface AssociateCertificateRequest {
+ /**
+ * The ARN of the ACM certificate that you want to associate with your MediaConvert resource.
+ * @public
+ */
+ Arn: string | undefined;
+}
+
+/**
+ * @public
+ */
+export interface AssociateCertificateResponse {}
+
+/**
+ * The service can't process your request because of a problem in the request. Please check your request form and syntax.
+ * @public
+ */
+export class BadRequestException extends __BaseException {
+ readonly name: "BadRequestException" = "BadRequestException";
+ readonly $fault: "client" = "client";
+ Message?: string;
+ /**
+ * @internal
+ */
+ constructor(opts: __ExceptionOptionType<BadRequestException, __BaseException>) {
+ super({
+ name: "BadRequestException",
+ $fault: "client",
+ ...opts,
+ });
+ Object.setPrototypeOf(this, BadRequestException.prototype);
+ this.Message = opts.Message;
+ }
+}
+
+/**
+ * The service couldn't complete your request because there is a conflict with the current state of the resource.
+ * @public
+ */
+export class ConflictException extends __BaseException {
+ readonly name: "ConflictException" = "ConflictException";
+ readonly $fault: "client" = "client";
+ Message?: string;
+ /**
+ * @internal
+ */
+ constructor(opts: __ExceptionOptionType<ConflictException, __BaseException>) {
+ super({
+ name: "ConflictException",
+ $fault: "client",
+ ...opts,
+ });
+ Object.setPrototypeOf(this, ConflictException.prototype);
+ this.Message = opts.Message;
+ }
+}
+
+/**
+ * You don't have permissions for this action with the credentials you sent.
+ * @public
+ */
+export class ForbiddenException extends __BaseException {
+ readonly name: "ForbiddenException" = "ForbiddenException";
+ readonly $fault: "client" = "client";
+ Message?: string;
+ /**
+ * @internal
+ */
+ constructor(opts: __ExceptionOptionType<ForbiddenException, __BaseException>) {
+ super({
+ name: "ForbiddenException",
+ $fault: "client",
+ ...opts,
+ });
+ Object.setPrototypeOf(this, ForbiddenException.prototype);
+ this.Message = opts.Message;
+ }
+}
+
+/**
+ * The service encountered an unexpected condition and can't fulfill your request.
+ * @public
+ */
+export class InternalServerErrorException extends __BaseException {
+ readonly name: "InternalServerErrorException" = "InternalServerErrorException";
+ readonly $fault: "server" = "server";
+ Message?: string;
+ /**
+ * @internal
+ */
+ constructor(opts: __ExceptionOptionType<InternalServerErrorException, __BaseException>) {
+ super({
+ name: "InternalServerErrorException",
+ $fault: "server",
+ ...opts,
+ });
+ Object.setPrototypeOf(this, InternalServerErrorException.prototype);
+ this.Message = opts.Message;
+ }
+}
+
+/**
+ * The resource you requested doesn't exist.
+ * @public
+ */
+export class NotFoundException extends __BaseException {
+ readonly name: "NotFoundException" = "NotFoundException";
+ readonly $fault: "client" = "client";
+ Message?: string;
+ /**
+ * @internal
+ */
+ constructor(opts: __ExceptionOptionType<NotFoundException, __BaseException>) {
+ super({
+ name: "NotFoundException",
+ $fault: "client",
+ ...opts,
+ });
+ Object.setPrototypeOf(this, NotFoundException.prototype);
+ this.Message = opts.Message;
+ }
+}
+
/**
* Too many requests have been sent in too short of a time. The service limits the rate at which it will accept requests.
* @public
@@ -88,6 +215,12 @@ export interface CreateJobRequest {
*/
HopDestinations?: HopDestination[];
+ /**
+ * Use Job engine versions to run jobs for your production workflow on one version, while you test and validate the latest version. To specify a Job engine version: Enter a date in a YYYY-MM-DD format. For a list of valid Job engine versions, submit a ListVersions request. To not specify a Job engine version: Leave blank.
+ * @public
+ */
+ JobEngineVersion?: string;
+
/**
* Optional. When you create a job, you can either specify a job template or specify the transcoding settings individually.
* @public
@@ -924,6 +1057,40 @@ export interface ListTagsForResourceResponse {
ResourceTags?: ResourceTags;
}
+/**
+ * @public
+ */
+export interface ListVersionsRequest {
+ /**
+ * Optional. Number of valid Job engine versions, up to twenty, that will be returned at one time.
+ * @public
+ */
+ MaxResults?: number;
+
+ /**
+ * Optional. Use this string, provided with the response to a previous request, to request the next batch of Job engine versions.
+ * @public
+ */
+ NextToken?: string;
+}
+
+/**
+ * @public
+ */
+export interface ListVersionsResponse {
+ /**
+ * Optional. Use this string, provided with the response to a previous request, to request the next batch of Job engine versions.
+ * @public
+ */
+ NextToken?: string;
+
+ /**
+ * Retrieve a JSON array of all available Job engine versions and the date they expire.
+ * @public
+ */
+ Versions?: JobEngineVersion[];
+}
+
/**
* @public
*/
diff --git a/clients/client-mediaconvert/src/pagination/ListVersionsPaginator.ts b/clients/client-mediaconvert/src/pagination/ListVersionsPaginator.ts
new file mode 100644
index 0000000000000..9b0e9cf659a7e
--- /dev/null
+++ b/clients/client-mediaconvert/src/pagination/ListVersionsPaginator.ts
@@ -0,0 +1,24 @@
+// smithy-typescript generated code
+import { createPaginator } from "@smithy/core";
+import { Paginator } from "@smithy/types";
+
+import {
+ ListVersionsCommand,
+ ListVersionsCommandInput,
+ ListVersionsCommandOutput,
+} from "../commands/ListVersionsCommand";
+import { MediaConvertClient } from "../MediaConvertClient";
+import { MediaConvertPaginationConfiguration } from "./Interfaces";
+
+/**
+ * @public
+ */
+export const paginateListVersions: (
+ config: MediaConvertPaginationConfiguration,
+ input: ListVersionsCommandInput,
+ ...rest: any[]
+) => Paginator<ListVersionsCommandOutput> = createPaginator<
+ MediaConvertPaginationConfiguration,
+ ListVersionsCommandInput,
+ ListVersionsCommandOutput
+>(MediaConvertClient, ListVersionsCommand, "NextToken", "NextToken", "MaxResults");
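
For reference, a sketch of how the new paginator might be driven from application code; the region value is illustrative and credentials are assumed to resolve from the environment:

// Sketch: enumerate all available Job engine versions with paginateListVersions.
import { MediaConvertClient, paginateListVersions } from "@aws-sdk/client-mediaconvert";

async function listJobEngineVersions(): Promise<string[]> {
  const client = new MediaConvertClient({ region: "us-west-2" }); // illustrative region
  const versions: string[] = [];
  for await (const page of paginateListVersions({ client }, { MaxResults: 20 })) {
    for (const v of page.Versions ?? []) {
      // Each entry carries the version date and its expiration date.
      console.log(v.Version, v.ExpirationDate?.toISOString());
      if (v.Version) versions.push(v.Version);
    }
  }
  return versions;
}
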
diff --git a/clients/client-mediaconvert/src/pagination/index.ts b/clients/client-mediaconvert/src/pagination/index.ts
index b11157123e31e..abb0ff61ed724 100644
--- a/clients/client-mediaconvert/src/pagination/index.ts
+++ b/clients/client-mediaconvert/src/pagination/index.ts
@@ -5,4 +5,5 @@ export * from "./ListJobTemplatesPaginator";
export * from "./ListJobsPaginator";
export * from "./ListPresetsPaginator";
export * from "./ListQueuesPaginator";
+export * from "./ListVersionsPaginator";
export * from "./SearchJobsPaginator";
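
Pinning a job to one of those versions is a matter of passing the new JobEngineVersion field to CreateJob. A hedged sketch; the role ARN, bucket name, and settings below are placeholders, and a real job needs at least one fully specified output group:

// Sketch: create a job that requests a specific Job engine version.
import { CreateJobCommand, MediaConvertClient } from "@aws-sdk/client-mediaconvert";

async function createPinnedJob(version: string): Promise<void> {
  const client = new MediaConvertClient({ region: "us-west-2" }); // illustrative region
  await client.send(
    new CreateJobCommand({
      Role: "arn:aws:iam::111122223333:role/MediaConvertRole", // placeholder role ARN
      JobEngineVersion: version, // YYYY-MM-DD value obtained from ListVersions
      Settings: {
        Inputs: [{ FileInput: "s3://amzn-s3-demo-bucket/input.mp4" }], // placeholder input
        OutputGroups: [], // placeholder; supply real output groups for an actual job
      },
    })
  );
}
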
diff --git a/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts b/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts
index cd93ac2b3f43e..4409519aa00de 100644
--- a/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts
+++ b/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts
@@ -58,6 +58,7 @@ import {
ListTagsForResourceCommandInput,
ListTagsForResourceCommandOutput,
} from "../commands/ListTagsForResourceCommand";
+import { ListVersionsCommandInput, ListVersionsCommandOutput } from "../commands/ListVersionsCommand";
import { PutPolicyCommandInput, PutPolicyCommandOutput } from "../commands/PutPolicyCommand";
import { SearchJobsCommandInput, SearchJobsCommandOutput } from "../commands/SearchJobsCommand";
import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand";
@@ -175,6 +176,8 @@ import {
VideoOverlay,
VideoOverlayInput,
VideoOverlayInputClipping,
+ VideoOverlayPosition,
+ VideoOverlayTransition,
VideoSelector,
VorbisSettings,
WavSettings,
@@ -186,12 +189,10 @@ import {
Av1Settings,
AvcIntraSettings,
AvcIntraUhdSettings,
- BadRequestException,
BandwidthReductionFilter,
ClipLimits,
CmfcSettings,
ColorCorrector,
- ConflictException,
ContainerSettings,
Deinterlacer,
DolbyVision,
@@ -200,7 +201,6 @@ import {
DvbSdtSettings,
DvbTdtSettings,
F4vSettings,
- ForbiddenException,
FrameCaptureSettings,
H264QvbrSettings,
H264Settings,
@@ -208,8 +208,8 @@ import {
H265Settings,
Hdr10Plus,
HlsSettings,
- InternalServerErrorException,
Job,
+ JobEngineVersion,
JobSettings,
JobTemplate,
JobTemplateSettings,
@@ -227,7 +227,6 @@ import {
NoiseReducerFilterSettings,
NoiseReducerSpatialFilterSettings,
NoiseReducerTemporalFilterSettings,
- NotFoundException,
Output,
OutputGroup,
OutputSettings,
@@ -256,7 +255,17 @@ import {
XavcHdProfileSettings,
XavcSettings,
} from "../models/models_1";
-import { Policy, ReservationPlanSettings, ResourceTags, TooManyRequestsException } from "../models/models_2";
+import {
+ BadRequestException,
+ ConflictException,
+ ForbiddenException,
+ InternalServerErrorException,
+ NotFoundException,
+ Policy,
+ ReservationPlanSettings,
+ ResourceTags,
+ TooManyRequestsException,
+} from "../models/models_2";
/**
* serializeAws_restJson1AssociateCertificateCommand
@@ -315,6 +324,7 @@ export const se_CreateJobCommand = async (
billingTagsSource: [, , `BillingTagsSource`],
clientRequestToken: [true, (_) => _ ?? generateIdempotencyToken(), `ClientRequestToken`],
hopDestinations: [, (_) => se___listOfHopDestination(_, context), `HopDestinations`],
+ jobEngineVersion: [, , `JobEngineVersion`],
jobTemplate: [, , `JobTemplate`],
priority: [, , `Priority`],
queue: [, , `Queue`],
@@ -696,6 +706,25 @@ export const se_ListTagsForResourceCommand = async (
return b.build();
};
+/**
+ * serializeAws_restJson1ListVersionsCommand
+ */
+export const se_ListVersionsCommand = async (
+ input: ListVersionsCommandInput,
+ context: __SerdeContext
+): Promise<__HttpRequest> => {
+ const b = rb(input, context);
+ const headers: any = {};
+ b.bp("/2017-08-29/versions");
+ const query: any = map({
+ [_mR]: [() => input.MaxResults !== void 0, () => input[_MR]!.toString()],
+ [_nT]: [, input[_NT]!],
+ });
+ let body: any;
+ b.m("GET").h(headers).q(query).b(body);
+ return b.build();
+};
+
/**
* serializeAws_restJson1PutPolicyCommand
*/
@@ -1306,6 +1335,28 @@ export const de_ListTagsForResourceCommand = async (
return contents;
};
+/**
+ * deserializeAws_restJson1ListVersionsCommand
+ */
+export const de_ListVersionsCommand = async (
+ output: __HttpResponse,
+ context: __SerdeContext
+): Promise<ListVersionsCommandOutput> => {
+ if (output.statusCode !== 200 && output.statusCode >= 300) {
+ return de_CommandError(output, context);
+ }
+ const contents: any = map({
+ $metadata: deserializeMetadata(output),
+ });
+ const data: Record<string, any> = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+ const doc = take(data, {
+ NextToken: [, __expectString, `nextToken`],
+ Versions: [, (_) => de___listOfJobEngineVersion(_, context), `versions`],
+ });
+ Object.assign(contents, doc);
+ return contents;
+};
+
/**
* deserializeAws_restJson1PutPolicyCommand
*/
@@ -1879,6 +1930,17 @@ const se___listOfVideoOverlayInputClipping = (input: VideoOverlayInputClipping[]
});
};
+/**
+ * serializeAws_restJson1__listOfVideoOverlayTransition
+ */
+const se___listOfVideoOverlayTransition = (input: VideoOverlayTransition[], context: __SerdeContext): any => {
+ return input
+ .filter((e: any) => e != null)
+ .map((entry) => {
+ return se_VideoOverlayTransition(entry, context);
+ });
+};
+
// se___mapOf__string omitted.
/**
@@ -2838,6 +2900,7 @@ const se_FileGroupSettings = (input: FileGroupSettings, context: __SerdeContext)
*/
const se_FileSourceSettings = (input: FileSourceSettings, context: __SerdeContext): any => {
return take(input, {
+ byteRateLimit: [, , `ByteRateLimit`],
convert608To708: [, , `Convert608To708`],
convertPaintToPop: [, , `ConvertPaintToPop`],
framerate: [, (_) => se_CaptionSourceFramerate(_, context), `Framerate`],
@@ -2929,6 +2992,7 @@ const se_H264Settings = (input: H264Settings, context: __SerdeContext): any => {
qvbrSettings: [, (_) => se_H264QvbrSettings(_, context), `QvbrSettings`],
rateControlMode: [, , `RateControlMode`],
repeatPps: [, , `RepeatPps`],
+ saliencyAwareEncoding: [, , `SaliencyAwareEncoding`],
scanTypeConversionMode: [, , `ScanTypeConversionMode`],
sceneChangeDetect: [, , `SceneChangeDetect`],
slices: [, , `Slices`],
@@ -4190,8 +4254,11 @@ const se_VideoDescription = (input: VideoDescription, context: __SerdeContext):
const se_VideoOverlay = (input: VideoOverlay, context: __SerdeContext): any => {
return take(input, {
endTimecode: [, , `EndTimecode`],
+ initialPosition: [, (_) => se_VideoOverlayPosition(_, context), `InitialPosition`],
input: [, (_) => se_VideoOverlayInput(_, context), `Input`],
+ playback: [, , `Playback`],
startTimecode: [, , `StartTimecode`],
+ transitions: [, (_) => se___listOfVideoOverlayTransition(_, context), `Transitions`],
});
};
@@ -4217,6 +4284,30 @@ const se_VideoOverlayInputClipping = (input: VideoOverlayInputClipping, context:
});
};
+/**
+ * serializeAws_restJson1VideoOverlayPosition
+ */
+const se_VideoOverlayPosition = (input: VideoOverlayPosition, context: __SerdeContext): any => {
+ return take(input, {
+ height: [, , `Height`],
+ unit: [, , `Unit`],
+ width: [, , `Width`],
+ xPosition: [, , `XPosition`],
+ yPosition: [, , `YPosition`],
+ });
+};
+
+/**
+ * serializeAws_restJson1VideoOverlayTransition
+ */
+const se_VideoOverlayTransition = (input: VideoOverlayTransition, context: __SerdeContext): any => {
+ return take(input, {
+ endPosition: [, (_) => se_VideoOverlayPosition(_, context), `EndPosition`],
+ endTimecode: [, , `EndTimecode`],
+ startTimecode: [, , `StartTimecode`],
+ });
+};
+
/**
* serializeAws_restJson1VideoPreprocessor
*/
@@ -4697,6 +4788,18 @@ const de___listOfJob = (output: any, context: __SerdeContext): Job[] => {
return retVal;
};
+/**
+ * deserializeAws_restJson1__listOfJobEngineVersion
+ */
+const de___listOfJobEngineVersion = (output: any, context: __SerdeContext): JobEngineVersion[] => {
+ const retVal = (output || [])
+ .filter((e: any) => e != null)
+ .map((entry: any) => {
+ return de_JobEngineVersion(entry, context);
+ });
+ return retVal;
+};
+
/**
* deserializeAws_restJson1__listOfJobTemplate
*/
@@ -4843,6 +4946,18 @@ const de___listOfVideoOverlayInputClipping = (output: any, context: __SerdeConte
return retVal;
};
+/**
+ * deserializeAws_restJson1__listOfVideoOverlayTransition
+ */
+const de___listOfVideoOverlayTransition = (output: any, context: __SerdeContext): VideoOverlayTransition[] => {
+ const retVal = (output || [])
+ .filter((e: any) => e != null)
+ .map((entry: any) => {
+ return de_VideoOverlayTransition(entry, context);
+ });
+ return retVal;
+};
+
/**
* deserializeAws_restJson1__listOfWarningGroup
*/
@@ -5838,6 +5953,7 @@ const de_FileGroupSettings = (output: any, context: __SerdeContext): FileGroupSe
*/
const de_FileSourceSettings = (output: any, context: __SerdeContext): FileSourceSettings => {
return take(output, {
+ ByteRateLimit: [, __expectString, `byteRateLimit`],
Convert608To708: [, __expectString, `convert608To708`],
ConvertPaintToPop: [, __expectString, `convertPaintToPop`],
Framerate: [, (_: any) => de_CaptionSourceFramerate(_, context), `framerate`],
@@ -5929,6 +6045,7 @@ const de_H264Settings = (output: any, context: __SerdeContext): H264Settings =>
QvbrSettings: [, (_: any) => de_H264QvbrSettings(_, context), `qvbrSettings`],
RateControlMode: [, __expectString, `rateControlMode`],
RepeatPps: [, __expectString, `repeatPps`],
+ SaliencyAwareEncoding: [, __expectString, `saliencyAwareEncoding`],
ScanTypeConversionMode: [, __expectString, `scanTypeConversionMode`],
SceneChangeDetect: [, __expectString, `sceneChangeDetect`],
Slices: [, __expectInt32, `slices`],
@@ -6340,6 +6457,8 @@ const de_Job = (output: any, context: __SerdeContext): Job => {
ErrorMessage: [, __expectString, `errorMessage`],
HopDestinations: [, (_: any) => de___listOfHopDestination(_, context), `hopDestinations`],
Id: [, __expectString, `id`],
+ JobEngineVersionRequested: [, __expectString, `jobEngineVersionRequested`],
+ JobEngineVersionUsed: [, __expectString, `jobEngineVersionUsed`],
JobPercentComplete: [, __expectInt32, `jobPercentComplete`],
JobTemplate: [, __expectString, `jobTemplate`],
Messages: [, (_: any) => de_JobMessages(_, context), `messages`],
@@ -6359,6 +6478,16 @@ const de_Job = (output: any, context: __SerdeContext): Job => {
}) as any;
};
+/**
+ * deserializeAws_restJson1JobEngineVersion
+ */
+const de_JobEngineVersion = (output: any, context: __SerdeContext): JobEngineVersion => {
+ return take(output, {
+ ExpirationDate: [, (_: any) => __expectNonNull(__parseEpochTimestamp(__expectNumber(_))), `expirationDate`],
+ Version: [, __expectString, `version`],
+ }) as any;
+};
+
/**
* deserializeAws_restJson1JobMessages
*/
@@ -7375,8 +7504,11 @@ const de_VideoDetail = (output: any, context: __SerdeContext): VideoDetail => {
const de_VideoOverlay = (output: any, context: __SerdeContext): VideoOverlay => {
return take(output, {
EndTimecode: [, __expectString, `endTimecode`],
+ InitialPosition: [, (_: any) => de_VideoOverlayPosition(_, context), `initialPosition`],
Input: [, (_: any) => de_VideoOverlayInput(_, context), `input`],
+ Playback: [, __expectString, `playback`],
StartTimecode: [, __expectString, `startTimecode`],
+ Transitions: [, (_: any) => de___listOfVideoOverlayTransition(_, context), `transitions`],
}) as any;
};
@@ -7402,6 +7534,30 @@ const de_VideoOverlayInputClipping = (output: any, context: __SerdeContext): Vid
}) as any;
};
+/**
+ * deserializeAws_restJson1VideoOverlayPosition
+ */
+const de_VideoOverlayPosition = (output: any, context: __SerdeContext): VideoOverlayPosition => {
+ return take(output, {
+ Height: [, __expectInt32, `height`],
+ Unit: [, __expectString, `unit`],
+ Width: [, __expectInt32, `width`],
+ XPosition: [, __expectInt32, `xPosition`],
+ YPosition: [, __expectInt32, `yPosition`],
+ }) as any;
+};
+
+/**
+ * deserializeAws_restJson1VideoOverlayTransition
+ */
+const de_VideoOverlayTransition = (output: any, context: __SerdeContext): VideoOverlayTransition => {
+ return take(output, {
+ EndPosition: [, (_: any) => de_VideoOverlayPosition(_, context), `endPosition`],
+ EndTimecode: [, __expectString, `endTimecode`],
+ StartTimecode: [, __expectString, `startTimecode`],
+ }) as any;
+};
+
/**
* deserializeAws_restJson1VideoPreprocessor
*/
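
The serializers above round-trip the new video overlay and H.264 fields; the JobSettings fragment below is a sketch of what that surface looks like from the caller's side. File URLs, timecodes, positions, and the single-output group are illustrative only:

// Sketch: JobSettings exercising InitialPosition, Playback, Transitions, and SaliencyAwareEncoding.
import type { JobSettings } from "@aws-sdk/client-mediaconvert";

const exampleSettings: JobSettings = {
  Inputs: [
    {
      FileInput: "s3://amzn-s3-demo-bucket/base-input.mp4", // placeholder base input
      VideoOverlays: [
        {
          Input: { FileInput: "s3://amzn-s3-demo-bucket/overlay.mov" }, // placeholder overlay
          Playback: "REPEAT", // loop the overlay for the duration of the base input video
          InitialPosition: { Unit: "PIXELS", XPosition: 0, YPosition: 0, Width: 640 },
          Transitions: [
            {
              StartTimecode: "00:00:10:00",
              EndTimecode: "00:00:20:00",
              // Drift the overlay toward the center over this ten-second window.
              EndPosition: { Unit: "PERCENTAGE", XPosition: 25, YPosition: 25 },
            },
          ],
        },
      ],
    },
  ],
  OutputGroups: [
    {
      OutputGroupSettings: {
        Type: "FILE_GROUP_SETTINGS",
        FileGroupSettings: { Destination: "s3://amzn-s3-demo-bucket/out/" }, // placeholder
      },
      Outputs: [
        {
          VideoDescription: {
            CodecSettings: {
              Codec: "H_264",
              // Prefer saliency aware encoding where the output resolution allows it.
              H264Settings: { SaliencyAwareEncoding: "PREFERRED" },
            },
          },
        },
      ],
    },
  ],
};
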
diff --git a/codegen/sdk-codegen/aws-models/mediaconvert.json b/codegen/sdk-codegen/aws-models/mediaconvert.json
index ed80bd118cb85..4c654a50f3d23 100644
--- a/codegen/sdk-codegen/aws-models/mediaconvert.json
+++ b/codegen/sdk-codegen/aws-models/mediaconvert.json
@@ -72,7 +72,7 @@
}
},
"traits": {
- "smithy.api#documentation": "AAC Profile."
+ "smithy.api#documentation": "Specify the AAC profile. For the widest player compatibility and where higher bitrates are acceptable: Keep the default profile, LC (AAC-LC) For improved audio performance at lower bitrates: Choose HEV1 or HEV2. HEV1 (AAC-HE v1) adds spectral band replication to improve speech audio at low bitrates. HEV2 (AAC-HE v2) adds parametric stereo, which optimizes for encoding stereo audio at very low bitrates."
}
},
"com.amazonaws.mediaconvert#AacCodingMode": {
@@ -130,7 +130,7 @@
}
},
"traits": {
- "smithy.api#documentation": "Rate Control Mode."
+ "smithy.api#documentation": "Specify the AAC rate control mode. For a constant bitrate: Choose CBR. Your AAC output bitrate will be equal to the value that you choose for Bitrate. For a variable bitrate: Choose VBR. Your AAC output bitrate will vary according to your audio content and the value that you choose for Bitrate quality."
}
},
"com.amazonaws.mediaconvert#AacRawFormat": {
@@ -173,7 +173,7 @@
"CodecProfile": {
"target": "com.amazonaws.mediaconvert#AacCodecProfile",
"traits": {
- "smithy.api#documentation": "AAC Profile.",
+ "smithy.api#documentation": "Specify the AAC profile. For the widest player compatibility and where higher bitrates are acceptable: Keep the default profile, LC (AAC-LC) For improved audio performance at lower bitrates: Choose HEV1 or HEV2. HEV1 (AAC-HE v1) adds spectral band replication to improve speech audio at low bitrates. HEV2 (AAC-HE v2) adds parametric stereo, which optimizes for encoding stereo audio at very low bitrates.",
"smithy.api#jsonName": "codecProfile"
}
},
@@ -187,7 +187,7 @@
"RateControlMode": {
"target": "com.amazonaws.mediaconvert#AacRateControlMode",
"traits": {
- "smithy.api#documentation": "Rate Control Mode.",
+ "smithy.api#documentation": "Specify the AAC rate control mode. For a constant bitrate: Choose CBR. Your AAC output bitrate will be equal to the value that you choose for Bitrate. For a variable bitrate: Choose VBR. Your AAC output bitrate will vary according to your audio content and the value that you choose for Bitrate quality.",
"smithy.api#jsonName": "rateControlMode"
}
},
@@ -201,7 +201,7 @@
"SampleRate": {
"target": "com.amazonaws.mediaconvert#__integerMin8000Max96000",
"traits": {
- "smithy.api#documentation": "Specify the Sample rate in Hz. Valid sample rates depend on the Profile and Coding mode that you select. The following list shows valid sample rates for each Profile and Coding mode. * LC Profile, Coding mode 1.0, 2.0, and Receiver Mix: 8000, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 88200, 96000. * LC Profile, Coding mode 5.1: 32000, 44100, 48000, 96000. * HEV1 Profile, Coding mode 1.0 and Receiver Mix: 22050, 24000, 32000, 44100, 48000. * HEV1 Profile, Coding mode 2.0 and 5.1: 32000, 44100, 48000, 96000. * HEV2 Profile, Coding mode 2.0: 22050, 24000, 32000, 44100, 48000.",
+ "smithy.api#documentation": "Specify the AAC sample rate in samples per second (Hz). Valid sample rates depend on the AAC profile and Coding mode that you select. For a list of supported sample rates, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/aac-support.html",
"smithy.api#jsonName": "sampleRate"
}
},
@@ -215,7 +215,7 @@
"VbrQuality": {
"target": "com.amazonaws.mediaconvert#AacVbrQuality",
"traits": {
- "smithy.api#documentation": "VBR Quality Level - Only used if rate_control_mode is VBR.",
+ "smithy.api#documentation": "Specify the quality of your variable bitrate (VBR) AAC audio. For a list of approximate VBR bitrates, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/aac-support.html#aac_vbr",
"smithy.api#jsonName": "vbrQuality"
}
}
@@ -273,7 +273,7 @@
}
},
"traits": {
- "smithy.api#documentation": "VBR Quality Level - Only used if rate_control_mode is VBR."
+ "smithy.api#documentation": "Specify the quality of your variable bitrate (VBR) AAC audio. For a list of approximate VBR bitrates, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/aac-support.html#aac_vbr"
}
},
"com.amazonaws.mediaconvert#Ac3BitstreamMode": {
@@ -1508,10 +1508,16 @@
"traits": {
"smithy.api#enumValue": "FRAME"
}
+ },
+ "FORCE": {
+ "target": "smithy.api#Unit",
+ "traits": {
+ "smithy.api#enumValue": "FORCE"
+ }
}
},
"traits": {
- "smithy.api#documentation": "Apply audio timing corrections to help synchronize audio and video in your output. To apply timing corrections, your input must meet the following requirements: * Container: MP4, or MOV, with an accurate time-to-sample (STTS) table. * Audio track: AAC. Choose from the following audio timing correction settings: * Disabled (Default): Apply no correction. * Auto: Recommended for most inputs. MediaConvert analyzes the audio timing in your input and determines which correction setting to use, if needed. * Track: Adjust the duration of each audio frame by a constant amount to align the audio track length with STTS duration. Track-level correction does not affect pitch, and is recommended for tonal audio content such as music. * Frame: Adjust the duration of each audio frame by a variable amount to align audio frames with STTS timestamps. No corrections are made to already-aligned frames. Frame-level correction may affect the pitch of corrected frames, and is recommended for atonal audio content such as speech or percussion."
+ "smithy.api#documentation": "Apply audio timing corrections to help synchronize audio and video in your output. To apply timing corrections, your input must meet the following requirements: * Container: MP4, or MOV, with an accurate time-to-sample (STTS) table. * Audio track: AAC. Choose from the following audio timing correction settings: * Disabled (Default): Apply no correction. * Auto: Recommended for most inputs. MediaConvert analyzes the audio timing in your input and determines which correction setting to use, if needed. * Track: Adjust the duration of each audio frame by a constant amount to align the audio track length with STTS duration. Track-level correction does not affect pitch, and is recommended for tonal audio content such as music. * Frame: Adjust the duration of each audio frame by a variable amount to align audio frames with STTS timestamps. No corrections are made to already-aligned frames. Frame-level correction may affect the pitch of corrected frames, and is recommended for atonal audio content such as speech or percussion. * Force: Apply audio duration correction, either Track or Frame depending on your input, regardless of the accuracy of your input's STTS table. Your output audio and video may not be aligned or it may contain audio artifacts."
}
},
"com.amazonaws.mediaconvert#AudioLanguageCodeControl": {
@@ -1689,7 +1695,7 @@
"AudioDurationCorrection": {
"target": "com.amazonaws.mediaconvert#AudioDurationCorrection",
"traits": {
- "smithy.api#documentation": "Apply audio timing corrections to help synchronize audio and video in your output. To apply timing corrections, your input must meet the following requirements: * Container: MP4, or MOV, with an accurate time-to-sample (STTS) table. * Audio track: AAC. Choose from the following audio timing correction settings: * Disabled (Default): Apply no correction. * Auto: Recommended for most inputs. MediaConvert analyzes the audio timing in your input and determines which correction setting to use, if needed. * Track: Adjust the duration of each audio frame by a constant amount to align the audio track length with STTS duration. Track-level correction does not affect pitch, and is recommended for tonal audio content such as music. * Frame: Adjust the duration of each audio frame by a variable amount to align audio frames with STTS timestamps. No corrections are made to already-aligned frames. Frame-level correction may affect the pitch of corrected frames, and is recommended for atonal audio content such as speech or percussion.",
+ "smithy.api#documentation": "Apply audio timing corrections to help synchronize audio and video in your output. To apply timing corrections, your input must meet the following requirements: * Container: MP4, or MOV, with an accurate time-to-sample (STTS) table. * Audio track: AAC. Choose from the following audio timing correction settings: * Disabled (Default): Apply no correction. * Auto: Recommended for most inputs. MediaConvert analyzes the audio timing in your input and determines which correction setting to use, if needed. * Track: Adjust the duration of each audio frame by a constant amount to align the audio track length with STTS duration. Track-level correction does not affect pitch, and is recommended for tonal audio content such as music. * Frame: Adjust the duration of each audio frame by a variable amount to align audio frames with STTS timestamps. No corrections are made to already-aligned frames. Frame-level correction may affect the pitch of corrected frames, and is recommended for atonal audio content such as speech or percussion. * Force: Apply audio duration correction, either Track or Frame depending on your input, regardless of the accuracy of your input's STTS table. Your output audio and video may not be aligned or it may contain audio artifacts.",
"smithy.api#jsonName": "audioDurationCorrection"
}
},
@@ -3481,6 +3487,26 @@
"smithy.api#documentation": "Use captions selectors to specify the captions data from your input that you use in your outputs. You can use up to 100 captions selectors per input."
}
},
+ "com.amazonaws.mediaconvert#CaptionSourceByteRateLimit": {
+ "type": "enum",
+ "members": {
+ "ENABLED": {
+ "target": "smithy.api#Unit",
+ "traits": {
+ "smithy.api#enumValue": "ENABLED"
+ }
+ },
+ "DISABLED": {
+ "target": "smithy.api#Unit",
+ "traits": {
+ "smithy.api#enumValue": "DISABLED"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#documentation": "Choose whether to limit the byte rate at which your SCC input captions are inserted into your output. To not limit the caption rate: We recommend that you keep the default value, Disabled. MediaConvert inserts captions in your output according to the byte rates listed in the EIA-608 specification, typically 2 or 3 caption bytes per frame depending on your output frame rate. To limit your output caption rate: Choose Enabled. Choose this option if your downstream systems require a maximum of 2 caption bytes per frame. Note that this setting has no effect when your output frame rate is 30 or 60."
+ }
+ },
"com.amazonaws.mediaconvert#CaptionSourceConvertPaintOnToPopOn": {
"type": "enum",
"members": {
@@ -5224,6 +5250,12 @@
"smithy.api#enumValue": "MXF"
}
},
+ "OGG": {
+ "target": "smithy.api#Unit",
+ "traits": {
+ "smithy.api#enumValue": "OGG"
+ }
+ },
"WEBM": {
"target": "smithy.api#Unit",
"traits": {
@@ -5336,6 +5368,13 @@
"smithy.api#jsonName": "hopDestinations"
}
},
+ "JobEngineVersion": {
+ "target": "com.amazonaws.mediaconvert#__string",
+ "traits": {
+ "smithy.api#documentation": "Use Job engine versions to run jobs for your production workflow on one version, while you test and validate the latest version. To specify a Job engine version: Enter a date in a YYYY-MM-DD format. For a list of valid Job engine versions, submit a ListVersions request. To not specify a Job engine version: Leave blank.",
+ "smithy.api#jsonName": "jobEngineVersion"
+ }
+ },
"JobTemplate": {
"target": "com.amazonaws.mediaconvert#__string",
"traits": {
@@ -8917,6 +8956,13 @@
"com.amazonaws.mediaconvert#FileSourceSettings": {
"type": "structure",
"members": {
+ "ByteRateLimit": {
+ "target": "com.amazonaws.mediaconvert#CaptionSourceByteRateLimit",
+ "traits": {
+ "smithy.api#documentation": "Choose whether to limit the byte rate at which your SCC input captions are inserted into your output. To not limit the caption rate: We recommend that you keep the default value, Disabled. MediaConvert inserts captions in your output according to the byte rates listed in the EIA-608 specification, typically 2 or 3 caption bytes per frame depending on your output frame rate. To limit your output caption rate: Choose Enabled. Choose this option if your downstream systems require a maximum of 2 caption bytes per frame. Note that this setting has no effect when your output frame rate is 30 or 60.",
+ "smithy.api#jsonName": "byteRateLimit"
+ }
+ },
"Convert608To708": {
"target": "com.amazonaws.mediaconvert#FileSourceConvert608To708",
"traits": {
@@ -10005,6 +10051,26 @@
"smithy.api#documentation": "Places a PPS header on each encoded picture, even if repeated."
}
},
+ "com.amazonaws.mediaconvert#H264SaliencyAwareEncoding": {
+ "type": "enum",
+ "members": {
+ "DISABLED": {
+ "target": "smithy.api#Unit",
+ "traits": {
+ "smithy.api#enumValue": "DISABLED"
+ }
+ },
+ "PREFERRED": {
+ "target": "smithy.api#Unit",
+ "traits": {
+ "smithy.api#enumValue": "PREFERRED"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#documentation": "Specify whether to apply Saliency aware encoding to your output. Use to improve the perceptual video quality of your output by allocating more encoding bits to the prominent or noticeable parts of your content. To apply saliency aware encoding, when possible: We recommend that you choose Preferred. The effects of Saliency aware encoding are best seen in lower bitrate outputs. When you choose Preferred, note that Saliency aware encoding will only apply to outputs that are 720p or higher in resolution. To not apply saliency aware encoding, prioritizing encoding speed over perceptual video quality: Choose Disabled."
+ }
+ },
"com.amazonaws.mediaconvert#H264ScanTypeConversionMode": {
"type": "enum",
"members": {
@@ -10218,7 +10284,7 @@
"MinIInterval": {
"target": "com.amazonaws.mediaconvert#__integerMin0Max30",
"traits": {
- "smithy.api#documentation": "Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs.",
+ "smithy.api#documentation": "Specify the minimum number of frames allowed between two IDR-frames in your output. This includes frames created at the start of a GOP or a scene change. Use Min I-Interval to improve video compression by varying GOP size when two IDR-frames would be created near each other. For example, if a regular cadence-driven IDR-frame would fall within 5 frames of a scene-change IDR-frame, and you set Min I-interval to 5, then the encoder would only write an IDR-frame for the scene-change. In this way, one GOP is shortened or extended. If a cadence-driven IDR-frame would be further than 5 frames from a scene-change IDR-frame, then the encoder leaves all IDR-frames in place. To use an automatically determined interval: We recommend that you keep this value blank. This allows for MediaConvert to use an optimal setting according to the characteristics of your input video, and results in better video compression. To manually specify an interval: Enter a value from 1 to 30. Use when your downstream systems have specific GOP size requirements. To disable GOP size variance: Enter 0. MediaConvert will only create IDR-frames at the start of your output's cadence-driven GOP. Use when your downstream systems require a regular GOP size.",
"smithy.api#jsonName": "minIInterval"
}
},
@@ -10285,6 +10351,13 @@
"smithy.api#jsonName": "repeatPps"
}
},
+ "SaliencyAwareEncoding": {
+ "target": "com.amazonaws.mediaconvert#H264SaliencyAwareEncoding",
+ "traits": {
+ "smithy.api#documentation": "Specify whether to apply Saliency aware encoding to your output. Use to improve the perceptual video quality of your output by allocating more encoding bits to the prominent or noticeable parts of your content. To apply saliency aware encoding, when possible: We recommend that you choose Preferred. The effects of Saliency aware encoding are best seen in lower bitrate outputs. When you choose Preferred, note that Saliency aware encoding will only apply to outputs that are 720p or higher in resolution. To not apply saliency aware encoding, prioritizing encoding speed over perceptual video quality: Choose Disabled.",
+ "smithy.api#jsonName": "saliencyAwareEncoding"
+ }
+ },
"ScanTypeConversionMode": {
"target": "com.amazonaws.mediaconvert#H264ScanTypeConversionMode",
"traits": {
@@ -11227,7 +11300,7 @@
"MinIInterval": {
"target": "com.amazonaws.mediaconvert#__integerMin0Max30",
"traits": {
- "smithy.api#documentation": "Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs.",
+ "smithy.api#documentation": "Specify the minimum number of frames allowed between two IDR-frames in your output. This includes frames created at the start of a GOP or a scene change. Use Min I-Interval to improve video compression by varying GOP size when two IDR-frames would be created near each other. For example, if a regular cadence-driven IDR-frame would fall within 5 frames of a scene-change IDR-frame, and you set Min I-interval to 5, then the encoder would only write an IDR-frame for the scene-change. In this way, one GOP is shortened or extended. If a cadence-driven IDR-frame would be further than 5 frames from a scene-change IDR-frame, then the encoder leaves all IDR-frames in place. To use an automatically determined interval: We recommend that you keep this value blank. This allows for MediaConvert to use an optimal setting according to the characteristics of your input video, and results in better video compression. To manually specify an interval: Enter a value from 1 to 30. Use when your downstream systems have specific GOP size requirements. To disable GOP size variance: Enter 0. MediaConvert will only create IDR-frames at the start of your output's cadence-driven GOP. Use when your downstream systems require a regular GOP size.",
"smithy.api#jsonName": "minIInterval"
}
},
@@ -13706,6 +13779,20 @@
"smithy.api#jsonName": "id"
}
},
+ "JobEngineVersionRequested": {
+ "target": "com.amazonaws.mediaconvert#__string",
+ "traits": {
+ "smithy.api#documentation": "The Job engine version that you requested for your job. Valid versions are in a YYYY-MM-DD format.",
+ "smithy.api#jsonName": "jobEngineVersionRequested"
+ }
+ },
+ "JobEngineVersionUsed": {
+ "target": "com.amazonaws.mediaconvert#__string",
+ "traits": {
+ "smithy.api#documentation": "The Job engine version that your job used. Job engine versions are in a YYYY-MM-DD format. When you request an expired version, the response for this property will be empty. Requests to create jobs with an expired version result in a regular job, as if no specific Job engine version was requested. When you request an invalid version, the response for this property will be empty. Requests to create jobs with an invalid version result in a 400 error message, and no job is created.",
+ "smithy.api#jsonName": "jobEngineVersionUsed"
+ }
+ },
"JobPercentComplete": {
"target": "com.amazonaws.mediaconvert#__integer",
"traits": {
@@ -13827,6 +13914,28 @@
"smithy.api#documentation": "Each job converts an input file into an output file or files. For more information, see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html"
}
},
+ "com.amazonaws.mediaconvert#JobEngineVersion": {
+ "type": "structure",
+ "members": {
+ "ExpirationDate": {
+ "target": "com.amazonaws.mediaconvert#__timestampUnix",
+ "traits": {
+ "smithy.api#documentation": "The date that this Job engine version expires. Requests to create jobs with an expired version result in a regular job, as if no specific Job engine version was requested.",
+ "smithy.api#jsonName": "expirationDate"
+ }
+ },
+ "Version": {
+ "target": "com.amazonaws.mediaconvert#__string",
+ "traits": {
+ "smithy.api#documentation": "Use Job engine versions to run jobs for your production workflow on one version, while you test and validate the latest version. Job engine versions are in a YYYY-MM-DD format.",
+ "smithy.api#jsonName": "version"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#documentation": "Use Job engine versions to run jobs for your production workflow on one version, while you test and validate the latest version. Job engine versions are in a YYYY-MM-DD format."
+ }
+ },
"com.amazonaws.mediaconvert#JobMessages": {
"type": "structure",
"members": {
@@ -16006,6 +16115,93 @@
"smithy.api#output": {}
}
},
+ "com.amazonaws.mediaconvert#ListVersions": {
+ "type": "operation",
+ "input": {
+ "target": "com.amazonaws.mediaconvert#ListVersionsRequest"
+ },
+ "output": {
+ "target": "com.amazonaws.mediaconvert#ListVersionsResponse"
+ },
+ "errors": [
+ {
+ "target": "com.amazonaws.mediaconvert#BadRequestException"
+ },
+ {
+ "target": "com.amazonaws.mediaconvert#ConflictException"
+ },
+ {
+ "target": "com.amazonaws.mediaconvert#ForbiddenException"
+ },
+ {
+ "target": "com.amazonaws.mediaconvert#InternalServerErrorException"
+ },
+ {
+ "target": "com.amazonaws.mediaconvert#NotFoundException"
+ },
+ {
+ "target": "com.amazonaws.mediaconvert#TooManyRequestsException"
+ }
+ ],
+ "traits": {
+ "smithy.api#documentation": "Retrieve a JSON array of all available Job engine versions and the date they expire.",
+ "smithy.api#http": {
+ "method": "GET",
+ "uri": "/2017-08-29/versions",
+ "code": 200
+ },
+ "smithy.api#paginated": {
+ "inputToken": "NextToken",
+ "outputToken": "NextToken",
+ "items": "Versions",
+ "pageSize": "MaxResults"
+ }
+ }
+ },
+ "com.amazonaws.mediaconvert#ListVersionsRequest": {
+ "type": "structure",
+ "members": {
+ "MaxResults": {
+ "target": "com.amazonaws.mediaconvert#__integerMin1Max20",
+ "traits": {
+ "smithy.api#documentation": "Optional. Number of valid Job engine versions, up to twenty, that will be returned at one time.",
+ "smithy.api#httpQuery": "maxResults"
+ }
+ },
+ "NextToken": {
+ "target": "com.amazonaws.mediaconvert#__string",
+ "traits": {
+ "smithy.api#documentation": "Optional. Use this string, provided with the response to a previous request, to request the next batch of Job engine versions.",
+ "smithy.api#httpQuery": "nextToken"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#input": {}
+ }
+ },
+ "com.amazonaws.mediaconvert#ListVersionsResponse": {
+ "type": "structure",
+ "members": {
+ "NextToken": {
+ "target": "com.amazonaws.mediaconvert#__string",
+ "traits": {
+ "smithy.api#documentation": "Optional. Use this string, provided with the response to a previous request, to request the next batch of Job engine versions.",
+ "smithy.api#jsonName": "nextToken"
+ }
+ },
+ "Versions": {
+ "target": "com.amazonaws.mediaconvert#__listOfJobEngineVersion",
+ "traits": {
+ "smithy.api#documentation": "Retrieve a JSON array of all available Job engine versions and the date they expire.",
+ "smithy.api#jsonName": "versions"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#output": {}
+ }
+ },
"com.amazonaws.mediaconvert#M2tsAudioBufferModel": {
"type": "enum",
"members": {
@@ -16992,6 +17188,9 @@
{
"target": "com.amazonaws.mediaconvert#ListTagsForResource"
},
+ {
+ "target": "com.amazonaws.mediaconvert#ListVersions"
+ },
{
"target": "com.amazonaws.mediaconvert#PutPolicy"
},
@@ -19261,7 +19460,7 @@
"MinIInterval": {
"target": "com.amazonaws.mediaconvert#__integerMin0Max30",
"traits": {
- "smithy.api#documentation": "Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. When you specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs.",
+ "smithy.api#documentation": "Specify the minimum number of frames allowed between two IDR-frames in your output. This includes frames created at the start of a GOP or a scene change. Use Min I-Interval to improve video compression by varying GOP size when two IDR-frames would be created near each other. For example, if a regular cadence-driven IDR-frame would fall within 5 frames of a scene-change IDR-frame, and you set Min I-interval to 5, then the encoder would only write an IDR-frame for the scene-change. In this way, one GOP is shortened or extended. If a cadence-driven IDR-frame would be further than 5 frames from a scene-change IDR-frame, then the encoder leaves all IDR-frames in place. To manually specify an interval: Enter a value from 1 to 30. Use when your downstream systems have specific GOP size requirements. To disable GOP size variance: Enter 0. MediaConvert will only create IDR-frames at the start of your output's cadence-driven GOP. Use when your downstream systems require a regular GOP size.",
"smithy.api#jsonName": "minIInterval"
}
},
@@ -23941,10 +24140,17 @@
"EndTimecode": {
"target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092",
"traits": {
- "smithy.api#documentation": "Enter the end timecode in the underlying input video for this overlay. Your overlay will be active through this frame. To display your video overlay for the duration of the underlying video: Leave blank. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for the underlying Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your overlay to end ten minutes into the video, enter 01:10:00:00.",
+ "smithy.api#documentation": "Enter the end timecode in the base input video for this overlay. Your overlay will be active through this frame. To display your video overlay for the duration of the base input video: Leave blank. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS isthe second, and FF is the frame number. When entering this value, take into account your choice for the base input video's timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your overlay to end ten minutes into the video, enter 01:10:00:00.",
"smithy.api#jsonName": "endTimecode"
}
},
+ "InitialPosition": {
+ "target": "com.amazonaws.mediaconvert#VideoOverlayPosition",
+ "traits": {
+ "smithy.api#documentation": "Specify the Initial position of your video overlay. To specify the Initial position of your video overlay, including distance from the left or top edge of the base input video's frame, or size: Enter a value for X position, Y position, Width, or Height. To use the full frame of the base input video: Leave blank.",
+ "smithy.api#jsonName": "initialPosition"
+ }
+ },
"Input": {
"target": "com.amazonaws.mediaconvert#VideoOverlayInput",
"traits": {
@@ -23952,12 +24158,26 @@
"smithy.api#jsonName": "input"
}
},
+ "Playback": {
+ "target": "com.amazonaws.mediaconvert#VideoOverlayPlayBackMode",
+ "traits": {
+ "smithy.api#documentation": "Specify whether your video overlay repeats or plays only once. To repeat your video overlay on a loop: Keep the default value, Repeat. Your overlay will repeat for the duration of the base input video. To playback your video overlay only once: Choose Once. With either option, you can end playback at a time that you specify by entering a value for End timecode.",
+ "smithy.api#jsonName": "playback"
+ }
+ },
"StartTimecode": {
"target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092",
"traits": {
- "smithy.api#documentation": "Enter the start timecode in the underlying input video for this overlay. Your overlay will be active starting with this frame. To display your video overlay starting at the beginning of the underlying video: Leave blank. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for the underlying Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your overlay to begin five minutes into the video, enter 01:05:00:00.",
+ "smithy.api#documentation": "Enter the start timecode in the base input video for this overlay. Your overlay will be active starting with this frame. To display your video overlay starting at the beginning of the base input video: Leave blank. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for the base input video's timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your overlay to begin five minutes into the video, enter 01:05:00:00.",
"smithy.api#jsonName": "startTimecode"
}
+ },
+ "Transitions": {
+ "target": "com.amazonaws.mediaconvert#__listOfVideoOverlayTransition",
+ "traits": {
+ "smithy.api#documentation": "Specify one or more transitions for your video overlay. Use Transitions to reposition or resize your overlay over time. To use the same position and size for the duration of your video overlay: Leave blank. To specify a Transition: Enter a value for Start timecode, End Timecode, X Position, Y Position, Width, or Height.",
+ "smithy.api#jsonName": "transitions"
+ }
}
},
"traits": {
@@ -23970,7 +24190,7 @@
"FileInput": {
"target": "com.amazonaws.mediaconvert#__stringPatternS3Https",
"traits": {
- "smithy.api#documentation": "Specify the input file S3, HTTP, or HTTPS URI for your video overlay. For consistency in color and formatting in your output video image, we recommend that you specify a video with similar characteristics as the underlying input video.",
+ "smithy.api#documentation": "Specify the input file S3, HTTP, or HTTPS URL for your video overlay.\nTo specify one or more Transitions for your base input video instead: Leave blank.",
"smithy.api#jsonName": "fileInput"
}
},
@@ -24004,14 +24224,14 @@
"type": "structure",
"members": {
"EndTimecode": {
- "target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092",
+ "target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092090909",
"traits": {
"smithy.api#documentation": "Specify the timecode of the last frame to include in your video overlay's clip. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for Timecode source.",
"smithy.api#jsonName": "endTimecode"
}
},
"StartTimecode": {
- "target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092",
+ "target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092090909",
"traits": {
"smithy.api#documentation": "Specify the timecode of the first frame to include in your video overlay's clip. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for Timecode source.",
"smithy.api#jsonName": "startTimecode"
@@ -24022,6 +24242,118 @@
"smithy.api#documentation": "To transcode only portions of your video overlay, include one input clip for each part of your video overlay that you want in your output."
}
},
+ "com.amazonaws.mediaconvert#VideoOverlayPlayBackMode": {
+ "type": "enum",
+ "members": {
+ "ONCE": {
+ "target": "smithy.api#Unit",
+ "traits": {
+ "smithy.api#enumValue": "ONCE"
+ }
+ },
+ "REPEAT": {
+ "target": "smithy.api#Unit",
+ "traits": {
+ "smithy.api#enumValue": "REPEAT"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#documentation": "Specify whether your video overlay repeats or plays only once. To repeat your video overlay on a loop: Keep the default value, Repeat. Your overlay will repeat for the duration of the base input video. To playback your video overlay only once: Choose Once. With either option, you can end playback at a time that you specify by entering a value for End timecode."
+ }
+ },
+ "com.amazonaws.mediaconvert#VideoOverlayPosition": {
+ "type": "structure",
+ "members": {
+ "Height": {
+ "target": "com.amazonaws.mediaconvert#__integerMinNegative1Max2147483647",
+ "traits": {
+ "smithy.api#documentation": "To scale your video overlay to the same height as the base input video: Leave blank. To scale the height of your video overlay to a different height: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 360 and choose Pixels, your video overlay will be rendered with a height of 360. When you enter 50, choose Percentage, and your overlay's source has a height of 1080, your video overlay will be rendered with a height of 540. To scale your overlay to a specific height while automatically maintaining its original aspect ratio, enter a value for Height and leave Width blank.",
+ "smithy.api#jsonName": "height"
+ }
+ },
+ "Unit": {
+ "target": "com.amazonaws.mediaconvert#VideoOverlayUnit",
+ "traits": {
+ "smithy.api#documentation": "Specify the Unit type to use when you enter a value for X position, Y position, Width, or Height. You can choose Pixels or Percentage. Leave blank to use the default value, Pixels.",
+ "smithy.api#jsonName": "unit"
+ }
+ },
+ "Width": {
+ "target": "com.amazonaws.mediaconvert#__integerMinNegative1Max2147483647",
+ "traits": {
+ "smithy.api#documentation": "To scale your video overlay to the same width as the base input video: Leave blank. To scale the width of your video overlay to a different width: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 640 and choose Pixels, your video overlay will scale to a height of 640 pixels. When you enter 50, choose Percentage, and your overlay's source has a width of 1920, your video overlay will scale to a width of 960. To scale your overlay to a specific width while automatically maintaining its original aspect ratio, enter a value for Width and leave Height blank.",
+ "smithy.api#jsonName": "width"
+ }
+ },
+ "XPosition": {
+ "target": "com.amazonaws.mediaconvert#__integerMinNegative2147483648Max2147483647",
+ "traits": {
+ "smithy.api#documentation": "To position the left edge of your video overlay along the left edge of the base input video's frame: Keep blank, or enter 0. To position the left edge of your video overlay to the right, relative to the left edge of the base input video's frame: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 10 and choose Pixels, your video overlay will be positioned 10 pixels from the left edge of the base input video's frame. When you enter 10, choose Percentage, and your base input video is 1920x1080, your video overlay will be positioned 192 pixels from the left edge of the base input video's frame.",
+ "smithy.api#jsonName": "xPosition"
+ }
+ },
+ "YPosition": {
+ "target": "com.amazonaws.mediaconvert#__integerMinNegative2147483648Max2147483647",
+ "traits": {
+ "smithy.api#documentation": "To position the top edge of your video overlay along the top edge of the base input video's frame: Keep blank, or enter 0. To position the top edge of your video overlay down, relative to the top edge of the base input video's frame: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 10 and choose Pixels, your video overlay will be positioned 10 pixels from the top edge of the base input video's frame. When you enter 10, choose Percentage, and your underlying video is 1920x1080, your video overlay will be positioned 108 pixels from the top edge of the base input video's frame.",
+ "smithy.api#jsonName": "yPosition"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#documentation": "position of video overlay"
+ }
+ },
+ "com.amazonaws.mediaconvert#VideoOverlayTransition": {
+ "type": "structure",
+ "members": {
+ "EndPosition": {
+ "target": "com.amazonaws.mediaconvert#VideoOverlayPosition",
+ "traits": {
+ "smithy.api#documentation": "Specify the ending position for this transition, relative to the base input video's frame. Your video overlay will move smoothly to this position, beginning at this transition's Start timecode and ending at this transition's End timecode.",
+ "smithy.api#jsonName": "endPosition"
+ }
+ },
+ "EndTimecode": {
+ "target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092",
+ "traits": {
+ "smithy.api#documentation": "Specify the timecode for when this transition ends. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for Timecode source.",
+ "smithy.api#jsonName": "endTimecode"
+ }
+ },
+ "StartTimecode": {
+ "target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092",
+ "traits": {
+ "smithy.api#documentation": "Specify the timecode for when this transition begins. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for Timecode source.",
+ "smithy.api#jsonName": "startTimecode"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#documentation": "Specify one or more Transitions for your video overlay. Use Transitions to reposition or resize your overlay over time. To use the same position and size for the duration of your video overlay: Leave blank. To specify a Transition: Enter a value for Start timecode, End Timecode, X Position, Y Position, Width, or Height."
+ }
+ },
+ "com.amazonaws.mediaconvert#VideoOverlayUnit": {
+ "type": "enum",
+ "members": {
+ "PIXELS": {
+ "target": "smithy.api#Unit",
+ "traits": {
+ "smithy.api#enumValue": "PIXELS"
+ }
+ },
+ "PERCENTAGE": {
+ "target": "smithy.api#Unit",
+ "traits": {
+ "smithy.api#enumValue": "PERCENTAGE"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#documentation": "Specify the Unit type to use when you enter a value for X position, Y position, Width, or Height. You can choose Pixels or Percentage. Leave blank to use the default value, Pixels."
+ }
+ },
"com.amazonaws.mediaconvert#VideoPreprocessor": {
"type": "structure",
"members": {
@@ -26522,6 +26854,15 @@
}
}
},
+ "com.amazonaws.mediaconvert#__integerMinNegative1Max2147483647": {
+ "type": "integer",
+ "traits": {
+ "smithy.api#range": {
+ "min": -1,
+ "max": 2147483647
+ }
+ }
+ },
"com.amazonaws.mediaconvert#__integerMinNegative1Max3": {
"type": "integer",
"traits": {
@@ -26711,6 +27052,12 @@
"target": "com.amazonaws.mediaconvert#Job"
}
},
+ "com.amazonaws.mediaconvert#__listOfJobEngineVersion": {
+ "type": "list",
+ "member": {
+ "target": "com.amazonaws.mediaconvert#JobEngineVersion"
+ }
+ },
"com.amazonaws.mediaconvert#__listOfJobTemplate": {
"type": "list",
"member": {
@@ -26789,6 +27136,12 @@
"target": "com.amazonaws.mediaconvert#VideoOverlayInputClipping"
}
},
+ "com.amazonaws.mediaconvert#__listOfVideoOverlayTransition": {
+ "type": "list",
+ "member": {
+ "target": "com.amazonaws.mediaconvert#VideoOverlayTransition"
+ }
+ },
"com.amazonaws.mediaconvert#__listOfWarningGroup": {
"type": "list",
"member": {