From d04d55a9743e1909fef4743b4e42b311cc5ff37a Mon Sep 17 00:00:00 2001
From: yoshi-automation
Date: Thu, 11 Jun 2020 04:36:18 -0700
Subject: [PATCH] fix(dataproc): update the API

#### dataproc:v1

The following keys were changed:

- schemas.BasicYarnAutoscalingConfig.properties.scaleDownFactor.description
- schemas.BasicYarnAutoscalingConfig.properties.scaleUpFactor.description
- schemas.PySparkJob.properties.archiveUris.description
- schemas.PySparkJob.properties.fileUris.description
- schemas.SparkJob.properties.archiveUris.description
- schemas.SparkJob.properties.fileUris.description
- schemas.SparkRJob.properties.archiveUris.description
- schemas.SparkRJob.properties.fileUris.description

#### dataproc:v1beta2

The following keys were changed:

- schemas.BasicYarnAutoscalingConfig.properties.scaleDownFactor.description
- schemas.BasicYarnAutoscalingConfig.properties.scaleUpFactor.description
- schemas.PySparkJob.properties.archiveUris.description
- schemas.PySparkJob.properties.fileUris.description
- schemas.SoftwareConfig.properties.optionalComponents.enumDescriptions
- schemas.SoftwareConfig.properties.optionalComponents.items.enum
- schemas.SparkJob.properties.archiveUris.description
- schemas.SparkJob.properties.fileUris.description
- schemas.SparkRJob.properties.archiveUris.description
- schemas.SparkRJob.properties.fileUris.description
- schemas.WorkflowTemplate.description
---
 discovery/dataproc-v1.json      | 18 +++++++++---------
 discovery/dataproc-v1beta2.json | 24 ++++++++++--------------
 src/apis/dataproc/v1.ts         | 16 ++++++++--------
 src/apis/dataproc/v1beta2.ts    | 18 +++++++++---------
 4 files changed, 36 insertions(+), 40 deletions(-)

diff --git a/discovery/dataproc-v1.json b/discovery/dataproc-v1.json
index 35569c23fa..392212dbcc 100644
--- a/discovery/dataproc-v1.json
+++ b/discovery/dataproc-v1.json
@@ -2129,7 +2129,7 @@
       }
     }
   },
-  "revision": "20200511",
+  "revision": "20200528",
   "rootUrl": "https://dataproc.googleapis.com/",
   "schemas": {
     "AcceleratorConfig": {
@@ -2211,7 +2211,7 @@
           "type": "string"
         },
         "scaleDownFactor": {
-          "description": "Required. Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job.Bounds: 0.0, 1.0.",
+          "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works for more information.Bounds: 0.0, 1.0.",
           "format": "double",
           "type": "number"
         },
@@ -2221,7 +2221,7 @@
           "type": "number"
         },
         "scaleUpFactor": {
-          "description": "Required. Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.",
+          "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works for more information.Bounds: 0.0, 1.0.",
           "format": "double",
           "type": "number"
         },
@@ -3616,7 +3616,7 @@
       "id": "PySparkJob",
       "properties": {
         "archiveUris": {
-          "description": "Optional. HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.",
+          "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
           "items": {
             "type": "string"
           },
@@ -3630,7 +3630,7 @@
           "type": "array"
         },
         "fileUris": {
-          "description": "Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.",
+          "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
           "items": {
             "type": "string"
           },
@@ -3800,7 +3800,7 @@
       "id": "SparkJob",
       "properties": {
         "archiveUris": {
-          "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
+          "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
           "items": {
             "type": "string"
           },
@@ -3814,7 +3814,7 @@
           "type": "array"
         },
         "fileUris": {
-          "description": "Optional. HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.",
+          "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
           "items": {
             "type": "string"
           },
@@ -3854,7 +3854,7 @@
       "id": "SparkRJob",
       "properties": {
         "archiveUris": {
-          "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
+          "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
           "items": {
             "type": "string"
           },
@@ -3868,7 +3868,7 @@
           "type": "array"
         },
         "fileUris": {
-          "description": "Optional. HCFS URIs of files to be copied to the working directory of R drivers and distributed tasks. Useful for naively parallel tasks.",
+          "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
           "items": {
             "type": "string"
           },
diff --git a/discovery/dataproc-v1beta2.json b/discovery/dataproc-v1beta2.json
index 629b37e295..2a3ef8bcf9 100644
--- a/discovery/dataproc-v1beta2.json
+++ b/discovery/dataproc-v1beta2.json
@@ -2242,7 +2242,7 @@
       }
     }
   },
-  "revision": "20200511",
+  "revision": "20200528",
   "rootUrl": "https://dataproc.googleapis.com/",
   "schemas": {
     "AcceleratorConfig": {
@@ -2324,7 +2324,7 @@
           "type": "string"
         },
         "scaleDownFactor": {
-          "description": "Required. Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job.Bounds: 0.0, 1.0.",
+          "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works for more information.Bounds: 0.0, 1.0.",
           "format": "double",
           "type": "number"
         },
@@ -2334,7 +2334,7 @@
           "type": "number"
         },
         "scaleUpFactor": {
-          "description": "Required. Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.",
+          "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works for more information.Bounds: 0.0, 1.0.",
           "format": "double",
           "type": "number"
         },
@@ -3795,7 +3795,7 @@
       "id": "PySparkJob",
       "properties": {
         "archiveUris": {
-          "description": "Optional. HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.",
+          "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
           "items": {
             "type": "string"
           },
@@ -3809,7 +3809,7 @@
           "type": "array"
         },
         "fileUris": {
-          "description": "Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.",
+          "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
           "items": {
             "type": "string"
           },
@@ -3944,9 +3944,7 @@
           "enumDescriptions": [
             "Unspecified component.",
             "The Anaconda python distribution.",
-            "Docker",
             "The Druid query engine.",
-            "Flink",
             "HBase.",
             "The Hive Web HCatalog (the REST service for accessing HCatalog).",
             "The Jupyter Notebook.",
@@ -3961,9 +3959,7 @@
             "enum": [
               "COMPONENT_UNSPECIFIED",
               "ANACONDA",
-              "DOCKER",
               "DRUID",
-              "FLINK",
               "HBASE",
               "HIVE_WEBHCAT",
               "JUPYTER",
@@ -3993,7 +3989,7 @@
       "id": "SparkJob",
       "properties": {
         "archiveUris": {
-          "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
+          "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
           "items": {
             "type": "string"
           },
@@ -4007,7 +4003,7 @@
           "type": "array"
         },
         "fileUris": {
-          "description": "Optional. HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.",
+          "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
           "items": {
             "type": "string"
           },
@@ -4047,7 +4043,7 @@
       "id": "SparkRJob",
       "properties": {
         "archiveUris": {
-          "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
+          "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
           "items": {
             "type": "string"
           },
@@ -4061,7 +4057,7 @@
           "type": "array"
         },
         "fileUris": {
-          "description": "Optional. HCFS URIs of files to be copied to the working directory of R drivers and distributed tasks. Useful for naively parallel tasks.",
+          "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
           "items": {
             "type": "string"
           },
@@ -4395,7 +4391,7 @@
       "type": "object"
     },
     "WorkflowTemplate": {
-      "description": "A Dataproc workflow template resource.",
+      "description": "A Dataproc workflow template resource. Next ID: 11",
       "id": "WorkflowTemplate",
       "properties": {
         "createTime": {
diff --git a/src/apis/dataproc/v1.ts b/src/apis/dataproc/v1.ts
index 3f69b4a250..261ad3de80 100644
--- a/src/apis/dataproc/v1.ts
+++ b/src/apis/dataproc/v1.ts
@@ -193,7 +193,7 @@ export namespace dataproc_v1 {
      */
     gracefulDecommissionTimeout?: string | null;
     /**
-     * Required. Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job.Bounds: 0.0, 1.0.
+     * Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works for more information.Bounds: 0.0, 1.0.
      */
     scaleDownFactor?: number | null;
     /**
@@ -201,7 +201,7 @@
      */
     scaleDownMinWorkerFraction?: number | null;
     /**
-     * Required. Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.
+     * Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works for more information.Bounds: 0.0, 1.0.
      */
     scaleUpFactor?: number | null;
     /**
@@ -1246,7 +1246,7 @@
    */
   export interface Schema$PySparkJob {
     /**
-     * Optional. HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.
+     * Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
      */
     archiveUris?: string[] | null;
     /**
@@ -1254,7 +1254,7 @@
      */
     args?: string[] | null;
     /**
-     * Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.
+     * Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
      */
     fileUris?: string[] | null;
     /**
@@ -1353,7 +1353,7 @@
    */
   export interface Schema$SparkJob {
     /**
-     * Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
      */
     archiveUris?: string[] | null;
     /**
@@ -1361,7 +1361,7 @@
      */
     args?: string[] | null;
     /**
-     * Optional. HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.
+     * Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
      */
     fileUris?: string[] | null;
     /**
@@ -1390,7 +1390,7 @@
    */
   export interface Schema$SparkRJob {
     /**
-     * Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
      */
     archiveUris?: string[] | null;
     /**
@@ -1398,7 +1398,7 @@
      */
     args?: string[] | null;
     /**
-     * Optional. HCFS URIs of files to be copied to the working directory of R drivers and distributed tasks. Useful for naively parallel tasks.
+     * Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
      */
     fileUris?: string[] | null;
     /**
diff --git a/src/apis/dataproc/v1beta2.ts b/src/apis/dataproc/v1beta2.ts
index 572d3b7857..6205906593 100644
--- a/src/apis/dataproc/v1beta2.ts
+++ b/src/apis/dataproc/v1beta2.ts
@@ -193,7 +193,7 @@ export namespace dataproc_v1beta2 {
      */
     gracefulDecommissionTimeout?: string | null;
     /**
-     * Required. Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job.Bounds: 0.0, 1.0.
+     * Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works for more information.Bounds: 0.0, 1.0.
      */
     scaleDownFactor?: number | null;
     /**
@@ -201,7 +201,7 @@
      */
     scaleDownMinWorkerFraction?: number | null;
     /**
-     * Required. Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.
+     * Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works for more information.Bounds: 0.0, 1.0.
      */
     scaleUpFactor?: number | null;
     /**
@@ -1297,7 +1297,7 @@
    */
   export interface Schema$PySparkJob {
     /**
-     * Optional. HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.
+     * Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
      */
     archiveUris?: string[] | null;
     /**
@@ -1305,7 +1305,7 @@
      */
     args?: string[] | null;
     /**
-     * Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.
+     * Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
      */
     fileUris?: string[] | null;
     /**
@@ -1404,7 +1404,7 @@
    */
   export interface Schema$SparkJob {
     /**
-     * Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
      */
     archiveUris?: string[] | null;
     /**
@@ -1412,7 +1412,7 @@
      */
     args?: string[] | null;
     /**
-     * Optional. HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.
+     * Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
      */
     fileUris?: string[] | null;
     /**
@@ -1441,7 +1441,7 @@
    */
   export interface Schema$SparkRJob {
     /**
-     * Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
      */
     archiveUris?: string[] | null;
     /**
@@ -1449,7 +1449,7 @@
      */
     args?: string[] | null;
     /**
-     * Optional. HCFS URIs of files to be copied to the working directory of R drivers and distributed tasks. Useful for naively parallel tasks.
+     * Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
      */
     fileUris?: string[] | null;
     /**
@@ -1682,7 +1682,7 @@
     stepId?: string | null;
   }
   /**
-   * A Dataproc workflow template resource.
+   * A Dataproc workflow template resource. Next ID: 11
    */
   export interface Schema$WorkflowTemplate {
     /**
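
Reviewer note, not part of the generated patch: the changes above are documentation-only, but they clarify where `archiveUris` and `fileUris` land at runtime (the working directory of each executor, rather than "drivers and tasks"). A minimal TypeScript sketch of how these fields flow through this client when submitting a PySpark job; the project ID, region, cluster name, and `gs://` URIs are placeholders, and auth assumes default application credentials:

```ts
import {google} from 'googleapis';

const dataproc = google.dataproc('v1');

async function submitPySparkJob() {
  // Placeholder auth: any GoogleAuth credential with the
  // cloud-platform scope works here.
  const auth = new google.auth.GoogleAuth({
    scopes: ['https://www.googleapis.com/auth/cloud-platform'],
  });
  google.options({auth});

  const res = await dataproc.projects.regions.jobs.submit({
    projectId: 'my-project', // placeholder
    region: 'us-central1', // placeholder
    requestBody: {
      job: {
        placement: {clusterName: 'my-cluster'}, // placeholder
        pysparkJob: {
          mainPythonFileUri: 'gs://my-bucket/job.py', // placeholder
          // Per the updated descriptions: archives are extracted into,
          // and files placed in, the working directory of each executor.
          archiveUris: ['gs://my-bucket/deps.zip'],
          fileUris: ['gs://my-bucket/lookup.csv'],
        },
      },
    },
  });
  console.log(res.data.reference?.jobId);
}

submitPySparkJob().catch(console.error);
```

The same `archiveUris`/`fileUris` shape applies to the `sparkJob` and `sparkRJob` payloads touched by this patch.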