diff --git a/.ci/containers/contributor-checker/check-contributor.sh b/.ci/containers/contributor-checker/check-contributor.sh
index 97e59bac6743..ae28e3941fe2 100755
--- a/.ci/containers/contributor-checker/check-contributor.sh
+++ b/.ci/containers/contributor-checker/check-contributor.sh
@@ -52,7 +52,7 @@ fi
# This is where you add people to the random-assignee rotation. This list
# might not equal the list of users who do not need to have an assigne.
# If you add people to this list, please also add them to the re-request review list above
-ASSIGNEE=$(shuf -n 1 <(printf "c2thorn\nslevenick\nscottsuarez\nshuyama1\nmegan07\nmelinath"))
+ASSIGNEE=$(shuf -n 1 <(printf "rileykarson\nc2thorn\nslevenick\nscottsuarez\nshuyama1\nmegan07\nmelinath"))
comment=$(cat << EOF
Hello! I am a robot who works on Magic Modules PRs.
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 000000000000..c589260f79e6
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1 @@
+tpgtools/overrides @rileykarson
diff --git a/mmv1/products/apigee/api.yaml b/mmv1/products/apigee/api.yaml
index 3b3897e0eb6e..999970936176 100644
--- a/mmv1/products/apigee/api.yaml
+++ b/mmv1/products/apigee/api.yaml
@@ -253,6 +253,9 @@ objects:
base_url: 'environments'
create_url: '{{org_id}}/environments'
self_link: '{{org_id}}/environments/{{name}}'
+ update_url: '{{org_id}}/environments/{{name}}'
+ update_verb: :PATCH
+ update_mask: true
async: !ruby/object:Api::OpAsync
operation: !ruby/object:Api::OpAsync::Operation
path: 'name'
@@ -294,6 +297,7 @@ objects:
description: |
The resource ID of the environment.
required: true
+ input: true
- !ruby/object:Api::Type::String
name: 'displayName'
description: |
@@ -331,6 +335,29 @@ objects:
- "PROGRAMMABLE"
- "CONFIGURABLE"
input: true
+ - !ruby/object:Api::Type::NestedObject
+ name: 'nodeConfig'
+ description: |
+ NodeConfig for setting the min/max number of nodes associated with the environment.
+ properties:
+ - !ruby/object:Api::Type::String
+ name: 'minNodeCount'
+ description: |
+ The minimum total number of gateway nodes that is reserved for all instances that
+ have the specified environment. If not specified, the default is determined by the
+ recommended minimum number of nodes for that gateway.
+ - !ruby/object:Api::Type::String
+ name: 'maxNodeCount'
+ description: |
+ The maximum total number of gateway nodes that is reserved for all instances that
+ have the specified environment. If not specified, the default is determined by the
+ recommended maximum number of nodes for that gateway.
+ - !ruby/object:Api::Type::String
+ name: 'currentAggregateNodeCount'
+ description: |
+ The current total number of gateway nodes that each environment currently has across
+ all instances.
+ output: true
references: !ruby/object:Api::Resource::ReferenceLinks
guides:
'Creating an environment':
diff --git a/mmv1/products/apigee/terraform.yaml b/mmv1/products/apigee/terraform.yaml
index 571297238f51..937a30d883e8 100644
--- a/mmv1/products/apigee/terraform.yaml
+++ b/mmv1/products/apigee/terraform.yaml
@@ -176,11 +176,24 @@ overrides: !ruby/object:Overrides::ResourceOverrides
skip_docs: true
# Resource creation race
skip_vcr: true
+ - !ruby/object:Provider::Terraform::Examples
+ name: "apigee_environment_nodeconfig_test"
+ primary_resource_id: "apigee_environment"
+ primary_resource_name: "fmt.Sprintf(\"organizations/tf-test%s\", context[\"random_suffix\"]), fmt.Sprintf(\"tf-test%s\", context[\"random_suffix\"])"
+ test_env_vars:
+ org_id: :ORG_ID
+ billing_account: :BILLING_ACCT
+ skip_docs: true
+ min_version: beta
+ # Resource creation race
+ skip_vcr: true
properties:
deploymentType: !ruby/object:Overrides::Terraform::PropertyOverride
default_from_api: true
apiProxyType: !ruby/object:Overrides::Terraform::PropertyOverride
default_from_api: true
+ nodeConfig: !ruby/object:Overrides::Terraform::PropertyOverride
+ default_from_api: true
timeouts: !ruby/object:Api::Timeouts
insert_minutes: 30
delete_minutes: 30
diff --git a/mmv1/products/certificatemanager/terraform.yaml b/mmv1/products/certificatemanager/terraform.yaml
index 8d37ce1fcbd6..4ef626f18c40 100644
--- a/mmv1/products/certificatemanager/terraform.yaml
+++ b/mmv1/products/certificatemanager/terraform.yaml
@@ -15,8 +15,6 @@
overrides: !ruby/object:Overrides::ResourceOverrides
DnsAuthorization: !ruby/object:Overrides::Terraform::ResourceOverride
docs: !ruby/object:Provider::Terraform::Docs
- warning: |
- These resources require allow-listing to use, and are not openly available to all Cloud customers. Engage with your Cloud account team to discuss how to onboard.
autogen_async: true
import_format: ["projects/{{project}}/locations/global/dnsAuthorizations/{{name}}"]
examples:
@@ -28,8 +26,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides
zone_name: "my-zone"
Certificate: !ruby/object:Overrides::Terraform::ResourceOverride
docs: !ruby/object:Provider::Terraform::Docs
- warning: |
- These resources require allow-listing to use, and are not openly available to all Cloud customers. Engage with your Cloud account team to discuss how to onboard.
autogen_async: true
import_format: ["projects/{{project}}/locations/global/certificates/{{name}}"]
examples:
@@ -59,8 +55,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides
constants: templates/terraform/constants/cert_manager.erb
CertificateMap: !ruby/object:Overrides::Terraform::ResourceOverride
docs: !ruby/object:Provider::Terraform::Docs
- warning: |
- These resources require allow-listing to use, and are not openly available to all Cloud customers. Engage with your Cloud account team to discuss how to onboard.
autogen_async: true
import_format: ["projects/{{project}}/locations/global/certificateMaps/{{name}}"]
examples:
@@ -74,8 +68,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides
default_from_api: true
CertificateMapEntry: !ruby/object:Overrides::Terraform::ResourceOverride
docs: !ruby/object:Provider::Terraform::Docs
- warning: |
- These resources require allow-listing to use, and are not openly available to all Cloud customers. Engage with your Cloud account team to discuss how to onboard.
autogen_async: true
examples:
- !ruby/object:Provider::Terraform::Examples
diff --git a/mmv1/products/cloudrun/terraform.yaml b/mmv1/products/cloudrun/terraform.yaml
index 971ac8cc801f..4ec28fcdccab 100644
--- a/mmv1/products/cloudrun/terraform.yaml
+++ b/mmv1/products/cloudrun/terraform.yaml
@@ -164,7 +164,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides
scheduled_cloud_run_job: "scheduled-cloud-run-job"
test_env_vars:
project: :PROJECT_NAME
- min_version: beta
skip_docs: true
- !ruby/object:Provider::Terraform::Examples
name: "cloud_run_service_secret_environment_variables"
diff --git a/mmv1/products/dns/api.yaml b/mmv1/products/dns/api.yaml
index 8d4e81c6fbbc..567bec9e4a99 100644
--- a/mmv1/products/dns/api.yaml
+++ b/mmv1/products/dns/api.yaml
@@ -501,7 +501,7 @@ objects:
name: 'ResponsePolicy'
kind: 'dns#responsePolicy'
description: |
- A Response Policy is a collection of selectors that apply to queries
+ A Response Policy is a collection of selectors that apply to queries
made against one or more Virtual Private Cloud networks.
base_url: 'projects/{{project}}/responsePolicies'
self_link: 'projects/{{project}}/responsePolicies/{{response_policy_name}}'
@@ -575,6 +575,8 @@ objects:
update_verb: :PATCH
- !ruby/object:Api::Type::NestedObject
name: 'localData'
+ conflicts:
+ - behavior
description: |
Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name;
in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed.
@@ -625,3 +627,10 @@ objects:
name: 'rrdatas'
description: |
As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1)
+ - !ruby/object:Api::Type::String
+ name: 'behavior'
+ conflicts:
+ - local_data
+ min_version: beta
+ description: Answer this query with a behavior rather than DNS data. Acceptable values are 'behaviorUnspecified' and 'bypassResponsePolicy'
+ update_verb: :PATCH
diff --git a/mmv1/products/monitoring/terraform.yaml b/mmv1/products/monitoring/terraform.yaml
index ca1ee7d9c616..6e98932e93fa 100644
--- a/mmv1/products/monitoring/terraform.yaml
+++ b/mmv1/products/monitoring/terraform.yaml
@@ -63,6 +63,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides
NotificationChannel: !ruby/object:Overrides::Terraform::ResourceOverride
id_format: "{{name}}"
import_format: ["{{name}}"]
+ delete_url: 'v3/{{name}}?force={{force_delete}}'
mutex: stackdriver/notifications/{{project}}
error_retry_predicates: ["isMonitoringConcurrentEditError"]
examples:
@@ -78,6 +79,17 @@ overrides: !ruby/object:Overrides::ResourceOverrides
skip_test: true
vars:
display_name: "Sensitive Notification Channel test"
+ virtual_fields:
+ - !ruby/object:Api::Type::Boolean
+ name: 'force_delete'
+ url_param_only: true
+ default_value: false
+ description: |
+ If true, the notification channel will be deleted regardless
+ of its use in alert policies (the policies will be updated
+ to remove the channel). If false, channels that are still
+ referenced by an existing alerting policy will fail to be
+ deleted in a delete operation.
custom_code: !ruby/object:Provider::Terraform::CustomCode
resource_definition: templates/terraform/resource_definition/monitoring_notification_channel.erb
encoder: templates/terraform/encoders/monitoring_notification_channel.go.erb
diff --git a/mmv1/templates/terraform/examples/apigee_environment_nodeconfig_test.tf.erb b/mmv1/templates/terraform/examples/apigee_environment_nodeconfig_test.tf.erb
new file mode 100644
index 000000000000..a4efdafb7613
--- /dev/null
+++ b/mmv1/templates/terraform/examples/apigee_environment_nodeconfig_test.tf.erb
@@ -0,0 +1,127 @@
+resource "google_project" "project" {
+ provider = google-beta
+
+ project_id = "tf-test%{random_suffix}"
+ name = "tf-test%{random_suffix}"
+ org_id = "<%= ctx[:test_env_vars]['org_id'] %>"
+ billing_account = "<%= ctx[:test_env_vars]['billing_account'] %>"
+}
+
+resource "google_project_service" "apigee" {
+ provider = google-beta
+
+ project = google_project.project.project_id
+ service = "apigee.googleapis.com"
+}
+
+resource "google_project_service" "compute" {
+ provider = google-beta
+
+ project = google_project.project.project_id
+ service = "compute.googleapis.com"
+}
+
+resource "google_project_service" "servicenetworking" {
+ provider = google-beta
+
+ project = google_project.project.project_id
+ service = "servicenetworking.googleapis.com"
+}
+
+resource "google_project_service" "kms" {
+ provider = google-beta
+
+ project = google_project.project.project_id
+ service = "cloudkms.googleapis.com"
+}
+
+resource "google_compute_network" "apigee_network" {
+ provider = google-beta
+
+ name = "apigee-network"
+ project = google_project.project.project_id
+ depends_on = [google_project_service.compute]
+}
+
+resource "google_compute_global_address" "apigee_range" {
+ provider = google-beta
+
+ name = "apigee-range"
+ purpose = "VPC_PEERING"
+ address_type = "INTERNAL"
+ prefix_length = 16
+ network = google_compute_network.apigee_network.id
+ project = google_project.project.project_id
+}
+
+resource "google_service_networking_connection" "apigee_vpc_connection" {
+ provider = google-beta
+
+ network = google_compute_network.apigee_network.id
+ service = "servicenetworking.googleapis.com"
+ reserved_peering_ranges = [google_compute_global_address.apigee_range.name]
+ depends_on = [google_project_service.servicenetworking]
+}
+
+resource "google_kms_key_ring" "apigee_keyring" {
+ provider = google-beta
+
+ name = "apigee-keyring"
+ location = "us-central1"
+ project = google_project.project.project_id
+ depends_on = [google_project_service.kms]
+}
+
+resource "google_kms_crypto_key" "apigee_key" {
+ provider = google-beta
+
+ name = "apigee-key"
+ key_ring = google_kms_key_ring.apigee_keyring.id
+}
+
+resource "google_project_service_identity" "apigee_sa" {
+ provider = google-beta
+
+ project = google_project.project.project_id
+ service = google_project_service.apigee.service
+}
+
+resource "google_kms_crypto_key_iam_binding" "apigee_sa_keyuser" {
+ provider = google-beta
+
+ crypto_key_id = google_kms_crypto_key.apigee_key.id
+ role = "roles/cloudkms.cryptoKeyEncrypterDecrypter"
+
+ members = [
+ "serviceAccount:${google_project_service_identity.apigee_sa.email}",
+ ]
+}
+
+resource "google_apigee_organization" "apigee_org" {
+ provider = google-beta
+
+ analytics_region = "us-central1"
+ project_id = google_project.project.project_id
+ authorized_network = google_compute_network.apigee_network.id
+ billing_type = "PAYG"
+ runtime_database_encryption_key_name = google_kms_crypto_key.apigee_key.id
+
+ depends_on = [
+ google_service_networking_connection.apigee_vpc_connection,
+ google_project_service.apigee,
+ google_kms_crypto_key_iam_binding.apigee_sa_keyuser,
+ ]
+}
+
+resource "google_apigee_environment" "<%= ctx[:primary_resource_id] %>" {
+ provider = google-beta
+
+ org_id = google_apigee_organization.apigee_org.id
+ name = "tf-test%{random_suffix}"
+ description = "Apigee Environment"
+ display_name = "tf-test%{random_suffix}"
+ node_config {
+ min_node_count = "3"
+ max_node_count = "5"
+ }
+}
diff --git a/mmv1/templates/terraform/examples/healthcare_fhir_store_notification_config.tf.erb b/mmv1/templates/terraform/examples/healthcare_fhir_store_notification_config.tf.erb
index fb9dea6f8d94..35871e58be33 100644
--- a/mmv1/templates/terraform/examples/healthcare_fhir_store_notification_config.tf.erb
+++ b/mmv1/templates/terraform/examples/healthcare_fhir_store_notification_config.tf.erb
@@ -13,8 +13,8 @@ resource "google_healthcare_fhir_store" "default" {
}
notification_configs {
- pubsubTopic = "gs://${google_pubsub_topic.topic.name}"
- sendFullResource = true
+ pubsub_topic = "gs://${google_pubsub_topic.topic.name}"
+ send_full_resource = true
}
}
diff --git a/mmv1/templates/terraform/examples/notification_channel_basic.tf.erb b/mmv1/templates/terraform/examples/notification_channel_basic.tf.erb
index b5b4a5a8d7ad..3657de15d68e 100644
--- a/mmv1/templates/terraform/examples/notification_channel_basic.tf.erb
+++ b/mmv1/templates/terraform/examples/notification_channel_basic.tf.erb
@@ -4,4 +4,5 @@ resource "google_monitoring_notification_channel" "<%= ctx[:primary_resource_id]
labels = {
email_address = "fake_email@blahblah.com"
}
+ force_delete = false
}
diff --git a/mmv1/templates/terraform/examples/storage_object_lifecycle_setting.tf.erb b/mmv1/templates/terraform/examples/storage_object_lifecycle_setting.tf.erb
index 69da3f64048b..908b65b93e4c 100644
--- a/mmv1/templates/terraform/examples/storage_object_lifecycle_setting.tf.erb
+++ b/mmv1/templates/terraform/examples/storage_object_lifecycle_setting.tf.erb
@@ -1,5 +1,6 @@
# [START storage_create_lifecycle_setting_tf]
resource "google_storage_bucket" "<%= ctx[:primary_resource_id] %>" {
+ provider = google-beta
name = "<%= ctx[:vars]['example_bucket'] %>"
location = "US"
uniform_bucket_level_access = true
diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_compute_instance.go.erb b/mmv1/third_party/terraform/data_sources/data_source_google_compute_instance.go.erb
index 03396998f795..50aebf7b025c 100644
--- a/mmv1/third_party/terraform/data_sources/data_source_google_compute_instance.go.erb
+++ b/mmv1/third_party/terraform/data_sources/data_source_google_compute_instance.go.erb
@@ -192,6 +192,6 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{
if err := d.Set("name", instance.Name); err != nil {
return fmt.Errorf("Error setting name: %s", err)
}
- d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, instance.Zone, instance.Name))
+ d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, GetResourceNameFromSelfLink(instance.Zone), instance.Name))
return nil
}
diff --git a/mmv1/third_party/terraform/resources/resource_apigee_environment_nodeconfig_test.go.erb b/mmv1/third_party/terraform/resources/resource_apigee_environment_nodeconfig_test.go.erb
new file mode 100644
index 000000000000..7ceabae32f4d
--- /dev/null
+++ b/mmv1/third_party/terraform/resources/resource_apigee_environment_nodeconfig_test.go.erb
@@ -0,0 +1,180 @@
+<% autogen_exception -%>
+package google
+<% unless version == 'ga' -%>
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+func TestAccApigeeEnvironment_apigeeEnvironmentNodeconfigTestExampleUpdate(t *testing.T) {
+ skipIfVcr(t)
+ t.Parallel()
+
+ context := map[string]interface{}{
+ "org_id": getTestOrgFromEnv(t),
+ "billing_account": getTestBillingAccountFromEnv(t),
+ "random_suffix": randString(t, 10),
+ }
+
+ vcrTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProvidersOiCS,
+ CheckDestroy: testAccCheckApigeeEnvironmentDestroyProducer(t),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccApigeeEnvironment_apigeeEnvironmentNodeconfigTestExample(context),
+ },
+ {
+ ResourceName: "google_apigee_environment.apigee_environment",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{"org_id"},
+ },
+ {
+ Config: testAccApigeeEnvironment_apigeeEnvironmentNodeconfigTestExampleUpdate(context),
+ },
+ {
+ ResourceName: "google_apigee_environment.apigee_environment",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{"org_id"},
+ },
+ },
+ })
+}
+
+func testAccApigeeEnvironment_apigeeEnvironmentNodeconfigTestExampleUpdate(context map[string]interface{}) string {
+ return Nprintf(`
+resource "google_project" "project" {
+ provider = google-beta
+
+ project_id = "tf-test%{random_suffix}"
+ name = "tf-test%{random_suffix}"
+ org_id = "%{org_id}"
+ billing_account = "%{billing_account}"
+}
+
+resource "google_project_service" "apigee" {
+ provider = google-beta
+
+ project = google_project.project.project_id
+ service = "apigee.googleapis.com"
+}
+
+resource "google_project_service" "compute" {
+ provider = google-beta
+
+ project = google_project.project.project_id
+ service = "compute.googleapis.com"
+}
+
+resource "google_project_service" "servicenetworking" {
+ provider = google-beta
+
+ project = google_project.project.project_id
+ service = "servicenetworking.googleapis.com"
+}
+
+resource "google_project_service" "kms" {
+ provider = google-beta
+
+ project = google_project.project.project_id
+ service = "cloudkms.googleapis.com"
+}
+
+resource "google_compute_network" "apigee_network" {
+ provider = google-beta
+
+ name = "apigee-network"
+ project = google_project.project.project_id
+ depends_on = [google_project_service.compute]
+}
+
+resource "google_compute_global_address" "apigee_range" {
+ provider = google-beta
+
+ name = "apigee-range"
+ purpose = "VPC_PEERING"
+ address_type = "INTERNAL"
+ prefix_length = 16
+ network = google_compute_network.apigee_network.id
+ project = google_project.project.project_id
+}
+
+resource "google_service_networking_connection" "apigee_vpc_connection" {
+ provider = google-beta
+
+ network = google_compute_network.apigee_network.id
+ service = "servicenetworking.googleapis.com"
+ reserved_peering_ranges = [google_compute_global_address.apigee_range.name]
+ depends_on = [google_project_service.servicenetworking]
+}
+
+resource "google_kms_key_ring" "apigee_keyring" {
+ provider = google-beta
+
+ name = "apigee-keyring"
+ location = "us-central1"
+ project = google_project.project.project_id
+ depends_on = [google_project_service.kms]
+}
+
+resource "google_kms_crypto_key" "apigee_key" {
+ provider = google-beta
+
+ name = "apigee-key"
+ key_ring = google_kms_key_ring.apigee_keyring.id
+}
+
+resource "google_project_service_identity" "apigee_sa" {
+ provider = google-beta
+
+ project = google_project.project.project_id
+ service = google_project_service.apigee.service
+}
+
+resource "google_kms_crypto_key_iam_binding" "apigee_sa_keyuser" {
+ provider = google-beta
+
+ crypto_key_id = google_kms_crypto_key.apigee_key.id
+ role = "roles/cloudkms.cryptoKeyEncrypterDecrypter"
+
+ members = [
+ "serviceAccount:${google_project_service_identity.apigee_sa.email}",
+ ]
+}
+
+resource "google_apigee_organization" "apigee_org" {
+ provider = google-beta
+
+ analytics_region = "us-central1"
+ project_id = google_project.project.project_id
+ authorized_network = google_compute_network.apigee_network.id
+ billing_type = "PAYG"
+ runtime_database_encryption_key_name = google_kms_crypto_key.apigee_key.id
+
+ depends_on = [
+ google_service_networking_connection.apigee_vpc_connection,
+ google_project_service.apigee,
+ google_kms_crypto_key_iam_binding.apigee_sa_keyuser,
+ ]
+}
+
+resource "google_apigee_environment" "apigee_environment" {
+ provider = google-beta
+
+ org_id = google_apigee_organization.apigee_org.id
+ name = "tf-test%{random_suffix}"
+ description = "Apigee Environment"
+ display_name = "tf-test%{random_suffix}"
+ node_config {
+ min_node_count = "4"
+ max_node_count = "5"
+ }
+}
+`, context)
+}
+
+<% end -%>
diff --git a/mmv1/third_party/terraform/resources/resource_compute_instance.go.erb b/mmv1/third_party/terraform/resources/resource_compute_instance.go.erb
index 6ddcd7c5709e..de0bbb615fdd 100644
--- a/mmv1/third_party/terraform/resources/resource_compute_instance.go.erb
+++ b/mmv1/third_party/terraform/resources/resource_compute_instance.go.erb
@@ -751,6 +751,14 @@ func resourceComputeInstance() *schema.Resource {
AtLeastOneOf: []string{"advanced_machine_features.0.enable_nested_virtualization","advanced_machine_features.0.threads_per_core"},
Description: `The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.`,
},
+<% unless version == 'ga' -%>
+ "visible_core_count": {
+ Type: schema.TypeInt,
+ Optional: true,
+ AtLeastOneOf: []string{"advanced_machine_features.0.enable_nested_virtualization","advanced_machine_features.0.threads_per_core","advanced_machine_features.0.visible_core_count"},
+ Description: `The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.`,
+ },
+<% end -%>
},
},
},
diff --git a/mmv1/third_party/terraform/resources/resource_compute_instance_template.go.erb b/mmv1/third_party/terraform/resources/resource_compute_instance_template.go.erb
index 662a49795b7b..28c236007904 100644
--- a/mmv1/third_party/terraform/resources/resource_compute_instance_template.go.erb
+++ b/mmv1/third_party/terraform/resources/resource_compute_instance_template.go.erb
@@ -677,6 +677,14 @@ func resourceComputeInstanceTemplate() *schema.Resource {
ForceNew: true,
Description: `The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.`,
},
+<% unless version == 'ga' -%>
+ "visible_core_count": {
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ Description: `The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.`,
+ },
+<% end -%>
},
},
},
diff --git a/mmv1/third_party/terraform/resources/resource_container_cluster.go.erb b/mmv1/third_party/terraform/resources/resource_container_cluster.go.erb
index 4e0c2fd14173..7f220d7732c9 100755
--- a/mmv1/third_party/terraform/resources/resource_container_cluster.go.erb
+++ b/mmv1/third_party/terraform/resources/resource_container_cluster.go.erb
@@ -1330,6 +1330,23 @@ func resourceContainerCluster() *schema.Resource {
},
<% end -%>
+ "service_external_ips_config": {
+ Type: schema.TypeList,
+ MaxItems: 1,
+ Optional: true,
+ Computed: true,
+ Description: `If set, and enabled=true, services with external ips field will not be blocked`,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enabled": {
+ Type: schema.TypeBool,
+ Required: true,
+ Description: `When enabled, services with external ips specified will be allowed.`,
+ },
+ },
+ },
+ },
+
"mesh_certificates": {
Type: schema.TypeList,
MaxItems: 1,
@@ -1466,6 +1483,25 @@ func resourceContainerCluster() *schema.Resource {
Computed: true,
},
+<% unless version == "ga" -%>
+ "cost_management_config": {
+ Type: schema.TypeList,
+ MaxItems: 1,
+ Optional: true,
+ Computed: true,
+ Description: `Cost management configuration for the cluster.`,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enabled": {
+ Type: schema.TypeBool,
+ Required: true,
+ Description: `Whether to enable GKE cost allocation. When you enable GKE cost allocation, the cluster name and namespace of your GKE workloads appear in the labels field of the billing export to BigQuery. Defaults to false.`,
+ },
+ },
+ },
+ },
+<% end -%>
+
"resource_usage_export_config": {
Type: schema.TypeList,
MaxItems: 1,
@@ -1666,6 +1702,9 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
ResourceLabels: expandStringMap(d, "resource_labels"),
<% unless version == 'ga' -%>
NodePoolAutoConfig: expandNodePoolAutoConfig(d.Get("node_pool_auto_config")),
+<% end -%>
+<% unless version == 'ga' -%>
+ CostManagementConfig: expandCostManagementConfig(d.Get("cost_management_config")),
<% end -%>
}
@@ -1755,6 +1794,10 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
cluster.VerticalPodAutoscaling = expandVerticalPodAutoscaling(v)
}
+ if v, ok := d.GetOk("service_external_ips_config"); ok {
+ cluster.NetworkConfig.ServiceExternalIpsConfig = expandServiceExternalIpsConfig(v)
+ }
+
if v, ok := d.GetOk("mesh_certificates"); ok {
cluster.MeshCertificates = expandMeshCertificates(v)
}
@@ -2045,6 +2088,11 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
if err := d.Set("enable_l4_ilb_subsetting", cluster.NetworkConfig.EnableL4ilbSubsetting); err != nil {
return fmt.Errorf("Error setting enable_l4_ilb_subsetting: %s", err)
}
+<% end -%>
+<% unless version == 'ga' -%>
+ if err := d.Set("cost_management_config", flattenManagementConfig(cluster.CostManagementConfig)); err != nil {
+ return fmt.Errorf("Error setting cost_management_config: %s", err)
+ }
<% end -%>
if err := d.Set("confidential_nodes", flattenConfidentialNodes(cluster.ConfidentialNodes)); err != nil {
return err
@@ -2118,6 +2166,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
}
<% end -%>
+ if err := d.Set("service_external_ips_config", flattenServiceExternalIpsConfig(cluster.NetworkConfig.ServiceExternalIpsConfig)); err != nil {
+ return err
+ }
+
if err := d.Set("mesh_certificates", flattenMeshCertificates(cluster.MeshCertificates)); err != nil {
return err
}
@@ -2455,6 +2507,25 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
}
<% end -%>
+<% unless version == 'ga' -%>
+ if d.HasChange("cost_management_config") {
+ c := d.Get("cost_management_config")
+ req := &container.UpdateClusterRequest{
+ Update: &container.ClusterUpdate{
+ DesiredCostManagementConfig: expandCostManagementConfig(c),
+ },
+ }
+
+ updateF := updateFunc(req, "updating cost management config")
+ // Call update serially.
+ if err := lockedCall(lockKey, updateF); err != nil {
+ return err
+ }
+
+ log.Printf("[INFO] GKE cluster %s cost management config has been updated", d.Id())
+ }
+
+<% end -%>
if d.HasChange("authenticator_groups_config") {
req := &container.UpdateClusterRequest{
Update: &container.ClusterUpdate{
@@ -2836,6 +2907,33 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
}
}
+ if d.HasChange("service_external_ips_config") {
+ c := d.Get("service_external_ips_config")
+ req := &container.UpdateClusterRequest{
+ Update: &container.ClusterUpdate{
+ DesiredServiceExternalIpsConfig: expandServiceExternalIpsConfig(c),
+ },
+ }
+
+ updateF := func() error {
+ name := containerClusterFullName(project, location, clusterName)
+ clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req)
+ if config.UserProjectOverride {
+ clusterUpdateCall.Header().Add("X-Goog-User-Project", project)
+ }
+ op, err := clusterUpdateCall.Do()
+ if err != nil {
+ return err
+ }
+ // Wait until it's updated
+ return containerOperationWait(config, op, project, location, "updating GKE cluster service externalips config", userAgent, d.Timeout(schema.TimeoutUpdate))
+ }
+ if err := lockedCall(lockKey, updateF); err != nil {
+ return err
+ }
+ log.Printf("[INFO] GKE cluster %s service externalips config has been updated", d.Id())
+ }
+
if d.HasChange("mesh_certificates") {
c := d.Get("mesh_certificates")
req := &container.UpdateClusterRequest{
@@ -3769,6 +3867,18 @@ func expandVerticalPodAutoscaling(configured interface{}) *container.VerticalPod
}
}
+func expandServiceExternalIpsConfig(configured interface{}) *container.ServiceExternalIPsConfig {
+ l := configured.([]interface{})
+ if len(l) == 0 {
+ return nil
+ }
+ config := l[0].(map[string]interface{})
+ return &container.ServiceExternalIPsConfig{
+ Enabled: config["enabled"].(bool),
+ ForceSendFields: []string{"Enabled"},
+ }
+}
+
func expandMeshCertificates(configured interface{}) *container.MeshCertificates {
l := configured.([]interface{})
if len(l) == 0 {
@@ -3883,6 +3993,22 @@ func expandDefaultMaxPodsConstraint(v interface{}) *container.MaxPodsConstraint
MaxPodsPerNode: int64(v.(int)),
}
}
+
+<% unless version == 'ga' -%>
+func expandCostManagementConfig(configured interface{}) *container.CostManagementConfig {
+ l := configured.([]interface{})
+ if len(l) == 0 {
+ return nil
+ }
+
+ config := l[0].(map[string]interface{})
+ return &container.CostManagementConfig{
+ Enabled: config["enabled"].(bool),
+ ForceSendFields: []string{"Enabled"},
+ }
+}
+<% end -%>
+
func expandResourceUsageExportConfig(configured interface{}) *container.ResourceUsageExportConfig {
l := configured.([]interface{})
if len(l) == 0 || l[0] == nil {
@@ -3955,7 +4081,7 @@ func expandMonitoringConfig(configured interface{}) *container.MonitoringConfig
EnableComponents: convertStringArr(enable_components),
}
}
-<% if version == 'beta' -%>
+<% unless version == 'ga' -%>
if v, ok := config["managed_prometheus"]; ok && len(v.([]interface{})) > 0 {
managed_prometheus := v.([]interface{})[0].(map[string]interface{})
mc.ManagedPrometheusConfig = &container.ManagedPrometheusConfig{
@@ -4513,6 +4639,17 @@ func flattenResourceUsageExportConfig(c *container.ResourceUsageExportConfig) []
}
}
+func flattenServiceExternalIpsConfig(c *container.ServiceExternalIPsConfig) []map[string]interface{} {
+ if c == nil {
+ return nil
+ }
+ return []map[string]interface{}{
+ {
+ "enabled": c.Enabled,
+ },
+ }
+}
+
func flattenMeshCertificates(c *container.MeshCertificates) []map[string]interface{} {
if c == nil {
return nil
@@ -4524,6 +4661,19 @@ func flattenMeshCertificates(c *container.MeshCertificates) []map[string]interfa
}
}
+<% unless version == 'ga' -%>
+func flattenManagementConfig(c *container.CostManagementConfig) []map[string]interface{} {
+ if c == nil {
+ return nil
+ }
+ return []map[string]interface{}{
+ {
+ "enabled": c.Enabled,
+ },
+ }
+}
+
+<% end -%>
func flattenDatabaseEncryption(c *container.DatabaseEncryption) []map[string]interface{} {
if c == nil {
return nil
@@ -4570,7 +4720,7 @@ func flattenMonitoringConfig(c *container.MonitoringConfig) []map[string]interfa
if c.ComponentConfig != nil {
result["enable_components"] = c.ComponentConfig.EnableComponents
}
-<% if version == 'beta' -%>
+<% unless version == 'ga' -%>
if c.ManagedPrometheusConfig != nil {
result["managed_prometheus"] = flattenManagedPrometheusConfig(c.ManagedPrometheusConfig)
}
@@ -4578,7 +4728,7 @@ func flattenMonitoringConfig(c *container.MonitoringConfig) []map[string]interfa
return []map[string]interface{}{result}
}
-<% if version == 'beta' -%>
+<% unless version == 'ga' -%>
func flattenManagedPrometheusConfig(c *container.ManagedPrometheusConfig) []map[string]interface{} {
return []map[string]interface{}{
{
diff --git a/mmv1/third_party/terraform/resources/resource_logging_sink.go b/mmv1/third_party/terraform/resources/resource_logging_sink.go
index 83cbf8b259f5..24a23e5c5a2a 100644
--- a/mmv1/third_party/terraform/resources/resource_logging_sink.go
+++ b/mmv1/third_party/terraform/resources/resource_logging_sink.go
@@ -45,7 +45,7 @@ func resourceLoggingSinkSchema() map[string]*schema.Schema {
"exclusions": {
Type: schema.TypeList,
Optional: true,
- Description: `Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion_filters it will not be exported.`,
+ Description: `Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion's filters, it will not be exported.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
diff --git a/mmv1/third_party/terraform/resources/data_source_google_folders_test.go b/mmv1/third_party/terraform/tests/data_source_google_folders_test.go
similarity index 100%
rename from mmv1/third_party/terraform/resources/data_source_google_folders_test.go
rename to mmv1/third_party/terraform/tests/data_source_google_folders_test.go
diff --git a/mmv1/third_party/terraform/resources/resource_cloud_identity_group_sweeper_test.go.erb b/mmv1/third_party/terraform/tests/resource_cloud_identity_group_sweeper_test.go.erb
similarity index 100%
rename from mmv1/third_party/terraform/resources/resource_cloud_identity_group_sweeper_test.go.erb
rename to mmv1/third_party/terraform/tests/resource_cloud_identity_group_sweeper_test.go.erb
diff --git a/mmv1/third_party/terraform/resources/resource_cloudfunction2_function_test.go.erb b/mmv1/third_party/terraform/tests/resource_cloudfunction2_function_test.go
similarity index 92%
rename from mmv1/third_party/terraform/resources/resource_cloudfunction2_function_test.go.erb
rename to mmv1/third_party/terraform/tests/resource_cloudfunction2_function_test.go
index 54a9efe0ccc6..cb50a746c6b4 100644
--- a/mmv1/third_party/terraform/resources/resource_cloudfunction2_function_test.go.erb
+++ b/mmv1/third_party/terraform/tests/resource_cloudfunction2_function_test.go
@@ -1,6 +1,4 @@
-<% autogen_exception -%>
package google
-<% unless version == 'ga' -%>
import (
"testing"
@@ -18,7 +16,7 @@ func TestAccCloudFunctions2Function_update(t *testing.T) {
vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProvidersOiCS,
+ Providers: testAccProviders,
CheckDestroy: testAccCheckCloudfunctions2functionDestroyProducer(t),
Steps: []resource.TestStep{
{
@@ -55,21 +53,18 @@ func TestAccCloudFunctions2Function_update(t *testing.T) {
func testAccCloudfunctions2function_basic(context map[string]interface{}) string {
return Nprintf(`
resource "google_storage_bucket" "bucket" {
- provider = google-beta
name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}"
location = "US"
uniform_bucket_level_access = true
}
resource "google_storage_bucket_object" "object" {
- provider = google-beta
name = "function-source.zip"
bucket = google_storage_bucket.bucket.name
source = "%{zip_path}"
}
resource "google_cloudfunctions2_function" "terraform-test2" {
- provider = google-beta
name = "tf-test-test-function%{random_suffix}"
location = "us-central1"
description = "a new function"
@@ -97,21 +92,18 @@ resource "google_cloudfunctions2_function" "terraform-test2" {
func testAccCloudFunctions2Function_test_update(context map[string]interface{}) string {
return Nprintf(`
resource "google_storage_bucket" "bucket" {
- provider = google-beta
name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}"
location = "US"
uniform_bucket_level_access = true
}
resource "google_storage_bucket_object" "object" {
- provider = google-beta
name = "function-source.zip"
bucket = google_storage_bucket.bucket.name
source = "%{zip_path}"
}
resource "google_cloudfunctions2_function" "terraform-test2" {
- provider = google-beta
name = "tf-test-test-function%{random_suffix}"
location = "us-central1"
description = "an updated function"
@@ -139,21 +131,18 @@ resource "google_cloudfunctions2_function" "terraform-test2" {
func testAccCloudFunctions2Function_test_redeploy(context map[string]interface{}) string {
return Nprintf(`
resource "google_storage_bucket" "bucket" {
- provider = google-beta
name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}"
location = "US"
uniform_bucket_level_access = true
}
resource "google_storage_bucket_object" "object" {
- provider = google-beta
name = "function-source.zip"
bucket = google_storage_bucket.bucket.name
source = "%{zip_path}"
}
resource "google_cloudfunctions2_function" "terraform-test2" {
- provider = google-beta
name = "tf-test-test-function%{random_suffix}"
location = "us-west1"
description = "function test"
@@ -189,18 +178,18 @@ func TestAccCloudFunctions2Function_fullUpdate(t *testing.T) {
t.Parallel()
context := map[string]interface{}{
- "project" : getTestProjectFromEnv(),
- "zip_path" : "./test-fixtures/cloudfunctions2/function-source-eventarc-gcs.zip",
- "random_suffix" : randString(t, 10),
+ "project": getTestProjectFromEnv(),
+ "zip_path": "./test-fixtures/cloudfunctions2/function-source-eventarc-gcs.zip",
+ "random_suffix": randString(t, 10),
}
vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProvidersOiCS,
+ Providers: testAccProviders,
CheckDestroy: testAccCheckCloudfunctions2functionDestroyProducer(t),
Steps: []resource.TestStep{
{
- // Re-use config from the generated tests
+ // Re-use config from the generated tests
Config: testAccCloudfunctions2function_cloudfunctions2BasicAuditlogsExample(context),
},
{
@@ -219,21 +208,18 @@ func testAccCloudfunctions2function_cloudfunctions2BasicAuditlogsExample_update(
# https://cloud.google.com/eventarc/docs/path-patterns
resource "google_storage_bucket" "source-bucket" {
- provider = google-beta
name = "tf-test-gcf-source-bucket%{random_suffix}"
location = "US"
uniform_bucket_level_access = true
}
resource "google_storage_bucket_object" "object" {
- provider = google-beta
name = "function-source.zip"
bucket = google_storage_bucket.source-bucket.name
source = "%{zip_path}" # Add path to the zipped function source code
}
resource "google_service_account" "account" {
- provider = google-beta
account_id = "tf-test-gcf-sa%{random_suffix}"
display_name = "Test Service Account - used for both the cloud function and eventarc trigger in the test"
}
@@ -242,7 +228,6 @@ resource "google_service_account" "account" {
# Here we use Audit Logs to monitor the bucket so path patterns can be used in the example of
# google_cloudfunctions2_function below (Audit Log events have path pattern support)
resource "google_storage_bucket" "audit-log-bucket" {
- provider = google-beta
name = "tf-test-gcf-auditlog-bucket%{random_suffix}"
location = "us-central1" # The trigger must be in the same location as the bucket
uniform_bucket_level_access = true
@@ -250,14 +235,12 @@ resource "google_storage_bucket" "audit-log-bucket" {
# Permissions on the service account used by the function and Eventarc trigger
resource "google_project_iam_member" "invoking" {
- provider = google-beta
project = "%{project}"
role = "roles/run.invoker"
member = "serviceAccount:${google_service_account.account.email}"
}
resource "google_project_iam_member" "event-receiving" {
- provider = google-beta
project = "%{project}"
role = "roles/eventarc.eventReceiver"
member = "serviceAccount:${google_service_account.account.email}"
@@ -265,7 +248,6 @@ resource "google_project_iam_member" "event-receiving" {
}
resource "google_project_iam_member" "artifactregistry-reader" {
- provider = google-beta
project = "%{project}"
role = "roles/artifactregistry.reader"
member = "serviceAccount:${google_service_account.account.email}"
@@ -273,7 +255,6 @@ resource "google_project_iam_member" "artifactregistry-reader" {
}
resource "google_cloudfunctions2_function" "function" {
- provider = google-beta
depends_on = [
google_project_iam_member.event-receiving,
google_project_iam_member.artifactregistry-reader,
@@ -330,5 +311,3 @@ resource "google_cloudfunctions2_function" "function" {
}
# [END functions_v2_basic_auditlogs]`, context)
}
-
-<% end -%>
diff --git a/mmv1/third_party/terraform/resources/resource_compute_instance_iam_test.go b/mmv1/third_party/terraform/tests/resource_compute_instance_iam_test.go
similarity index 100%
rename from mmv1/third_party/terraform/resources/resource_compute_instance_iam_test.go
rename to mmv1/third_party/terraform/tests/resource_compute_instance_iam_test.go
diff --git a/mmv1/third_party/terraform/tests/resource_compute_instance_template_test.go.erb b/mmv1/third_party/terraform/tests/resource_compute_instance_template_test.go.erb
index 27743148998b..5bf9f9a106a6 100644
--- a/mmv1/third_party/terraform/tests/resource_compute_instance_template_test.go.erb
+++ b/mmv1/third_party/terraform/tests/resource_compute_instance_template_test.go.erb
@@ -2536,6 +2536,9 @@ resource "google_compute_instance_template" "foobar" {
advanced_machine_features {
threads_per_core = 1
enable_nested_virtualization = true
+<% unless version == "ga" -%>
+ visible_core_count = 1
+<% end -%>
}
scheduling {
diff --git a/mmv1/third_party/terraform/tests/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/tests/resource_compute_instance_test.go.erb
index 6a1dc36a3833..7483ff87d64b 100644
--- a/mmv1/third_party/terraform/tests/resource_compute_instance_test.go.erb
+++ b/mmv1/third_party/terraform/tests/resource_compute_instance_test.go.erb
@@ -1260,6 +1260,19 @@ func TestAccComputeInstance_private_image_family(t *testing.T) {
}
<% unless version == 'ga' -%>
func TestAccComputeInstance_networkPerformanceConfig(t *testing.T) {
+ // This test /should/ be passing but the reason it's failing
+ // is very non-obvious and requires further investigation
+ //
+ // It's been failing in teamcity for > 90d so there is no
+ // starting point or obvious reason to potentially pivot off
+ //
+ // For whoever decides to investigate this. It looks like
+ // at the time the failure is due to a failure to start
+ // the compute instance after a config update. This results
+ // in it /unable to find the resource/ as the start operation
+ // never completes successful. I suspect a bad configuration
+ // but am unsure.
+ skipIfVcr(t)
t.Parallel()
var instance compute.Instance
@@ -4742,6 +4755,9 @@ resource "google_compute_instance" "foobar" {
advanced_machine_features {
threads_per_core = 1
enable_nested_virtualization = true
+<% unless version == 'ga' -%>
+ visible_core_count = 1
+<% end -%>
}
allow_stopping_for_update = true
}
diff --git a/mmv1/third_party/terraform/tests/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/tests/resource_container_cluster_test.go.erb
index 8f78a2571a26..f09d2846630b 100755
--- a/mmv1/third_party/terraform/tests/resource_container_cluster_test.go.erb
+++ b/mmv1/third_party/terraform/tests/resource_container_cluster_test.go.erb
@@ -2624,6 +2624,37 @@ func TestAccContainerCluster_errorNoClusterCreated(t *testing.T) {
})
}
+func TestAccContainerCluster_withExternalIpsConfig(t *testing.T) {
+ t.Parallel()
+
+ clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+ pid := getTestProjectFromEnv()
+
+ vcrTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccContainerCluster_withExternalIpsConfig(pid, clusterName, true),
+ },
+ {
+ ResourceName: "google_container_cluster.with_external_ips_config",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ {
+ Config: testAccContainerCluster_withExternalIpsConfig(pid, clusterName, false),
+ },
+ {
+ ResourceName: "google_container_cluster.with_external_ips_config",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
func TestAccContainerCluster_withMeshCertificatesConfig(t *testing.T) {
t.Parallel()
@@ -2666,6 +2697,39 @@ func TestAccContainerCluster_withMeshCertificatesConfig(t *testing.T) {
})
}
+<% unless version == 'ga' -%>
+func TestAccContainerCluster_withCostManagementConfig(t *testing.T) {
+ t.Parallel()
+
+ clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+ pid := getTestProjectFromEnv()
+
+ vcrTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccContainerCluster_updateCostManagementConfig(pid, clusterName, true),
+ },
+ {
+ ResourceName: "google_container_cluster.with_cost_management_config",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ {
+ Config: testAccContainerCluster_updateCostManagementConfig(pid, clusterName, false),
+ },
+ {
+ ResourceName: "google_container_cluster.with_cost_management_config",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+<% end -%>
func TestAccContainerCluster_withDatabaseEncryption(t *testing.T) {
t.Parallel()
@@ -5476,6 +5540,22 @@ resource "google_container_cluster" "with_resource_labels" {
`, location)
}
+func testAccContainerCluster_withExternalIpsConfig(projectID string, clusterName string, enabled bool) string {
+ return fmt.Sprintf(`
+ data "google_project" "project" {
+ project_id = "%s"
+ }
+
+ resource "google_container_cluster" "with_external_ips_config" {
+ name = "%s"
+ location = "us-central1-a"
+ initial_node_count = 1
+ service_external_ips_config {
+ enabled = %v
+ }
+ }`, projectID, clusterName, enabled)
+}
+
func testAccContainerCluster_withMeshCertificatesConfigEnabled(projectID string, clusterName string) string {
return fmt.Sprintf(`
data "google_project" "project" {
@@ -5517,6 +5597,24 @@ func testAccContainerCluster_updateMeshCertificatesConfig(projectID string, clus
}`, projectID, clusterName, enabled)
}
+<% unless version == 'ga' -%>
+func testAccContainerCluster_updateCostManagementConfig(projectID string, clusterName string, enabled bool) string {
+ return fmt.Sprintf(`
+ data "google_project" "project" {
+ project_id = "%s"
+ }
+
+ resource "google_container_cluster" "with_cost_management_config" {
+ name = "%s"
+ location = "us-central1-a"
+ initial_node_count = 1
+ cost_management_config {
+ enabled = %v
+ }
+ }`, projectID, clusterName, enabled)
+}
+
+<% end -%>
func testAccContainerCluster_withDatabaseEncryption(clusterName string, kmsData bootstrappedKMS) string {
return fmt.Sprintf(`
data "google_project" "project" {
@@ -6019,4 +6117,4 @@ func TestValidateNodePoolAutoConfig(t *testing.T) {
}
}
}
-<% end -%>
\ No newline at end of file
+<% end -%>
diff --git a/mmv1/third_party/terraform/tests/resource_dns_response_policy_rule_test.go.erb b/mmv1/third_party/terraform/tests/resource_dns_response_policy_rule_test.go.erb
index bdde9efb232a..4f6f1dd07abf 100644
--- a/mmv1/third_party/terraform/tests/resource_dns_response_policy_rule_test.go.erb
+++ b/mmv1/third_party/terraform/tests/resource_dns_response_policy_rule_test.go.erb
@@ -39,6 +39,117 @@ func TestAccDNSResponsePolicyRule_update(t *testing.T) {
})
}
+func TestAccDNSResponsePolicyRuleBehavior_update(t *testing.T) {
+ t.Parallel()
+
+ responsePolicyRuleSuffix := randString(t, 10)
+
+ vcrTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProvidersOiCS,
+ CheckDestroy: testAccCheckDNSResponsePolicyRuleDestroyProducer(t),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccDnsResponsePolicyRuleBehavior_unspecified(responsePolicyRuleSuffix, "network-1"),
+ },
+ {
+ ResourceName: "google_dns_response_policy_rule.example-response-policy-rule-behavior",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ {
+ Config: testAccDnsResponsePolicyRuleBehavior_byPass(responsePolicyRuleSuffix, "network-1"),
+ },
+ {
+ ResourceName: "google_dns_response_policy_rule.example-response-policy-rule-behavior",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ {
+ Config: testAccDnsResponsePolicyRuleBehavior_unspecified(responsePolicyRuleSuffix, "network-1"),
+ },
+ {
+ ResourceName: "google_dns_response_policy_rule.example-response-policy-rule-behavior",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func testAccDnsResponsePolicyRuleBehavior_unspecified(suffix, network string) string {
+ return fmt.Sprintf(`
+
+resource "google_compute_network" "network-1" {
+ provider = google-beta
+
+ name = "tf-test-network-1-%s"
+ auto_create_subnetworks = false
+}
+
+resource "google_dns_response_policy" "response-policy" {
+ provider = google-beta
+
+ response_policy_name = "tf-test-response-policy-%s"
+
+ networks {
+ network_url = google_compute_network.%s.self_link
+ }
+}
+
+resource "google_dns_response_policy_rule" "example-response-policy-rule-behavior" {
+ provider = google-beta
+
+ response_policy = google_dns_response_policy.response-policy.response_policy_name
+ rule_name = "tf-test-response-policy-rule-%s"
+ dns_name = "dns.example.com."
+
+ local_data {
+ local_datas {
+ name = "dns.example.com."
+ type = "A"
+ ttl = 300
+ rrdatas = ["192.0.2.91"]
+ }
+ }
+}
+
+`, suffix, suffix, network, suffix)
+}
+
+func testAccDnsResponsePolicyRuleBehavior_byPass(suffix, network string) string {
+ return fmt.Sprintf(`
+
+resource "google_compute_network" "network-1" {
+ provider = google-beta
+
+ name = "tf-test-network-1-%s"
+ auto_create_subnetworks = false
+}
+
+resource "google_dns_response_policy" "response-policy" {
+ provider = google-beta
+
+ response_policy_name = "tf-test-response-policy-%s"
+
+ networks {
+ network_url = google_compute_network.%s.self_link
+ }
+}
+
+resource "google_dns_response_policy_rule" "example-response-policy-rule-behavior" {
+ provider = google-beta
+
+ behavior = "bypassResponsePolicy"
+ dns_name = "dns.example.com."
+ rule_name = "tf-test-response-policy-rule-%s"
+ response_policy = google_dns_response_policy.response-policy.response_policy_name
+
+}
+`, suffix, suffix, network, suffix)
+}
+
+
func testAccDnsResponsePolicyRule_privateUpdate(suffix, network string) string {
return fmt.Sprintf(`
resource "google_compute_network" "network-1" {
@@ -80,7 +191,7 @@ resource "google_dns_response_policy_rule" "example-response-policy-rule" {
rrdatas = ["192.0.2.91"]
}
}
-}
+}
`, suffix, suffix, suffix, network, suffix)
}
<% end -%>
diff --git a/mmv1/third_party/terraform/utils/compute_instance_helpers.go.erb b/mmv1/third_party/terraform/utils/compute_instance_helpers.go.erb
index 2535ef668f8b..915426715a4e 100644
--- a/mmv1/third_party/terraform/utils/compute_instance_helpers.go.erb
+++ b/mmv1/third_party/terraform/utils/compute_instance_helpers.go.erb
@@ -401,6 +401,9 @@ func expandAdvancedMachineFeatures(d TerraformResourceData) *compute.AdvancedMac
return &compute.AdvancedMachineFeatures{
EnableNestedVirtualization: d.Get(prefix + ".enable_nested_virtualization").(bool),
ThreadsPerCore: int64(d.Get(prefix + ".threads_per_core").(int)),
+<% unless version == 'ga' -%>
+ VisibleCoreCount: int64(d.Get(prefix + ".visible_core_count").(int)),
+<% end -%>
}
}
@@ -411,6 +414,9 @@ func flattenAdvancedMachineFeatures(AdvancedMachineFeatures *compute.AdvancedMac
return []map[string]interface{}{{
"enable_nested_virtualization": AdvancedMachineFeatures.EnableNestedVirtualization,
"threads_per_core": AdvancedMachineFeatures.ThreadsPerCore,
+<% unless version == 'ga' -%>
+ "visible_core_count": AdvancedMachineFeatures.VisibleCoreCount,
+<% end -%>
}}
}
diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown
index 54c0512e310e..80ae4e0ea189 100644
--- a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown
+++ b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown
@@ -414,6 +414,8 @@ specified, then this instance will have no external IPv6 Internet access. Struct
* `threads_per_core` (Optional) he number of threads per physical core. To disable [simultaneous multithreading (SMT)](https://cloud.google.com/compute/docs/instances/disabling-smt) set this to 1.
+* `visible_core_count` (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The number of physical cores to expose to an instance. [visible cores info (VC)](https://cloud.google.com/compute/docs/instances/customize-visible-cores).
+
The `reservation_affinity` block supports:
* `type` - (Required) The type of reservation from which this instance can consume resources.
diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown
index 87a6d02a95c1..ee1228c9cee3 100644
--- a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown
+++ b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown
@@ -577,7 +577,9 @@ The `specific_reservation` block supports:
* `enable_nested_virtualization` (Optional) Defines whether the instance should have [nested virtualization](#on_host_maintenance) enabled. Defaults to false.
-* `threads_per_core` (Optional) he number of threads per physical core. To disable [simultaneous multithreading (SMT)](https://cloud.google.com/compute/docs/instances/disabling-smt) set this to 1.
+* `threads_per_core` (Optional) The number of threads per physical core. To disable [simultaneous multithreading (SMT)](https://cloud.google.com/compute/docs/instances/disabling-smt) set this to 1.
+
+* `visible_core_count` (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The number of physical cores to expose to an instance. [visible cores info (VC)](https://cloud.google.com/compute/docs/instances/customize-visible-cores).
## Attributes Reference
diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown
index 203854b02e73..75eb35a243c8 100755
--- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown
+++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown
@@ -137,6 +137,9 @@ for more details. Structure is [documented below](#nested_cluster_autoscaling).
* `binary_authorization` - (Optional) Configuration options for the Binary
Authorization feature. Structure is [documented below](#nested_binary_authorization).
+* `service_external_ips_config` - (Optional)
+ Structure is [documented below](#nested_service_external_ips_config).
+
* `mesh_certificates` - (Optional)
Structure is [documented below](#nested_mesh_encryption).
@@ -427,6 +430,10 @@ addons_config {
and `PROJECT_SINGLETON_POLICY_ENFORCE`. `PROJECT_SINGLETON_POLICY_ENFORCE` is functionally equivalent to the
deprecated `enable_binary_authorization` parameter being set to `true`.
+The `service_external_ips_config` block supports:
+
+* `enabled` - (Required) Controls whether external ips specified by a service will be allowed. It is enabled by default.
+
The `mesh_certificates` block supports:
* `enable_certificates` - (Required) Controls the issuance of workload mTLS certificates. It is enabled by default. Workload Identity is required, see [workload_config](#nested_workload_identity_config).
diff --git a/mmv1/third_party/terraform/website/docs/r/logging_billing_account_sink.html.markdown b/mmv1/third_party/terraform/website/docs/r/logging_billing_account_sink.html.markdown
index afacbe331938..0f013332f93b 100644
--- a/mmv1/third_party/terraform/website/docs/r/logging_billing_account_sink.html.markdown
+++ b/mmv1/third_party/terraform/website/docs/r/logging_billing_account_sink.html.markdown
@@ -53,12 +53,12 @@ The following arguments are supported:
* `destination` - (Required) The destination of the sink (or, in other words, where logs are written to). Can be a
Cloud Storage bucket, a PubSub topic, a BigQuery dataset or a Cloud Logging bucket. Examples:
-```
-"storage.googleapis.com/[GCS_BUCKET]"
-"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]"
-"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]"
-"logging.googleapis.com/projects/[PROJECT_ID]]/locations/global/buckets/[BUCKET_ID]"
-```
+
+ - `storage.googleapis.com/[GCS_BUCKET]`
+ - `bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]`
+ - `pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]`
+ - `logging.googleapis.com/projects/[PROJECT_ID]]/locations/global/buckets/[BUCKET_ID]`
+
The writer associated with the sink must have access to write to the above resource.
* `filter` - (Optional) The filter to apply when exporting logs. Only log entries that match the filter are exported.
@@ -71,13 +71,13 @@ The following arguments are supported:
* `bigquery_options` - (Optional) Options that affect sinks exporting data to BigQuery. Structure [documented below](#nested_bigquery_options).
-* `exclusions` - (Optional) Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion_filters it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is [documented below](#nested_exclusions).
+* `exclusions` - (Optional) Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both `filter` and one of `exclusions.filter`, it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is [documented below](#nested_exclusions).
The `bigquery_options` block supports:
* `use_partitioned_tables` - (Required) Whether to use [BigQuery's partition tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
By default, Logging creates dated tables based on the log entries' timestamps, e.g. syslog_20170523. With partitioned
- tables the date suffix is no longer present and [special query syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables)
+ tables, the date suffix is no longer present and [special query syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables)
has to be used instead. In both cases, tables are sharded based on UTC timezone.
The `exclusions` block supports:
diff --git a/mmv1/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown b/mmv1/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown
index 54410e887079..613f0fb227b9 100644
--- a/mmv1/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown
+++ b/mmv1/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown
@@ -54,17 +54,17 @@ The following arguments are supported:
* `name` - (Required) The name of the logging sink.
-* `folder` - (Required) The folder to be exported to the sink. Note that either [FOLDER_ID] or "folders/[FOLDER_ID]" is
+* `folder` - (Required) The folder to be exported to the sink. Note that either `[FOLDER_ID]` or `folders/[FOLDER_ID]` is
accepted.
* `destination` - (Required) The destination of the sink (or, in other words, where logs are written to). Can be a
Cloud Storage bucket, a PubSub topic, a BigQuery dataset or a Cloud Logging bucket. Examples:
-```
-"storage.googleapis.com/[GCS_BUCKET]"
-"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]"
-"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]"
-"logging.googleapis.com/projects/[PROJECT_ID]]/locations/global/buckets/[BUCKET_ID]"
-```
+
+ - `storage.googleapis.com/[GCS_BUCKET]`
+ - `bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]`
+ - `pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]`
+ - `logging.googleapis.com/projects/[PROJECT_ID]]/locations/global/buckets/[BUCKET_ID]`
+
The writer associated with the sink must have access to write to the above resource.
* `filter` - (Optional) The filter to apply when exporting logs. Only log entries that match the filter are exported.
@@ -80,13 +80,13 @@ The following arguments are supported:
* `bigquery_options` - (Optional) Options that affect sinks exporting data to BigQuery. Structure [documented below](#nested_bigquery_options).
-* `exclusions` - (Optional) Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion_filters it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is [documented below](#nested_exclusions).
+* `exclusions` - (Optional) Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both `filter` and one of `exclusions.filter`, it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is [documented below](#nested_exclusions).
The `bigquery_options` block supports:
* `use_partitioned_tables` - (Required) Whether to use [BigQuery's partition tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
By default, Logging creates dated tables based on the log entries' timestamps, e.g. syslog_20170523. With partitioned
- tables the date suffix is no longer present and [special query syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables)
+ tables, the date suffix is no longer present and [special query syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables)
has to be used instead. In both cases, tables are sharded based on UTC timezone.
The `exclusions` block supports:
diff --git a/mmv1/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown b/mmv1/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown
index 6e3507ee5f42..adb78043043f 100644
--- a/mmv1/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown
+++ b/mmv1/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown
@@ -49,12 +49,12 @@ The following arguments are supported:
* `destination` - (Required) The destination of the sink (or, in other words, where logs are written to). Can be a
Cloud Storage bucket, a PubSub topic, a BigQuery dataset or a Cloud Logging bucket. Examples:
-```
-"storage.googleapis.com/[GCS_BUCKET]"
-"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]"
-"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]"
-"logging.googleapis.com/projects/[PROJECT_ID]]/locations/global/buckets/[BUCKET_ID]"
-```
+
+ - `storage.googleapis.com/[GCS_BUCKET]`
+ - `bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]`
+ - `pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]`
+ - `logging.googleapis.com/projects/[PROJECT_ID]]/locations/global/buckets/[BUCKET_ID]`
+
The writer associated with the sink must have access to write to the above resource.
* `filter` - (Optional) The filter to apply when exporting logs. Only log entries that match the filter are exported.
@@ -70,7 +70,7 @@ The following arguments are supported:
* `bigquery_options` - (Optional) Options that affect sinks exporting data to BigQuery. Structure [documented below](#nested_bigquery_options).
-* `exclusions` - (Optional) Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion_filters it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is [documented below](#nested_exclusions).
+* `exclusions` - (Optional) Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both `filter` and one of `exclusions.filter`, it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is [documented below](#nested_exclusions).
The `bigquery_options` block supports:
diff --git a/mmv1/third_party/terraform/website/docs/r/logging_project_sink.html.markdown b/mmv1/third_party/terraform/website/docs/r/logging_project_sink.html.markdown
index 9ed89ed35209..06062c682a66 100644
--- a/mmv1/third_party/terraform/website/docs/r/logging_project_sink.html.markdown
+++ b/mmv1/third_party/terraform/website/docs/r/logging_project_sink.html.markdown
@@ -37,9 +37,10 @@ resource "google_logging_project_sink" "my-sink" {
```
A more complete example follows: this creates a compute instance, as well as a log sink that logs all activity to a
-cloud storage bucket. Because we are using `unique_writer_identity`, we must grant it access to the bucket. Note that
-this grant requires the "Project IAM Admin" IAM role (`roles/resourcemanager.projectIamAdmin`) granted to the credentials
-used with terraform.
+cloud storage bucket. Because we are using `unique_writer_identity`, we must grant it access to the bucket.
+
+Note that this grant requires the "Project IAM Admin" IAM role (`roles/resourcemanager.projectIamAdmin`) granted to the
+credentials used with Terraform.
```hcl
# Our logged compute instance
@@ -122,12 +123,12 @@ The following arguments are supported:
* `destination` - (Required) The destination of the sink (or, in other words, where logs are written to). Can be a
Cloud Storage bucket, a PubSub topic, a BigQuery dataset or a Cloud Logging bucket . Examples:
-```
-"storage.googleapis.com/[GCS_BUCKET]"
-"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]"
-"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]"
-"logging.googleapis.com/projects/[PROJECT_ID]]/locations/global/buckets/[BUCKET_ID]"
-```
+
+ - `storage.googleapis.com/[GCS_BUCKET]`
+ - `bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]`
+ - `pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]`
+ - `logging.googleapis.com/projects/[PROJECT_ID]/locations/global/buckets/[BUCKET_ID]`
+
The writer associated with the sink must have access to write to the above resource.
* `filter` - (Optional) The filter to apply when exporting logs. Only log entries that match the filter are exported.
@@ -148,12 +149,12 @@ The following arguments are supported:
* `bigquery_options` - (Optional) Options that affect sinks exporting data to BigQuery. Structure [documented below](#nested_bigquery_options).
-* `exclusions` - (Optional) Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion_filters it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is [documented below](#nested_exclusions).
+* `exclusions` - (Optional) Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both `filter` and one of `exclusions.filter`, it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is [documented below](#nested_exclusions).
The `bigquery_options` block supports:
* `use_partitioned_tables` - (Required) Whether to use [BigQuery's partition tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
- By default, Logging creates dated tables based on the log entries' timestamps, e.g. syslog_20170523. With partitioned
+ By default, Logging creates dated tables based on the log entries' timestamps, e.g. `syslog_20170523`. With partitioned
tables the date suffix is no longer present and [special query syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables)
has to be used instead. In both cases, tables are sharded based on UTC timezone.