diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.tmpl b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.tmpl
index e1fa43b702d5..c3e9ee9cc63d 100644
--- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.tmpl
+++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.tmpl
@@ -549,6 +549,24 @@ func ResourceStorageBucket() *schema.Resource {
 					},
 				},
 			},
+			"hierarchical_namespace": {
+				Type:             schema.TypeList,
+				MaxItems:         1,
+				Optional:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: hierachicalNamespaceDiffSuppress,
+				Description:      `The bucket's hierarchical namespace (HNS) configuration, which enables the bucket to organize objects into a logical file system structure using folders.`,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"enabled": {
+							Type:        schema.TypeBool,
+							Required:    true,
+							ForceNew:    true,
+							Description: `Set this field to true to enable hierarchical namespace (folders in a logical file system structure) for the bucket. The API default is false.`,
+						},
+					},
+				},
+			},
 		},
 		UseJSONNumber: true,
 	}
@@ -696,6 +714,10 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error
 		sb.SoftDeletePolicy = expandBucketSoftDeletePolicy(v.([]interface{}))
 	}
 
+	if v, ok := d.GetOk("hierarchical_namespace"); ok {
+		sb.HierarchicalNamespace = expandBucketHierachicalNamespace(v.([]interface{}))
+	}
+
 	var res *storage.Bucket
 
 	err = transport_tpg.Retry(transport_tpg.RetryOptions{
@@ -1294,6 +1316,38 @@ func flattenBucketSoftDeletePolicy(softDeletePolicy *storage.BucketSoftDeletePol
 	return policies
 }
 
+func expandBucketHierachicalNamespace(configured interface{}) *storage.BucketHierarchicalNamespace {
+	configuredHierachicalNamespace := configured.([]interface{})
+	if len(configuredHierachicalNamespace) == 0 {
+		return nil
+	}
+	configuredHierachicalNamespacePolicy := configuredHierachicalNamespace[0].(map[string]interface{})
+	hierachicalNamespacePolicy := &storage.BucketHierarchicalNamespace{
+		Enabled: 
(configuredHierachicalNamespacePolicy["enabled"].(bool)),
+	}
+	hierachicalNamespacePolicy.ForceSendFields = append(hierachicalNamespacePolicy.ForceSendFields, "Enabled")
+	return hierachicalNamespacePolicy
+}
+
+func flattenBucketHierarchicalNamespacePolicy(hierachicalNamespacePolicy *storage.BucketHierarchicalNamespace) []map[string]interface{} {
+	policies := make([]map[string]interface{}, 0, 1)
+	if hierachicalNamespacePolicy == nil {
+		// A null object returned from the API is equivalent to a block with enabled = false;
+		// to handle this consistently, always write a null response as a hydrated block with false.
+		defaultPolicy := map[string]interface{}{
+			"enabled": false,
+		}
+
+		policies = append(policies, defaultPolicy)
+		return policies
+	}
+	policy := map[string]interface{}{
+		"enabled": hierachicalNamespacePolicy.Enabled,
+	}
+	policies = append(policies, policy)
+	return policies
+}
+
 func expandBucketVersioning(configured interface{}) *storage.BucketVersioning {
 	versionings := configured.([]interface{})
 	if len(versionings) == 0 {
@@ -1885,6 +1939,9 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res
 	if err := d.Set("soft_delete_policy", flattenBucketSoftDeletePolicy(res.SoftDeletePolicy)); err != nil {
 		return fmt.Errorf("Error setting soft_delete_policy: %s", err)
 	}
+	if err := d.Set("hierarchical_namespace", flattenBucketHierarchicalNamespacePolicy(res.HierarchicalNamespace)); err != nil {
+		return fmt.Errorf("Error setting hierarchical_namespace: %s", err)
+	}
 	if res.IamConfiguration != nil && res.IamConfiguration.UniformBucketLevelAccess != nil {
 		if err := d.Set("uniform_bucket_level_access", res.IamConfiguration.UniformBucketLevelAccess.Enabled); err != nil {
 			return fmt.Errorf("Error setting uniform_bucket_level_access: %s", err)
@@ -1914,3 +1971,14 @@
 	d.SetId(res.Id)
 	return nil
 }
+
+func hierachicalNamespaceDiffSuppress(k, old, new string, 
r *schema.ResourceData) bool { + if k == "hierarchical_namespace.#" && old == "1" && new == "0" { + o, _ := r.GetChange("hierarchical_namespace.0.enabled") + if !o.(bool) { + return true + } + } + + return false +} diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.tmpl b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.tmpl index 19f6a9feae4d..b5df505fc00c 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.tmpl +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.tmpl @@ -9,6 +9,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" @@ -1545,6 +1546,147 @@ func TestAccStorageBucket_SoftDeletePolicy(t *testing.T) { }) } +// testcase to create HNS bucket and +// forcenew to recreate the bucket if HNS set to false +func TestAccStorageBucket_basic_hns(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic_hns(bucketName, true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "hierarchical_namespace.0.enabled", "true"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_basic_hns_with_data(bucketName, false), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_uniformBucketAccessOnly(bucketName, true), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_hns_force_destroy(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic_hns_with_data(bucketName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketPutFolderItem(t, bucketName), + ), + }, + }, + }) +} + +func testAccCheckStorageBucketPutFolderItem(t *testing.T, bucketName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + data := bytes.NewBufferString("test") + dataReader := bytes.NewReader(data.Bytes()) + folderName := fmt.Sprintf("tf-test/tf-test-folder-%d/", acctest.RandInt(t)) + emptyfolderName := fmt.Sprintf("tf-test/tf-test-folder-%d/", acctest.RandInt(t)) + object := &storage.Object{Name: folderName + "bucketDestroyTestFile"} + + folder := storage.Folder{ + Bucket: bucketName, + Name: folderName, + } + + emptyFolder := storage.Folder{ + Bucket: bucketName, + Name: emptyfolderName, + } + + if res, err := config.NewStorageClient(config.UserAgent).Folders.Insert(bucketName, &folder).Recursive(true).Do(); err == nil { + log.Printf("[INFO] Created folder %v at location %v\n\n", res.Name, res.SelfLink) + } else { + return fmt.Errorf("Folders.Insert failed: %v", err) + } + + // This needs to use Media(io.Reader) 
call, otherwise it does not go to /upload API and fails + if res, err := config.NewStorageClient(config.UserAgent).Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil { + log.Printf("[INFO] Created object %v at location %v\n\n", res.Name, res.SelfLink) + } else { + return fmt.Errorf("Objects.Insert failed: %v", err) + } + + if res, err := config.NewStorageClient(config.UserAgent).Folders.Insert(bucketName, &emptyFolder).Recursive(true).Do(); err == nil { + log.Printf("[INFO] Created folder %v at location %v\n\n", res.Name, res.SelfLink) + } else { + return fmt.Errorf("Folders.Insert failed: %v", err) + } + + return nil + } +} + +func testAccStorageBucket_basic_hns(bucketName string, enabled bool) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true + hierarchical_namespace { + enabled = %t + } +} +`, bucketName, enabled) +} + +func testAccStorageBucket_basic_hns_with_data(bucketName string, enabled bool) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true + hierarchical_namespace { + enabled = %t + } + force_destroy= true +} +`, bucketName, enabled) +} + func testAccCheckStorageBucketExists(t *testing.T, n string, bucketName string, bucket *storage.Bucket) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown b/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown index db1b5e7e8aed..a509e780d2ba 100644 --- a/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown @@ -103,6 +103,20 @@ resource "google_storage_bucket" "auto-expire" { } ``` +## Example Usage - Enabling hierarchical namespace + +```hcl +resource 
"google_storage_bucket" "auto-expire" {
+  name          = "hns-enabled-bucket"
+  location      = "US"
+  force_destroy = true
+
+  hierarchical_namespace {
+    enabled = true
+  }
+}
+```
+
 ## Argument Reference
 
 The following arguments are supported:
@@ -157,6 +171,8 @@ The following arguments are supported:
 
 * `soft_delete_policy` - (Optional, Computed) The bucket's soft delete policy, which defines the period of time that soft-deleted objects will be retained, and cannot be permanently deleted. If the block is not provided, Server side value will be kept which means removal of block won't generate any terraform change. Structure is [documented below](#nested_soft_delete_policy).
 
+* `hierarchical_namespace` - (Optional, ForceNew) The bucket's hierarchical namespace policy, which enables the bucket to organize folders in a logical file system structure. Structure is [documented below](#nested_hierarchical_namespace).
+
 The `lifecycle_rule` block supports:
 
 * `action` - (Required) The Lifecycle Rule's action configuration. A single block of this type is supported. Structure is [documented below](#nested_action).
@@ -269,6 +285,12 @@ The following arguments are supported:
 
 * `effective_time` - (Computed) Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format.
 
+<a name="nested_hierarchical_namespace"></a>The `hierarchical_namespace` block supports:
+
+* `enabled` - (Required) Enables hierarchical namespace for the bucket.
+To use this flag, you must also set `uniform_bucket_level_access` to `true`.
+
+
 ## Attributes Reference
 
 In addition to the arguments listed above, the following computed attributes are