Skip to content

Commit

Permalink
feat: add HNS support for storage bucket (GoogleCloudPlatform#11852)
Browse files Browse the repository at this point in the history
Co-authored-by: Riley Karson <[email protected]>
  • Loading branch information
2 people authored and varshatumburu committed Oct 19, 2024
1 parent c40f6c8 commit f7d4144
Show file tree
Hide file tree
Showing 3 changed files with 232 additions and 0 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -549,6 +549,24 @@ func ResourceStorageBucket() *schema.Resource {
},
},
},
"hierarchical_namespace": {
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
ForceNew: true,
DiffSuppressFunc: hierachicalNamespaceDiffSuppress,
Description: `The bucket's HNS support, which defines bucket can organize folders in logical file system structure`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"enabled": {
Type: schema.TypeBool,
Required: true,
ForceNew: true,
Description: `Set this enabled flag to true when folders with logical files structure. Default value is false.`,
},
},
},
},
},
UseJSONNumber: true,
}
Expand Down Expand Up @@ -696,6 +714,10 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error
sb.SoftDeletePolicy = expandBucketSoftDeletePolicy(v.([]interface{}))
}

if v, ok := d.GetOk("hierarchical_namespace"); ok {
sb.HierarchicalNamespace = expandBucketHierachicalNamespace(v.([]interface{}))
}

var res *storage.Bucket

err = transport_tpg.Retry(transport_tpg.RetryOptions{
Expand Down Expand Up @@ -1294,6 +1316,38 @@ func flattenBucketSoftDeletePolicy(softDeletePolicy *storage.BucketSoftDeletePol
return policies
}

// expandBucketHierachicalNamespace converts the Terraform configuration list
// for the hierarchical_namespace block into its API representation.
// Returns nil when the block is absent from the configuration.
func expandBucketHierachicalNamespace(configured interface{}) *storage.BucketHierarchicalNamespace {
	raw := configured.([]interface{})
	if len(raw) == 0 {
		return nil
	}
	block := raw[0].(map[string]interface{})
	// Force-send Enabled so an explicit `enabled = false` is serialized to the
	// API rather than being dropped as a zero value.
	return &storage.BucketHierarchicalNamespace{
		Enabled:         block["enabled"].(bool),
		ForceSendFields: []string{"Enabled"},
	}
}

// flattenBucketHierarchicalNamespacePolicy converts the API representation of
// a bucket's hierarchical namespace configuration into the single-element
// list form used by the Terraform schema.
//
// A null object returned from the API is equivalent to a block with
// enabled = false; to handle this consistently, a null response is always
// written back as a hydrated block with enabled = false.
func flattenBucketHierarchicalNamespacePolicy(hierachicalNamespacePolicy *storage.BucketHierarchicalNamespace) []map[string]interface{} {
	policies := make([]map[string]interface{}, 0, 1)
	if hierachicalNamespacePolicy == nil {
		// gofmt fix: the original had `defaultPolicy :=map[...]` (missing space).
		defaultPolicy := map[string]interface{}{
			"enabled": false,
		}

		policies = append(policies, defaultPolicy)
		return policies
	}
	policy := map[string]interface{}{
		"enabled": hierachicalNamespacePolicy.Enabled,
	}
	policies = append(policies, policy)
	return policies
}

func expandBucketVersioning(configured interface{}) *storage.BucketVersioning {
versionings := configured.([]interface{})
if len(versionings) == 0 {
Expand Down Expand Up @@ -1885,6 +1939,9 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res
if err := d.Set("soft_delete_policy", flattenBucketSoftDeletePolicy(res.SoftDeletePolicy)); err != nil {
return fmt.Errorf("Error setting soft_delete_policy: %s", err)
}
if err := d.Set("hierarchical_namespace", flattenBucketHierarchicalNamespacePolicy(res.HierarchicalNamespace)); err != nil {
return fmt.Errorf("Error setting hierarchical namespace: %s", err)
}
if res.IamConfiguration != nil && res.IamConfiguration.UniformBucketLevelAccess != nil {
if err := d.Set("uniform_bucket_level_access", res.IamConfiguration.UniformBucketLevelAccess.Enabled); err != nil {
return fmt.Errorf("Error setting uniform_bucket_level_access: %s", err)
Expand Down Expand Up @@ -1914,3 +1971,14 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res
d.SetId(res.Id)
return nil
}

// hierachicalNamespaceDiffSuppress suppresses the diff produced by removing a
// hierarchical_namespace block whose enabled flag was false, since that is
// equivalent to the API's null/default response.
func hierachicalNamespaceDiffSuppress(k, old, new string, r *schema.ResourceData) bool {
	// Only the "block removed" transition (count 1 -> 0) is a candidate.
	if k != "hierarchical_namespace.#" || old != "1" || new != "0" {
		return false
	}
	prev, _ := r.GetChange("hierarchical_namespace.0.enabled")
	return !prev.(bool)
}
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
"time"

"github.com/hashicorp/terraform-plugin-testing/helper/resource"
"github.com/hashicorp/terraform-plugin-testing/plancheck"
"github.com/hashicorp/terraform-plugin-testing/terraform"

"github.com/hashicorp/terraform-provider-google/google/acctest"
Expand Down Expand Up @@ -1545,6 +1546,147 @@ func TestAccStorageBucket_SoftDeletePolicy(t *testing.T) {
})
}

// TestAccStorageBucket_basic_hns creates a bucket with hierarchical namespace
// (HNS) enabled, then exercises:
//   - import/ImportStateVerify of the HNS-enabled bucket,
//   - ForceNew recreation when HNS is disabled,
//   - an empty plan when the hierarchical_namespace block is removed entirely
//     (the diff-suppress treats removing an enabled=false block as no change).
func TestAccStorageBucket_basic_hns(t *testing.T) {
	t.Parallel()

	bucketName := acctest.TestBucketName(t)
	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccStorageBucketDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				// Create the bucket with hierarchical_namespace.enabled = true.
				Config: testAccStorageBucket_basic_hns(bucketName, true),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(
						"google_storage_bucket.bucket", "hierarchical_namespace.0.enabled", "true"),
				),
			},
			{
				ResourceName:            "google_storage_bucket.bucket",
				ImportState:             true,
				ImportStateVerify:       true,
				ImportStateVerifyIgnore: []string{"force_destroy"},
			},
			{
				// hierarchical_namespace is ForceNew, so flipping enabled to
				// false recreates the bucket.
				Config: testAccStorageBucket_basic_hns_with_data(bucketName, false),
			},
			{
				ResourceName:            "google_storage_bucket.bucket",
				ImportState:             true,
				ImportStateVerify:       true,
				ImportStateVerifyIgnore: []string{"force_destroy"},
			},
			{
				// Dropping the hierarchical_namespace block entirely (enabled
				// was false) must not generate a diff — verified via an
				// ExpectEmptyPlan pre-apply plan check.
				Config: testAccStorageBucket_uniformBucketAccessOnly(bucketName, true),
				ConfigPlanChecks: resource.ConfigPlanChecks{
					PreApply: []plancheck.PlanCheck{
						plancheck.ExpectEmptyPlan(),
					},
				},
			},
			{
				ResourceName:            "google_storage_bucket.bucket",
				ImportState:             true,
				ImportStateVerify:       true,
				ImportStateVerifyIgnore: []string{"force_destroy"},
			},
		},
	})
}

// TestAccStorageBucket_hns_force_destroy verifies that a force_destroy-enabled
// HNS bucket can be destroyed after folders (one populated with an object, one
// empty) have been created in it directly through the storage API.
func TestAccStorageBucket_hns_force_destroy(t *testing.T) {
	t.Parallel()

	bucketName := acctest.TestBucketName(t)

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccStorageBucketDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccStorageBucket_basic_hns_with_data(bucketName, true),
				Check: resource.ComposeTestCheckFunc(
					// Seed the bucket with folders and an object so the
					// subsequent destroy has real content to remove.
					testAccCheckStorageBucketPutFolderItem(t, bucketName),
				),
			},
		},
	})
}

// testAccCheckStorageBucketPutFolderItem returns a check that seeds an
// HNS-enabled bucket via the storage API with a folder containing an object
// and a second, empty folder.
func testAccCheckStorageBucketPutFolderItem(t *testing.T, bucketName string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		config := acctest.GoogleProviderConfig(t)
		client := config.NewStorageClient(config.UserAgent)

		populatedFolder := storage.Folder{
			Bucket: bucketName,
			Name:   fmt.Sprintf("tf-test/tf-test-folder-%d/", acctest.RandInt(t)),
		}
		emptyFolder := storage.Folder{
			Bucket: bucketName,
			Name:   fmt.Sprintf("tf-test/tf-test-folder-%d/", acctest.RandInt(t)),
		}
		object := &storage.Object{Name: populatedFolder.Name + "bucketDestroyTestFile"}

		folderRes, err := client.Folders.Insert(bucketName, &populatedFolder).Recursive(true).Do()
		if err != nil {
			return fmt.Errorf("Folders.Insert failed: %v", err)
		}
		log.Printf("[INFO] Created folder %v at location %v\n\n", folderRes.Name, folderRes.SelfLink)

		// This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails
		objectRes, err := client.Objects.Insert(bucketName, object).Media(bytes.NewReader([]byte("test"))).Do()
		if err != nil {
			return fmt.Errorf("Objects.Insert failed: %v", err)
		}
		log.Printf("[INFO] Created object %v at location %v\n\n", objectRes.Name, objectRes.SelfLink)

		folderRes, err = client.Folders.Insert(bucketName, &emptyFolder).Recursive(true).Do()
		if err != nil {
			return fmt.Errorf("Folders.Insert failed: %v", err)
		}
		log.Printf("[INFO] Created folder %v at location %v\n\n", folderRes.Name, folderRes.SelfLink)

		return nil
	}
}

func testAccStorageBucket_basic_hns(bucketName string, enabled bool) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
location = "US"
uniform_bucket_level_access = true
hierarchical_namespace {
enabled = %t
}
}
`, bucketName, enabled)
}

func testAccStorageBucket_basic_hns_with_data(bucketName string, enabled bool) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
location = "US"
uniform_bucket_level_access = true
hierarchical_namespace {
enabled = %t
}
force_destroy= true
}
`, bucketName, enabled)
}

func testAccCheckStorageBucketExists(t *testing.T, n string, bucketName string, bucket *storage.Bucket) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,20 @@ resource "google_storage_bucket" "auto-expire" {
}
```

## Example Usage - Enabling hierarchical namespace

```hcl
resource "google_storage_bucket" "auto-expire" {
name = "hns-enabled-bucket"
location = "US"
force_destroy = true
  hierarchical_namespace {
enabled = true
}
}
```

## Argument Reference

The following arguments are supported:
Expand Down Expand Up @@ -157,6 +171,8 @@ The following arguments are supported:

* `soft_delete_policy` - (Optional, Computed) The bucket's soft delete policy, which defines the period of time that soft-deleted objects will be retained, and cannot be permanently deleted. If the block is not provided, Server side value will be kept which means removal of block won't generate any terraform change. Structure is [documented below](#nested_soft_delete_policy).

* `hierarchical_namespace` - (Optional, ForceNew) The bucket's hierarchical namespace policy, which defines the bucket capability to handle folders in logical structure. Structure is [documented below](#nested_hierarchical_namespace).

<a name="nested_lifecycle_rule"></a>The `lifecycle_rule` block supports:

* `action` - (Required) The Lifecycle Rule's action configuration. A single block of this type is supported. Structure is [documented below](#nested_action).
Expand Down Expand Up @@ -269,6 +285,12 @@ The following arguments are supported:

* `effective_time` - (Computed) Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format.

<a name="nested_hierarchical_namespace"></a>The `hierarchical_namespace` block supports:

* `enabled` - (Required) Enables hierarchical namespace for the bucket.
To use hierarchical namespace, you must also set `uniform_bucket_level_access` to `true`.


## Attributes Reference

In addition to the arguments listed above, the following computed attributes are
Expand Down

0 comments on commit f7d4144

Please sign in to comment.