azurerm_storage_share - Fix a bug where using storage_account_id accidentally leaks to the wrong branch for creation #28063

Closed
6 changes: 5 additions & 1 deletion internal/services/storage/storage_share_data_source.go
@@ -196,7 +196,11 @@ func dataSourceStorageShareRead(d *pluginsdk.ResourceData, meta interface{}) err
if model := share.Model; model != nil {
if props := model.Properties; props != nil {
d.Set("quota", props.ShareQuota)
d.Set("acl", flattenStorageShareACLs(pointer.From(props.SignedIdentifiers)))
acl, err := flattenStorageShareACLs(pointer.From(props.SignedIdentifiers))
if err != nil {
return err
}
d.Set("acl", acl)
d.Set("metadata", FlattenMetaData(pointer.From(props.Metadata)))
}
}
177 changes: 105 additions & 72 deletions internal/services/storage/storage_share_resource.go
@@ -160,6 +160,7 @@ func resourceStorageShare() *pluginsdk.Resource {
r.Schema["storage_account_name"] = &pluginsdk.Schema{
Type: pluginsdk.TypeString,
Optional: true,
Computed: true,
We should not be setting this value if storage_account_id is set

Suggested change
Computed: true,

ForceNew: true,
ExactlyOneOf: []string{
"storage_account_name",
@@ -171,6 +172,7 @@
r.Schema["storage_account_id"] = &pluginsdk.Schema{
Type: pluginsdk.TypeString,
Optional: true,
Computed: true,
We should not be setting this value if storage_account_id is set.

Suggested change
Computed: true,

ForceNew: true,
ExactlyOneOf: []string{
"storage_account_name",
@@ -196,83 +198,91 @@ func resourceStorageShareCreate(d *pluginsdk.ResourceData, meta interface{}) err

if !features.FivePointOhBeta() {
storageClient := meta.(*clients.Client).Storage
if accountName := d.Get("storage_account_name").(string); accountName != "" {
shareName := d.Get("name").(string)
quota := d.Get("quota").(int)
metaDataRaw := d.Get("metadata").(map[string]interface{})
metaData := ExpandMetaData(metaDataRaw)
Comment on lines -199 to -203
Changing this means that we will use the Data Plane client even in cases where storage_account_id is specified in place of storage_account_name, which we specifically need to avoid for this change to separate the two APIs. If storage_account_id is specified, we must use exclusively the Resource Manager API.
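
A rough sketch of the split the reviewer is describing: the pre-5.0 compatibility branch is only entered when storage_account_name was configured, so a configuration using storage_account_id always falls through to the Resource Manager API. The helper name is hypothetical, purely for illustration:

if !features.FivePointOhBeta() {
	if accountName := d.Get("storage_account_name").(string); accountName != "" {
		// storage_account_name was configured: keep using the legacy Data Plane client.
		return createShareViaDataPlane(ctx, d, meta, accountName) // hypothetical helper, for illustration only
	}
	// storage_account_id was configured: fall through to the Resource Manager API below.
}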


account, err := storageClient.FindAccount(ctx, subscriptionId, accountName)
accountName := d.Get("storage_account_name").(string)
if accountName == "" {
accountId, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string))
if err != nil {
return fmt.Errorf("retrieving Account %q for Share %q: %v", accountName, shareName, err)
}
if account == nil {
return fmt.Errorf("locating Storage Account %q", accountName)
return err
}
accountName = accountId.StorageAccountName
}

// Determine the file endpoint, so we can build a data plane ID
endpoint, err := account.DataPlaneEndpoint(client.EndpointTypeFile)
if err != nil {
return fmt.Errorf("determining File endpoint: %v", err)
}
shareName := d.Get("name").(string)
quota := d.Get("quota").(int)
metaDataRaw := d.Get("metadata").(map[string]interface{})
metaData := ExpandMetaData(metaDataRaw)

// Parse the file endpoint as a data plane account ID
accountId, err := accounts.ParseAccountID(*endpoint, storageClient.StorageDomainSuffix)
if err != nil {
return fmt.Errorf("parsing Account ID: %v", err)
}
account, err := storageClient.FindAccount(ctx, subscriptionId, accountName)
if err != nil {
return fmt.Errorf("retrieving Account %q for Share %q: %v", accountName, shareName, err)
}
if account == nil {
return fmt.Errorf("locating Storage Account %q", accountName)
}

id := shares.NewShareID(*accountId, shareName)
// Determine the file endpoint, so we can build a data plane ID
endpoint, err := account.DataPlaneEndpoint(client.EndpointTypeFile)
if err != nil {
return fmt.Errorf("determining File endpoint: %v", err)
}

protocol := shares.ShareProtocol(d.Get("enabled_protocol").(string))
if protocol == shares.NFS {
// Only FileStorage (whose sku tier is Premium only) storage account is able to have NFS file shares.
// See: https://learn.microsoft.com/en-us/azure/storage/files/storage-files-quick-create-use-linux#applies-to
if account.Kind != storageaccounts.KindFileStorage {
return fmt.Errorf("NFS File Share is only supported for Storage Account with kind %q but got `%s`", string(storageaccounts.KindFileStorage), account.Kind)
}
}
// Parse the file endpoint as a data plane account ID
accountId, err := accounts.ParseAccountID(*endpoint, storageClient.StorageDomainSuffix)
if err != nil {
return fmt.Errorf("parsing Account ID: %v", err)
}

// The files API does not support bearer tokens (@manicminer, 2024-02-15)
fileSharesDataPlaneClient, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth())
if err != nil {
return fmt.Errorf("building File Share Client: %v", err)
}
id := shares.NewShareID(*accountId, shareName)

exists, err := fileSharesDataPlaneClient.Exists(ctx, shareName)
if err != nil {
return fmt.Errorf("checking for existing %s: %v", id, err)
}
if exists != nil && *exists {
return tf.ImportAsExistsError("azurerm_storage_share", id.ID())
protocol := shares.ShareProtocol(d.Get("enabled_protocol").(string))
if protocol == shares.NFS {
// Only FileStorage (whose sku tier is Premium only) storage account is able to have NFS file shares.
// See: https://learn.microsoft.com/en-us/azure/storage/files/storage-files-quick-create-use-linux#applies-to
if account.Kind != storageaccounts.KindFileStorage {
return fmt.Errorf("NFS File Share is only supported for Storage Account with kind %q but got `%s`", string(storageaccounts.KindFileStorage), account.Kind)
}
}

log.Printf("[INFO] Creating Share %q in Storage Account %q", shareName, accountName)
input := shares.CreateInput{
QuotaInGB: quota,
MetaData: metaData,
EnabledProtocol: protocol,
}
// The files API does not support bearer tokens (@manicminer, 2024-02-15)
fileSharesDataPlaneClient, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth())
if err != nil {
return fmt.Errorf("building File Share Client: %v", err)
}

if accessTier := d.Get("access_tier").(string); accessTier != "" {
tier := shares.AccessTier(accessTier)
input.AccessTier = &tier
}
exists, err := fileSharesDataPlaneClient.Exists(ctx, shareName)
if err != nil {
return fmt.Errorf("checking for existing %s: %v", id, err)
}
if exists != nil && *exists {
return tf.ImportAsExistsError("azurerm_storage_share", id.ID())
}

if err = fileSharesDataPlaneClient.Create(ctx, shareName, input); err != nil {
return fmt.Errorf("creating %s: %v", id, err)
}
log.Printf("[INFO] Creating Share %q in Storage Account %q", shareName, accountName)
input := shares.CreateInput{
QuotaInGB: quota,
MetaData: metaData,
EnabledProtocol: protocol,
}

d.SetId(id.ID())
if accessTier := d.Get("access_tier").(string); accessTier != "" {
tier := shares.AccessTier(accessTier)
input.AccessTier = &tier
}

aclsRaw := d.Get("acl").(*pluginsdk.Set).List()
acls := expandStorageShareACLsDeprecated(aclsRaw)
if err = fileSharesDataPlaneClient.UpdateACLs(ctx, shareName, shares.SetAclInput{SignedIdentifiers: acls}); err != nil {
return fmt.Errorf("setting ACLs for %s: %v", id, err)
}
if err = fileSharesDataPlaneClient.Create(ctx, shareName, input); err != nil {
return fmt.Errorf("creating %s: %v", id, err)
}

return resourceStorageShareRead(d, meta)
d.SetId(id.ID())

aclsRaw := d.Get("acl").(*pluginsdk.Set).List()
acls := expandStorageShareACLsDeprecated(aclsRaw)
if err = fileSharesDataPlaneClient.UpdateACLs(ctx, shareName, shares.SetAclInput{SignedIdentifiers: acls}); err != nil {
return fmt.Errorf("setting ACLs for %s: %v", id, err)
}

return resourceStorageShareRead(d, meta)
}

accountId, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string))
@@ -323,7 +333,7 @@ func resourceStorageShareRead(d *pluginsdk.ResourceData, meta interface{}) error
ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d)
defer cancel()

if !features.FivePointOhBeta() && !strings.HasPrefix(d.Id(), "/subscriptions/") {
if !features.FivePointOhBeta() {
Comment on lines -326 to +336
As above, this must be evaluated to ensure that, in cases where the ID is a Resource Manager ID, we exclusively use the Resource Manager API.
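
A minimal sketch of the guard being asked for here, so that a Resource Manager ID (which starts with /subscriptions/) never enters the legacy Data Plane branch in Read, Update and Delete:

if !features.FivePointOhBeta() && !strings.HasPrefix(d.Id(), "/subscriptions/") {
	// d.Id() is a legacy Data Plane share ID: parse it and continue with the Data Plane client.
	id, err := shares.ParseShareID(d.Id(), storageClient.StorageDomainSuffix)
	if err != nil {
		return err
	}
	// ... Data Plane read continues with id ...
}
// Otherwise d.Id() is a Resource Manager ID and only the Resource Manager API is used.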

storageClient := meta.(*clients.Client).Storage
id, err := shares.ParseShareID(d.Id(), storageClient.StorageDomainSuffix)
if err != nil {
Expand Down Expand Up @@ -357,7 +367,11 @@ func resourceStorageShareRead(d *pluginsdk.ResourceData, meta interface{}) error
}

d.Set("name", id.ShareName)

// Set both storage_account_id and storage_account_name, since either may have been configured
d.Set("storage_account_id", account.StorageAccountId.ID())
d.Set("storage_account_name", id.AccountId.AccountName)

d.Set("quota", props.QuotaGB)
d.Set("url", id.ID())
d.Set("enabled_protocol", string(props.EnabledProtocol))
Expand Down Expand Up @@ -410,15 +424,16 @@ func resourceStorageShareRead(d *pluginsdk.ResourceData, meta interface{}) error
}
d.Set("enabled_protocol", string(enabledProtocols))
d.Set("access_tier", string(pointer.From(props.AccessTier)))
d.Set("acl", flattenStorageShareACLs(pointer.From(props.SignedIdentifiers)))

acl, err := flattenStorageShareACLs(pointer.From(props.SignedIdentifiers))
if err != nil {
return err
}
d.Set("acl", acl)
d.Set("metadata", FlattenMetaData(pointer.From(props.Metadata)))
}
}

if !features.FivePointOhBeta() {
d.Set("resource_manager_id", id.ID())
}

// TODO - The following section for `url` will need to be updated to go-azure-sdk when the Giovanni Deprecation process has been completed
account, err := meta.(*clients.Client).Storage.FindAccount(ctx, subscriptionId, id.StorageAccountName)
if err != nil {
@@ -452,7 +467,7 @@ func resourceStorageShareUpdate(d *pluginsdk.ResourceData, meta interface{}) err
ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d)
defer cancel()

if !features.FivePointOhBeta() && !strings.HasPrefix(d.Id(), "/subscriptions/") {
if !features.FivePointOhBeta() {
id, err := shares.ParseShareID(d.Id(), storageClient.StorageDomainSuffix)
if err != nil {
return err
Expand Down Expand Up @@ -575,7 +590,7 @@ func resourceStorageShareDelete(d *pluginsdk.ResourceData, meta interface{}) err
ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d)
defer cancel()

if !features.FivePointOhBeta() && !strings.HasPrefix(d.Id(), "/subscriptions/") {
if !features.FivePointOhBeta() {
storageClient := meta.(*clients.Client).Storage
id, err := shares.ParseShareID(d.Id(), storageClient.StorageDomainSuffix)
if err != nil {
Expand Down Expand Up @@ -687,16 +702,34 @@ func expandStorageShareACLs(input []interface{}) *[]fileshares.SignedIdentifier
return pointer.To(results)
}

func flattenStorageShareACLs(input []fileshares.SignedIdentifier) []interface{} {
func flattenStorageShareACLs(input []fileshares.SignedIdentifier) ([]interface{}, error) {
result := make([]interface{}, 0)

for _, v := range input {
var (
start string
expiry string
)
if vv := v.AccessPolicy.StartTime; vv != nil {
t, err := time.Parse("2006-01-02T15:04:05Z", *vv)
if err != nil {
return nil, fmt.Errorf("parsing start time %q: %v", *vv, err)
}
start = t.Format("2006-01-02T15:04:05.0000000Z")
}
if vv := v.AccessPolicy.ExpiryTime; vv != nil {
t, err := time.Parse("2006-01-02T15:04:05Z", *vv)
if err != nil {
return nil, fmt.Errorf("parsing expiry time %q: %v", *vv, err)
}
expiry = t.Format("2006-01-02T15:04:05.0000000Z")
}
output := map[string]interface{}{
"id": v.Id,
"access_policy": []interface{}{
map[string]interface{}{
"start": v.AccessPolicy.StartTime,
"expiry": v.AccessPolicy.ExpiryTime,
"start": start,
"expiry": expiry,
"permissions": v.AccessPolicy.Permission,
},
},
@@ -705,5 +738,5 @@ func flattenStorageShareACLs(input []fileshares.SignedIdentifier) []interface{}
result = append(result, output)
}

return result
return result, nil
}
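
For reference, a small self-contained example of the timestamp round trip the updated flatten function performs: the File Shares API returns second-precision timestamps, and they are re-rendered with seven fractional digits so the flattened value matches the format used in configuration (the layout strings mirror those in the diff above):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Start/expiry as returned by the File Shares API (second precision, UTC).
	raw := "2019-07-02T09:38:21Z"

	t, err := time.Parse("2006-01-02T15:04:05Z", raw)
	if err != nil {
		panic(err)
	}

	// Re-render with seven fractional digits, matching the acceptance test configuration.
	fmt.Println(t.Format("2006-01-02T15:04:05.0000000Z"))
	// Prints: 2019-07-02T09:38:21.0000000Z
}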
16 changes: 8 additions & 8 deletions internal/services/storage/storage_share_resource_test.go
@@ -660,8 +660,8 @@ resource "azurerm_storage_share" "test" {

access_policy {
permissions = "rwd"
start = "2019-07-02T09:38:21Z"
expiry = "2019-07-02T10:38:21Z"
start = "2019-07-02T09:38:21.0000000Z"
expiry = "2019-07-02T10:38:21.0000000Z"
}
}

@@ -781,8 +781,8 @@ resource "azurerm_storage_share" "test" {

access_policy {
permissions = "rwd"
start = "2019-07-02T09:38:21Z"
expiry = "2019-07-02T10:38:21Z"
start = "2019-07-02T09:38:21.0000000Z"
expiry = "2019-07-02T10:38:21.0000000Z"
}
}
}
@@ -876,17 +876,17 @@ resource "azurerm_storage_share" "test" {

access_policy {
permissions = "rwd"
start = "2019-07-02T09:38:21Z"
expiry = "2019-07-02T10:38:21Z"
start = "2019-07-02T09:38:21.0000000Z"
expiry = "2019-07-02T10:38:21.0000000Z"
}
}
acl {
id = "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI"

access_policy {
permissions = "rwd"
start = "2019-07-02T09:38:21Z"
expiry = "2019-07-02T10:38:21Z"
start = "2019-07-02T09:38:21.0000000Z"
expiry = "2019-07-02T10:38:21.0000000Z"
}
}
}