Skip to content

Commit

Permalink
Merge pull request #38469 from hashicorp/b-fsx-aws-sdk-go-v2-migration-regressions
Browse files Browse the repository at this point in the history

[WIP] Fix failing acceptance tests after AWS SDK for Go v2 migration
  • Loading branch information
ewbankkit authored Jul 22, 2024
2 parents 99eaa22 + 5ceace5 commit a506c01
Show file tree
Hide file tree
Showing 5 changed files with 109 additions and 100 deletions.
9 changes: 4 additions & 5 deletions internal/service/fsx/lustre_file_system.go
Original file line number Diff line number Diff line change
Expand Up @@ -334,18 +334,17 @@ func resourceLustreFileSystemStorageCapacityCustomizeDiff(_ context.Context, d *
func resourceLustreFileSystemMetadataConfigCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta any) error {
//metadata_configuration is only supported when deployment_type is persistent2
if v, ok := d.GetOk("metadata_configuration"); ok {
if len(v.([]any)) > 0 {
deploymentType := d.Get("deployment_type").(string)
if deploymentType != string(awstypes.LustreDeploymentTypePersistent2) {
return fmt.Errorf("metadata_configuration can only be set when deployment type is " + string(awstypes.LustreDeploymentTypePersistent2))
if len(v.([]interface{})) > 0 {
if deploymentType := awstypes.LustreDeploymentType(d.Get("deployment_type").(string)); deploymentType != awstypes.LustreDeploymentTypePersistent2 {
return fmt.Errorf("metadata_configuration can only be set when deployment type is %s", awstypes.LustreDeploymentTypePersistent2)
}
}
}

// we want to force a new resource if the new Iops is less than the old one
if d.HasChange("metadata_configuration") {
if v, ok := d.GetOk("metadata_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
if mode := d.Get("metadata_configuration.0.mode"); mode == awstypes.MetadataConfigurationModeUserProvisioned {
if mode := awstypes.MetadataConfigurationMode(d.Get("metadata_configuration.0.mode").(string)); mode == awstypes.MetadataConfigurationModeUserProvisioned {
o, n := d.GetChange("metadata_configuration")

oldV := o.([]interface{})
Expand Down
6 changes: 3 additions & 3 deletions internal/service/fsx/ontap_storage_virtual_machine_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -309,18 +309,18 @@ func TestAccFSxONTAPStorageVirtualMachine_activeDirectoryJoin(t *testing.T) {
Steps: []resource.TestStep{
{
Config: testAccONTAPStorageVirtualMachineConfig_basic(rName),
Check: resource.ComposeTestCheckFunc(
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckONTAPStorageVirtualMachineExists(ctx, resourceName, &storageVirtualMachine1),
resource.TestCheckResourceAttr(resourceName, "active_directory_configuration.#", acctest.Ct0),
),
},
{
Config: testAccONTAPStorageVirtualMachineConfig_selfManagedActiveDirectory(rName, netBiosName, domainNetbiosName, domainName, domainPassword),
Check: resource.ComposeTestCheckFunc(
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckONTAPStorageVirtualMachineExists(ctx, resourceName, &storageVirtualMachine2),
testAccCheckONTAPStorageVirtualMachineNotRecreated(&storageVirtualMachine1, &storageVirtualMachine2),
resource.TestCheckResourceAttr(resourceName, "active_directory_configuration.#", acctest.Ct1),
resource.TestCheckResourceAttr(resourceName, "active_directory_configuration.0.netbios_name", netBiosName),
resource.TestCheckResourceAttr(resourceName, "active_directory_configuration.0.netbios_name", strings.ToUpper(netBiosName)),
resource.TestCheckResourceAttr(resourceName, "active_directory_configuration.0.self_managed_active_directory_configuration.0.domain_name", domainName),
resource.TestCheckResourceAttr(resourceName, "active_directory_configuration.0.self_managed_active_directory_configuration.0.file_system_administrators_group", "Admins"),
resource.TestCheckResourceAttr(resourceName, "active_directory_configuration.0.self_managed_active_directory_configuration.0.organizational_unit_distinguished_name", fmt.Sprintf("OU=computers,OU=%s", domainNetbiosName)),
Expand Down
97 changes: 48 additions & 49 deletions internal/service/fsx/openzfs_file_system.go
Original file line number Diff line number Diff line change
Expand Up @@ -703,72 +703,70 @@ func expandDiskIopsConfiguration(cfg []interface{}) *awstypes.DiskIopsConfigurat
return &out
}

func expandOpenZFSCreateRootVolumeConfiguration(cfg []interface{}) *awstypes.OpenZFSCreateRootVolumeConfiguration {
if len(cfg) < 1 {
func expandOpenZFSCreateRootVolumeConfiguration(tfList []interface{}) *awstypes.OpenZFSCreateRootVolumeConfiguration {
if len(tfList) < 1 {
return nil
}

conf := cfg[0].(map[string]interface{})

out := awstypes.OpenZFSCreateRootVolumeConfiguration{}
tfMap := tfList[0].(map[string]interface{})
apiObject := &awstypes.OpenZFSCreateRootVolumeConfiguration{}

if v, ok := conf["copy_tags_to_snapshots"].(bool); ok {
out.CopyTagsToSnapshots = aws.Bool(v)
if v, ok := tfMap["copy_tags_to_snapshots"].(bool); ok {
apiObject.CopyTagsToSnapshots = aws.Bool(v)
}

if v, ok := conf["data_compression_type"].(string); ok {
out.DataCompressionType = awstypes.OpenZFSDataCompressionType(v)
if v, ok := tfMap["data_compression_type"].(string); ok {
apiObject.DataCompressionType = awstypes.OpenZFSDataCompressionType(v)
}

if v, ok := conf["read_only"].(bool); ok {
out.ReadOnly = aws.Bool(v)
if v, ok := tfMap["nfs_exports"].([]interface{}); ok {
apiObject.NfsExports = expandOpenZFSNfsExports(v)
}

if v, ok := conf["record_size_kib"].(int); ok {
out.RecordSizeKiB = aws.Int32(int32(v))
if v, ok := tfMap["read_only"].(bool); ok {
apiObject.ReadOnly = aws.Bool(v)
}

if v, ok := conf["user_and_group_quotas"]; ok {
out.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List())
if v, ok := tfMap["record_size_kib"].(int); ok {
apiObject.RecordSizeKiB = aws.Int32(int32(v))
}

if v, ok := conf["nfs_exports"].([]interface{}); ok {
out.NfsExports = expandOpenZFSNfsExports(v)
if v, ok := tfMap["user_and_group_quotas"]; ok {
apiObject.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List())
}

return &out
return apiObject
}

func expandUpdateOpenZFSVolumeConfiguration(cfg []interface{}) *awstypes.UpdateOpenZFSVolumeConfiguration {
if len(cfg) < 1 {
func expandUpdateOpenZFSVolumeConfiguration(tfList []interface{}) *awstypes.UpdateOpenZFSVolumeConfiguration {
if len(tfList) < 1 {
return nil
}

conf := cfg[0].(map[string]interface{})

out := awstypes.UpdateOpenZFSVolumeConfiguration{}
tfMap := tfList[0].(map[string]interface{})
apiObject := &awstypes.UpdateOpenZFSVolumeConfiguration{}

if v, ok := conf["data_compression_type"].(string); ok {
out.DataCompressionType = awstypes.OpenZFSDataCompressionType(v)
if v, ok := tfMap["data_compression_type"].(string); ok {
apiObject.DataCompressionType = awstypes.OpenZFSDataCompressionType(v)
}

if v, ok := conf["read_only"].(bool); ok {
out.ReadOnly = aws.Bool(v)
if v, ok := tfMap["nfs_exports"].([]interface{}); ok {
apiObject.NfsExports = expandOpenZFSNfsExports(v)
}

if v, ok := conf["record_size_kib"].(int); ok {
out.RecordSizeKiB = aws.Int32(int32(v))
if v, ok := tfMap["read_only"].(bool); ok {
apiObject.ReadOnly = aws.Bool(v)
}

if v, ok := conf["user_and_group_quotas"]; ok {
out.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List())
if v, ok := tfMap["record_size_kib"].(int); ok {
apiObject.RecordSizeKiB = aws.Int32(int32(v))
}

if v, ok := conf["nfs_exports"].([]interface{}); ok {
out.NfsExports = expandOpenZFSNfsExports(v)
if v, ok := tfMap["user_and_group_quotas"]; ok {
apiObject.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List())
}

return &out
return apiObject
}

func flattenDiskIopsConfiguration(rs *awstypes.DiskIopsConfiguration) []interface{} {
Expand All @@ -785,30 +783,31 @@ func flattenDiskIopsConfiguration(rs *awstypes.DiskIopsConfiguration) []interfac
return []interface{}{m}
}

func flattenOpenZFSFileSystemRootVolume(rs *awstypes.Volume) []interface{} {
if rs == nil {
func flattenOpenZFSFileSystemRootVolume(apiObject *awstypes.Volume) []interface{} {
if apiObject == nil {
return []interface{}{}
}

m := make(map[string]interface{})
if rs.OpenZFSConfiguration.CopyTagsToSnapshots != nil {
m["copy_tags_to_snapshots"] = aws.ToBool(rs.OpenZFSConfiguration.CopyTagsToSnapshots)
tfMap := make(map[string]interface{})

if apiObject.OpenZFSConfiguration.CopyTagsToSnapshots != nil {
tfMap["copy_tags_to_snapshots"] = aws.ToBool(apiObject.OpenZFSConfiguration.CopyTagsToSnapshots)
}
m["data_compression_type"] = string(rs.OpenZFSConfiguration.DataCompressionType)
if rs.OpenZFSConfiguration.NfsExports != nil {
m["nfs_exports"] = flattenOpenZFSNfsExports(rs.OpenZFSConfiguration.NfsExports)
tfMap["data_compression_type"] = string(apiObject.OpenZFSConfiguration.DataCompressionType)
if apiObject.OpenZFSConfiguration.NfsExports != nil {
tfMap["nfs_exports"] = flattenOpenZFSNfsExports(apiObject.OpenZFSConfiguration.NfsExports)
}
if rs.OpenZFSConfiguration.ReadOnly != nil {
m["read_only"] = aws.ToBool(rs.OpenZFSConfiguration.ReadOnly)
if apiObject.OpenZFSConfiguration.ReadOnly != nil {
tfMap["read_only"] = aws.ToBool(apiObject.OpenZFSConfiguration.ReadOnly)
}
if rs.OpenZFSConfiguration.RecordSizeKiB != nil {
m["record_size_kib"] = aws.ToInt32(rs.OpenZFSConfiguration.RecordSizeKiB)
if apiObject.OpenZFSConfiguration.RecordSizeKiB != nil {
tfMap["record_size_kib"] = aws.ToInt32(apiObject.OpenZFSConfiguration.RecordSizeKiB)
}
if rs.OpenZFSConfiguration.UserAndGroupQuotas != nil {
m["user_and_group_quotas"] = flattenOpenZFSUserOrGroupQuotas(rs.OpenZFSConfiguration.UserAndGroupQuotas)
if apiObject.OpenZFSConfiguration.UserAndGroupQuotas != nil {
tfMap["user_and_group_quotas"] = flattenOpenZFSUserOrGroupQuotas(apiObject.OpenZFSConfiguration.UserAndGroupQuotas)
}

return []interface{}{m}
return []interface{}{tfMap}
}

func findOpenZFSFileSystemByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.FileSystem, error) {
Expand Down
2 changes: 2 additions & 0 deletions internal/service/fsx/openzfs_file_system_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@ func init() {
func testAccErrorCheckSkip(t *testing.T) resource.ErrorCheckFunc {
return acctest.ErrorCheckSkipMessagesContaining(t,
"Amazon FSx does not currently support OpenZFS file system creation in the following Availability Zones",
// "ServiceLimitExceeded: Account 123456789012 can have at most 10240 MB/s of throughput capacity total across file systems"
"throughput capacity total across file systems",
)
}

Expand Down
95 changes: 52 additions & 43 deletions internal/service/fsx/openzfs_volume.go
Original file line number Diff line number Diff line change
Expand Up @@ -453,54 +453,58 @@ func expandOpenZFSUserOrGroupQuota(conf map[string]interface{}) *awstypes.OpenZF
return &out
}

func expandOpenZFSNfsExports(cfg []interface{}) []awstypes.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name
exports := []awstypes.OpenZFSNfsExport{}
func expandOpenZFSNfsExports(tfList []interface{}) []awstypes.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name
apiObjects := []awstypes.OpenZFSNfsExport{}

for _, export := range cfg {
expandedExport := expandOpenZFSNfsExport(export.(map[string]interface{}))
if expandedExport != nil {
exports = append(exports, *expandedExport)
for _, tfMapRaw := range tfList {
tfMap, ok := tfMapRaw.(map[string]interface{})
if !ok {
continue
}

apiObjects = append(apiObjects, expandOpenZFSNfsExport(tfMap))
}

return exports
return apiObjects
}

func expandOpenZFSNfsExport(cfg map[string]interface{}) *awstypes.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name
out := awstypes.OpenZFSNfsExport{}
func expandOpenZFSNfsExport(tfMap map[string]interface{}) awstypes.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name
apiObject := awstypes.OpenZFSNfsExport{}

if v, ok := cfg["client_configurations"]; ok {
out.ClientConfigurations = expandOpenZFSClientConfigurations(v.(*schema.Set).List())
if v, ok := tfMap["client_configurations"]; ok {
apiObject.ClientConfigurations = expandOpenZFSClientConfigurations(v.(*schema.Set).List())
}

return &out
return apiObject
}

func expandOpenZFSClientConfigurations(cfg []interface{}) []awstypes.OpenZFSClientConfiguration {
configurations := []awstypes.OpenZFSClientConfiguration{}
func expandOpenZFSClientConfigurations(tfList []interface{}) []awstypes.OpenZFSClientConfiguration {
apiObjects := []awstypes.OpenZFSClientConfiguration{}

for _, configuration := range cfg {
expandedConfiguration := expandOpenZFSClientConfiguration(configuration.(map[string]interface{}))
if expandedConfiguration != nil {
configurations = append(configurations, *expandedConfiguration)
for _, tfMapRaw := range tfList {
tfMap, ok := tfMapRaw.(map[string]interface{})
if !ok {
continue
}

apiObjects = append(apiObjects, expandOpenZFSClientConfiguration(tfMap))
}

return configurations
return apiObjects
}

func expandOpenZFSClientConfiguration(conf map[string]interface{}) *awstypes.OpenZFSClientConfiguration {
out := awstypes.OpenZFSClientConfiguration{}
func expandOpenZFSClientConfiguration(tfMap map[string]interface{}) awstypes.OpenZFSClientConfiguration {
apiObject := awstypes.OpenZFSClientConfiguration{}

if v, ok := conf["clients"].(string); ok && len(v) > 0 {
out.Clients = aws.String(v)
if v, ok := tfMap["clients"].(string); ok && len(v) > 0 {
apiObject.Clients = aws.String(v)
}

if v, ok := conf["options"].([]interface{}); ok {
out.Options = flex.ExpandStringValueList(v)
if v, ok := tfMap["options"].([]interface{}); ok {
apiObject.Options = flex.ExpandStringValueList(v)
}

return &out
return apiObject
}

func expandCreateOpenZFSOriginSnapshotConfiguration(cfg []interface{}) *awstypes.CreateOpenZFSOriginSnapshotConfiguration {
Expand All @@ -523,34 +527,39 @@ func expandCreateOpenZFSOriginSnapshotConfiguration(cfg []interface{}) *awstypes
return &out
}

func flattenOpenZFSNfsExports(rs []awstypes.OpenZFSNfsExport) []map[string]interface{} { // nosemgrep:ci.caps4-in-func-name
exports := make([]map[string]interface{}, 0)
func flattenOpenZFSNfsExports(apiObjects []awstypes.OpenZFSNfsExport) []interface{} { // nosemgrep:ci.caps4-in-func-name
tfList := make([]interface{}, 0)

for _, export := range rs {
cfg := make(map[string]interface{})
cfg["client_configurations"] = flattenOpenZFSClientConfigurations(export.ClientConfigurations)
exports = append(exports, cfg)
for _, apiObject := range apiObjects {
// The API may return '"NfsExports":[null]'.
if len(apiObject.ClientConfigurations) == 0 {
continue
}

tfMap := make(map[string]interface{})
tfMap["client_configurations"] = flattenOpenZFSClientConfigurations(apiObject.ClientConfigurations)
tfList = append(tfList, tfMap)
}

if len(exports) > 0 {
return exports
if len(tfList) > 0 {
return tfList
}

return nil
}

func flattenOpenZFSClientConfigurations(rs []awstypes.OpenZFSClientConfiguration) []map[string]interface{} {
configurations := make([]map[string]interface{}, 0)
func flattenOpenZFSClientConfigurations(apiObjects []awstypes.OpenZFSClientConfiguration) []interface{} {
tfList := make([]interface{}, 0)

for _, configuration := range rs {
cfg := make(map[string]interface{})
cfg["clients"] = aws.ToString(configuration.Clients)
cfg["options"] = flex.FlattenStringValueList(configuration.Options)
configurations = append(configurations, cfg)
for _, apiObject := range apiObjects {
tfMap := make(map[string]interface{})
tfMap["clients"] = aws.ToString(apiObject.Clients)
tfMap["options"] = apiObject.Options
tfList = append(tfList, tfMap)
}

if len(configurations) > 0 {
return configurations
if len(tfList) > 0 {
return tfList
}

return nil
Expand Down

0 comments on commit a506c01

Please sign in to comment.