Merge pull request #36486 from kavan-aws/f-finspace_kx_dataview-dbmaint
Add support for database maintenance for FinSpace Kx Dataviews
jar-b authored Mar 21, 2024
2 parents aa290d5 + d616f8d commit 24470c1
Showing 4 changed files with 159 additions and 0 deletions.
3 changes: 3 additions & 0 deletions .changelog/36486.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
resource/aws_finspace_kx_dataview: Add `read_write` and `segment_configuration.on_demand` arguments
```
21 changes: 21 additions & 0 deletions internal/service/finspace/kx_dataview.go
@@ -118,6 +118,12 @@ func ResourceKxDataview() *schema.Resource {
                },
                Required: true,
            },
            "on_demand": {
                Type:     schema.TypeBool,
                Default:  false,
                ForceNew: true,
                Optional: true,
            },
        },
    },
    Optional: true,
@@ -126,6 +132,12 @@ func ResourceKxDataview() *schema.Resource {
        Type:     schema.TypeString,
        Computed: true,
    },
    "read_write": {
        Type:     schema.TypeBool,
        Default:  false,
        ForceNew: true,
        Optional: true,
    },
    names.AttrTags:    tftags.TagsSchema(),
    names.AttrTagsAll: tftags.TagsSchemaComputed(),
},
@@ -183,6 +195,10 @@ func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta
        in.SegmentConfigurations = expandSegmentConfigurations(v.([]interface{}))
    }

    if v, ok := d.GetOk("read_write"); ok {
        in.ReadWrite = v.(bool)
    }

    out, err := conn.CreateKxDataview(ctx, in)
    if err != nil {
        return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), err)
@@ -223,6 +239,7 @@ func resourceKxDataviewRead(ctx context.Context, d *schema.ResourceData, meta in
    d.Set("database_name", out.DatabaseName)
    d.Set("environment_id", out.EnvironmentId)
    d.Set("az_mode", out.AzMode)
    d.Set("read_write", out.ReadWrite)
    if err := d.Set("segment_configurations", flattenSegmentConfigurations(out.SegmentConfigurations)); err != nil {
        return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err)
    }
@@ -414,6 +431,7 @@ func expandSegmentConfigurations(tfList []interface{}) []types.KxDataviewSegment
        s = append(s, types.KxDataviewSegmentConfiguration{
            VolumeName: aws.String(m["volume_name"].(string)),
            DbPaths:    expandDBPath(m["db_paths"].([]interface{})),
            OnDemand:   m["on_demand"].(bool),
        })
    }

@@ -430,6 +448,9 @@ func flattenSegmentConfiguration(apiObject *types.KxDataviewSegmentConfiguration
    if v := apiObject.DbPaths; v != nil {
        m["db_paths"] = v
    }
    if v := apiObject.OnDemand; v {
        m["on_demand"] = v
    }
    return m
}

130 changes: 130 additions & 0 deletions internal/service/finspace/kx_dataview_test.go
@@ -88,6 +88,66 @@ func TestAccFinSpaceKxDataview_disappears(t *testing.T) {
    })
}

func TestAccFinSpaceKxDataview_readWrite(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping test in short mode.")
    }

    ctx := acctest.Context(t)
    var dataview finspace.GetKxDataviewOutput
    rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
    resourceName := "aws_finspace_kx_dataview.test"

    resource.ParallelTest(t, resource.TestCase{
        PreCheck: func() {
            acctest.PreCheck(ctx, t)
            acctest.PreCheckPartitionHasService(t, finspace.ServiceID)
        },
        ErrorCheck:               acctest.ErrorCheck(t, finspace.ServiceID),
        ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
        CheckDestroy:             testAccCheckKxDataviewDestroy(ctx),
        Steps: []resource.TestStep{
            {
                Config: testAccKxDataviewConfig_readWrite(rName),
                Check: resource.ComposeTestCheckFunc(
                    testAccCheckKxDataviewExists(ctx, resourceName, &dataview),
                    resource.TestCheckResourceAttr(resourceName, "name", rName),
                ),
            },
        },
    })
}

func TestAccFinSpaceKxDataview_onDemand(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping test in short mode.")
    }

    ctx := acctest.Context(t)
    var dataview finspace.GetKxDataviewOutput
    rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
    resourceName := "aws_finspace_kx_dataview.test"

    resource.ParallelTest(t, resource.TestCase{
        PreCheck: func() {
            acctest.PreCheck(ctx, t)
            acctest.PreCheckPartitionHasService(t, finspace.ServiceID)
        },
        ErrorCheck:               acctest.ErrorCheck(t, finspace.ServiceID),
        ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
        CheckDestroy:             testAccCheckKxDataviewDestroy(ctx),
        Steps: []resource.TestStep{
            {
                Config: testAccKxDataviewConfig_onDemand(rName),
                Check: resource.ComposeTestCheckFunc(
                    testAccCheckKxDataviewExists(ctx, resourceName, &dataview),
                    resource.TestCheckResourceAttr(resourceName, "name", rName),
                ),
            },
        },
    })
}

func TestAccFinSpaceKxDataview_tags(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping test in short mode.")
@@ -116,6 +176,7 @@ func TestAccFinSpaceKxDataview_tags(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"),
),
},

{
ResourceName: resourceName,
ImportState: true,
@@ -257,6 +318,75 @@ resource "aws_finspace_kx_dataview" "test" {
`, rName))
}

func testAccKxDataviewConfig_readWrite(rName string) string {
    return acctest.ConfigCompose(
        testAccKxDataviewConfigBase(rName),
        fmt.Sprintf(`
resource "aws_finspace_kx_volume" "test" {
  name               = %[1]q
  environment_id     = aws_finspace_kx_environment.test.id
  availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]]
  az_mode            = "SINGLE"
  type               = "NAS_1"
  nas1_configuration {
    size = 1200
    type = "SSD_250"
  }
}
resource "aws_finspace_kx_dataview" "test" {
  name                 = %[1]q
  environment_id       = aws_finspace_kx_environment.test.id
  database_name        = aws_finspace_kx_database.test.name
  auto_update          = false
  az_mode              = "SINGLE"
  availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0]
  read_write           = true
  segment_configurations {
    db_paths    = ["/*"]
    volume_name = aws_finspace_kx_volume.test.name
    on_demand   = false
  }
}
`, rName))
}

func testAccKxDataviewConfig_onDemand(rName string) string {
    return acctest.ConfigCompose(
        testAccKxDataviewConfigBase(rName),
        fmt.Sprintf(`
resource "aws_finspace_kx_volume" "test" {
  name               = %[1]q
  environment_id     = aws_finspace_kx_environment.test.id
  availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]]
  az_mode            = "SINGLE"
  type               = "NAS_1"
  nas1_configuration {
    size = 1200
    type = "SSD_250"
  }
}
resource "aws_finspace_kx_dataview" "test" {
  name                 = %[1]q
  environment_id       = aws_finspace_kx_environment.test.id
  database_name        = aws_finspace_kx_database.test.name
  auto_update          = false
  az_mode              = "SINGLE"
  availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0]
  segment_configurations {
    db_paths    = ["/*"]
    volume_name = aws_finspace_kx_volume.test.name
    on_demand   = true
  }
}
`, rName))
}

func testAccKxDataviewConfig_tags1(rName, key1, value1 string) string {
    return acctest.ConfigCompose(
        testAccKxDataviewConfigBase(rName),
5 changes: 5 additions & 0 deletions website/docs/r/finspace_kx_dataview.html.markdown
@@ -56,13 +56,18 @@ The following arguments are optional:
* `availability_zone_id` - (Optional) The identifier of the availability zones. If attaching a volume, the volume must be in the same availability zone as the dataview that you are attaching to.
* `changeset_id` - (Optional) A unique identifier of the changeset of the database that you want to use to ingest data.
* `description` - (Optional) A description for the dataview.
* `read_write` - (Optional) Whether to make the dataview writable so you can perform database maintenance on it. Defaults to `false`. The following considerations apply to writable dataviews (see the example configuration after this list):
    * Partial writable dataviews are not supported; you must provide the entire database path. A writable dataview cannot be updated, so `auto_update` must be `false` when `read_write` is `true`.
    * A writable dataview requires its own volume; if the chosen volume is already in use by another dataview, creation fails.
    * A writable dataview cannot be changed back to read-only, so the `read_write` argument cannot be updated after creation.
* `segment_configurations` - (Optional) The configuration that contains the database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume. If you do not explicitly specify any database path for a volume, they are accessible from the cluster through the default S3/object store segment. See [segment_configurations](#segment_configurations-argument-reference) below.
* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
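
The following is a minimal sketch of a writable dataview, modeled on the acceptance test configuration in this change; the environment, database, and volume resources named `example` are placeholders for resources defined elsewhere in your configuration:

```terraform
# Sketch only: the environment, database, and volume resources named
# "example" are assumed to exist elsewhere. The volume must not be
# shared with any other dataview.
resource "aws_finspace_kx_dataview" "example" {
  name                 = "example-writable-dataview"
  environment_id       = aws_finspace_kx_environment.example.id
  database_name        = aws_finspace_kx_database.example.name
  availability_zone_id = aws_finspace_kx_environment.example.availability_zones[0]
  az_mode              = "SINGLE"

  # A writable dataview cannot auto-update.
  read_write  = true
  auto_update = false

  segment_configurations {
    db_paths    = ["/*"]
    volume_name = aws_finspace_kx_volume.example.name
  }
}
```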

### `segment_configurations` Argument Reference

* `db_paths` - (Required) The database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume.
* `volume_name` - (Required) The name of the volume that you want to attach to a dataview. This volume must be in the same availability zone as the dataview that you are attaching to.
* `on_demand` - (Optional) Enables on-demand caching on the selected database path when a particular file or column of the database is accessed. When set to `true`, dataviews perform minimal loading of files on the filesystem as needed; when set to `false`, everything is cached. Defaults to `false`. See the example below.
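
As a sketch of the caching behavior described above (resource names are again placeholders), enabling on-demand caching for a segment looks like this:

```terraform
# Sketch only: with on_demand = true, files under db_paths are cached
# lazily as they are accessed rather than being loaded up front.
resource "aws_finspace_kx_dataview" "example" {
  name                 = "example-on-demand-dataview"
  environment_id       = aws_finspace_kx_environment.example.id
  database_name        = aws_finspace_kx_database.example.name
  availability_zone_id = aws_finspace_kx_environment.example.availability_zones[0]
  az_mode              = "SINGLE"
  auto_update          = false

  segment_configurations {
    db_paths    = ["/*"]
    volume_name = aws_finspace_kx_volume.example.name
    on_demand   = true
  }
}
```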

## Attribute Reference
