provider/aws: Enable Redshift Cluster Logging (#7813)
Fixes #7423

```
% make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSRedshiftCluster_loggingEnabled'
==> Checking that code complies with gofmt requirements...
go generate $(go list ./... | grep -v /terraform/vendor/)
TF_ACC=1 go test ./builtin/providers/aws -v
-run=TestAccAWSRedshiftCluster_loggingEnabled -timeout 120m
=== RUN   TestAccAWSRedshiftCluster_loggingEnabled
--- PASS: TestAccAWSRedshiftCluster_loggingEnabled (675.21s)
PASS
ok      github.com/hashicorp/terraform/builtin/providers/aws    675.233s
```
stack72 authored Jul 27, 2016
1 parent 4cdebd7 commit 3f83f0b
Showing 3 changed files with 183 additions and 0 deletions.
86 changes: 86 additions & 0 deletions builtin/providers/aws/resource_aws_redshift_cluster.go
@@ -207,6 +207,24 @@ func resourceAwsRedshiftCluster() *schema.Resource {
Set: schema.HashString,
},

"enable_logging": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},

"bucket_name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},

"s3_key_prefix": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},

"tags": tagsSchema(),
},
}
@@ -310,6 +328,16 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{})
return fmt.Errorf("[WARN] Error waiting for Redshift Cluster state to be \"available\": %s", err)
}

if _, ok := d.GetOk("enable_logging"); ok {
loggingErr := enableRedshiftClusterLogging(d, conn)
if loggingErr != nil {
log.Printf("[ERROR] Error Enabling Logging on Redshift Cluster: %s", loggingErr)
return loggingErr
}
}

return resourceAwsRedshiftClusterRead(d, meta)
}

@@ -346,6 +374,15 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er
return nil
}

log.Printf("[INFO] Reading Redshift Cluster Logging Status: %s", d.Id())
loggingStatus, loggingErr := conn.DescribeLoggingStatus(&redshift.DescribeLoggingStatusInput{
ClusterIdentifier: aws.String(d.Id()),
})

if loggingErr != nil {
return loggingErr
}

d.Set("master_username", rsc.MasterUsername)
d.Set("node_type", rsc.NodeType)
d.Set("allow_version_upgrade", rsc.AllowVersionUpgrade)
@@ -404,6 +441,10 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er
d.Set("cluster_revision_number", rsc.ClusterRevisionNumber)
d.Set("tags", tagsToMapRedshift(rsc.Tags))

d.Set("bucket_name", loggingStatus.BucketName)
d.Set("enable_logging", loggingStatus.LoggingEnabled)
d.Set("s3_key_prefix", loggingStatus.S3KeyPrefix)

return nil
}

@@ -553,11 +594,56 @@ func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{})
}
}

if d.HasChange("enable_logging") || d.HasChange("bucket_name") || d.HasChange("s3_key_prefix") {
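// Any change to the logging arguments re-applies the logging configuration:
// enable (or refresh) logging when enable_logging is set, otherwise disable it.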
var loggingErr error
if _, ok := d.GetOk("enable_logging"); ok {

log.Printf("[INFO] Enabling Logging for Redshift Cluster %q", d.Id())
loggingErr = enableRedshiftClusterLogging(d, conn)
if loggingErr != nil {
return loggingErr
}
} else {

log.Printf("[INFO] Disabling Logging for Redshift Cluster %q", d.Id())
_, loggingErr = conn.DisableLogging(&redshift.DisableLoggingInput{
ClusterIdentifier: aws.String(d.Id()),
})
if loggingErr != nil {
return loggingErr
}
}

d.SetPartial("enable_logging")
}

d.Partial(false)

return resourceAwsRedshiftClusterRead(d, meta)
}

func enableRedshiftClusterLogging(d *schema.ResourceData, conn *redshift.Redshift) error {
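// Audit logging needs a destination bucket: bucket_name is mandatory here,
// while s3_key_prefix is only passed along when it is set.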
if _, ok := d.GetOk("bucket_name"); !ok {
return fmt.Errorf("bucket_name must be set when enabling logging for Redshift Clusters")
}

params := &redshift.EnableLoggingInput{
ClusterIdentifier: aws.String(d.Id()),
BucketName: aws.String(d.Get("bucket_name").(string)),
}

if v, ok := d.GetOk("s3_key_prefix"); ok {
params.S3KeyPrefix = aws.String(v.(string))
}

_, loggingErr := conn.EnableLogging(params)
if loggingErr != nil {
log.Printf("[ERROR] Error Enabling Logging on Redshift Cluster: %s", loggingErr)
return loggingErr
}
return nil
}

func resourceAwsRedshiftClusterDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).redshiftconn
log.Printf("[DEBUG] Destroying Redshift Cluster (%s)", d.Id())
93 changes: 93 additions & 0 deletions builtin/providers/aws/resource_aws_redshift_cluster_test.go
@@ -38,6 +38,41 @@ func TestAccAWSRedshiftCluster_basic(t *testing.T) {
})
}

func TestAccAWSRedshiftCluster_loggingEnabled(t *testing.T) {
var v redshift.Cluster

ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
preConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_loggingEnabled, ri)
postConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_loggingDisabled, ri)

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSRedshiftClusterDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v),
resource.TestCheckResourceAttr(
"aws_redshift_cluster.default", "enable_logging", "true"),
resource.TestCheckResourceAttr(
"aws_redshift_cluster.default", "bucket_name", "tf-redshift-logging-test-bucket"),
),
},

resource.TestStep{
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v),
resource.TestCheckResourceAttr(
"aws_redshift_cluster.default", "enable_logging", "false"),
),
},
},
})
}

func TestAccAWSRedshiftCluster_iamRoles(t *testing.T) {
var v redshift.Cluster

@@ -399,6 +434,64 @@ resource "aws_redshift_cluster" "default" {
allow_version_upgrade = false
}`

var testAccAWSRedshiftClusterConfig_loggingDisabled = `
resource "aws_redshift_cluster" "default" {
cluster_identifier = "tf-redshift-cluster-%d"
availability_zone = "us-west-2a"
database_name = "mydb"
master_username = "foo_test"
master_password = "Mustbe8characters"
node_type = "dc1.large"
automated_snapshot_retention_period = 0
allow_version_upgrade = false
enable_logging = false
}
`

var testAccAWSRedshiftClusterConfig_loggingEnabled = `
resource "aws_s3_bucket" "bucket" {
bucket = "tf-redshift-logging-test-bucket"
force_destroy = true
policy = <<EOF
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "Stmt1376526643067",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::902366379725:user/logs"
},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::tf-redshift-logging-test-bucket/*"
},
{
"Sid": "Stmt137652664067",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::902366379725:user/logs"
},
"Action": "s3:GetBucketAcl",
"Resource": "arn:aws:s3:::tf-redshift-logging-test-bucket"
}
]
}
EOF
}
resource "aws_redshift_cluster" "default" {
cluster_identifier = "tf-redshift-cluster-%d"
availability_zone = "us-west-2a"
database_name = "mydb"
master_username = "foo_test"
master_password = "Mustbe8characters"
node_type = "dc1.large"
automated_snapshot_retention_period = 0
allow_version_upgrade = false
enable_logging = true
bucket_name = "${aws_s3_bucket.bucket.bucket}"
}`

var testAccAWSRedshiftClusterConfig_tags = `
resource "aws_redshift_cluster" "default" {
cluster_identifier = "tf-redshift-cluster-%d"
@@ -57,6 +57,10 @@ string.
* `skip_final_snapshot` - (Optional) Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true, a final cluster snapshot is not created. If false, a final cluster snapshot is created before the cluster is deleted. Default is true.
* `final_snapshot_identifier` - (Optional) The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, `skip_final_snapshot` must be false.
* `iam_roles` - (Optional) A list of IAM Role ARNs to associate with the cluster. A maximum of 10 can be associated with the cluster at any time.
* `enable_logging` - (Optional) Enables logging of information such as queries and connection attempts for the specified Amazon Redshift cluster. Defaults to `false`.
* `bucket_name` - (Optional, required when `enable_logging` is `true`) The name of an existing S3 bucket where the log files are to be stored. The bucket must be in the same region as the cluster, and the cluster must have read bucket and put object permissions on it.
For more information on the permissions required for the bucket, please read the AWS [documentation](http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging). A configuration sketch using these arguments follows this list.
* `s3_key_prefix` - (Optional) The prefix applied to the log file names.
* `tags` - (Optional) A mapping of tags to assign to the resource.
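The following is a minimal sketch of how the logging arguments fit together. The resource name, bucket name, and key prefix below are placeholders, and the bucket is assumed to already exist in the cluster's region with the policy described above.

```
resource "aws_redshift_cluster" "example" {
  cluster_identifier                  = "example-cluster"
  database_name                       = "mydb"
  master_username                     = "exampleuser"
  master_password                     = "Mustbe8characters"
  node_type                           = "dc1.large"
  automated_snapshot_retention_period = 0

  # Audit logging: the bucket must already grant the regional Redshift logging
  # account s3:PutObject and s3:GetBucketAcl, as in the acceptance test above.
  enable_logging = true
  bucket_name    = "example-redshift-audit-logs"
  s3_key_prefix  = "redshift-logs/"
}
```

Setting `enable_logging` back to `false` on a subsequent apply disables logging through the same update path shown in the resource diff above.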


