Add import support for kinesis firehose delivery stream #2082

Merged (10 commits, Dec 20, 2017)
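With this change, an existing delivery stream can be imported by passing its full ARN as the import ID (the importer below parses the stream name out of the ARN's resource segment). A usage sketch, with an illustrative region, account ID, and stream name:

    terraform import aws_kinesis_firehose_delivery_stream.test_stream arn:aws:firehose:us-east-1:123456789012:deliverystream/example-stream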
33 changes: 33 additions & 0 deletions aws/import_aws_kinesis_firehose_delivery_stream_test.go
@@ -0,0 +1,33 @@
package aws

import (
	"fmt"
	"os"
	"testing"

	"github.com/hashicorp/terraform/helper/acctest"
	"github.com/hashicorp/terraform/helper/resource"
)

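// TestAccAWSKinesisFirehoseDeliveryStream_importBasic creates a basic S3
// delivery stream, then verifies it can be imported with no state diff.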
func TestAccAWSKinesisFirehoseDeliveryStream_importBasic(t *testing.T) {
	resName := "aws_kinesis_firehose_delivery_stream.test_stream"
	rInt := acctest.RandInt()
	config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3basic,
		rInt, os.Getenv("AWS_ACCOUNT_ID"), rInt, rInt, rInt)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy,
		Steps: []resource.TestStep{
			{
				Config: config,
			},
			{
				ResourceName:      resName,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
160 changes: 154 additions & 6 deletions aws/resource_aws_kinesis_firehose_delivery_stream.go
@@ -1,14 +1,17 @@
package aws

import (
	"bytes"
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/arn"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/firehose"
	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)
@@ -161,13 +164,160 @@ func processingConfigurationSchema() *schema.Schema {
	}
}

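// cloudwatchLoggingOptionsHash produces a stable hash for a
// cloudwatch_logging_options set element; the group and stream names only
// contribute to the hash when logging is enabled.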
func cloudwatchLoggingOptionsHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})
	buf.WriteString(fmt.Sprintf("%t-", m["enabled"].(bool)))
	if m["enabled"].(bool) {
		buf.WriteString(fmt.Sprintf("%s-", m["log_group_name"].(string)))
		buf.WriteString(fmt.Sprintf("%s-", m["log_stream_name"].(string)))
	}
	return hashcode.String(buf.String())
}

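// flattenCloudwatchLoggingOptions converts the API's CloudWatchLoggingOptions
// into the single-element set the schema expects.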
func flattenCloudwatchLoggingOptions(clo firehose.CloudWatchLoggingOptions) *schema.Set {
	cloudwatchLoggingOptions := map[string]interface{}{
		"enabled": *clo.Enabled,
	}
	if *clo.Enabled {
		cloudwatchLoggingOptions["log_group_name"] = *clo.LogGroupName
		cloudwatchLoggingOptions["log_stream_name"] = *clo.LogStreamName
	}
	return schema.NewSet(cloudwatchLoggingOptionsHash, []interface{}{cloudwatchLoggingOptions})
}

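// flattenFirehoseS3Configuration maps an S3DestinationDescription from the API
// onto the resource's s3_configuration attributes.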
func flattenFirehoseS3Configuration(s3 firehose.S3DestinationDescription) []map[string]interface{} {
	s3Configuration := make([]map[string]interface{}, 1)
	s3Configuration[0] = map[string]interface{}{
		"role_arn":                   *s3.RoleARN,
		"bucket_arn":                 *s3.BucketARN,
		"prefix":                     *s3.Prefix,
		"buffer_size":                *s3.BufferingHints.SizeInMBs,
		"buffer_interval":            *s3.BufferingHints.IntervalInSeconds,
		"compression_format":         *s3.CompressionFormat,
		"cloudwatch_logging_options": flattenCloudwatchLoggingOptions(*s3.CloudWatchLoggingOptions),
	}
	if s3.EncryptionConfiguration.KMSEncryptionConfig != nil {
		// KMSEncryptionConfig is a struct; store the key ARN, not the struct itself.
		s3Configuration[0]["kms_key_arn"] = *s3.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN
	}
	return s3Configuration
}

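// flattenProcessingConfiguration maps a ProcessingConfiguration and its
// processors/parameters onto the nested schema lists.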
func flattenProcessingConfiguration(pc firehose.ProcessingConfiguration) []map[string]interface{} {
	processingConfiguration := make([]map[string]interface{}, 1)
	var processors []map[string]interface{}
	for _, p := range pc.Processors {
		// Accumulate all parameters for this processor instead of
		// overwriting the map on each iteration.
		var parameters []map[string]interface{}
		for _, params := range p.Parameters {
			parameters = append(parameters, map[string]interface{}{
				"parameter_name":  *params.ParameterName,
				"parameter_value": *params.ParameterValue,
			})
		}
		processors = append(processors, map[string]interface{}{
			"type":       *p.Type,
			"parameters": parameters,
		})
	}
	processingConfiguration[0] = map[string]interface{}{
		"enabled":    *pc.Enabled,
		"processors": processors,
	}
	return processingConfiguration
}

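// flattenKinesisFirehoseDeliveryStream writes a DeliveryStreamDescription into
// state, dispatching on whichever destination description is populated.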
func flattenKinesisFirehoseDeliveryStream(d *schema.ResourceData, s *firehose.DeliveryStreamDescription) error {
	d.Set("version_id", s.VersionId)
	d.Set("arn", *s.DeliveryStreamARN)
	d.Set("name", s.DeliveryStreamName)
	if len(s.Destinations) > 0 {
		destination := s.Destinations[0]
		if destination.RedshiftDestinationDescription != nil {
			d.Set("destination", "redshift")

			redshiftConfiguration := map[string]interface{}{
				"cluster_jdbcurl":            *destination.RedshiftDestinationDescription.ClusterJDBCURL,
				"role_arn":                   *destination.RedshiftDestinationDescription.RoleARN,
				"username":                   *destination.RedshiftDestinationDescription.Username,
				"data_table_name":            *destination.RedshiftDestinationDescription.CopyCommand.DataTableName,
				"copy_options":               *destination.RedshiftDestinationDescription.CopyCommand.CopyOptions,
				"data_table_columns":         *destination.RedshiftDestinationDescription.CopyCommand.DataTableColumns,
				"s3_backup_mode":             *destination.RedshiftDestinationDescription.S3BackupMode,
				"s3_backup_configuration":    flattenFirehoseS3Configuration(*destination.RedshiftDestinationDescription.S3BackupDescription),
				"retry_duration":             *destination.RedshiftDestinationDescription.RetryOptions.DurationInSeconds,
				"cloudwatch_logging_options": flattenCloudwatchLoggingOptions(*destination.RedshiftDestinationDescription.CloudWatchLoggingOptions),
			}
			redshiftConfList := make([]map[string]interface{}, 1)
			redshiftConfList[0] = redshiftConfiguration
			d.Set("redshift_configuration", redshiftConfList)
			d.Set("s3_configuration", flattenFirehoseS3Configuration(*destination.RedshiftDestinationDescription.S3DestinationDescription))
		} else if destination.ElasticsearchDestinationDescription != nil {
			d.Set("destination", "elasticsearch")

			elasticsearchConfiguration := map[string]interface{}{
				"buffering_interval":         *destination.ElasticsearchDestinationDescription.BufferingHints.IntervalInSeconds,
				"buffering_size":             *destination.ElasticsearchDestinationDescription.BufferingHints.SizeInMBs,
				"domain_arn":                 *destination.ElasticsearchDestinationDescription.DomainARN,
				"role_arn":                   *destination.ElasticsearchDestinationDescription.RoleARN,
				"type_name":                  *destination.ElasticsearchDestinationDescription.TypeName,
				"index_name":                 *destination.ElasticsearchDestinationDescription.IndexName,
				"s3_backup_mode":             *destination.ElasticsearchDestinationDescription.S3BackupMode,
				"retry_duration":             *destination.ElasticsearchDestinationDescription.RetryOptions.DurationInSeconds,
				"index_rotation_period":      *destination.ElasticsearchDestinationDescription.IndexRotationPeriod,
				"cloudwatch_logging_options": flattenCloudwatchLoggingOptions(*destination.ElasticsearchDestinationDescription.CloudWatchLoggingOptions),
			}
			elasticsearchConfList := make([]map[string]interface{}, 1)
			elasticsearchConfList[0] = elasticsearchConfiguration
			d.Set("elasticsearch_configuration", elasticsearchConfList)
			d.Set("s3_configuration", flattenFirehoseS3Configuration(*destination.ElasticsearchDestinationDescription.S3DestinationDescription))
		} else if destination.S3DestinationDescription != nil {
			d.Set("destination", "s3")
			d.Set("s3_configuration", flattenFirehoseS3Configuration(*destination.S3DestinationDescription))
		} else if destination.ExtendedS3DestinationDescription != nil {
			d.Set("destination", "extended_s3")

			extendedS3Configuration := map[string]interface{}{
				"buffering_interval":         *destination.ExtendedS3DestinationDescription.BufferingHints.IntervalInSeconds,
				"buffering_size":             *destination.ExtendedS3DestinationDescription.BufferingHints.SizeInMBs,
				"bucket_arn":                 *destination.ExtendedS3DestinationDescription.BucketARN,
				"role_arn":                   *destination.ExtendedS3DestinationDescription.RoleARN,
				"compression_format":         *destination.ExtendedS3DestinationDescription.CompressionFormat,
				"prefix":                     *destination.ExtendedS3DestinationDescription.Prefix,
				"s3_backup_mode":             *destination.ExtendedS3DestinationDescription.S3BackupMode,
				"cloudwatch_logging_options": flattenCloudwatchLoggingOptions(*destination.ExtendedS3DestinationDescription.CloudWatchLoggingOptions),
			}
			if destination.ExtendedS3DestinationDescription.EncryptionConfiguration.KMSEncryptionConfig != nil {
				// KMSEncryptionConfig is a struct; store its key ARN rather than the struct.
				extendedS3Configuration["kms_key_arn"] = *destination.ExtendedS3DestinationDescription.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN
			}
			if destination.ExtendedS3DestinationDescription.ProcessingConfiguration != nil {
				extendedS3Configuration["processing_configuration"] = flattenProcessingConfiguration(*destination.ExtendedS3DestinationDescription.ProcessingConfiguration)
			}
			extendedS3ConfList := make([]map[string]interface{}, 1)
			extendedS3ConfList[0] = extendedS3Configuration
			d.Set("extended_s3_configuration", extendedS3ConfList)
		}
		d.Set("destination_id", *destination.DestinationId)
	}
	return nil
}

func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsKinesisFirehoseDeliveryStreamCreate,
		Read:   resourceAwsKinesisFirehoseDeliveryStreamRead,
		Update: resourceAwsKinesisFirehoseDeliveryStreamUpdate,
		Delete: resourceAwsKinesisFirehoseDeliveryStreamDelete,

		Importer: &schema.ResourceImporter{
			State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
				// The import ID is the delivery stream ARN; its resource
				// segment has the form "deliverystream/<name>", so split
				// out the stream name.
				resARN, err := arn.Parse(d.Id())
				if err != nil {
					return nil, err
				}
				d.Set("name", strings.Split(resARN.Resource, "/")[1])
				return []*schema.ResourceData{d}, nil
			},
		},

		SchemaVersion: 1,
		MigrateState:  resourceAwsKinesisFirehoseMigrateState,
		Schema: map[string]*schema.Schema{
@@ -1103,17 +1253,15 @@ func resourceAwsKinesisFirehoseDeliveryStreamRead(d *schema.ResourceData, meta i
d.SetId("")
return nil
}
return fmt.Errorf("[WARN] Error reading Kinesis Firehose Delivery Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code())
return fmt.Errorf("[WARN] Error reading Kinesis Firehose Delivery Stream: %s", awsErr.Error())
}
return err
}

s := resp.DeliveryStreamDescription
d.Set("version_id", s.VersionId)
d.Set("arn", *s.DeliveryStreamARN)
if len(s.Destinations) > 0 {
destination := s.Destinations[0]
d.Set("destination_id", *destination.DestinationId)
err = flattenKinesisFirehoseDeliveryStream(d, s)
if err != nil {
return err
}

return nil