Merge pull request #38784 from ChannyClaus/add-jobStateTimeLimitAction
 [36384] Add job state time limit actions to batch queue
ewbankkit authored Aug 16, 2024
2 parents c428d2c + 6ba70db commit 99a1bf7
Showing 8 changed files with 229 additions and 128 deletions.
7 changes: 7 additions & 0 deletions .changelog/38784.txt
@@ -0,0 +1,7 @@
```release-note:enhancement
resource/aws_batch_job_queue: Add `job_state_time_limit_action` argument
```

```release-note:enhancement
data-source/aws_batch_job_queue: Add `job_state_time_limit_action` attribute
```
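For context, a minimal usage sketch of the new argument (resource and queue names are illustrative; the block values mirror the acceptance test configuration further down in this diff):

```hcl
resource "aws_batch_job_queue" "example" {
  name                 = "example-queue"
  state                = "ENABLED"
  priority             = 1
  compute_environments = [aws_batch_compute_environment.example.arn]

  # Cancel jobs that have sat in the RUNNABLE state for 10 minutes.
  job_state_time_limit_action {
    action           = "CANCEL"
    max_time_seconds = 600
    reason           = "MISCONFIGURATION:JOB_RESOURCE_REQUIREMENT"
    state            = "RUNNABLE"
  }
}
```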
72 changes: 61 additions & 11 deletions internal/service/batch/job_queue.go
@@ -15,6 +15,7 @@ import (
"github.com/aws/aws-sdk-go-v2/service/batch"
awstypes "github.com/aws/aws-sdk-go-v2/service/batch/types"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework-validators/int64validator"
"github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator"
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/path"
@@ -117,6 +118,30 @@ func (r *jobQueueResource) Schema(ctx context.Context, request resource.SchemaRe
},
},
},
"job_state_time_limit_action": schema.ListNestedBlock{
CustomType: fwtypes.NewListNestedObjectTypeOf[jobStateTimeLimitActionModel](ctx),
NestedObject: schema.NestedBlockObject{
Attributes: map[string]schema.Attribute{
names.AttrAction: schema.StringAttribute{
CustomType: fwtypes.StringEnumType[awstypes.JobStateTimeLimitActionsAction](),
Required: true,
},
"max_time_seconds": schema.Int64Attribute{
Required: true,
Validators: []validator.Int64{
int64validator.Between(600, 86400),
},
},
"reason": schema.StringAttribute{
Required: true,
},
names.AttrState: schema.StringAttribute{
CustomType: fwtypes.StringEnumType[awstypes.JobStateTimeLimitActionsState](),
Required: true,
},
},
},
},
},
}
}
@@ -146,6 +171,10 @@ func (r *jobQueueResource) Create(ctx context.Context, request resource.CreateRe
} else {
input.ComputeEnvironmentOrder = expandComputeEnvironments(ctx, data.ComputeEnvironments)
}
response.Diagnostics.Append(fwflex.Expand(ctx, data.JobStateTimeLimitActions, &input.JobStateTimeLimitActions)...)
if response.Diagnostics.HasError() {
return
}
if !data.SchedulingPolicyARN.IsNull() {
input.SchedulingPolicyArn = fwflex.StringFromFramework(ctx, data.SchedulingPolicyARN)
}
@@ -211,8 +240,13 @@ func (r *jobQueueResource) Read(ctx context.Context, request resource.ReadReques
} else {
data.ComputeEnvironments = flattenComputeEnvironments(ctx, jobQueue.ComputeEnvironmentOrder)
}

data.JobQueueARN = fwflex.StringToFrameworkLegacy(ctx, jobQueue.JobQueueArn)
data.JobQueueName = fwflex.StringToFramework(ctx, jobQueue.JobQueueName)
response.Diagnostics.Append(fwflex.Flatten(ctx, jobQueue.JobStateTimeLimitActions, &data.JobStateTimeLimitActions)...)
if response.Diagnostics.HasError() {
return
}
data.Priority = fwflex.Int32ToFrameworkLegacy(ctx, jobQueue.Priority)
data.SchedulingPolicyARN = fwflex.StringToFrameworkARN(ctx, jobQueue.SchedulingPolicyArn)
data.State = fwflex.StringValueToFramework(ctx, jobQueue.State)
@@ -252,6 +286,14 @@ func (r *jobQueueResource) Update(ctx context.Context, request resource.UpdateRe
update = true
}
}

if !new.JobStateTimeLimitActions.Equal(old.JobStateTimeLimitActions) {
response.Diagnostics.Append(fwflex.Expand(ctx, new.JobStateTimeLimitActions, &input.JobStateTimeLimitActions)...)
if response.Diagnostics.HasError() {
return
}
update = true
}
if !new.Priority.Equal(old.Priority) {
input.Priority = fwflex.Int32FromFramework(ctx, new.Priority)
update = true
@@ -497,17 +539,18 @@ func waitJobQueueDeleted(ctx context.Context, conn *batch.Client, id string, tim
}

type jobQueueResourceModel struct {
ComputeEnvironments types.List `tfsdk:"compute_environments"`
ComputeEnvironmentOrder fwtypes.ListNestedObjectValueOf[computeEnvironmentOrderModel] `tfsdk:"compute_environment_order"`
ID types.String `tfsdk:"id"`
JobQueueARN types.String `tfsdk:"arn"`
JobQueueName types.String `tfsdk:"name"`
Priority types.Int64 `tfsdk:"priority"`
SchedulingPolicyARN fwtypes.ARN `tfsdk:"scheduling_policy_arn"`
State types.String `tfsdk:"state"`
Tags types.Map `tfsdk:"tags"`
TagsAll types.Map `tfsdk:"tags_all"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
ComputeEnvironments types.List `tfsdk:"compute_environments"`
ComputeEnvironmentOrder fwtypes.ListNestedObjectValueOf[computeEnvironmentOrderModel] `tfsdk:"compute_environment_order"`
ID types.String `tfsdk:"id"`
JobQueueARN types.String `tfsdk:"arn"`
JobQueueName types.String `tfsdk:"name"`
JobStateTimeLimitActions fwtypes.ListNestedObjectValueOf[jobStateTimeLimitActionModel] `tfsdk:"job_state_time_limit_action"`
Priority types.Int64 `tfsdk:"priority"`
SchedulingPolicyARN fwtypes.ARN `tfsdk:"scheduling_policy_arn"`
State types.String `tfsdk:"state"`
Tags types.Map `tfsdk:"tags"`
TagsAll types.Map `tfsdk:"tags_all"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
}

func (model *jobQueueResourceModel) InitFromID() error {
@@ -525,6 +568,13 @@ type computeEnvironmentOrderModel struct {
Order types.Int64 `tfsdk:"order"`
}

type jobStateTimeLimitActionModel struct {
Action fwtypes.StringEnum[awstypes.JobStateTimeLimitActionsAction] `tfsdk:"action"`
MaxTimeSeconds types.Int64 `tfsdk:"max_time_seconds"`
Reason types.String `tfsdk:"reason"`
State fwtypes.StringEnum[awstypes.JobStateTimeLimitActionsState] `tfsdk:"state"`
}

func expandComputeEnvironments(ctx context.Context, tfList types.List) []awstypes.ComputeEnvironmentOrder {
var apiObjects []awstypes.ComputeEnvironmentOrder

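As a sketch of what the schema above permits: `job_state_time_limit_action` is a repeatable block, `action` and `state` are enum-typed strings, and `max_time_seconds` must fall within the bounds enforced by `int64validator.Between(600, 86400)`. The second reason string below is illustrative and not taken from this diff; the accepted values are documented by AWS Batch:

```hcl
# Fragment only; the queue's other required arguments are omitted.
resource "aws_batch_job_queue" "example" {
  # ...

  job_state_time_limit_action {
    action           = "CANCEL"
    max_time_seconds = 600 # lower bound allowed by the validator
    reason           = "MISCONFIGURATION:JOB_RESOURCE_REQUIREMENT"
    state            = "RUNNABLE"
  }

  job_state_time_limit_action {
    action           = "CANCEL"
    max_time_seconds = 86400 # upper bound allowed by the validator
    reason           = "CAPACITY:INSUFFICIENT_INSTANCE_CAPACITY" # illustrative reason string
    state            = "RUNNABLE"
  }
}
```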
37 changes: 37 additions & 0 deletions internal/service/batch/job_queue_data_source.go
@@ -43,6 +43,30 @@ func dataSourceJobQueue() *schema.Resource {
},
},
},
"job_state_time_limit_action": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
names.AttrAction: {
Type: schema.TypeString,
Computed: true,
},
"max_time_seconds": {
Type: schema.TypeInt,
Computed: true,
},
"reason": {
Type: schema.TypeString,
Computed: true,
},
names.AttrState: {
Type: schema.TypeString,
Computed: true,
},
},
},
},
names.AttrName: {
Type: schema.TypeString,
Required: true,
@@ -103,6 +127,19 @@ func dataSourceJobQueueRead(ctx context.Context, d *schema.ResourceData, meta in
return sdkdiag.AppendErrorf(diags, "setting compute_environment_order: %s", err)
}

tfList = make([]interface{}, 0)
for _, apiObject := range jobQueue.JobStateTimeLimitActions {
tfMap := map[string]interface{}{}
tfMap[names.AttrAction] = apiObject.Action
tfMap["max_time_seconds"] = aws.ToInt32(apiObject.MaxTimeSeconds)
tfMap["reason"] = aws.ToString(apiObject.Reason)
tfMap[names.AttrState] = apiObject.State
tfList = append(tfList, tfMap)
}
if err := d.Set("job_state_time_limit_action", tfList); err != nil {
return sdkdiag.AppendErrorf(diags, "setting job_state_time_limit_action: %s", err)
}

setTagsOut(ctx, jobQueue.Tags)

return diags
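A quick sketch of reading the new computed attribute back through the data source (the queue name is a placeholder):

```hcl
data "aws_batch_job_queue" "example" {
  name = "example-queue"
}

# Each element exposes action, max_time_seconds, reason, and state.
output "job_state_time_limit_actions" {
  value = data.aws_batch_job_queue.example.job_state_time_limit_action
}
```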
119 changes: 13 additions & 106 deletions internal/service/batch/job_queue_data_source_test.go
@@ -63,6 +63,7 @@ func TestAccBatchJobQueueDataSource_schedulingPolicy(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttrPair(dataSourceName, names.AttrARN, resourceName, names.AttrARN),
resource.TestCheckResourceAttrPair(dataSourceName, "compute_environment_order.#", resourceName, "compute_environments.#"),
resource.TestCheckResourceAttrPair(dataSourceName, "job_state_time_limit_action.#", resourceName, "job_state_time_limit_action.#"),
resource.TestCheckResourceAttrPair(dataSourceName, names.AttrName, resourceName, names.AttrName),
resource.TestCheckResourceAttrPair(dataSourceName, names.AttrPriority, resourceName, names.AttrPriority),
resource.TestCheckResourceAttrPair(dataSourceName, "scheduling_policy_arn", resourceName, "scheduling_policy_arn"),
@@ -76,123 +77,22 @@
})
}

func testAccJobQueueDataSourceConfigBase(rName string) string {
return fmt.Sprintf(`
data "aws_partition" "current" {}
resource "aws_iam_role" "ecs_instance_role" {
name = "ecs_%[1]s"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ec2.${data.aws_partition.current.dns_suffix}"
}
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "ecs_instance_role" {
role = aws_iam_role.ecs_instance_role.name
policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_instance_profile" "ecs_instance_role" {
name = "ecs_%[1]s"
role = aws_iam_role.ecs_instance_role.name
}
resource "aws_iam_role" "aws_batch_service_role" {
name = "batch_%[1]s"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "batch.${data.aws_partition.current.dns_suffix}"
}
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "aws_batch_service_role" {
role = aws_iam_role.aws_batch_service_role.name
policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AWSBatchServiceRole"
}
resource "aws_security_group" "sample" {
name = "%[1]s"
}
resource "aws_vpc" "sample" {
cidr_block = "10.1.0.0/16"
}
resource "aws_subnet" "sample" {
vpc_id = aws_vpc.sample.id
cidr_block = "10.1.1.0/24"
}
resource "aws_batch_compute_environment" "sample" {
compute_environment_name = "%[1]s"
compute_resources {
instance_role = aws_iam_instance_profile.ecs_instance_role.arn
instance_type = [
"c4.large",
]
max_vcpus = 16
min_vcpus = 0
security_group_ids = [
aws_security_group.sample.id,
]
subnets = [
aws_subnet.sample.id,
]
type = "EC2"
}
service_role = aws_iam_role.aws_batch_service_role.arn
type = "MANAGED"
depends_on = [aws_iam_role_policy_attachment.aws_batch_service_role]
}
`, rName)
}

func testAccJobQueueDataSourceConfig_basic(rName string) string {
return acctest.ConfigCompose(
testAccJobQueueDataSourceConfigBase(rName),
testAccJobQueueConfig_base(rName),
fmt.Sprintf(`
resource "aws_batch_job_queue" "test" {
name = "%[1]s"
state = "ENABLED"
priority = 1
compute_environments = [aws_batch_compute_environment.sample.arn]
compute_environments = [aws_batch_compute_environment.test.arn]
}
resource "aws_batch_job_queue" "wrong" {
name = "%[1]s_wrong"
state = "ENABLED"
priority = 2
compute_environments = [aws_batch_compute_environment.sample.arn]
compute_environments = [aws_batch_compute_environment.test.arn]
}
data "aws_batch_job_queue" "by_name" {
@@ -203,7 +103,7 @@ data "aws_batch_job_queue" "by_name" {

func testAccJobQueueDataSourceConfig_schedulingPolicy(rName string) string {
return acctest.ConfigCompose(
testAccJobQueueDataSourceConfigBase(rName),
testAccJobQueueConfig_base(rName),
fmt.Sprintf(`
resource "aws_batch_scheduling_policy" "test" {
name = %[1]q
@@ -224,7 +124,14 @@ resource "aws_batch_job_queue" "test" {
scheduling_policy_arn = aws_batch_scheduling_policy.test.arn
state = "ENABLED"
priority = 1
compute_environments = [aws_batch_compute_environment.sample.arn]
compute_environments = [aws_batch_compute_environment.test.arn]
job_state_time_limit_action {
action = "CANCEL"
max_time_seconds = 600
reason = "MISCONFIGURATION:JOB_RESOURCE_REQUIREMENT"
state = "RUNNABLE"
}
}
data "aws_batch_job_queue" "test" {
21 changes: 11 additions & 10 deletions internal/service/batch/job_queue_migrate.go
@@ -72,16 +72,17 @@ func upgradeJobQueueResourceStateV0toV1(ctx context.Context, request resource.Up
}

jobQueueDataV1 := jobQueueResourceModel{
ComputeEnvironments: jobQueueDataV0.ComputeEnvironments,
ComputeEnvironmentOrder: fwtypes.NewListNestedObjectValueOfNull[computeEnvironmentOrderModel](ctx),
ID: jobQueueDataV0.ID,
JobQueueARN: jobQueueDataV0.JobQueueARN,
JobQueueName: jobQueueDataV0.JobQueueName,
Priority: jobQueueDataV0.Priority,
State: jobQueueDataV0.State,
Tags: jobQueueDataV0.Tags,
TagsAll: jobQueueDataV0.TagsAll,
Timeouts: jobQueueDataV0.Timeouts,
ComputeEnvironments: jobQueueDataV0.ComputeEnvironments,
ComputeEnvironmentOrder: fwtypes.NewListNestedObjectValueOfNull[computeEnvironmentOrderModel](ctx),
ID: jobQueueDataV0.ID,
JobQueueARN: jobQueueDataV0.JobQueueARN,
JobQueueName: jobQueueDataV0.JobQueueName,
JobStateTimeLimitActions: fwtypes.NewListNestedObjectValueOfNull[jobStateTimeLimitActionModel](ctx),
Priority: jobQueueDataV0.Priority,
State: jobQueueDataV0.State,
Tags: jobQueueDataV0.Tags,
TagsAll: jobQueueDataV0.TagsAll,
Timeouts: jobQueueDataV0.Timeouts,
}

if jobQueueDataV0.SchedulingPolicyARN.ValueString() == "" {