diff --git a/.github/config/chunks.yaml b/.github/config/chunks.yaml
index d094710473..49676c4ca3 100644
--- a/.github/config/chunks.yaml
+++ b/.github/config/chunks.yaml
@@ -7,6 +7,7 @@ chunks:
- cdk
- cdk-custom-resources
- cdk-environment
+ - build-uptime-lambda
- system-test/system-test-cdk
- system-test/system-test-configuration
- system-test/system-test-data-generation
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index ed623baccc..4bcb4f2740 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -14,6 +14,4 @@ Make sure you have checked _all_ steps below.
### Documentation
- [ ] In case of new functionality, my PR adds documentation that describes how to use it, or I have linked to a
- separate issue for that below.
-- [ ] If I have added, removed, or updated any external dependencies used in the project, I have updated the
- [NOTICES](/NOTICES) file to reflect this.
\ No newline at end of file
+ separate issue for that below.
\ No newline at end of file
diff --git a/.github/workflows/chunk-clients-cdk.yaml b/.github/workflows/chunk-clients-cdk.yaml
index f6dc7f598b..02f97c2267 100644
--- a/.github/workflows/chunk-clients-cdk.yaml
+++ b/.github/workflows/chunk-clients-cdk.yaml
@@ -13,6 +13,7 @@ on:
- 'java/cdk/**'
- 'java/cdk-custom-resources/**'
- 'java/cdk-environment/**'
+ - 'java/build-uptime-lambda/**'
- 'java/system-test/system-test-cdk/**'
- 'java/system-test/system-test-configuration/**'
- 'java/system-test/system-test-data-generation/**'
diff --git a/.gitignore b/.gitignore
index 11801739e0..65da13a7af 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,4 +28,5 @@ python/env/
target/coverage/*
cmake-build-*/
conan-cache/
-test_data/
+scripts/test/deployAll/system-test-instance.properties
+scripts/test/maven/system-test-instance.properties
\ No newline at end of file
diff --git a/code-style/dependency-check-suppressions.xml b/code-style/dependency-check-suppressions.xml
index f70f940e3a..c158ab6751 100644
--- a/code-style/dependency-check-suppressions.xml
+++ b/code-style/dependency-check-suppressions.xml
@@ -283,6 +283,14 @@
^pkg:maven/org\.eclipse\.jetty/jetty-servlets@.*$
CVE-2023-36479
+
+
+ ^pkg:maven/org\.eclipse\.jetty/jetty-http@.*$
+ CVE-2024-6763
+
^pkg:javascript/DOMPurify@.*$
- CVE-2024-45801
+ CVE-2024-45801|CVE-2024-47875
+
+
+
+ ^pkg:maven/org\.glassfish\.hk2/osgi-resource-locator@.*$
+ CVE-2024-9329
diff --git a/docs/02-deployment-guide.md b/docs/02-deployment-guide.md
index 7d9dc10d9a..c724b5389d 100644
--- a/docs/02-deployment-guide.md
+++ b/docs/02-deployment-guide.md
@@ -104,17 +104,20 @@ The Sleeper CLI also lets you manage multiple environments.
You can deploy either the VPC or the EC2 independently, or specify an existing VPC to deploy the EC2 to.
You must specify an environment ID when deploying an environment. Parameters after the environment ID will be passed to
-a `cdk deploy` command.
+a `cdk deploy --all` command.
```bash
# Deploy EC2 in a new VPC
sleeper environment deploy MyEnvironment
# Only deploy VPC
-sleeper environment deploy VPCEnvironment "*-Networking"
+sleeper environment deploy VPCEnvironment -c deployEc2=false
# Deploy EC2 in an existing VPC
-sleeper environment deploy EC2Environment -c vpcId=[vpc-id] "*-BuildEC2"
+sleeper environment deploy EC2Environment -c vpcId=[vpc-id]
+
+# Deploy with nightly system test automation
+sleeper environment deploy NightlyTestEnvironment -c nightlyTestsEnabled=true
```
You can switch environments like this:
diff --git a/example/basic/instance.properties b/example/basic/instance.properties
index 7a2ad83532..1a32596fbd 100644
--- a/example/basic/instance.properties
+++ b/example/basic/instance.properties
@@ -23,7 +23,7 @@ sleeper.retain.infra.after.destroy=true
# PersistentEmrBulkImportStack, EksBulkImportStack, EmrStudioStack, QueryStack, WebSocketQueryStack,
# AthenaStack, KeepLambdaWarmStack, CompactionStack, GarbageCollectorStack, PartitionSplittingStack,
# DashboardStack, TableMetricsStack]
-sleeper.optional.stacks=CompactionStack,GarbageCollectorStack,IngestStack,IngestBatcherStack,PartitionSplittingStack,QueryStack,AthenaStack,EmrServerlessBulkImportStack,EmrStudioStack,DashboardStack,TableMetricsStack
+sleeper.optional.stacks=IngestStack,IngestBatcherStack,EmrServerlessBulkImportStack,EmrStudioStack,QueryStack,AthenaStack,CompactionStack,GarbageCollectorStack,PartitionSplittingStack,DashboardStack,TableMetricsStack
# The AWS account number. This is the AWS account that the instance will be deployed to.
sleeper.account=1234567890
diff --git a/example/full/instance.properties b/example/full/instance.properties
index c3e05afa6a..36d4fec847 100644
--- a/example/full/instance.properties
+++ b/example/full/instance.properties
@@ -28,7 +28,7 @@ sleeper.retain.infra.after.destroy=true
# PersistentEmrBulkImportStack, EksBulkImportStack, EmrStudioStack, QueryStack, WebSocketQueryStack,
# AthenaStack, KeepLambdaWarmStack, CompactionStack, GarbageCollectorStack, PartitionSplittingStack,
# DashboardStack, TableMetricsStack]
-sleeper.optional.stacks=CompactionStack,GarbageCollectorStack,IngestStack,IngestBatcherStack,PartitionSplittingStack,QueryStack,AthenaStack,EmrServerlessBulkImportStack,EmrStudioStack,DashboardStack,TableMetricsStack
+sleeper.optional.stacks=IngestStack,IngestBatcherStack,EmrServerlessBulkImportStack,EmrStudioStack,QueryStack,AthenaStack,CompactionStack,GarbageCollectorStack,PartitionSplittingStack,DashboardStack,TableMetricsStack
# The AWS account number. This is the AWS account that the instance will be deployed to.
sleeper.account=1234567890
@@ -371,6 +371,22 @@ sleeper.ingest.batcher.job.creation.period.minutes=1
## The following properties relate to bulk import, i.e. ingesting data using Spark jobs running on EMR
## or EKS.
+##
+## Note that on EMR, the total resource allocation must align with the instance types used for the
+## cluster. For the maximum memory usage, combine the memory and memory overhead properties, and
+## compare against the maximum memory allocation for YARN in the Hadoop task configuration:
+##
+## https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-hadoop-task-config.html
+##
+## As an example, if we use m7i.xlarge for executor instances, that has a maximum allocation of 54272
+## MiB, or 53 GiB. If we want 3 executors per instance, we can have 53 GiB / 3 = 18,090.666 MiB per
+## executor. We can set the executor memory to 16 GiB, and the executor memory overhead to the
+## remainder of that amount, which is 18,090 MiB - 16 GiB = 1,706 MiB, or 1.666 GiB. This is just above
+## the default Spark memory overhead factor of 0.1, i.e. 16 GiB x 0.1 = 1.6 GiB.
+##
+## Also see EMR best practices:
+##
+## https://aws.github.io/aws-emr-best-practices/docs/bestpractices/Applications/Spark/best_practices/#bp-516----tune-driverexecutor-memory-cores-and-sparksqlshufflepartitions-to-fully-utilize-cluster-resources
# The class to use to perform the bulk import. The default value below uses Spark Dataframes. There is
# an alternative option that uses RDDs (sleeper.bulkimport.job.runner.rdd.BulkImportJobRDDDriver).
@@ -403,11 +419,11 @@ sleeper.bulk.import.emr.spark.executor.instances=29
# The memory overhead for an executor. Used to set spark.executor.memoryOverhead.
# See https://spark.apache.org/docs/latest/configuration.html.
-sleeper.bulk.import.emr.spark.executor.memory.overhead=2g
+sleeper.bulk.import.emr.spark.executor.memory.overhead=1706m
# The memory overhead for the driver. Used to set spark.driver.memoryOverhead.
# See https://spark.apache.org/docs/latest/configuration.html.
-sleeper.bulk.import.emr.spark.driver.memory.overhead=2g
+sleeper.bulk.import.emr.spark.driver.memory.overhead=1706m
# The default parallelism for Spark job. Used to set spark.default.parallelism.
# See https://spark.apache.org/docs/latest/configuration.html.
@@ -511,6 +527,11 @@ sleeper.bulk.import.emr.ebs.volume.type=gp2
# This can be a number from 1 to 25.
sleeper.bulk.import.emr.ebs.volumes.per.instance=4
+# ARN of the KMS Key used to encrypt data at rest on the local file system in AWS EMR.
+# See
+# https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-encryption-enable.html#emr-encryption-create-keys.
+# sleeper.bulk.import.emr.ebs.encryption.key.arn=
+
# The architecture for EMR Serverless to use. X86_64 or ARM64 (Coming soon)
sleeper.bulk.import.emr.serverless.architecture=X86_64
@@ -1151,6 +1172,9 @@ sleeper.athena.handler.memory=4096
# The timeout in seconds for the athena composite handler.
sleeper.athena.handler.timeout.seconds=900
+# ARN of the KMS Key used to encrypt data in the Athena spill bucket.
+# sleeper.athena.spill.master.key.arn=
+
## The following properties relate to default values used by table properties.
diff --git a/java/build-uptime-lambda/pom.xml b/java/build-uptime-lambda/pom.xml
new file mode 100644
index 0000000000..edca9f3b54
--- /dev/null
+++ b/java/build-uptime-lambda/pom.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+    Copyright 2022-2024 Crown Copyright
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<!-- NOTE(review): XML tags were stripped in extraction; reconstructed from the surviving
+     values following standard Maven conventions - verify against the original file. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>sleeper</groupId>
+        <artifactId>aws</artifactId>
+        <version>0.26.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>build-uptime-lambda</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.reload4j</groupId>
+            <artifactId>reload4j</artifactId>
+            <scope>runtime</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-reload4j</artifactId>
+            <scope>runtime</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>ec2</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>cloudwatchevents</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-lambda-java-core</artifactId>
+            <version>${aws-lambda-java-core.version}</version>
+        </dependency>
+        <!-- Test dependencies -->
+        <dependency>
+            <groupId>sleeper</groupId>
+            <artifactId>core</artifactId>
+            <version>${project.parent.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.github.tomakehurst</groupId>
+            <artifactId>wiremock-jre8</artifactId>
+            <version>${wiremock.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>localstack</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>testcontainers</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>junit-jupiter</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeCondition.java b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeCondition.java
new file mode 100644
index 0000000000..b9bb52baf1
--- /dev/null
+++ b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeCondition.java
@@ -0,0 +1,55 @@
/*
 * Copyright 2022-2024 Crown Copyright
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package sleeper.build.uptime.lambda;

import software.amazon.awssdk.services.s3.S3Client;

import java.time.Instant;

/**
 * A guard deciding whether a build uptime operation should be applied. The condition
 * and the test bucket it reads from are both taken from the triggering event.
 */
public class BuildUptimeCondition {

    public static final String TEST_FINISHED_FROM_TODAY = "testFinishedFromToday";

    private final String condition;
    private final String testBucket;

    private BuildUptimeCondition(String condition, String testBucket) {
        this.condition = condition;
        this.testBucket = testBucket;
    }

    /**
     * Reads the condition settings held on an event.
     *
     * @param  event the build uptime event
     * @return       the condition to evaluate before applying the event's operation
     */
    public static BuildUptimeCondition of(BuildUptimeEvent event) {
        return new BuildUptimeCondition(event.getCondition(), event.getTestBucket());
    }

    /**
     * Evaluates the condition against the contents of the test bucket in S3.
     *
     * @param  s3  the S3 client
     * @param  now the current time
     * @return     true if the operation should be applied
     */
    public boolean check(S3Client s3, Instant now) {
        return check(GetS3ObjectAsString.fromClient(s3), now);
    }

    /**
     * Evaluates the condition against the contents of the test bucket.
     *
     * @param  s3  retrieves objects from S3 as strings
     * @param  now the current time
     * @return     true if the operation should be applied
     */
    public boolean check(GetS3ObjectAsString s3, Instant now) {
        // An event with no condition is applied unconditionally.
        if (condition == null) {
            return true;
        }
        if (TEST_FINISHED_FROM_TODAY.equals(condition)) {
            return NightlyTestSummaryTable.fromS3(s3, testBucket).containsTestFromToday(now);
        }
        // NOTE(review): an unrecognised condition evaluates to false, so the operation
        // is silently skipped rather than failing loudly - confirm this is intended.
        return false;
    }

}
diff --git a/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeEvent.java b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeEvent.java
new file mode 100644
index 0000000000..2141bbba2d
--- /dev/null
+++ b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeEvent.java
@@ -0,0 +1,125 @@
/*
 * Copyright 2022-2024 Crown Copyright
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package sleeper.build.uptime.lambda;

import java.util.List;

/**
 * An event telling the build uptime lambda to start or stop EC2 instances and/or
 * enable or disable CloudWatch Events rules, optionally guarded by a condition
 * evaluated against a nightly test summary in S3.
 */
public class BuildUptimeEvent {

    private final String operation;
    private final String condition;
    private final String testBucket;
    private final List<String> ec2Ids;
    private final List<String> rules;

    private BuildUptimeEvent(Builder builder) {
        operation = builder.operation;
        condition = builder.condition;
        testBucket = builder.testBucket;
        ec2Ids = builder.ec2Ids;
        rules = builder.rules;
    }

    /** @return the operation to apply, either "start" or "stop" */
    public String getOperation() {
        return operation;
    }

    /** @return the name of the condition to check before applying the operation, or null for no condition */
    public String getCondition() {
        return condition;
    }

    /** @return the S3 bucket holding the nightly test summary, used when evaluating the condition */
    public String getTestBucket() {
        return testBucket;
    }

    /** @return IDs of EC2 instances to start or stop, may be null */
    public List<String> getEc2Ids() {
        return ec2Ids;
    }

    /** @return names of CloudWatch Events rules to enable or disable, may be null */
    public List<String> getRules() {
        return rules;
    }

    @Override
    public String toString() {
        return "BuildUptimeEvent{operation=" + operation + ", ec2Ids=" + ec2Ids + ", rules=" + rules + "}";
    }

    /** @return a builder for an event with the "start" operation */
    public static Builder start() {
        return operation("start");
    }

    /** @return a builder for an event with the "stop" operation */
    public static Builder stop() {
        return operation("stop");
    }

    /**
     * Creates a builder for an event with the given operation.
     *
     * @param  operation the operation name
     * @return           the builder
     */
    public static Builder operation(String operation) {
        return new Builder().operation(operation);
    }

    /** Builder for build uptime events. */
    public static class Builder {

        private String operation;
        private String condition;
        private String testBucket;
        private List<String> ec2Ids;
        private List<String> rules;

        private Builder() {
        }

        public Builder operation(String operation) {
            this.operation = operation;
            return this;
        }

        public Builder condition(String condition) {
            this.condition = condition;
            return this;
        }

        public Builder testBucket(String testBucket) {
            this.testBucket = testBucket;
            return this;
        }

        public Builder ec2Ids(List<String> ec2Ids) {
            this.ec2Ids = ec2Ids;
            return this;
        }

        public Builder rules(List<String> rules) {
            this.rules = rules;
            return this;
        }

        public Builder ec2Ids(String... ec2Ids) {
            return ec2Ids(List.of(ec2Ids));
        }

        public Builder rules(String... rules) {
            return rules(List.of(rules));
        }

        /** Sets the condition that a nightly test must have finished today. */
        public Builder ifTestFinishedFromToday() {
            return condition(BuildUptimeCondition.TEST_FINISHED_FROM_TODAY);
        }

        public BuildUptimeEvent build() {
            return new BuildUptimeEvent(this);
        }
    }
}
diff --git a/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeEventSerDe.java b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeEventSerDe.java
new file mode 100644
index 0000000000..bce5a0baf0
--- /dev/null
+++ b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeEventSerDe.java
@@ -0,0 +1,36 @@
/*
 * Copyright 2022-2024 Crown Copyright
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package sleeper.build.uptime.lambda;

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;

import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

/**
 * Serialises and deserialises build uptime events to and from JSON.
 */
public class BuildUptimeEventSerDe {

    // Gson instances are thread-safe, so a single shared instance is used.
    private static final Gson GSON = new GsonBuilder().create();

    /**
     * Reads a build uptime event from a stream of UTF-8 encoded JSON.
     *
     * @param  stream the input stream
     * @return        the event
     */
    public BuildUptimeEvent fromJson(InputStream stream) {
        InputStreamReader reader = new InputStreamReader(stream, StandardCharsets.UTF_8);
        return GSON.fromJson(reader, BuildUptimeEvent.class);
    }

    /**
     * Writes a build uptime event as JSON.
     *
     * @param  event the event
     * @return       the JSON string
     */
    public String toJson(BuildUptimeEvent event) {
        return GSON.toJson(event);
    }

}
diff --git a/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeLambda.java b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeLambda.java
new file mode 100644
index 0000000000..c2ff4943b8
--- /dev/null
+++ b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/BuildUptimeLambda.java
@@ -0,0 +1,84 @@
/*
 * Copyright 2022-2024 Crown Copyright
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package sleeper.build.uptime.lambda;

import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestStreamHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.cloudwatchevents.CloudWatchEventsClient;
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.s3.S3Client;

import java.io.InputStream;
import java.io.OutputStream;
import java.time.Instant;
import java.util.function.Supplier;

/**
 * A lambda that starts or stops EC2 instances and enables or disables CloudWatch
 * Events rules, based on a {@link BuildUptimeEvent} read from the request stream.
 * The operation is only applied if the event's condition passes.
 */
public class BuildUptimeLambda implements RequestStreamHandler {

    public static final Logger LOGGER = LoggerFactory.getLogger(BuildUptimeLambda.class);

    private final Ec2Client ec2;
    private final CloudWatchEventsClient cloudWatch;
    private final S3Client s3;
    private final BuildUptimeEventSerDe serDe = new BuildUptimeEventSerDe();
    // Injected so tests can control the current time used when evaluating conditions.
    private final Supplier<Instant> timeSupplier;

    public BuildUptimeLambda() {
        this(Ec2Client.create(), CloudWatchEventsClient.create(), S3Client.create(), Instant::now);
    }

    public BuildUptimeLambda(Ec2Client ec2, CloudWatchEventsClient cloudWatch, S3Client s3, Supplier<Instant> timeSupplier) {
        this.ec2 = ec2;
        this.cloudWatch = cloudWatch;
        this.s3 = s3;
        this.timeSupplier = timeSupplier;
    }

    /**
     * Reads an event from the request stream and applies its operation if the
     * event's condition passes. Writes nothing to the output stream.
     */
    @Override
    public void handleRequest(InputStream input, OutputStream output, Context context) {
        BuildUptimeEvent event = serDe.fromJson(input);
        LOGGER.info("Found event: {}", event);
        if (BuildUptimeCondition.of(event).check(s3, timeSupplier.get())) {
            applyOperation(event);
        }
    }

    /**
     * Starts or stops the event's EC2 instances, and enables or disables its
     * CloudWatch Events rules.
     *
     * @param  event                    the event
     * @throws IllegalArgumentException if the operation is not "start" or "stop"
     */
    private void applyOperation(BuildUptimeEvent event) {
        switch (event.getOperation()) {
            case "start":
                // Both lists are optional on the event, so each is null-checked.
                if (event.getEc2Ids() != null && !event.getEc2Ids().isEmpty()) {
                    ec2.startInstances(builder -> builder.instanceIds(event.getEc2Ids()));
                }
                if (event.getRules() != null) {
                    event.getRules().forEach(rule -> cloudWatch.enableRule(builder -> builder.name(rule)));
                }
                break;
            case "stop":
                if (event.getEc2Ids() != null && !event.getEc2Ids().isEmpty()) {
                    ec2.stopInstances(builder -> builder.instanceIds(event.getEc2Ids()));
                }
                if (event.getRules() != null) {
                    event.getRules().forEach(rule -> cloudWatch.disableRule(builder -> builder.name(rule)));
                }
                break;
            default:
                throw new IllegalArgumentException("Unrecognised operation: " + event.getOperation());
        }
    }

}
diff --git a/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/GetS3ObjectAsString.java b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/GetS3ObjectAsString.java
new file mode 100644
index 0000000000..01bc7353d1
--- /dev/null
+++ b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/GetS3ObjectAsString.java
@@ -0,0 +1,39 @@
/*
 * Copyright 2022-2024 Crown Copyright
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package sleeper.build.uptime.lambda;

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.NoSuchKeyException;

import java.util.Optional;

/**
 * Retrieves the contents of an S3 object as a string. Allows tests to stub out S3.
 */
@FunctionalInterface
public interface GetS3ObjectAsString {

    /**
     * Retrieves the contents of an S3 object.
     *
     * @param  bucket the bucket name
     * @param  key    the object key
     * @return        the object's contents, or empty if the object does not exist
     */
    Optional<String> getS3ObjectAsString(String bucket, String key);

    /**
     * Creates an implementation backed by an S3 client.
     *
     * @param  s3 the S3 client
     * @return    the implementation
     */
    static GetS3ObjectAsString fromClient(S3Client s3) {
        return (bucket, key) -> {
            try {
                // Objects are read as UTF-8 strings. A missing key is reported as
                // empty rather than thrown, so callers can treat it as "no data yet".
                String json = s3.getObjectAsBytes(builder -> builder.bucket(bucket).key(key)).asUtf8String();
                return Optional.of(json);
            } catch (NoSuchKeyException e) {
                return Optional.empty();
            }
        };
    }

}
diff --git a/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/NightlyTestSummaryTable.java b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/NightlyTestSummaryTable.java
new file mode 100644
index 0000000000..954c32f65e
--- /dev/null
+++ b/java/build-uptime-lambda/src/main/java/sleeper/build/uptime/lambda/NightlyTestSummaryTable.java
@@ -0,0 +1,104 @@
/*
 * Copyright 2022-2024 Crown Copyright
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package sleeper.build.uptime.lambda;

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonDeserializationContext;
import com.google.gson.JsonDeserializer;
import com.google.gson.JsonElement;
import com.google.gson.JsonPrimitive;
import com.google.gson.JsonSerializationContext;
import com.google.gson.JsonSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.lang.reflect.Type;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.LinkedList;
import java.util.Optional;

/**
 * A summary of nightly test executions, stored as summary.json in the test results
 * bucket. Used to check whether a nightly test has run today.
 */
public class NightlyTestSummaryTable {
    private static final Logger LOGGER = LoggerFactory.getLogger(NightlyTestSummaryTable.class);

    private static final Gson GSON = createGson();

    private final LinkedList<Execution> executions = new LinkedList<>();

    private NightlyTestSummaryTable() {
    }

    /** @return a summary with no executions */
    public static NightlyTestSummaryTable empty() {
        return new NightlyTestSummaryTable();
    }

    /**
     * Reads a summary from JSON.
     *
     * @param  json the JSON string
     * @return      the summary
     */
    public static NightlyTestSummaryTable fromJson(String json) {
        return GSON.fromJson(json, NightlyTestSummaryTable.class);
    }

    /**
     * Loads the summary from summary.json in the given bucket.
     *
     * @param  s3         retrieves objects from S3 as strings
     * @param  bucketName the bucket holding the summary
     * @return            the summary, or an empty summary if the object does not exist
     */
    public static NightlyTestSummaryTable fromS3(GetS3ObjectAsString s3, String bucketName) {
        LOGGER.info("Loading existing test summary from S3");
        Optional<NightlyTestSummaryTable> summaryOpt = s3.getS3ObjectAsString(bucketName, "summary.json")
                .map(NightlyTestSummaryTable::fromJson);
        if (summaryOpt.isPresent()) {
            LOGGER.info("Found test summary with {} executions", summaryOpt.get().executions.size());
            return summaryOpt.get();
        } else {
            LOGGER.info("Found no test summary");
            return empty();
        }
    }

    /** @return this summary as JSON */
    public String toJson() {
        return GSON.toJson(this);
    }

    /**
     * Checks whether any execution started on the same UTC day as the given time.
     *
     * @param  now the current time
     * @return     true if an execution started today
     */
    public boolean containsTestFromToday(Instant now) {
        // Truncating to days compares calendar dates in UTC, ignoring time of day.
        Instant today = now.truncatedTo(ChronoUnit.DAYS);
        return executions.stream()
                .map(execution -> execution.startTime)
                .anyMatch(startTime -> startTime.truncatedTo(ChronoUnit.DAYS).equals(today));
    }

    /** A single nightly test execution. Only the start time is read by this lambda. */
    public static class Execution {

        private final Instant startTime;

        public Execution(Instant startTime) {
            this.startTime = startTime;
        }
    }

    /** @return a Gson instance that serialises instants in ISO-8601 format */
    public static Gson createGson() {
        return new GsonBuilder()
                .registerTypeAdapter(Instant.class, new InstantSerDe())
                .create();
    }

    /** Serialises and deserialises instants as ISO-8601 strings. */
    private static class InstantSerDe implements JsonSerializer<Instant>, JsonDeserializer<Instant> {
        @Override
        public Instant deserialize(JsonElement element, Type type, JsonDeserializationContext context) {
            return Instant.parse(element.getAsString());
        }

        @Override
        public JsonElement serialize(Instant instant, Type type, JsonSerializationContext context) {
            return new JsonPrimitive(instant.toString());
        }
    }
}
diff --git a/java/build-uptime-lambda/src/test/java/sleeper/build/uptime/lambda/BuildUptimeLambdaIT.java b/java/build-uptime-lambda/src/test/java/sleeper/build/uptime/lambda/BuildUptimeLambdaIT.java
new file mode 100644
index 0000000000..e0d5d6f2e1
--- /dev/null
+++ b/java/build-uptime-lambda/src/test/java/sleeper/build/uptime/lambda/BuildUptimeLambdaIT.java
@@ -0,0 +1,200 @@
/*
 * Copyright 2022-2024 Crown Copyright
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package sleeper.build.uptime.lambda;

import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo;
import com.github.tomakehurst.wiremock.junit5.WireMockTest;
import com.github.tomakehurst.wiremock.matching.RequestPatternBuilder;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.testcontainers.containers.localstack.LocalStackContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import org.testcontainers.utility.DockerImageName;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.cloudwatchevents.CloudWatchEventsClient;
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.s3.S3Client;

import sleeper.core.CommonTestConstants;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.time.Instant;
import java.util.LinkedList;
import java.util.Queue;
import java.util.UUID;
import java.util.stream.IntStream;

import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
import static com.github.tomakehurst.wiremock.client.WireMock.equalTo;
import static com.github.tomakehurst.wiremock.client.WireMock.matching;
import static com.github.tomakehurst.wiremock.client.WireMock.post;
import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor;
import static com.github.tomakehurst.wiremock.client.WireMock.stubFor;
import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
import static com.github.tomakehurst.wiremock.client.WireMock.verify;
import static java.util.stream.Collectors.joining;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static sleeper.build.uptime.lambda.LocalStackTestHelper.localStackClient;
import static sleeper.build.uptime.lambda.WiremockTestHelper.wiremockClient;

/**
 * Integration tests for {@link BuildUptimeLambda}. The EC2 and CloudWatch Events
 * APIs are stubbed with WireMock, and S3 is backed by LocalStack.
 */
@WireMockTest
@Testcontainers
public class BuildUptimeLambdaIT {

    @Container
    public static LocalStackContainer localStackContainer = new LocalStackContainer(DockerImageName.parse(CommonTestConstants.LOCALSTACK_DOCKER_IMAGE))
            .withServices(LocalStackContainer.Service.S3);

    private final BuildUptimeEventSerDe serDe = new BuildUptimeEventSerDe();
    private BuildUptimeLambda lambda;
    private final S3Client s3 = localStackClient(localStackContainer, LocalStackContainer.Service.S3, S3Client.builder());
    // Times polled by the lambda when it needs the current time, in order.
    private final Queue<Instant> times = new LinkedList<>();

    @BeforeEach
    void setUp(WireMockRuntimeInfo runtimeInfo) {
        lambda = new BuildUptimeLambda(
                wiremockClient(runtimeInfo, Ec2Client.builder()),
                wiremockClient(runtimeInfo, CloudWatchEventsClient.builder()),
                s3, times::poll);
        // All AWS API calls hit WireMock at the root URL; respond with success.
        stubFor(post("/").willReturn(aResponse().withStatus(200)));
    }

    @Test
    void shouldStartEc2s() {
        // When
        handle(BuildUptimeEvent.start().ec2Ids("A", "B").build());

        // Then
        verify(1, startRequestedForEc2Ids("A", "B"));
        verify(1, postRequestedFor(urlEqualTo("/")));
    }

    @Test
    void shouldStopEc2s() {
        // When
        handle(BuildUptimeEvent.stop().ec2Ids("A", "B").build());

        // Then
        verify(1, stopRequestedForEc2Ids("A", "B"));
        verify(1, postRequestedFor(urlEqualTo("/")));
    }

    @Test
    void shouldEnableCloudWatchRules() {
        // When
        handle(BuildUptimeEvent.start().rules("A", "B").build());

        // Then
        verify(1, enableRequestedForRuleName("A"));
        verify(1, enableRequestedForRuleName("B"));
        verify(2, postRequestedFor(urlEqualTo("/")));
    }

    @Test
    void shouldDisableCloudWatchRules() {
        // When
        handle(BuildUptimeEvent.stop().rules("A", "B").build());

        // Then
        verify(1, disableRequestedForRuleName("A"));
        verify(1, disableRequestedForRuleName("B"));
        verify(2, postRequestedFor(urlEqualTo("/")));
    }

    @Test
    void shouldFailWithUnrecognisedOperation() {
        // When / Then
        assertThatThrownBy(() -> handle(BuildUptimeEvent.operation("test").build()))
                .isInstanceOf(IllegalArgumentException.class);
        verify(0, postRequestedFor(urlEqualTo("/")));
    }

    @Test
    void shouldDoNothingWhenConditionNotMet() {
        // Given an empty test bucket, so no test has finished today
        String bucketName = UUID.randomUUID().toString();
        s3.createBucket(builder -> builder.bucket(bucketName));
        times.add(Instant.parse("2024-10-02T15:02:00Z"));

        // When
        handle(BuildUptimeEvent.stop()
                .ec2Ids("A", "B")
                .ifTestFinishedFromToday()
                .testBucket(bucketName)
                .build());

        // Then
        verify(0, postRequestedFor(urlEqualTo("/")));
    }

    @Test
    void shouldPerformOperationWhenConditionIsMet() {
        // Given a summary showing a test started earlier on the same day
        String bucketName = UUID.randomUUID().toString();
        s3.createBucket(builder -> builder.bucket(bucketName));
        s3.putObject(builder -> builder.bucket(bucketName).key("summary.json"), RequestBody.fromString("{" +
                "\"executions\": [{" +
                "\"startTime\": \"2024-10-02T03:00:00Z\"" +
                "}]}"));
        times.add(Instant.parse("2024-10-02T15:02:00Z"));

        // When
        handle(BuildUptimeEvent.stop()
                .ec2Ids("nightly-test-ec2")
                .ifTestFinishedFromToday()
                .testBucket(bucketName)
                .build());

        // Then
        verify(1, stopRequestedForEc2Ids("nightly-test-ec2"));
        verify(1, postRequestedFor(urlEqualTo("/")));
    }

    /** Serialises the event to JSON and invokes the lambda with it. */
    void handle(BuildUptimeEvent event) {
        InputStream inputStream = new ByteArrayInputStream(serDe.toJson(event).getBytes());
        lambda.handleRequest(inputStream, null, null);
    }

    private RequestPatternBuilder startRequestedForEc2Ids(String... ec2Ids) {
        return postRequestedFor(urlEqualTo("/"))
                .withRequestBody(matching("^Action=StartInstances&Version=[0-9\\-]+" + buildInstanceIdParams(ec2Ids) + "$"));
    }

    private RequestPatternBuilder stopRequestedForEc2Ids(String... ec2Ids) {
        return postRequestedFor(urlEqualTo("/"))
                .withRequestBody(matching("^Action=StopInstances&Version=[0-9\\-]+" + buildInstanceIdParams(ec2Ids) + "$"));
    }

    private RequestPatternBuilder enableRequestedForRuleName(String ruleName) {
        return postRequestedFor(urlEqualTo("/"))
                .withHeader("X-Amz-Target", equalTo("AWSEvents.EnableRule"))
                .withRequestBody(equalTo("{\"Name\":\"" + ruleName + "\"}"));
    }

    private RequestPatternBuilder disableRequestedForRuleName(String ruleName) {
        return postRequestedFor(urlEqualTo("/"))
                .withHeader("X-Amz-Target", equalTo("AWSEvents.DisableRule"))
                .withRequestBody(equalTo("{\"Name\":\"" + ruleName + "\"}"));
    }

    /** Builds the query-string parameters the EC2 API uses to list instance IDs, 1-indexed. */
    private String buildInstanceIdParams(String... instanceIds) {
        return IntStream.range(0, instanceIds.length)
                .mapToObj(i -> "&InstanceId." + (i + 1) + "=" + instanceIds[i])
                .collect(joining());
    }
}
diff --git a/java/build-uptime-lambda/src/test/java/sleeper/build/uptime/lambda/LocalStackTestHelper.java b/java/build-uptime-lambda/src/test/java/sleeper/build/uptime/lambda/LocalStackTestHelper.java
new file mode 100644
index 0000000000..35035335f4
--- /dev/null
+++ b/java/build-uptime-lambda/src/test/java/sleeper/build/uptime/lambda/LocalStackTestHelper.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.build.uptime.lambda;
+
+import org.testcontainers.containers.localstack.LocalStackContainer;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder;
+import software.amazon.awssdk.regions.Region;
+
+public class LocalStackTestHelper {
+
+ private LocalStackTestHelper() {
+ }
+
+ public static <B extends AwsClientBuilder<B, T>, T> T localStackClient(
+ LocalStackContainer localStackContainer, LocalStackContainer.Service service, B builder) {
+ return builder
+ .endpointOverride(localStackContainer.getEndpointOverride(service))
+ .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create(
+ localStackContainer.getAccessKey(), localStackContainer.getSecretKey())))
+ .region(Region.of(localStackContainer.getRegion()))
+ .build();
+ }
+
+}
diff --git a/java/build-uptime-lambda/src/test/java/sleeper/build/uptime/lambda/WiremockTestHelper.java b/java/build-uptime-lambda/src/test/java/sleeper/build/uptime/lambda/WiremockTestHelper.java
new file mode 100644
index 0000000000..a7eb04dda2
--- /dev/null
+++ b/java/build-uptime-lambda/src/test/java/sleeper/build/uptime/lambda/WiremockTestHelper.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.build.uptime.lambda;
+
+import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder;
+import software.amazon.awssdk.regions.Region;
+
+import java.net.URI;
+
+public class WiremockTestHelper {
+
+ public static final String WIREMOCK_ACCESS_KEY = "wiremock-access-key";
+ public static final String WIREMOCK_SECRET_KEY = "wiremock-secret-key";
+
+ private WiremockTestHelper() {
+ }
+
+ public static <B extends AwsClientBuilder<B, T>, T> T wiremockClient(WireMockRuntimeInfo runtimeInfo, B builder) {
+ return builder
+ .endpointOverride(URI.create(runtimeInfo.getHttpBaseUrl()))
+ .region(Region.US_EAST_1)
+ .credentialsProvider(StaticCredentialsProvider.create(
+ AwsBasicCredentials.create(WIREMOCK_ACCESS_KEY, WIREMOCK_SECRET_KEY)))
+ .build();
+ }
+}
diff --git a/java/bulk-import/bulk-import-common/src/main/java/sleeper/bulkimport/configuration/ConfigurationUtils.java b/java/bulk-import/bulk-import-common/src/main/java/sleeper/bulkimport/configuration/ConfigurationUtils.java
index 0102468139..fa3b0d5324 100644
--- a/java/bulk-import/bulk-import-common/src/main/java/sleeper/bulkimport/configuration/ConfigurationUtils.java
+++ b/java/bulk-import/bulk-import-common/src/main/java/sleeper/bulkimport/configuration/ConfigurationUtils.java
@@ -46,6 +46,7 @@
import static sleeper.core.properties.instance.EMRProperty.BULK_IMPORT_EMR_SPARK_SQL_SHUFFLE_PARTITIONS;
import static sleeper.core.properties.instance.EMRProperty.BULK_IMPORT_EMR_SPARK_STORAGE_LEVEL;
import static sleeper.core.properties.instance.EMRProperty.BULK_IMPORT_EMR_SPARK_YARN_SCHEDULER_REPORTER_THREAD_MAX_FAILURES;
+import static sleeper.core.properties.instance.EMRServerlessProperty.BULK_IMPORT_EMR_SERVERLESS_DRIVER_CORES;
import static sleeper.core.properties.instance.EMRServerlessProperty.BULK_IMPORT_EMR_SERVERLESS_DRIVER_MEMORY;
import static sleeper.core.properties.instance.EMRServerlessProperty.BULK_IMPORT_EMR_SERVERLESS_DYNAMIC_ALLOCATION;
import static sleeper.core.properties.instance.EMRServerlessProperty.BULK_IMPORT_EMR_SERVERLESS_EXECUTOR_CORES;
@@ -146,7 +147,7 @@ public static Map<String, String> getSparkServerlessConfigurationFromInstancePro
InstanceProperties instanceProperties, EmrInstanceArchitecture arch) {
Map<String, String> sparkConf = new HashMap<>();
// spark.driver properties
- sparkConf.put("spark.driver.cores", instanceProperties.get(BULK_IMPORT_EMR_SERVERLESS_EXECUTOR_CORES));
+ sparkConf.put("spark.driver.cores", instanceProperties.get(BULK_IMPORT_EMR_SERVERLESS_DRIVER_CORES));
sparkConf.put("spark.driver.memory", instanceProperties.get(BULK_IMPORT_EMR_SERVERLESS_DRIVER_MEMORY));
// spark.executor properties
diff --git a/java/bulk-import/bulk-import-runner/src/main/java/sleeper/bulkimport/job/runner/dataframe/FileWritingIterator.java b/java/bulk-import/bulk-import-runner/src/main/java/sleeper/bulkimport/job/runner/dataframe/FileWritingIterator.java
index 96e8267253..40f55b0bbd 100644
--- a/java/bulk-import/bulk-import-runner/src/main/java/sleeper/bulkimport/job/runner/dataframe/FileWritingIterator.java
+++ b/java/bulk-import/bulk-import-runner/src/main/java/sleeper/bulkimport/job/runner/dataframe/FileWritingIterator.java
@@ -15,8 +15,6 @@
*/
package sleeper.bulkimport.job.runner.dataframe;
-import com.facebook.collections.ByteArray;
-import org.apache.datasketches.quantiles.ItemsSketch;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.ParquetWriter;
@@ -31,7 +29,6 @@
import sleeper.core.record.Record;
import sleeper.core.schema.Field;
import sleeper.core.schema.Schema;
-import sleeper.core.schema.type.ByteArrayType;
import sleeper.core.schema.type.ListType;
import sleeper.core.schema.type.MapType;
import sleeper.core.util.LoggedDuration;
@@ -41,11 +38,8 @@
import java.io.IOException;
import java.time.Instant;
-import java.util.Comparator;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
-import java.util.Map;
import java.util.UUID;
import java.util.function.Supplier;
@@ -61,7 +55,7 @@ public class FileWritingIterator implements Iterator<Row> {
private final Supplier<String> outputFilenameSupplier;
private String currentPartitionId;
private ParquetWriter<Record> parquetWriter;
- private Map<String, ItemsSketch> sketches;
+ private Sketches sketches;
private String path;
private long numRecords;
private boolean hasMore = false;
@@ -141,7 +135,7 @@ private void write(Row row) throws IOException {
if (numRecords % 1_000_000L == 0) {
LOGGER.info("Wrote {} records", numRecords);
}
- updateQuantilesSketch(record, sketches, schema.getRowKeyFields());
+ sketches.update(schema, record);
}
private void initialiseState(String partitionId) throws IOException {
@@ -149,7 +143,7 @@ private void initialiseState(String partitionId) throws IOException {
// Create writer;
parquetWriter = createWriter(partitionId);
// Initialise sketches
- sketches = getSketches(schema.getRowKeyFields());
+ sketches = Sketches.from(schema);
}
private void writeFiles() throws IOException {
@@ -158,7 +152,7 @@ private void writeFiles() throws IOException {
return;
}
parquetWriter.close();
- new SketchesSerDeToS3(schema).saveToHadoopFS(new Path(path.replace(".parquet", ".sketches")), new Sketches(sketches), conf);
+ new SketchesSerDeToS3(schema).saveToHadoopFS(new Path(path.replace(".parquet", ".sketches")), sketches, conf);
LoggedDuration duration = LoggedDuration.withFullOutput(startTime, Instant.now());
double rate = numRecords / (double) duration.getSeconds();
LOGGER.info("Overall written {} records in {} (rate was {} per second)",
@@ -181,30 +175,6 @@ private Record getRecord(Row row) {
return record;
}
- // TODO These methods are copies of the same ones in IngestRecordsFromIterator -
- // move to sketches module
- private Map getSketches(List rowKeyFields) {
- Map keyFieldToSketch = new HashMap<>();
- for (Field rowKeyField : rowKeyFields) {
- ItemsSketch> sketch = ItemsSketch.getInstance(1024, Comparator.naturalOrder());
- keyFieldToSketch.put(rowKeyField.getName(), sketch);
- }
- return keyFieldToSketch;
- }
-
- private void updateQuantilesSketch(
- Record record, Map keyFieldToSketch, List rowKeyFields) {
- for (Field rowKeyField : rowKeyFields) {
- if (rowKeyField.getType() instanceof ByteArrayType) {
- byte[] value = (byte[]) record.get(rowKeyField.getName());
- keyFieldToSketch.get(rowKeyField.getName()).update(ByteArray.wrap(value));
- } else {
- Object value = record.get(rowKeyField.getName());
- keyFieldToSketch.get(rowKeyField.getName()).update(value);
- }
- }
- }
-
private ParquetWriter<Record> createWriter(String partitionId) throws IOException {
numRecords = 0L;
path = TableFilePaths.buildDataFilePathPrefix(instanceProperties, tableProperties)
diff --git a/java/bulk-import/bulk-import-runner/src/main/java/sleeper/bulkimport/job/runner/rdd/SingleFileWritingIterator.java b/java/bulk-import/bulk-import-runner/src/main/java/sleeper/bulkimport/job/runner/rdd/SingleFileWritingIterator.java
index 37f96227c6..624c240133 100644
--- a/java/bulk-import/bulk-import-runner/src/main/java/sleeper/bulkimport/job/runner/rdd/SingleFileWritingIterator.java
+++ b/java/bulk-import/bulk-import-runner/src/main/java/sleeper/bulkimport/job/runner/rdd/SingleFileWritingIterator.java
@@ -15,8 +15,6 @@
*/
package sleeper.bulkimport.job.runner.rdd;
-import com.facebook.collections.ByteArray;
-import org.apache.datasketches.quantiles.ItemsSketch;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.ParquetWriter;
@@ -34,7 +32,6 @@
import sleeper.core.record.Record;
import sleeper.core.schema.Field;
import sleeper.core.schema.Schema;
-import sleeper.core.schema.type.ByteArrayType;
import sleeper.core.schema.type.ListType;
import sleeper.core.schema.type.MapType;
import sleeper.core.util.LoggedDuration;
@@ -44,11 +41,8 @@
import java.io.IOException;
import java.time.Instant;
-import java.util.Comparator;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
-import java.util.Map;
import java.util.UUID;
public class SingleFileWritingIterator implements Iterator<Row> {
@@ -62,7 +56,7 @@ public class SingleFileWritingIterator implements Iterator<Row> {
private final Configuration conf;
private final PartitionTree partitionTree;
private ParquetWriter<Record> parquetWriter;
- private Map<String, ItemsSketch> sketches;
+ private Sketches sketches;
private String path;
private long numRecords;
private final String outputFilename;
@@ -125,14 +119,14 @@ private void write(Row row) throws IOException {
if (numRecords % 1_000_000L == 0) {
LOGGER.info("Wrote {} records", numRecords);
}
- updateQuantilesSketch(record, sketches, schema.getRowKeyFields());
+ sketches.update(schema, record);
}
private void initialiseState(String partitionId) throws IOException {
// Create writer
parquetWriter = createWriter(partitionId);
// Initialise sketches
- sketches = getSketches(schema.getRowKeyFields());
+ sketches = Sketches.from(schema);
}
private void closeFile() throws IOException {
@@ -141,7 +135,7 @@ private void closeFile() throws IOException {
return;
}
parquetWriter.close();
- new SketchesSerDeToS3(schema).saveToHadoopFS(new Path(path.replace(".parquet", ".sketches")), new Sketches(sketches), conf);
+ new SketchesSerDeToS3(schema).saveToHadoopFS(new Path(path.replace(".parquet", ".sketches")), sketches, conf);
LoggedDuration duration = LoggedDuration.withFullOutput(startTime, Instant.now());
double rate = numRecords / (double) duration.getSeconds();
LOGGER.info("Finished writing {} records to file {} in {} (rate was {} per second)",
@@ -164,30 +158,6 @@ private Record getRecord(Row row) {
return record;
}
- // TODO These methods are copies of the same ones in IngestRecordsFromIterator -
- // move to sketches module
- private Map getSketches(List rowKeyFields) {
- Map keyFieldToSketch = new HashMap<>();
- for (Field rowKeyField : rowKeyFields) {
- ItemsSketch> sketch = ItemsSketch.getInstance(1024, Comparator.naturalOrder());
- keyFieldToSketch.put(rowKeyField.getName(), sketch);
- }
- return keyFieldToSketch;
- }
-
- private void updateQuantilesSketch(
- Record record, Map keyFieldToSketch, List rowKeyFields) {
- for (Field rowKeyField : rowKeyFields) {
- if (rowKeyField.getType() instanceof ByteArrayType) {
- byte[] value = (byte[]) record.get(rowKeyField.getName());
- keyFieldToSketch.get(rowKeyField.getName()).update(ByteArray.wrap(value));
- } else {
- Object value = record.get(rowKeyField.getName());
- keyFieldToSketch.get(rowKeyField.getName()).update(value);
- }
- }
- }
-
private ParquetWriter<Record> createWriter(String partitionId) throws IOException {
numRecords = 0L;
path = TableFilePaths.buildDataFilePathPrefix(instanceProperties, tableProperties)
diff --git a/java/bulk-import/bulk-import-starter/pom.xml b/java/bulk-import/bulk-import-starter/pom.xml
index 3eb3cbfc23..40c11b79b5 100644
--- a/java/bulk-import/bulk-import-starter/pom.xml
+++ b/java/bulk-import/bulk-import-starter/pom.xml
@@ -45,12 +45,10 @@
software.amazon.awssdk
emr
- ${aws-java-sdk-v2.version}
software.amazon.awssdk
emrserverless
- ${aws-java-sdk-v2.version}
diff --git a/java/cdk-custom-resources/pom.xml b/java/cdk-custom-resources/pom.xml
index 888d80e263..6eed49128c 100644
--- a/java/cdk-custom-resources/pom.xml
+++ b/java/cdk-custom-resources/pom.xml
@@ -32,11 +32,6 @@
configuration
${project.parent.version}
-
- com.amazonaws
- aws-java-sdk-ec2
- ${aws-java-sdk.version}
-
com.amazonaws
aws-lambda-java-core
@@ -47,6 +42,14 @@
aws-lambda-java-events
${aws-lambda-java-events.version}
+
+ software.amazon.awssdk
+ s3
+
+
+ software.amazon.awssdk
+ ec2
+
sleeper
diff --git a/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/AutoDeleteS3ObjectsLambda.java b/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/AutoDeleteS3ObjectsLambda.java
index 7f2765857d..3baaf83ac7 100644
--- a/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/AutoDeleteS3ObjectsLambda.java
+++ b/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/AutoDeleteS3ObjectsLambda.java
@@ -17,31 +17,32 @@
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.events.CloudFormationCustomResourceEvent;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.s3.model.DeleteObjectsRequest;
-import com.amazonaws.services.s3.model.DeleteObjectsResult;
-import com.amazonaws.services.s3.model.ListObjectsV2Request;
-import com.amazonaws.services.s3.model.ListObjectsV2Result;
-import com.amazonaws.services.s3.model.S3ObjectSummary;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.DeleteMarkerEntry;
+import software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse;
+import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
+import software.amazon.awssdk.services.s3.model.ObjectIdentifier;
+import software.amazon.awssdk.services.s3.model.ObjectVersion;
-import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
public class AutoDeleteS3ObjectsLambda {
public static final Logger LOGGER = LoggerFactory.getLogger(AutoDeleteS3ObjectsLambda.class);
- private final AmazonS3 s3Client;
+ private final S3Client s3Client;
private final int batchSize;
public AutoDeleteS3ObjectsLambda() {
- this(AmazonS3ClientBuilder.defaultClient(), 100);
+ this(S3Client.create(), 1000);
}
- public AutoDeleteS3ObjectsLambda(AmazonS3 s3Client, int batchSize) {
+ public AutoDeleteS3ObjectsLambda(S3Client s3Client, int batchSize) {
this.s3Client = s3Client;
this.batchSize = batchSize;
}
@@ -55,46 +56,52 @@ public void handleEvent(CloudFormationCustomResourceEvent event, Context context
case "Update":
break;
case "Delete":
- deleteAllObjectsInBucket(bucketName);
+ emptyBucket(bucketName);
break;
default:
throw new IllegalArgumentException("Invalid request type: " + event.getRequestType());
}
}
- private void deleteAllObjectsInBucket(String bucketName) {
- List objectKeysForDeletion = new ArrayList<>();
- ListObjectsV2Request req = new ListObjectsV2Request()
- .withBucketName(bucketName)
- .withMaxKeys(batchSize);
- ListObjectsV2Result result;
+ private void emptyBucket(String bucketName) {
+ try {
+ LOGGER.info("Emptying bucket {}", bucketName);
+ s3Client.listObjectVersionsPaginator(builder -> builder.bucket(bucketName).maxKeys(batchSize))
+ .stream().parallel()
+ .forEach(response -> {
+ deleteVersions(bucketName, response);
+ deleteMarkers(bucketName, response);
+ });
+ } catch (NoSuchBucketException e) {
+ LOGGER.info("Bucket not found: {}", bucketName);
+ }
+ }
+
+ private void deleteVersions(String bucketName, ListObjectVersionsResponse response) {
+ if (!response.versions().isEmpty()) {
+ LOGGER.info("Deleting {} versions", response.versions().size());
+ s3Client.deleteObjects(builder -> builder.bucket(bucketName)
+ .delete(deleteBuilder -> deleteBuilder
+ .objects(objectIdentifiers(response.versions(), ObjectVersion::key, ObjectVersion::versionId))));
+ }
- LOGGER.info("Deleting all objects in the bucket {}", bucketName);
- int totalObjectsDeleted = 0;
- do {
- objectKeysForDeletion.clear();
- result = s3Client.listObjectsV2(req);
- for (S3ObjectSummary objectSummary : result.getObjectSummaries()) {
- objectKeysForDeletion.add(objectSummary.getKey());
- }
- String token = result.getNextContinuationToken();
- req.setContinuationToken(token);
- totalObjectsDeleted += deleteObjects(s3Client, bucketName, objectKeysForDeletion);
- } while (result.isTruncated());
- LOGGER.info("A total of {} objects were deleted", totalObjectsDeleted);
}
- private static int deleteObjects(AmazonS3 s3Client, String bucketName, List keys) {
- int successfulDeletes = 0;
- if (!keys.isEmpty()) {
- DeleteObjectsRequest multiObjectDeleteRequest = new DeleteObjectsRequest(bucketName)
- .withKeys(keys.toArray(new String[0]))
- .withQuiet(false);
- DeleteObjectsResult delObjRes = s3Client.deleteObjects(multiObjectDeleteRequest);
- successfulDeletes = delObjRes.getDeletedObjects().size();
- LOGGER.info("{} objects successfully deleted from S3 bucket: {}", successfulDeletes, bucketName);
+ private void deleteMarkers(String bucketName, ListObjectVersionsResponse response) {
+ if (!response.deleteMarkers().isEmpty()) {
+ LOGGER.info("Deleting {} delete markers", response.deleteMarkers().size());
+ s3Client.deleteObjects(builder -> builder.bucket(bucketName)
+ .delete(deleteBuilder -> deleteBuilder
+ .objects(objectIdentifiers(response.deleteMarkers(), DeleteMarkerEntry::key, DeleteMarkerEntry::versionId))));
}
- return successfulDeletes;
}
+ private static <T> Collection<ObjectIdentifier> objectIdentifiers(
+ List<T> versions, Function<T, String> getKey, Function<T, String> getVersionId) {
+ return versions.stream()
+ .map(version -> ObjectIdentifier.builder()
+ .key(getKey.apply(version))
+ .versionId(getVersionId.apply(version)).build())
+ .collect(Collectors.toList());
+ }
}
diff --git a/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/PropertiesWriterLambda.java b/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/PropertiesWriterLambda.java
index 6806374ec6..9f084c968d 100644
--- a/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/PropertiesWriterLambda.java
+++ b/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/PropertiesWriterLambda.java
@@ -17,10 +17,10 @@
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.events.CloudFormationCustomResourceEvent;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
import sleeper.configuration.properties.S3InstanceProperties;
@@ -35,14 +35,14 @@
*/
public class PropertiesWriterLambda {
private static final Logger LOGGER = LoggerFactory.getLogger(PropertiesWriterLambda.class);
- private final AmazonS3 s3Client;
+ private final S3Client s3Client;
private final String bucketName;
public PropertiesWriterLambda() {
- this(AmazonS3ClientBuilder.defaultClient(), System.getenv(CONFIG_BUCKET.toEnvironmentVariable()));
+ this(S3Client.create(), System.getenv(CONFIG_BUCKET.toEnvironmentVariable()));
}
- public PropertiesWriterLambda(AmazonS3 s3Client, String bucketName) {
+ public PropertiesWriterLambda(S3Client s3Client, String bucketName) {
this.s3Client = s3Client;
this.bucketName = bucketName;
}
@@ -65,13 +65,14 @@ public void handleEvent(CloudFormationCustomResourceEvent event, Context context
private void deleteProperties(String propertiesStr) throws IOException {
String bucketName = readBucketName(propertiesStr);
LOGGER.info("Deleting from bucket {}", bucketName);
- s3Client.deleteObject(bucketName, S3InstanceProperties.S3_INSTANCE_PROPERTIES_FILE);
+ s3Client.deleteObject(builder -> builder.bucket(bucketName).key(S3InstanceProperties.S3_INSTANCE_PROPERTIES_FILE));
}
private void updateProperties(String propertiesStr) throws IOException {
String bucketName = readBucketName(propertiesStr);
LOGGER.info("Writing to bucket {}", bucketName);
- s3Client.putObject(bucketName, S3InstanceProperties.S3_INSTANCE_PROPERTIES_FILE, propertiesStr);
+ s3Client.putObject(builder -> builder.bucket(bucketName).key(S3InstanceProperties.S3_INSTANCE_PROPERTIES_FILE),
+ RequestBody.fromString(propertiesStr));
}
private String readBucketName(String propertiesStr) throws IOException {
diff --git a/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/VpcCheckLambda.java b/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/VpcCheckLambda.java
index 09bd6693a4..e462e9e4e4 100644
--- a/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/VpcCheckLambda.java
+++ b/java/cdk-custom-resources/src/main/java/sleeper/cdk/custom/VpcCheckLambda.java
@@ -15,27 +15,23 @@
*/
package sleeper.cdk.custom;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
-import com.amazonaws.services.ec2.model.DescribeVpcEndpointsRequest;
-import com.amazonaws.services.ec2.model.DescribeVpcEndpointsResult;
-import com.amazonaws.services.ec2.model.Filter;
-import com.amazonaws.services.ec2.model.VpcEndpoint;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.events.CloudFormationCustomResourceEvent;
-import com.google.common.collect.Lists;
+import software.amazon.awssdk.services.ec2.Ec2Client;
+import software.amazon.awssdk.services.ec2.model.Filter;
+import software.amazon.awssdk.services.ec2.model.VpcEndpoint;
import java.util.List;
import java.util.Map;
public class VpcCheckLambda {
- private final AmazonEC2 vpcClient;
+ private final Ec2Client vpcClient;
public VpcCheckLambda() {
- this(AmazonEC2ClientBuilder.defaultClient());
+ this(Ec2Client.create());
}
- public VpcCheckLambda(AmazonEC2 vpcClient) {
+ public VpcCheckLambda(Ec2Client vpcClient) {
this.vpcClient = vpcClient;
}
@@ -57,10 +53,10 @@ public void handleEvent(CloudFormationCustomResourceEvent event, Context context
}
private void validateVpc(String vpcId, String region) {
- DescribeVpcEndpointsResult s3Endpoints = vpcClient.describeVpcEndpoints(new DescribeVpcEndpointsRequest()
- .withFilters(new Filter("vpc-id", Lists.newArrayList(vpcId)),
- new Filter("service-name", Lists.newArrayList("com.amazonaws." + region + ".s3"))));
- List vpcEndpoints = s3Endpoints.getVpcEndpoints();
+ List vpcEndpoints = vpcClient.describeVpcEndpoints(builder -> builder
+ .filters(Filter.builder().name("vpc-id").values(vpcId).build(),
+ Filter.builder().name("service-name").values("com.amazonaws." + region + ".s3").build()))
+ .vpcEndpoints();
if (vpcEndpoints.size() != 1) {
throw new IllegalArgumentException("The S3 endpoint for the requested VPC for this deployment is missing. This can mean very high cost "
diff --git a/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/AutoDeleteS3ObjectsLambdaIT.java b/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/AutoDeleteS3ObjectsLambdaIT.java
index a16099d8a6..8eb73f24ad 100644
--- a/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/AutoDeleteS3ObjectsLambdaIT.java
+++ b/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/AutoDeleteS3ObjectsLambdaIT.java
@@ -16,7 +16,6 @@
package sleeper.cdk.custom;
import com.amazonaws.services.lambda.runtime.events.CloudFormationCustomResourceEvent;
-import com.amazonaws.services.s3.model.S3ObjectSummary;
import org.junit.jupiter.api.Test;
import java.util.Map;
@@ -30,60 +29,58 @@ public class AutoDeleteS3ObjectsLambdaIT extends LocalStackTestBase {
void shouldDeleteObjectOnDelete() {
// Given
String bucketName = UUID.randomUUID().toString();
- s3Client.createBucket(bucketName);
- s3Client.putObject(bucketName, "test.txt", "some content");
+ createBucket(bucketName);
+ putObject(bucketName, "test.txt", "some content");
// When
lambda().handleEvent(deleteEventForBucket(bucketName), null);
// Then
- assertThat(s3Client.listObjectsV2(bucketName).getObjectSummaries()).isEmpty();
+ assertThat(listObjectKeys(bucketName)).isEmpty();
}
@Test
void shouldDeleteMoreObjectsThanBatchSizeOnDelete() {
// Given
String bucketName = UUID.randomUUID().toString();
- s3Client.createBucket(bucketName);
- s3Client.putObject(bucketName, "test1.txt", "some content");
- s3Client.putObject(bucketName, "test2.txt", "other content");
- s3Client.putObject(bucketName, "test3.txt", "more content");
+ createBucket(bucketName);
+ putObject(bucketName, "test1.txt", "some content");
+ putObject(bucketName, "test2.txt", "other content");
+ putObject(bucketName, "test3.txt", "more content");
int batchSize = 2;
// When
lambdaWithBatchSize(batchSize).handleEvent(deleteEventForBucket(bucketName), null);
// Then
- assertThat(s3Client.listObjectsV2(bucketName).getObjectSummaries()).isEmpty();
+ assertThat(listObjectKeys(bucketName)).isEmpty();
}
@Test
void shouldDeleteNoObjectsOnDelete() {
// Given
String bucketName = UUID.randomUUID().toString();
- s3Client.createBucket(bucketName);
+ createBucket(bucketName);
// When
lambda().handleEvent(deleteEventForBucket(bucketName), null);
// Then
- assertThat(s3Client.listObjectsV2(bucketName).getObjectSummaries()).isEmpty();
+ assertThat(listObjectKeys(bucketName)).isEmpty();
}
@Test
void shouldDoNothingOnCreate() {
// Given
String bucketName = UUID.randomUUID().toString();
- s3Client.createBucket(bucketName);
- s3Client.putObject(bucketName, "test.txt", "some content");
+ createBucket(bucketName);
+ putObject(bucketName, "test.txt", "some content");
// When
lambda().handleEvent(createEventForBucket(bucketName), null);
// Then
- assertThat(s3Client.listObjectsV2(bucketName).getObjectSummaries())
- .extracting(S3ObjectSummary::getKey)
- .containsExactly("test.txt");
+ assertThat(listObjectKeys(bucketName)).containsExactly("test.txt");
}
private CloudFormationCustomResourceEvent createEventForBucket(String bucketName) {
diff --git a/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/LocalStackTestBase.java b/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/LocalStackTestBase.java
index 3d84956438..d354538824 100644
--- a/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/LocalStackTestBase.java
+++ b/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/LocalStackTestBase.java
@@ -22,9 +22,19 @@
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import org.testcontainers.utility.DockerImageName;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.S3Object;
import sleeper.core.CommonTestConstants;
+import java.util.List;
+
+import static java.util.stream.Collectors.toUnmodifiableList;
import static sleeper.configuration.testutils.LocalStackAwsV1ClientHelper.buildAwsV1Client;
@Testcontainers
@@ -34,10 +44,35 @@ public abstract class LocalStackTestBase {
public static LocalStackContainer localStackContainer = new LocalStackContainer(DockerImageName.parse(CommonTestConstants.LOCALSTACK_DOCKER_IMAGE))
.withServices(LocalStackContainer.Service.S3);
- protected final AmazonS3 s3Client = buildAwsV1Client(localStackContainer, LocalStackContainer.Service.S3, AmazonS3ClientBuilder.standard());
+ protected final S3Client s3Client = buildAwsV2Client(localStackContainer, LocalStackContainer.Service.S3, S3Client.builder());
+ protected final AmazonS3 s3ClientV1 = buildAwsV1Client(localStackContainer, LocalStackContainer.Service.S3, AmazonS3ClientBuilder.standard());
@AfterEach
void tearDownLocalStackBase() {
- s3Client.shutdown();
+ s3Client.close();
+ }
+
+ private static <B extends AwsClientBuilder<B, T>, T> T buildAwsV2Client(LocalStackContainer localStackContainer, LocalStackContainer.Service service, B builder) {
+ return builder
+ .endpointOverride(localStackContainer.getEndpointOverride(service))
+ .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create(
+ localStackContainer.getAccessKey(), localStackContainer.getSecretKey())))
+ .region(Region.of(localStackContainer.getRegion()))
+ .build();
+ }
+
+ protected void createBucket(String bucketName) {
+ s3Client.createBucket(builder -> builder.bucket(bucketName));
+ }
+
+ protected void putObject(String bucketName, String key, String content) {
+ s3Client.putObject(builder -> builder.bucket(bucketName).key(key),
+ RequestBody.fromString(content));
+ }
+
+ protected List<String> listObjectKeys(String bucketName) {
+ return s3Client.listObjectsV2Paginator(builder -> builder.bucket(bucketName))
+ .contents().stream().map(S3Object::key)
+ .collect(toUnmodifiableList());
}
}
diff --git a/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/PropertiesWriterLambdaIT.java b/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/PropertiesWriterLambdaIT.java
index 13c279821c..81df8afd52 100644
--- a/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/PropertiesWriterLambdaIT.java
+++ b/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/PropertiesWriterLambdaIT.java
@@ -54,7 +54,7 @@ private InstanceProperties createDefaultProperties(String account, String bucket
public void shouldUpdateS3BucketOnCreate() throws IOException {
// Given
String bucketName = UUID.randomUUID().toString();
- s3Client.createBucket(bucketName);
+ createBucket(bucketName);
PropertiesWriterLambda propertiesWriterLambda = new PropertiesWriterLambda(s3Client, bucketName);
// When
@@ -71,7 +71,7 @@ public void shouldUpdateS3BucketOnCreate() throws IOException {
propertiesWriterLambda.handleEvent(event, null);
// Then
- InstanceProperties loadedProperties = S3InstanceProperties.loadFromBucket(s3Client, bucketName);
+ InstanceProperties loadedProperties = S3InstanceProperties.loadFromBucket(s3ClientV1, bucketName);
assertThat(loadedProperties.get(ACCOUNT)).isEqualTo("foo");
}
@@ -80,10 +80,10 @@ public void shouldUpdateS3BucketOnCreate() throws IOException {
public void shouldUpdateS3BucketOnUpdate() throws IOException {
// Given
String bucketName = UUID.randomUUID().toString();
- s3Client.createBucket(bucketName);
+ createBucket(bucketName);
PropertiesWriterLambda propertiesWriterLambda = new PropertiesWriterLambda(s3Client, bucketName);
- s3Client.putObject(bucketName, S3InstanceProperties.S3_INSTANCE_PROPERTIES_FILE, "foo");
+ putObject(bucketName, S3InstanceProperties.S3_INSTANCE_PROPERTIES_FILE, "foo");
// When
InstanceProperties instanceProperties = createDefaultProperties("bar", bucketName);
@@ -99,7 +99,7 @@ public void shouldUpdateS3BucketOnUpdate() throws IOException {
propertiesWriterLambda.handleEvent(event, null);
// Then
- InstanceProperties loadedProperties = S3InstanceProperties.loadFromBucket(s3Client, bucketName);
+ InstanceProperties loadedProperties = S3InstanceProperties.loadFromBucket(s3ClientV1, bucketName);
assertThat(loadedProperties.get(ACCOUNT)).isEqualTo("bar");
}
@@ -107,11 +107,11 @@ public void shouldUpdateS3BucketOnUpdate() throws IOException {
public void shouldUpdateS3BucketAccordingToProperties() throws IOException {
// Given
String bucketName = UUID.randomUUID().toString();
- s3Client.createBucket(bucketName);
+ createBucket(bucketName);
PropertiesWriterLambda propertiesWriterLambda = new PropertiesWriterLambda(s3Client, bucketName);
String alternativeBucket = bucketName + "-alternative";
- s3Client.createBucket(alternativeBucket);
+ createBucket(alternativeBucket);
// When
InstanceProperties instanceProperties = createDefaultProperties("foo", alternativeBucket);
@@ -127,7 +127,7 @@ public void shouldUpdateS3BucketAccordingToProperties() throws IOException {
propertiesWriterLambda.handleEvent(event, null);
// Then
- InstanceProperties loadedProperties = S3InstanceProperties.loadFromBucket(s3Client, alternativeBucket);
+ InstanceProperties loadedProperties = S3InstanceProperties.loadFromBucket(s3ClientV1, alternativeBucket);
assertThat(loadedProperties.get(ACCOUNT)).isEqualTo("foo");
}
@@ -135,8 +135,8 @@ public void shouldUpdateS3BucketAccordingToProperties() throws IOException {
public void shouldDeleteConfigObjectWhenCalledWithDeleteRequest() throws IOException {
// Given
String bucketName = UUID.randomUUID().toString();
- s3Client.createBucket(bucketName);
- s3Client.putObject(bucketName, S3InstanceProperties.S3_INSTANCE_PROPERTIES_FILE, "foo");
+ createBucket(bucketName);
+ putObject(bucketName, S3InstanceProperties.S3_INSTANCE_PROPERTIES_FILE, "foo");
// When
InstanceProperties instanceProperties = createDefaultProperties("foo", bucketName);
@@ -153,6 +153,6 @@ public void shouldDeleteConfigObjectWhenCalledWithDeleteRequest() throws IOExcep
lambda.handleEvent(event, null);
// Then
- assertThat(s3Client.listObjects(bucketName).getObjectSummaries()).isEmpty();
+ assertThat(listObjectKeys(bucketName)).isEmpty();
}
}
diff --git a/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/WiremockTestHelper.java b/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/WiremockTestHelper.java
index 837d804849..0005db5950 100644
--- a/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/WiremockTestHelper.java
+++ b/java/cdk-custom-resources/src/test/java/sleeper/cdk/custom/WiremockTestHelper.java
@@ -15,15 +15,13 @@
*/
package sleeper.cdk.custom;
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.ec2.Ec2Client;
-import static com.amazonaws.regions.Regions.DEFAULT_REGION;
+import java.net.URI;
public class WiremockTestHelper {
@@ -33,18 +31,16 @@ public class WiremockTestHelper {
private WiremockTestHelper() {
}
- public static AmazonEC2 wiremockEc2Client(WireMockRuntimeInfo runtimeInfo) {
- return AmazonEC2ClientBuilder.standard()
- .withEndpointConfiguration(wiremockEndpointConfiguration(runtimeInfo))
- .withCredentials(wiremockCredentialsProvider())
- .build();
- }
-
- public static AwsClientBuilder.EndpointConfiguration wiremockEndpointConfiguration(WireMockRuntimeInfo runtimeInfo) {
- return new AwsClientBuilder.EndpointConfiguration(runtimeInfo.getHttpBaseUrl(), DEFAULT_REGION.getName());
+ public static Ec2Client wiremockEc2Client(WireMockRuntimeInfo runtimeInfo) {
+ return wiremockAwsV2Client(runtimeInfo, Ec2Client.builder());
}
- public static AWSCredentialsProvider wiremockCredentialsProvider() {
- return new AWSStaticCredentialsProvider(new BasicAWSCredentials(WIREMOCK_ACCESS_KEY, WIREMOCK_SECRET_KEY));
+ public static <B extends AwsClientBuilder<B, T>, T> T wiremockAwsV2Client(WireMockRuntimeInfo runtimeInfo, B builder) {
+ return builder
+ .endpointOverride(URI.create(runtimeInfo.getHttpBaseUrl()))
+ .region(Region.AWS_GLOBAL)
+ .credentialsProvider(StaticCredentialsProvider.create(
+ AwsBasicCredentials.create(WIREMOCK_ACCESS_KEY, WIREMOCK_SECRET_KEY)))
+ .build();
}
}
diff --git a/java/cdk-environment/pom.xml b/java/cdk-environment/pom.xml
index 1285326f55..9cc35c89c8 100644
--- a/java/cdk-environment/pom.xml
+++ b/java/cdk-environment/pom.xml
@@ -39,7 +39,6 @@
            <groupId>software.amazon.awssdk</groupId>
            <artifactId>cloudformation</artifactId>
-            <version>${aws-java-sdk-v2.version}</version>
@@ -54,6 +53,10 @@
com.google.code.gson
gson
+        <dependency>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+        </dependency>
com.github.tomakehurst
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/SleeperEnvironmentCdkApp.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/SleeperEnvironmentCdkApp.java
index 4a3896f515..a8f850ca57 100644
--- a/java/cdk-environment/src/main/java/sleeper/environment/cdk/SleeperEnvironmentCdkApp.java
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/SleeperEnvironmentCdkApp.java
@@ -18,12 +18,21 @@
import software.amazon.awscdk.App;
import software.amazon.awscdk.AppProps;
import software.amazon.awscdk.Environment;
+import software.amazon.awscdk.Stack;
import software.amazon.awscdk.StackProps;
+import software.amazon.awscdk.services.events.IRule;
-import sleeper.environment.cdk.buildec2.BuildEC2Stack;
+import sleeper.environment.cdk.buildec2.BuildEC2Deployment;
+import sleeper.environment.cdk.builduptime.AutoShutdownSchedule;
+import sleeper.environment.cdk.builduptime.BuildUptimeDeployment;
import sleeper.environment.cdk.config.AppContext;
-import sleeper.environment.cdk.networking.NetworkingStack;
+import sleeper.environment.cdk.networking.NetworkingDeployment;
+import sleeper.environment.cdk.nightlytests.NightlyTestDeployment;
+import java.util.List;
+
+import static sleeper.environment.cdk.config.AppParameters.BUILD_UPTIME_LAMBDA_JAR;
+import static sleeper.environment.cdk.config.AppParameters.DEPLOY_EC2;
import static sleeper.environment.cdk.config.AppParameters.INSTANCE_ID;
/**
@@ -42,12 +51,20 @@ public static void main(String[] args) {
.account(System.getenv("CDK_DEFAULT_ACCOUNT"))
.region(System.getenv("CDK_DEFAULT_REGION"))
.build();
- String instanceId = AppContext.of(app).get(INSTANCE_ID);
- NetworkingStack networking = new NetworkingStack(app,
- StackProps.builder().stackName(instanceId + "-Networking").env(environment).build());
- new BuildEC2Stack(app,
- StackProps.builder().stackName(instanceId + "-BuildEC2").env(environment).build(),
- networking.getVpc());
+ AppContext context = AppContext.of(app);
+ String instanceId = context.get(INSTANCE_ID);
+ Stack stack = new Stack(app, "SleeperEnvironment", StackProps.builder().stackName(instanceId + "-SleeperEnvironment").env(environment).build());
+ NightlyTestDeployment nightlyTests = new NightlyTestDeployment(stack);
+ NetworkingDeployment networking = new NetworkingDeployment(stack);
+ BuildEC2Deployment buildEc2 = null;
+ if (context.get(DEPLOY_EC2)) {
+ buildEc2 = new BuildEC2Deployment(stack, networking.getVpc(), nightlyTests);
+ }
+ if (context.get(BUILD_UPTIME_LAMBDA_JAR).isPresent()) {
+ BuildUptimeDeployment buildUptime = new BuildUptimeDeployment(stack);
+ List<IRule> autoStopRules = nightlyTests.automateUptimeGetAutoStopRules(buildEc2, buildUptime);
+ AutoShutdownSchedule.create(stack, buildUptime, buildEc2, autoStopRules);
+ }
app.synth();
}
}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Stack.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Deployment.java
similarity index 77%
rename from java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Stack.java
rename to java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Deployment.java
index a7121a2f8b..27bf8ef7a1 100644
--- a/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Stack.java
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Deployment.java
@@ -16,8 +16,7 @@
package sleeper.environment.cdk.buildec2;
import software.amazon.awscdk.CfnOutput;
-import software.amazon.awscdk.Stack;
-import software.amazon.awscdk.StackProps;
+import software.amazon.awscdk.services.ec2.IInstance;
import software.amazon.awscdk.services.ec2.IVpc;
import software.amazon.awscdk.services.ec2.Instance;
import software.amazon.awscdk.services.ec2.InstanceClass;
@@ -27,8 +26,6 @@
import software.amazon.awscdk.services.ec2.SubnetSelection;
import software.amazon.awscdk.services.ec2.SubnetType;
import software.amazon.awscdk.services.ec2.UserData;
-import software.amazon.awscdk.services.ec2.Vpc;
-import software.amazon.awscdk.services.ec2.VpcLookupOptions;
import software.amazon.awscdk.services.iam.AccountRootPrincipal;
import software.amazon.awscdk.services.iam.Effect;
import software.amazon.awscdk.services.iam.ManagedPolicy;
@@ -37,28 +34,29 @@
import software.constructs.Construct;
import sleeper.environment.cdk.config.AppContext;
+import sleeper.environment.cdk.nightlytests.NightlyTestDeployment;
import java.util.Collections;
import java.util.List;
-import static sleeper.environment.cdk.config.AppParameters.VPC_ID;
-
-public class BuildEC2Stack extends Stack {
+public class BuildEC2Deployment {
private final IVpc vpc;
-
- public BuildEC2Stack(Construct scope, StackProps props, IVpc inheritVpc) {
- super(scope, props.getStackName(), props);
- AppContext context = AppContext.of(this);
- BuildEC2Parameters params = BuildEC2Parameters.from(context);
- vpc = context.get(VPC_ID)
- .map(vpcId -> Vpc.fromLookup(this, "Vpc", VpcLookupOptions.builder().vpcId(vpcId).build()))
- .orElse(inheritVpc);
+ private final Instance instance;
+
+ public BuildEC2Deployment(Construct scope, IVpc vpc, NightlyTestDeployment nightlyTests) {
+ AppContext context = AppContext.of(scope);
+ BuildEC2Parameters params = BuildEC2Parameters.builder()
+ .context(context)
+ .testBucket(nightlyTests.getTestBucketName())
+ .inheritVpc(vpc)
+ .build();
+ this.vpc = vpc;
BuildEC2Image image = params.image();
- Instance instance = Instance.Builder.create(this, "EC2")
+ instance = Instance.Builder.create(scope, "BuildEC2")
.vpc(vpc)
- .securityGroup(createSecurityGroup())
+ .securityGroup(createSecurityGroup(scope))
.machineImage(image.machineImage())
.instanceType(InstanceType.of(InstanceClass.T3, InstanceSize.LARGE))
.vpcSubnets(SubnetSelection.builder().subnetType(SubnetType.PRIVATE_WITH_EGRESS).build())
@@ -68,17 +66,17 @@ public BuildEC2Stack(Construct scope, StackProps props, IVpc inheritVpc) {
.build();
instance.getRole().addManagedPolicy(ManagedPolicy.fromAwsManagedPolicyName("AdministratorAccess"));
- Role restrictedRole = createRestrictedRole();
+ Role restrictedRole = createRestrictedRole(scope);
- CfnOutput.Builder.create(this, "LoginUser")
+ CfnOutput.Builder.create(scope, "BuildEC2LoginUser")
.value(image.loginUser())
.description("User to SSH into on build EC2 instance")
.build();
- CfnOutput.Builder.create(this, "InstanceId")
+ CfnOutput.Builder.create(scope, "BuildEC2Id")
.value(instance.getInstanceId())
.description("ID of the build EC2 instance")
.build();
- CfnOutput.Builder.create(this, "RestrictedRoleArn")
+ CfnOutput.Builder.create(scope, "BuildEC2RestrictedRoleArn")
.value(restrictedRole.getRoleArn())
.description("Role with restricted access to deploy Sleeper instances. " +
"This can be assumed to test deploying a Sleeper instance with fewer permissions. " +
@@ -86,12 +84,12 @@ public BuildEC2Stack(Construct scope, StackProps props, IVpc inheritVpc) {
.build();
}
- private Role createRestrictedRole() {
+ private Role createRestrictedRole(Construct scope) {
- Role role = Role.Builder.create(this, "RestrictedRole")
+ Role role = Role.Builder.create(scope, "RestrictedRole")
.assumedBy(new AccountRootPrincipal())
.build();
- ManagedPolicy policy = new ManagedPolicy(this, "BuildEC2Policy");
+ ManagedPolicy policy = new ManagedPolicy(scope, "BuildEC2Policy");
// Allow running CDK by assuming roles created by cdk bootstrap
// Allow interacting with Sleeper by assuming admin role
@@ -124,12 +122,16 @@ private Role createRestrictedRole() {
return role;
}
- private SecurityGroup createSecurityGroup() {
- return SecurityGroup.Builder.create(this, "AllowOutbound")
+ private SecurityGroup createSecurityGroup(Construct scope) {
+ return SecurityGroup.Builder.create(scope, "AllowOutbound")
.vpc(vpc)
.description("Allow outbound traffic")
.allowAllOutbound(true)
.build();
}
+ public IInstance getInstance() {
+ return instance;
+ }
+
}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Image.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Image.java
index c86a4c7914..6ed34db104 100644
--- a/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Image.java
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Image.java
@@ -67,7 +67,7 @@ BlockDevice rootBlockDevice() {
return BlockDevice.builder()
.deviceName(rootDeviceName)
.volume(BlockDeviceVolume.ebs(rootVolumeSizeGiB,
- EbsDeviceOptions.builder().volumeType(EbsDeviceVolumeType.GP3).build()))
+ EbsDeviceOptions.builder().volumeType(EbsDeviceVolumeType.GP3).encrypted(true).build()))
.build();
}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Parameters.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Parameters.java
index 86f0175cc6..360aba6620 100644
--- a/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Parameters.java
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/BuildEC2Parameters.java
@@ -15,10 +15,23 @@
*/
package sleeper.environment.cdk.buildec2;
+import software.amazon.awscdk.services.ec2.ISubnet;
+import software.amazon.awscdk.services.ec2.IVpc;
+
import sleeper.environment.cdk.config.AppContext;
import sleeper.environment.cdk.config.AppParameters;
import sleeper.environment.cdk.config.StringParameter;
+import java.util.List;
+import java.util.Objects;
+
+import static java.util.stream.Collectors.toUnmodifiableList;
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_BUCKET;
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_RUN_ENABLED;
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_RUN_HOUR_UTC;
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_SUBNETS;
+import static sleeper.environment.cdk.config.AppParameters.VPC_ID;
+
public class BuildEC2Parameters {
public static final StringParameter REPOSITORY = AppParameters.BUILD_REPOSITORY;
@@ -29,27 +42,104 @@ public class BuildEC2Parameters {
private final String fork;
private final String branch;
private final BuildEC2Image image;
+ private final boolean nightlyTestEnabled;
+ private final String testHour;
+ private final String testBucket;
+ private final String vpc;
+ private final String subnets;
- private BuildEC2Parameters(AppContext context) {
+ private BuildEC2Parameters(Builder builder) {
+ AppContext context = Objects.requireNonNull(builder.context, "context must not be null");
repository = context.get(REPOSITORY);
fork = context.get(FORK);
branch = context.get(BRANCH);
image = BuildEC2Image.from(context);
+ nightlyTestEnabled = context.get(NIGHTLY_TEST_RUN_ENABLED);
+ if (nightlyTestEnabled) {
+ testHour = "" + context.get(NIGHTLY_TEST_RUN_HOUR_UTC);
+ testBucket = context.get(NIGHTLY_TEST_BUCKET)
+ .orElseGet(() -> Objects.requireNonNull(builder.testBucket, "testBucket must not be null"));
+ vpc = context.get(VPC_ID).orElseGet(() -> Objects.requireNonNull(builder.inheritVpc, "inheritVpc must not be null"));
+ List<String> subnetsList = context.get(NIGHTLY_TEST_SUBNETS);
+ if (subnetsList.isEmpty()) {
+ subnetsList = Objects.requireNonNull(builder.inheritSubnets, "inheritSubnets must not be null");
+ }
+ subnets = String.join(",", subnetsList);
+ } else {
+ testHour = null;
+ testBucket = null;
+ vpc = null;
+ subnets = null;
+ }
}
static BuildEC2Parameters from(AppContext context) {
- return new BuildEC2Parameters(context);
+ return builder().context(context).build();
+ }
+
+ static Builder builder() {
+ return new Builder();
+ }
+
+ boolean isNightlyTestEnabled() {
+ return nightlyTestEnabled;
}
String fillUserDataTemplate(String template) {
- return template.replace("${repository}", repository)
+ String noNightlyTests = template
+ .replace("${repository}", repository)
.replace("${fork}", fork)
.replace("${branch}", branch)
.replace("${loginUser}", image.loginUser());
+ if (!nightlyTestEnabled) {
+ return noNightlyTests;
+ }
+ return noNightlyTests
+ .replace("${testHour}", testHour)
+ .replace("${testBucket}", testBucket)
+ .replace("${vpc}", vpc)
+ .replace("${subnets}", subnets);
}
BuildEC2Image image() {
return image;
}
+ public static class Builder {
+ private AppContext context;
+ private String testBucket;
+ private String inheritVpc;
+ private List<String> inheritSubnets;
+
+ private Builder() {
+ }
+
+ public Builder context(AppContext context) {
+ this.context = context;
+ return this;
+ }
+
+ public Builder testBucket(String testBucket) {
+ this.testBucket = testBucket;
+ return this;
+ }
+
+ public Builder inheritVpc(String vpc, List<String> subnetIds) {
+ inheritVpc = vpc;
+ inheritSubnets = subnetIds;
+ return this;
+ }
+
+ public Builder inheritVpc(IVpc inheritVpc) {
+ return inheritVpc(inheritVpc.getVpcId(),
+ inheritVpc.getPrivateSubnets().stream()
+ .map(ISubnet::getSubnetId)
+ .collect(toUnmodifiableList()));
+ }
+
+ public BuildEC2Parameters build() {
+ return new BuildEC2Parameters(this);
+ }
+ }
+
}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/LoadUserDataUtil.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/LoadUserDataUtil.java
index 37a3b79c40..bbb4d49d58 100644
--- a/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/LoadUserDataUtil.java
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/buildec2/LoadUserDataUtil.java
@@ -16,6 +16,7 @@
package sleeper.environment.cdk.buildec2;
import org.apache.commons.io.IOUtils;
+import software.amazon.awscdk.Fn;
import java.net.URL;
import java.nio.charset.Charset;
@@ -28,7 +29,29 @@ private LoadUserDataUtil() {
}
static String userData(BuildEC2Parameters params) {
- return params.fillUserDataTemplate(templateString());
+ return params.fillUserDataTemplate(templateString())
+ .replace("%write-files-yaml%", writeFilesYaml(params));
+ }
+
+ static String writeFilesYaml(BuildEC2Parameters params) {
+ if (!params.isNightlyTestEnabled()) {
+ return "";
+ }
+ return resourceString("write-files-nightly-tests.yaml")
+ .replace("${nightlyTestSettingsBase64}",
+ Fn.base64(nightlyTestSettingsJson(params)))
+ .replace("${crontabBase64}",
+ Fn.base64(crontab(params)));
+ }
+
+ static String nightlyTestSettingsJson(BuildEC2Parameters params) {
+ String template = resourceString("nightlyTestSettings.json");
+ return params.fillUserDataTemplate(template);
+ }
+
+ static String crontab(BuildEC2Parameters params) {
+ String template = resourceString("crontab");
+ return params.fillUserDataTemplate(template);
}
private static String templateString() {
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/builduptime/AutoShutdownSchedule.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/builduptime/AutoShutdownSchedule.java
new file mode 100644
index 0000000000..514711fb56
--- /dev/null
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/builduptime/AutoShutdownSchedule.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.environment.cdk.builduptime;
+
+import software.amazon.awscdk.services.events.CronOptions;
+import software.amazon.awscdk.services.events.IRule;
+import software.amazon.awscdk.services.events.Rule;
+import software.amazon.awscdk.services.events.RuleTargetInput;
+import software.amazon.awscdk.services.events.Schedule;
+import software.amazon.awscdk.services.events.targets.LambdaFunction;
+import software.constructs.Construct;
+
+import sleeper.environment.cdk.buildec2.BuildEC2Deployment;
+import sleeper.environment.cdk.config.AppContext;
+import sleeper.environment.cdk.config.AppParameters;
+import sleeper.environment.cdk.config.IntParameter;
+import sleeper.environment.cdk.config.StringListParameter;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.stream.Collectors.toUnmodifiableList;
+import static sleeper.environment.cdk.config.AppParameters.INSTANCE_ID;
+
+public class AutoShutdownSchedule {
+
+ public static final StringListParameter AUTO_SHUTDOWN_EXISTING_EC2_IDS = AppParameters.AUTO_SHUTDOWN_EXISTING_EC2_IDS;
+ public static final IntParameter AUTO_SHUTDOWN_HOUR_UTC = AppParameters.AUTO_SHUTDOWN_HOUR_UTC;
+
+ private AutoShutdownSchedule() {
+ }
+
+ public static void create(Construct scope, BuildUptimeDeployment buildUptime, BuildEC2Deployment buildEc2, List<IRule> autoStopRules) {
+ AppContext context = AppContext.of(scope);
+
+ List<String> ec2Ids = new ArrayList<>();
+ ec2Ids.addAll(context.get(AUTO_SHUTDOWN_EXISTING_EC2_IDS));
+ if (buildEc2 != null) {
+ ec2Ids.add(buildEc2.getInstance().getInstanceId());
+ }
+
+ List<String> rules = autoStopRules.stream().map(IRule::getRuleName).collect(toUnmodifiableList());
+
+ Rule.Builder.create(scope, "AutoShutdownSchedule")
+ .ruleName("sleeper-" + context.get(INSTANCE_ID) + "-auto-shutdown")
+ .description("Daily invocation to shut down EC2s for the night")
+ .schedule(Schedule.cron(CronOptions.builder()
+ .hour("" + context.get(AUTO_SHUTDOWN_HOUR_UTC))
+ .minute("00")
+ .build()))
+ .targets(List.of(LambdaFunction.Builder.create(buildUptime.getFunction())
+ .event(RuleTargetInput.fromObject(Map.of(
+ "operation", "stop",
+ "ec2Ids", ec2Ids,
+ "rules", rules)))
+ .build()))
+ .build();
+ }
+
+}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/builduptime/BuildUptimeDeployment.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/builduptime/BuildUptimeDeployment.java
new file mode 100644
index 0000000000..9130d8b62a
--- /dev/null
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/builduptime/BuildUptimeDeployment.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.environment.cdk.builduptime;
+
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import software.amazon.awscdk.Duration;
+import software.amazon.awscdk.services.iam.PolicyStatement;
+import software.amazon.awscdk.services.lambda.Code;
+import software.amazon.awscdk.services.lambda.Function;
+import software.amazon.awscdk.services.lambda.IFunction;
+import software.constructs.Construct;
+
+import sleeper.environment.cdk.config.AppContext;
+import sleeper.environment.cdk.config.AppParameters;
+import sleeper.environment.cdk.config.OptionalStringParameter;
+
+import java.util.List;
+import java.util.Map;
+
+import static sleeper.environment.cdk.config.AppParameters.INSTANCE_ID;
+import static software.amazon.awscdk.services.lambda.Runtime.JAVA_11;
+
+public class BuildUptimeDeployment {
+ public static final OptionalStringParameter LAMBDA_JAR = AppParameters.BUILD_UPTIME_LAMBDA_JAR;
+
+ private final IFunction function;
+
+ @SuppressFBWarnings("NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE")
+ public BuildUptimeDeployment(Construct scope) {
+ AppContext context = AppContext.of(scope);
+ String lambdaJarPath = context.get(LAMBDA_JAR)
+ .orElseThrow(() -> new IllegalArgumentException("buildUptimeLambdaJar is required for BuildUptimeStack"));
+
+ function = Function.Builder.create(scope, "BuildUptimeFunction")
+ .code(Code.fromAsset(lambdaJarPath))
+ .functionName("sleeper-" + context.get(INSTANCE_ID) + "-build-uptime")
+ .description("Start and stop EC2 instances and schedule rules")
+ .runtime(JAVA_11)
+ .memorySize(1024)
+ .timeout(Duration.minutes(10))
+ .handler("sleeper.build.uptime.lambda.BuildUptimeLambda::handleRequest")
+ .environment(Map.of())
+ .reservedConcurrentExecutions(1)
+ .build().getCurrentVersion();
+
+ function.getRole().addToPrincipalPolicy(PolicyStatement.Builder.create()
+ .resources(List.of("*"))
+ .actions(List.of(
+ "ec2:StartInstances", "ec2:StopInstances",
+ "events:EnableRule", "events:DisableRule"))
+ .build());
+ }
+
+ public IFunction getFunction() {
+ return function;
+ }
+}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/AppContext.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/AppContext.java
index 2d91525661..2ef2c5533a 100644
--- a/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/AppContext.java
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/AppContext.java
@@ -15,10 +15,10 @@
*/
package sleeper.environment.cdk.config;
-import software.amazon.awscdk.App;
-import software.amazon.awscdk.Stack;
+import software.constructs.Construct;
import software.constructs.Node;
+import java.util.List;
import java.util.Optional;
@FunctionalInterface
@@ -30,20 +30,28 @@ default String get(StringParameter string) {
return string.get(this);
}
+ default String get(RequiredStringParameter string) {
+ return string.get(this);
+ }
+
default Optional<String> get(OptionalStringParameter string) {
return string.get(this);
}
- default int get(IntParameter integer) {
- return integer.get(this);
+ default List<String> get(StringListParameter list) {
+ return list.get(this);
}
- static AppContext of(App app) {
- return of(app.getNode());
+ default boolean get(BooleanParameter bool) {
+ return bool.get(this);
+ }
+
+ default int get(IntParameter integer) {
+ return integer.get(this);
}
- static AppContext of(Stack stack) {
- return of(stack.getNode());
+ static AppContext of(Construct construct) {
+ return of(construct.getNode());
}
static AppContext of(Node node) {
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/AppParameters.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/AppParameters.java
index bb09870b4e..6d920050a7 100644
--- a/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/AppParameters.java
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/AppParameters.java
@@ -20,16 +20,25 @@ public class AppParameters {
private AppParameters() {
}
- public static final StringParameter INSTANCE_ID = StringParameter.keyAndDefault("instanceId", "SleeperEnvironment");
+ public static final RequiredStringParameter INSTANCE_ID = RequiredStringParameter.key("instanceId");
public static final OptionalStringParameter VPC_ID = OptionalStringParameter.key("vpcId");
+ public static final BooleanParameter DEPLOY_EC2 = BooleanParameter.keyAndDefault("deployEc2", true);
public static final StringParameter BUILD_REPOSITORY = StringParameter.keyAndDefault("repository", "sleeper");
public static final StringParameter BUILD_FORK = StringParameter.keyAndDefault("fork", "gchq");
public static final StringParameter BUILD_BRANCH = StringParameter.keyAndDefault("branch", "develop");
- public static final StringParameter BUILD_IMAGE_NAME = StringParameter.keyAndDefault("buildImageName", "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*");
+ public static final StringParameter BUILD_IMAGE_NAME = StringParameter.keyAndDefault("buildImageName", "ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*");
public static final StringParameter BUILD_IMAGE_OWNER = StringParameter.keyAndDefault("buildImageOwner", "099720109477");
public static final StringParameter BUILD_IMAGE_LOGIN_USER = StringParameter.keyAndDefault("buildImageLoginUser", "ubuntu");
public static final StringParameter BUILD_IMAGE_ROOT_DEVICE_NAME = StringParameter.keyAndDefault("buildImageRootDeviceName", "/dev/sda1");
public static final IntParameter BUILD_ROOT_VOLUME_SIZE_GIB = IntParameter.keyAndDefault("buildRootVolumeSizeGiB", 200);
+
+ public static final OptionalStringParameter BUILD_UPTIME_LAMBDA_JAR = OptionalStringParameter.key("buildUptimeLambdaJar");
+ public static final StringListParameter AUTO_SHUTDOWN_EXISTING_EC2_IDS = StringListParameter.key("autoShutdownExistingEc2Ids");
+ public static final IntParameter AUTO_SHUTDOWN_HOUR_UTC = IntParameter.keyAndDefault("autoShutdownHourUtc", 19);
+ public static final BooleanParameter NIGHTLY_TEST_RUN_ENABLED = BooleanParameter.keyAndDefault("nightlyTestsEnabled", false);
+ public static final IntParameter NIGHTLY_TEST_RUN_HOUR_UTC = IntParameter.keyAndDefault("nightlyTestHourUtc", 3);
+ public static final OptionalStringParameter NIGHTLY_TEST_BUCKET = OptionalStringParameter.key("nightlyTestBucket");
+ public static final StringListParameter NIGHTLY_TEST_SUBNETS = StringListParameter.key("subnetIds");
}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/BooleanParameter.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/BooleanParameter.java
new file mode 100644
index 0000000000..e367ef6ac9
--- /dev/null
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/BooleanParameter.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.environment.cdk.config;
+
+public class BooleanParameter {
+
+ private final String key;
+ private final boolean defaultValue;
+
+ private BooleanParameter(String key, boolean defaultValue) {
+ this.key = key;
+ this.defaultValue = defaultValue;
+ }
+
+ boolean get(AppContext context) {
+ return OptionalStringParameter.getOptionalString(context, key)
+ .map(Boolean::parseBoolean)
+ .orElse(defaultValue);
+ }
+
+ public StringValue value(boolean value) {
+ return value("" + value);
+ }
+
+ public StringValue value(String value) {
+ return new StringValue(key, value);
+ }
+
+ static BooleanParameter keyAndDefault(String key, boolean defaultValue) {
+ return new BooleanParameter(key, defaultValue);
+ }
+
+}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/RequiredStringParameter.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/RequiredStringParameter.java
new file mode 100644
index 0000000000..7cd81b3211
--- /dev/null
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/RequiredStringParameter.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.environment.cdk.config;
+
+import java.util.Optional;
+
+public class RequiredStringParameter {
+
+ private final String key;
+
+ private RequiredStringParameter(String key) {
+ this.key = key;
+ }
+
+ String get(AppContext context) {
+ return OptionalStringParameter.getOptionalString(context, key)
+ .orElseThrow(() -> new IllegalArgumentException(key + " is required"));
+ }
+
+ public StringValue value(String value) {
+ return new StringValue(key, value);
+ }
+
+ static RequiredStringParameter key(String key) {
+ return new RequiredStringParameter(key);
+ }
+
+ static Optional<String> getOptionalString(AppContext context, String key) {
+ return Optional.ofNullable(StringParameter.getStringOrDefault(context, key, null));
+ }
+}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/StringListParameter.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/StringListParameter.java
new file mode 100644
index 0000000000..d94739e6bd
--- /dev/null
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/config/StringListParameter.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.environment.cdk.config;
+
+import java.util.List;
+
+public class StringListParameter {
+
+ private final String key;
+
+ private StringListParameter(String key) {
+ this.key = key;
+ }
+
+ List<String> get(AppContext context) {
+ return readList(context.get(key));
+ }
+
+ public StringValue value(String... values) {
+ return new StringValue(key, String.join(",", values));
+ }
+
+ static StringListParameter key(String key) {
+ return new StringListParameter(key);
+ }
+
+ private List<String> readList(Object value) {
+ if (value == null) {
+ return List.of();
+ } else if (value instanceof String) {
+ return readList((String) value);
+ } else {
+ throw new IllegalArgumentException(key + " must be a comma-separated string");
+ }
+ }
+
+ private static List<String> readList(String value) {
+ if (value.length() < 1) {
+ return List.of();
+ } else {
+ return List.of(value.split(","));
+ }
+ }
+
+}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/networking/NetworkingStack.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/networking/NetworkingDeployment.java
similarity index 74%
rename from java/cdk-environment/src/main/java/sleeper/environment/cdk/networking/NetworkingStack.java
rename to java/cdk-environment/src/main/java/sleeper/environment/cdk/networking/NetworkingDeployment.java
index f2bd40ca1e..8bfa01eb4d 100644
--- a/java/cdk-environment/src/main/java/sleeper/environment/cdk/networking/NetworkingStack.java
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/networking/NetworkingDeployment.java
@@ -15,8 +15,6 @@
*/
package sleeper.environment.cdk.networking;
-import software.amazon.awscdk.Stack;
-import software.amazon.awscdk.StackProps;
import software.amazon.awscdk.services.ec2.GatewayVpcEndpoint;
import software.amazon.awscdk.services.ec2.GatewayVpcEndpointAwsService;
import software.amazon.awscdk.services.ec2.IVpc;
@@ -25,19 +23,29 @@
import software.amazon.awscdk.services.ec2.SubnetSelection;
import software.amazon.awscdk.services.ec2.SubnetType;
import software.amazon.awscdk.services.ec2.Vpc;
+import software.amazon.awscdk.services.ec2.VpcLookupOptions;
import software.constructs.Construct;
+import sleeper.environment.cdk.config.AppContext;
+
import java.util.Arrays;
import java.util.Collections;
+import java.util.Optional;
-public class NetworkingStack extends Stack {
+import static sleeper.environment.cdk.config.AppParameters.VPC_ID;
- private final Vpc vpc;
+public class NetworkingDeployment {
- public NetworkingStack(Construct scope, StackProps props) {
- super(scope, props.getStackName(), props);
+ private final IVpc vpc;
- vpc = Vpc.Builder.create(this, "Vpc")
+ public NetworkingDeployment(Construct scope) {
+ AppContext context = AppContext.of(scope);
+ Optional<String> vpcId = context.get(VPC_ID);
+ if (vpcId.isPresent()) {
+ vpc = Vpc.fromLookup(scope, "Vpc", VpcLookupOptions.builder().vpcId(vpcId.get()).build());
+ return;
+ }
+ vpc = Vpc.Builder.create(scope, "Vpc")
.ipAddresses(IpAddresses.cidr("10.0.0.0/16"))
.maxAzs(3)
.natGateways(1)
@@ -50,13 +58,13 @@ public NetworkingStack(Construct scope, StackProps props) {
.cidrMask(19).build()))
.build();
- GatewayVpcEndpoint.Builder.create(this, "S3").vpc(vpc)
+ GatewayVpcEndpoint.Builder.create(scope, "S3Endpoint").vpc(vpc)
.service(GatewayVpcEndpointAwsService.S3)
.subnets(Collections.singletonList(SubnetSelection.builder()
.subnetType(SubnetType.PRIVATE_WITH_EGRESS).build()))
.build();
- GatewayVpcEndpoint.Builder.create(this, "DynamoDB").vpc(vpc)
+ GatewayVpcEndpoint.Builder.create(scope, "DynamoDBEndpoint").vpc(vpc)
.service(GatewayVpcEndpointAwsService.DYNAMODB)
.subnets(Collections.singletonList(SubnetSelection.builder()
.subnetType(SubnetType.PRIVATE_WITH_EGRESS).build()))
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/nightlytests/NightlyTestBucket.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/nightlytests/NightlyTestBucket.java
new file mode 100644
index 0000000000..76ded432c8
--- /dev/null
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/nightlytests/NightlyTestBucket.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.environment.cdk.nightlytests;
+
+import software.amazon.awscdk.RemovalPolicy;
+import software.amazon.awscdk.services.s3.BlockPublicAccess;
+import software.amazon.awscdk.services.s3.Bucket;
+import software.amazon.awscdk.services.s3.BucketEncryption;
+import software.amazon.awscdk.services.s3.IBucket;
+import software.constructs.Construct;
+
+import sleeper.environment.cdk.config.AppContext;
+import sleeper.environment.cdk.config.AppParameters;
+import sleeper.environment.cdk.config.OptionalStringParameter;
+
+import static sleeper.environment.cdk.config.AppParameters.INSTANCE_ID;
+
+public class NightlyTestBucket {
+ public static final OptionalStringParameter NIGHTLY_TEST_BUCKET = AppParameters.NIGHTLY_TEST_BUCKET;
+
+ private final IBucket bucket;
+
+ public NightlyTestBucket(Construct scope) {
+ AppContext context = AppContext.of(scope);
+
+ bucket = context.get(NIGHTLY_TEST_BUCKET)
+ .map(bucketName -> Bucket.fromBucketName(scope, "TestBucket", bucketName))
+ .orElseGet(() -> Bucket.Builder.create(scope, "TestBucket")
+ .bucketName("sleeper-" + context.get(INSTANCE_ID) + "-tests")
+ .versioned(false)
+ .encryption(BucketEncryption.S3_MANAGED)
+ .blockPublicAccess(BlockPublicAccess.BLOCK_ALL)
+ .removalPolicy(RemovalPolicy.RETAIN_ON_UPDATE_OR_DELETE)
+ .build());
+ }
+
+ public IBucket getBucket() {
+ return bucket;
+ }
+
+}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/nightlytests/NightlyTestDeployment.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/nightlytests/NightlyTestDeployment.java
new file mode 100644
index 0000000000..7346c2477b
--- /dev/null
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/nightlytests/NightlyTestDeployment.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.environment.cdk.nightlytests;
+
+import software.amazon.awscdk.services.events.IRule;
+import software.amazon.awscdk.services.s3.IBucket;
+import software.constructs.Construct;
+
+import sleeper.environment.cdk.buildec2.BuildEC2Deployment;
+import sleeper.environment.cdk.builduptime.BuildUptimeDeployment;
+import sleeper.environment.cdk.config.AppContext;
+
+import java.util.List;
+
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_RUN_ENABLED;
+
+public class NightlyTestDeployment {
+
+ private final Construct scope;
+ private final boolean enabled;
+ private final IBucket testBucket;
+
+ public NightlyTestDeployment(Construct scope) {
+ this.scope = scope;
+ AppContext context = AppContext.of(scope);
+ enabled = context.get(NIGHTLY_TEST_RUN_ENABLED);
+ if (enabled) {
+ testBucket = new NightlyTestBucket(scope).getBucket();
+ } else {
+ testBucket = null;
+ }
+ }
+
+ public String getTestBucketName() {
+ if (enabled) {
+ return testBucket.getBucketName();
+ } else {
+ return null;
+ }
+ }
+
+ public List<IRule> automateUptimeGetAutoStopRules(BuildEC2Deployment buildEc2, BuildUptimeDeployment buildUptime) {
+ if (enabled && buildEc2 != null) {
+ testBucket.grantRead(buildUptime.getFunction());
+ NightlyTestUptimeSchedules uptimeStack = new NightlyTestUptimeSchedules(scope,
+ buildUptime.getFunction(), buildEc2.getInstance(), testBucket.getBucketName());
+ return List.of(uptimeStack.getStopAfterTestsRule());
+ } else {
+ return List.of();
+ }
+ }
+
+}
diff --git a/java/cdk-environment/src/main/java/sleeper/environment/cdk/nightlytests/NightlyTestUptimeSchedules.java b/java/cdk-environment/src/main/java/sleeper/environment/cdk/nightlytests/NightlyTestUptimeSchedules.java
new file mode 100644
index 0000000000..acc11073da
--- /dev/null
+++ b/java/cdk-environment/src/main/java/sleeper/environment/cdk/nightlytests/NightlyTestUptimeSchedules.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.environment.cdk.nightlytests;
+
+import software.amazon.awscdk.Duration;
+import software.amazon.awscdk.services.ec2.IInstance;
+import software.amazon.awscdk.services.events.CronOptions;
+import software.amazon.awscdk.services.events.IRule;
+import software.amazon.awscdk.services.events.Rule;
+import software.amazon.awscdk.services.events.RuleTargetInput;
+import software.amazon.awscdk.services.events.Schedule;
+import software.amazon.awscdk.services.events.targets.LambdaFunction;
+import software.amazon.awscdk.services.lambda.IFunction;
+import software.constructs.Construct;
+
+import sleeper.environment.cdk.config.AppContext;
+import sleeper.environment.cdk.config.AppParameters;
+import sleeper.environment.cdk.config.IntParameter;
+
+import java.util.List;
+import java.util.Map;
+
+import static sleeper.environment.cdk.config.AppParameters.INSTANCE_ID;
+
+public class NightlyTestUptimeSchedules {
+ public static final IntParameter NIGHTLY_TEST_RUN_HOUR_UTC = AppParameters.NIGHTLY_TEST_RUN_HOUR_UTC;
+
+ private final IRule stopAfterTestsRule;
+
+ public NightlyTestUptimeSchedules(
+ Construct scope, IFunction buildUptimeFn, IInstance buildEc2, String testBucketName) {
+ AppContext context = AppContext.of(scope);
+
+ String stopAfterTestsRuleName = "sleeper-" + context.get(INSTANCE_ID) + "-stop-nightly-tests";
+ stopAfterTestsRule = Rule.Builder.create(scope, "StopAfterNightlyTests")
+ .ruleName(stopAfterTestsRuleName)
+ .description("Periodic trigger to take the build EC2 down when nightly tests finish")
+ .schedule(Schedule.rate(Duration.minutes(10)))
+ .targets(List.of(LambdaFunction.Builder.create(buildUptimeFn)
+ .event(RuleTargetInput.fromObject(Map.of(
+ "operation", "stop",
+ "condition", "testFinishedFromToday",
+ "testBucket", testBucketName,
+ "ec2Ids", List.of(buildEc2.getInstanceId()),
+ "rules", List.of(stopAfterTestsRuleName))))
+ .build()))
+ .enabled(false)
+ .build();
+ Rule.Builder.create(scope, "StartForNightlyTests")
+ .ruleName("sleeper-" + context.get(INSTANCE_ID) + "-start-for-nightly-tests")
+ .description("Nightly invocation to start the build EC2 for nightly tests")
+ .schedule(Schedule.cron(CronOptions.builder()
+ .hour("" + (context.get(NIGHTLY_TEST_RUN_HOUR_UTC) - 1))
+ .minute("50")
+ .build()))
+ .targets(List.of(LambdaFunction.Builder.create(buildUptimeFn)
+ .event(RuleTargetInput.fromObject(Map.of(
+ "operation", "start",
+ "ec2Ids", List.of(buildEc2.getInstanceId()),
+ "rules", List.of(stopAfterTestsRuleName))))
+ .build()))
+ .build();
+ }
+
+ public IRule getStopAfterTestsRule() {
+ return stopAfterTestsRule;
+ }
+}
diff --git a/java/cdk-environment/src/main/resources/cloud-init.sh b/java/cdk-environment/src/main/resources/cloud-init.sh
index c133bcb85c..e49f62a005 100644
--- a/java/cdk-environment/src/main/resources/cloud-init.sh
+++ b/java/cdk-environment/src/main/resources/cloud-init.sh
@@ -63,6 +63,13 @@ if [ ! -d "$REPOSITORY_DIR" ]; then
runuser --login "$LOGIN_USER" -c "sleeper builder git clone -b $BRANCH https://github.com/$FORK/$REPOSITORY.git"
fi
+CRONTAB_FILE="/sleeper-init/crontab"
+if [ -f "$CRONTAB_FILE" ]; then
+ runuser --login "$LOGIN_USER" -c "cp /sleeper-init/nightlyTestSettings.json $LOGIN_HOME/.sleeper/builder/"
+ chown "$LOGIN_USER:$LOGIN_USER" "$LOGIN_HOME/.sleeper/builder/nightlyTestSettings.json"
+ runuser --login "$LOGIN_USER" -c "crontab $CRONTAB_FILE"
+fi
+
if [ -f /var/run/reboot-required ]; then
/sbin/shutdown -r now && exit
fi
diff --git a/java/cdk-environment/src/main/resources/crontab b/java/cdk-environment/src/main/resources/crontab
new file mode 100644
index 0000000000..9fe820503e
--- /dev/null
+++ b/java/cdk-environment/src/main/resources/crontab
@@ -0,0 +1,27 @@
+# Edit this file to introduce tasks to be run by cron.
+#
+# Each task to run has to be defined through a single line
+# indicating with different fields when the task will be run
+# and what command to run for the task
+#
+# To define the time you can provide concrete values for
+# minute (m), hour (h), day of month (dom), month (mon),
+# and day of week (dow) or use '*' in these fields (for 'any').
+#
+# Notice that tasks will be started based on the cron's system
+# daemon's notion of time and timezones.
+#
+# Output of the crontab jobs (including errors) is sent through
+# email to the user the crontab file belongs to (unless redirected).
+#
+# For example, you can run a backup of all your user accounts
+# at 5 a.m. every week.
+# For more information see the manual pages of crontab(5) and cron(8)
+#
+# m h dom mon dow command
+
+MAILTO=""
+SHELL=/usr/bin/bash
+PATH=$PATH:/usr/bin:/home/${loginUser}/.local/bin
+0 ${testHour} * * TUE,THU,SAT,SUN docker system prune -af && sleeper cli upgrade && sleeper builder ./sleeper/scripts/test/nightly/updateAndRunTests.sh "/sleeper-builder/nightlyTestSettings.json" "functional" &> /tmp/sleeperFunctionalTests.log
+0 ${testHour} * * MON,WED,FRI docker system prune -af && sleeper cli upgrade && sleeper builder ./sleeper/scripts/test/nightly/updateAndRunTests.sh "/sleeper-builder/nightlyTestSettings.json" "performance" &> /tmp/sleeperPerformanceTests.log
diff --git a/java/cdk-environment/src/main/resources/nightlyTestSettings.json b/java/cdk-environment/src/main/resources/nightlyTestSettings.json
new file mode 100644
index 0000000000..444c280a0d
--- /dev/null
+++ b/java/cdk-environment/src/main/resources/nightlyTestSettings.json
@@ -0,0 +1,15 @@
+{
+ "vpc": "${vpc}",
+ "subnets": "${subnets}",
+ "resultsBucket": "${testBucket}",
+ "repoPath": "${fork}/${repository}",
+ "mergeToMainOnTestType": {
+ "performance": false,
+ "functional": false
+ },
+ "gitHubApp": {
+ "privateKeyFile": "/sleeper-builder/.pem",
+ "appId": "my GitHub App ID",
+ "installationId": "my GitHub App installation ID"
+ }
+}
diff --git a/java/cdk-environment/src/main/resources/user_data b/java/cdk-environment/src/main/resources/user_data
index b8303402f5..332dfde9ef 100644
--- a/java/cdk-environment/src/main/resources/user_data
+++ b/java/cdk-environment/src/main/resources/user_data
@@ -10,6 +10,7 @@ Content-Disposition: attachment; filename="cloud-config.txt"
#cloud-config
cloud_final_modules:
- [scripts-user, always]
+%write-files-yaml%
--//
Content-Type: text/x-shellscript; charset="us-ascii"
diff --git a/java/cdk-environment/src/main/resources/write-files-nightly-tests.yaml b/java/cdk-environment/src/main/resources/write-files-nightly-tests.yaml
new file mode 100644
index 0000000000..d1a9150b5b
--- /dev/null
+++ b/java/cdk-environment/src/main/resources/write-files-nightly-tests.yaml
@@ -0,0 +1,7 @@
+write_files:
+- encoding: b64
+ content: ${nightlyTestSettingsBase64}
+ path: /sleeper-init/nightlyTestSettings.json
+- encoding: b64
+ content: ${crontabBase64}
+ path: /sleeper-init/crontab
diff --git a/java/cdk-environment/src/test/java/sleeper/environment/cdk/buildec2/BuildEC2ParametersTest.java b/java/cdk-environment/src/test/java/sleeper/environment/cdk/buildec2/BuildEC2ParametersTest.java
index c2f555df03..21e5b91a33 100644
--- a/java/cdk-environment/src/test/java/sleeper/environment/cdk/buildec2/BuildEC2ParametersTest.java
+++ b/java/cdk-environment/src/test/java/sleeper/environment/cdk/buildec2/BuildEC2ParametersTest.java
@@ -24,20 +24,26 @@
import static sleeper.environment.cdk.buildec2.BuildEC2Parameters.BRANCH;
import static sleeper.environment.cdk.buildec2.BuildEC2Parameters.FORK;
import static sleeper.environment.cdk.buildec2.BuildEC2Parameters.REPOSITORY;
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_RUN_ENABLED;
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_SUBNETS;
+import static sleeper.environment.cdk.config.AppParameters.VPC_ID;
public class BuildEC2ParametersTest {
@Test
- public void fillGitClone() {
+ void shouldFillGitClone() {
assertThat(BuildEC2Parameters.from(AppContext.of(
- BRANCH.value("feature/test"), FORK.value("test-fork"), REPOSITORY.value("test-project")))
+ BRANCH.value("feature/test"),
+ FORK.value("test-fork"),
+ REPOSITORY.value("test-project")))
.fillUserDataTemplate("git clone -b ${branch} https://github.com/${fork}/${repository}.git"))
.isEqualTo("git clone -b feature/test https://github.com/test-fork/test-project.git");
}
@Test
- public void fillLoginUser() {
- assertThat(BuildEC2Parameters.from(AppContext.of(LOGIN_USER.value("test-user")))
+ void shouldFillLoginUser() {
+ assertThat(BuildEC2Parameters.from(AppContext.of(
+ LOGIN_USER.value("test-user")))
.fillUserDataTemplate("LOGIN_USER=${loginUser}\n" +
"LOGIN_HOME=/home/$LOGIN_USER"))
.isEqualTo("LOGIN_USER=test-user\n" +
@@ -45,18 +51,60 @@ public void fillLoginUser() {
}
@Test
- public void templateCanContainSameKeyMultipleTimes() {
- assertThat(BuildEC2Parameters.from(AppContext.of(REPOSITORY.value("repeated-repo")))
+ void templateCanContainSameKeyMultipleTimes() {
+ assertThat(BuildEC2Parameters.from(AppContext.of(
+ REPOSITORY.value("repeated-repo")))
.fillUserDataTemplate("[ ! -d ~/${repository} ] && mkdir ~/${repository}"))
.isEqualTo("[ ! -d ~/repeated-repo ] && mkdir ~/repeated-repo");
}
@Test
- public void setDefaultParametersWhenUsingEmptyContext() {
+ void shouldSetDefaultParametersWhenUsingEmptyContext() {
assertThat(BuildEC2Parameters.from(AppContext.empty()))
.usingRecursiveComparison()
.isEqualTo(BuildEC2Parameters.from(AppContext.of(
- REPOSITORY.value("sleeper"), FORK.value("gchq"), BRANCH.value("develop"))));
+ REPOSITORY.value("sleeper"),
+ FORK.value("gchq"),
+ BRANCH.value("develop"))));
+ }
+
+ @Test
+ void shouldFillNightlyTestSettings() {
+ assertThat(BuildEC2Parameters.builder()
+ .context(AppContext.of(
+ NIGHTLY_TEST_RUN_ENABLED.value(true),
+ VPC_ID.value("my-vpc"),
+ NIGHTLY_TEST_SUBNETS.value("subnet-1,subnet-2"),
+ FORK.value("my-fork"),
+ REPOSITORY.value("my-repo")))
+ .testBucket("nightly-test-results")
+ .build().fillUserDataTemplate("{" +
+ "\"vpc\":\"${vpc}\"," +
+ "\"subnets\":\"${subnets}\"," +
+ "\"resultsBucket\":\"${testBucket}\"," +
+ "\"repoPath\":\"${fork}/${repository}\"}"))
+ .isEqualTo("{" +
+ "\"vpc\":\"my-vpc\"," +
+ "\"subnets\":\"subnet-1,subnet-2\"," +
+ "\"resultsBucket\":\"nightly-test-results\"," +
+ "\"repoPath\":\"my-fork/my-repo\"}");
+ }
+
+ @Test
+ void shouldFillNoNightlyTestSettings() {
+ assertThat(BuildEC2Parameters.builder()
+ .context(AppContext.empty())
+ .testBucket(null)
+ .build().fillUserDataTemplate("{" +
+ "\"vpc\":\"${vpc}\"," +
+ "\"subnets\":\"${subnets}\"," +
+ "\"resultsBucket\":\"${testBucket}\"," +
+ "\"repoPath\":\"${fork}/${repository}\"}"))
+ .isEqualTo("{" +
+ "\"vpc\":\"${vpc}\"," +
+ "\"subnets\":\"${subnets}\"," +
+ "\"resultsBucket\":\"${testBucket}\"," +
+ "\"repoPath\":\"gchq/sleeper\"}");
}
}
diff --git a/java/cdk-environment/src/test/java/sleeper/environment/cdk/buildec2/LoadUserDataUtilTest.java b/java/cdk-environment/src/test/java/sleeper/environment/cdk/buildec2/LoadUserDataUtilTest.java
index 66710f5cef..42428e2ee1 100644
--- a/java/cdk-environment/src/test/java/sleeper/environment/cdk/buildec2/LoadUserDataUtilTest.java
+++ b/java/cdk-environment/src/test/java/sleeper/environment/cdk/buildec2/LoadUserDataUtilTest.java
@@ -24,11 +24,16 @@
import static sleeper.environment.cdk.buildec2.BuildEC2Parameters.BRANCH;
import static sleeper.environment.cdk.buildec2.BuildEC2Parameters.FORK;
import static sleeper.environment.cdk.buildec2.BuildEC2Parameters.REPOSITORY;
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_BUCKET;
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_RUN_ENABLED;
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_RUN_HOUR_UTC;
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_SUBNETS;
+import static sleeper.environment.cdk.config.AppParameters.VPC_ID;
class LoadUserDataUtilTest {
@Test
- void canLoadUserData() {
+ void shouldLoadUserDataWithNoNightlyTests() {
assertThat(LoadUserDataUtil.userData(BuildEC2Parameters.from(AppContext.of(
LOGIN_USER.value("test-user"),
REPOSITORY.value("a-repo"),
@@ -38,7 +43,55 @@ void canLoadUserData() {
.contains("LOGIN_USER=test-user" + System.lineSeparator() +
"REPOSITORY=a-repo" + System.lineSeparator() +
"FORK=a-fork" + System.lineSeparator() +
- "BRANCH=feature/something" + System.lineSeparator());
+ "BRANCH=feature/something" + System.lineSeparator())
+ .doesNotContain("write_files");
+ }
+
+ @Test
+ void shouldLoadUserDataWithNightlyTests() {
+ assertThat(LoadUserDataUtil.userData(BuildEC2Parameters.from(AppContext.of(
+ LOGIN_USER.value("test-user"),
+ REPOSITORY.value("a-repo"),
+ FORK.value("a-fork"),
+ BRANCH.value("feature/something"),
+ NIGHTLY_TEST_RUN_ENABLED.value(true),
+ VPC_ID.value("my-vpc"),
+ NIGHTLY_TEST_SUBNETS.value("subnet-1", "subnet-2"),
+ NIGHTLY_TEST_BUCKET.value("my-bucket")))))
+ .startsWith("Content-Type: multipart/mixed;")
+ .contains("LOGIN_USER=test-user" + System.lineSeparator() +
+ "REPOSITORY=a-repo" + System.lineSeparator() +
+ "FORK=a-fork" + System.lineSeparator() +
+ "BRANCH=feature/something" + System.lineSeparator())
+ .contains("write_files");
+ }
+
+ @Test
+ void shouldLoadNightlyTestSettings() {
+ assertThat(LoadUserDataUtil.nightlyTestSettingsJson(BuildEC2Parameters.from(AppContext.of(
+ NIGHTLY_TEST_RUN_ENABLED.value(true),
+ VPC_ID.value("my-vpc"),
+ NIGHTLY_TEST_SUBNETS.value("subnet-1", "subnet-2"),
+ NIGHTLY_TEST_BUCKET.value("my-bucket"),
+ FORK.value("my-fork"),
+ REPOSITORY.value("my-repo")))))
+ .contains("\"vpc\": \"my-vpc\"")
+ .contains("\"subnets\": \"subnet-1,subnet-2\"")
+ .contains("\"repoPath\": \"my-fork/my-repo\"")
+ .contains("\"resultsBucket\": \"my-bucket\"");
+ }
+
+ @Test
+ void shouldLoadCrontab() {
+ assertThat(LoadUserDataUtil.crontab(BuildEC2Parameters.from(AppContext.of(
+ NIGHTLY_TEST_RUN_ENABLED.value(true),
+ NIGHTLY_TEST_RUN_HOUR_UTC.value(3),
+ LOGIN_USER.value("my-user"),
+ VPC_ID.value("my-vpc"),
+ NIGHTLY_TEST_SUBNETS.value("subnet-1", "subnet-2"),
+ NIGHTLY_TEST_BUCKET.value("my-bucket")))))
+ .contains("PATH=$PATH:/usr/bin:/home/my-user/.local/bin")
+ .contains("0 3 * * TUE,THU,SAT,SUN");
}
}
diff --git a/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/BooleanParameterTest.java b/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/BooleanParameterTest.java
new file mode 100644
index 0000000000..a12f8264f0
--- /dev/null
+++ b/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/BooleanParameterTest.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.environment.cdk.config;
+
+import org.junit.jupiter.api.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static sleeper.environment.cdk.config.AppParameters.NIGHTLY_TEST_RUN_ENABLED;
+
+public class BooleanParameterTest {
+
+ @Test
+ public void refuseEmptyString() {
+ AppContext context = AppContext.of(NIGHTLY_TEST_RUN_ENABLED.value(""));
+ assertThatThrownBy(() -> context.get(NIGHTLY_TEST_RUN_ENABLED))
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("nightlyTestsEnabled");
+ }
+
+ @Test
+ public void useDefaultValueWhenUnset() {
+ AppContext context = AppContext.empty();
+ assertThat(context.get(NIGHTLY_TEST_RUN_ENABLED)).isFalse();
+ }
+
+ @Test
+ public void canSetValue() {
+ AppContext context = AppContext.of(NIGHTLY_TEST_RUN_ENABLED.value(true));
+ assertThat(context.get(NIGHTLY_TEST_RUN_ENABLED)).isTrue();
+ }
+}
diff --git a/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/RequiredStringParameterTest.java b/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/RequiredStringParameterTest.java
new file mode 100644
index 0000000000..2537995ca8
--- /dev/null
+++ b/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/RequiredStringParameterTest.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.environment.cdk.config;
+
+import org.junit.jupiter.api.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static sleeper.environment.cdk.config.AppParameters.INSTANCE_ID;
+
+public class RequiredStringParameterTest {
+
+ @Test
+ public void refuseEmptyString() {
+ AppContext context = AppContext.of(INSTANCE_ID.value(""));
+ assertThatThrownBy(() -> context.get(INSTANCE_ID))
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("instanceId");
+ }
+
+ @Test
+ public void refuseUnset() {
+ AppContext context = AppContext.empty();
+ assertThatThrownBy(() -> context.get(INSTANCE_ID))
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("instanceId");
+ }
+
+ @Test
+ public void canSetValue() {
+ AppContext context = AppContext.of(INSTANCE_ID.value("some-test-id"));
+ assertThat(context.get(INSTANCE_ID)).contains("some-test-id");
+ }
+
+}
diff --git a/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/StringListParameterTest.java b/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/StringListParameterTest.java
new file mode 100644
index 0000000000..6def7613cb
--- /dev/null
+++ b/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/StringListParameterTest.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.environment.cdk.config;
+
+import org.junit.jupiter.api.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static sleeper.environment.cdk.config.AppParameters.AUTO_SHUTDOWN_EXISTING_EC2_IDS;
+
+public class StringListParameterTest {
+
+ @Test
+ public void allowEmptyString() {
+ AppContext context = AppContext.of(AUTO_SHUTDOWN_EXISTING_EC2_IDS.value(""));
+ assertThat(context.get(AUTO_SHUTDOWN_EXISTING_EC2_IDS)).isEmpty();
+ }
+
+ @Test
+ public void allowUnset() {
+ AppContext context = AppContext.empty();
+ assertThat(context.get(AUTO_SHUTDOWN_EXISTING_EC2_IDS)).isEmpty();
+ }
+
+ @Test
+ public void canSetOneValue() {
+ AppContext context = AppContext.of(AUTO_SHUTDOWN_EXISTING_EC2_IDS.value("a-value"));
+ assertThat(context.get(AUTO_SHUTDOWN_EXISTING_EC2_IDS))
+ .containsExactly("a-value");
+ }
+
+ @Test
+ public void canSetMultipleValues() {
+ AppContext context = AppContext.of(AUTO_SHUTDOWN_EXISTING_EC2_IDS.value("value-1", "value-2"));
+ assertThat(context.get(AUTO_SHUTDOWN_EXISTING_EC2_IDS))
+ .containsExactly("value-1", "value-2");
+ }
+
+}
diff --git a/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/StringParameterTest.java b/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/StringParameterTest.java
index 6ac8eaeca8..90ddb4ae4b 100644
--- a/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/StringParameterTest.java
+++ b/java/cdk-environment/src/test/java/sleeper/environment/cdk/config/StringParameterTest.java
@@ -19,27 +19,27 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static sleeper.environment.cdk.config.AppParameters.INSTANCE_ID;
+import static sleeper.environment.cdk.config.AppParameters.BUILD_REPOSITORY;
public class StringParameterTest {
@Test
public void refuseEmptyString() {
- AppContext context = AppContext.of(INSTANCE_ID.value(""));
- assertThatThrownBy(() -> context.get(INSTANCE_ID))
+ AppContext context = AppContext.of(BUILD_REPOSITORY.value(""));
+ assertThatThrownBy(() -> context.get(BUILD_REPOSITORY))
.isInstanceOf(IllegalArgumentException.class)
- .hasMessageContaining("instanceId");
+ .hasMessageContaining("repository");
}
@Test
public void useDefaultValueWhenUnset() {
AppContext context = AppContext.empty();
- assertThat(context.get(INSTANCE_ID)).isEqualTo("SleeperEnvironment");
+ assertThat(context.get(BUILD_REPOSITORY)).isEqualTo("sleeper");
}
@Test
public void canSetValue() {
- AppContext context = AppContext.of(INSTANCE_ID.value("some-test-id"));
- assertThat(context.get(INSTANCE_ID)).isEqualTo("some-test-id");
+ AppContext context = AppContext.of(BUILD_REPOSITORY.value("some-repository"));
+ assertThat(context.get(BUILD_REPOSITORY)).isEqualTo("some-repository");
}
}
diff --git a/java/cdk/pom.xml b/java/cdk/pom.xml
index fa2928a3d8..a9ffb87269 100644
--- a/java/cdk/pom.xml
+++ b/java/cdk/pom.xml
@@ -31,6 +31,14 @@
aws-java-sdk-sqs
${aws-java-sdk.version}
+
+ software.amazon.awssdk
+ s3
+
+
+ software.amazon.awssdk
+ dynamodb
+
software.amazon.awscdk
diff --git a/java/cdk/src/main/java/sleeper/cdk/SleeperCdkApp.java b/java/cdk/src/main/java/sleeper/cdk/SleeperCdkApp.java
index 198ce17397..29360f4a5c 100644
--- a/java/cdk/src/main/java/sleeper/cdk/SleeperCdkApp.java
+++ b/java/cdk/src/main/java/sleeper/cdk/SleeperCdkApp.java
@@ -15,8 +15,6 @@
*/
package sleeper.cdk;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import software.amazon.awscdk.App;
import software.amazon.awscdk.AppProps;
import software.amazon.awscdk.Environment;
@@ -24,6 +22,7 @@
import software.amazon.awscdk.StackProps;
import software.amazon.awscdk.Tags;
import software.amazon.awscdk.services.cloudwatch.IMetric;
+import software.amazon.awssdk.services.s3.S3Client;
import software.constructs.Construct;
import sleeper.cdk.jars.BuiltJars;
@@ -43,6 +42,7 @@
import sleeper.cdk.stack.IngestStatusStoreStack;
import sleeper.cdk.stack.InstanceRolesStack;
import sleeper.cdk.stack.KeepLambdaWarmStack;
+import sleeper.cdk.stack.LoggingStack;
import sleeper.cdk.stack.ManagedPoliciesStack;
import sleeper.cdk.stack.PartitionSplittingStack;
import sleeper.cdk.stack.PropertiesStack;
@@ -118,15 +118,18 @@ public void create() {
.collect(toUnmodifiableSet());
List errorMetrics = new ArrayList<>();
+
+ LoggingStack loggingStack = new LoggingStack(this, "Logging", instanceProperties);
+
// Stack for Checking VPC configuration
- new VpcStack(this, "Vpc", instanceProperties, jars);
+ new VpcStack(this, "Vpc", instanceProperties, jars, loggingStack);
// Topic stack
TopicStack topicStack = new TopicStack(this, "Topic", instanceProperties);
// Stacks for tables
ManagedPoliciesStack policiesStack = new ManagedPoliciesStack(this, "Policies", instanceProperties);
- TableDataStack dataStack = new TableDataStack(this, "TableData", instanceProperties, policiesStack, jars);
+ TableDataStack dataStack = new TableDataStack(this, "TableData", instanceProperties, loggingStack, policiesStack, jars);
TransactionLogStateStoreStack transactionLogStateStoreStack = new TransactionLogStateStoreStack(
this, "TransactionLogStateStore", instanceProperties, dataStack);
StateStoreStacks stateStoreStacks = new StateStoreStacks(
@@ -137,15 +140,15 @@ public void create() {
instanceProperties, policiesStack).getResources();
CompactionStatusStoreResources compactionStatusStore = new CompactionStatusStoreStack(this, "CompactionStatusStore",
instanceProperties, policiesStack).getResources();
- ConfigBucketStack configBucketStack = new ConfigBucketStack(this, "Configuration", instanceProperties, policiesStack, jars);
+ ConfigBucketStack configBucketStack = new ConfigBucketStack(this, "Configuration", instanceProperties, loggingStack, policiesStack, jars);
TableIndexStack tableIndexStack = new TableIndexStack(this, "TableIndex", instanceProperties, policiesStack);
StateStoreCommitterStack stateStoreCommitterStack = new StateStoreCommitterStack(this, "StateStoreCommitter",
instanceProperties, jars,
- configBucketStack, tableIndexStack,
+ loggingStack, configBucketStack, tableIndexStack,
stateStoreStacks, ingestStatusStore, compactionStatusStore,
policiesStack, topicStack.getTopic(), errorMetrics);
coreStacks = new CoreStacks(
- configBucketStack, tableIndexStack, policiesStack, stateStoreStacks, dataStack,
+ loggingStack, configBucketStack, tableIndexStack, policiesStack, stateStoreStacks, dataStack,
stateStoreCommitterStack, ingestStatusStore, compactionStatusStore);
new TransactionLogSnapshotStack(this, "TransactionLogSnapshot",
@@ -352,8 +355,7 @@ public static void main(String[] args) {
.account(instanceProperties.get(ACCOUNT))
.region(instanceProperties.get(REGION))
.build();
- AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
- try {
+ try (S3Client s3Client = S3Client.create()) {
BuiltJars jars = new BuiltJars(s3Client, instanceProperties.get(JARS_BUCKET));
new SleeperCdkApp(app, id, StackProps.builder()
@@ -363,8 +365,6 @@ public static void main(String[] args) {
instanceProperties, jars).create();
app.synth();
- } finally {
- s3Client.shutdown();
}
}
}
diff --git a/java/cdk/src/main/java/sleeper/cdk/jars/BuiltJars.java b/java/cdk/src/main/java/sleeper/cdk/jars/BuiltJars.java
index 0b13600da1..e43237d310 100644
--- a/java/cdk/src/main/java/sleeper/cdk/jars/BuiltJars.java
+++ b/java/cdk/src/main/java/sleeper/cdk/jars/BuiltJars.java
@@ -15,10 +15,10 @@
*/
package sleeper.cdk.jars;
-import com.amazonaws.services.s3.AmazonS3;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awscdk.services.s3.IBucket;
+import software.amazon.awssdk.services.s3.S3Client;
import java.util.HashMap;
import java.util.Map;
@@ -27,11 +27,11 @@ public class BuiltJars {
public static final Logger LOGGER = LoggerFactory.getLogger(BuiltJars.class);
- private final AmazonS3 s3;
+ private final S3Client s3;
private final String bucketName;
private final Map latestVersionIdByJar = new HashMap<>();
- public BuiltJars(AmazonS3 s3, String bucketName) {
+ public BuiltJars(S3Client s3, String bucketName) {
this.s3 = s3;
this.bucketName = bucketName;
}
@@ -47,7 +47,7 @@ public LambdaCode lambdaCode(BuiltJar jar, IBucket bucketConstruct) {
public String getLatestVersionId(BuiltJar jar) {
return latestVersionIdByJar.computeIfAbsent(jar,
missingJar -> {
- String versionId = s3.getObjectMetadata(bucketName, missingJar.getFileName()).getVersionId();
+ String versionId = s3.headObject(builder -> builder.bucket(bucketName).key(missingJar.getFileName())).versionId();
LOGGER.info("Found latest version ID for jar {}: {}", missingJar.getFileName(), versionId);
return versionId;
});
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/AthenaStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/AthenaStack.java
index 6cce333044..451b363b83 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/AthenaStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/AthenaStack.java
@@ -24,6 +24,7 @@
import software.amazon.awscdk.services.iam.IRole;
import software.amazon.awscdk.services.iam.Policy;
import software.amazon.awscdk.services.iam.PolicyStatement;
+import software.amazon.awscdk.services.kms.IKey;
import software.amazon.awscdk.services.kms.Key;
import software.amazon.awscdk.services.lambda.IFunction;
import software.amazon.awscdk.services.lambda.Runtime;
@@ -45,10 +46,10 @@
import java.util.Map;
import java.util.Objects;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.core.properties.instance.AthenaProperty.ATHENA_COMPOSITE_HANDLER_CLASSES;
import static sleeper.core.properties.instance.AthenaProperty.ATHENA_COMPOSITE_HANDLER_MEMORY;
import static sleeper.core.properties.instance.AthenaProperty.ATHENA_COMPOSITE_HANDLER_TIMEOUT_IN_SECONDS;
+import static sleeper.core.properties.instance.AthenaProperty.ATHENA_SPILL_MASTER_KEY_ARN;
import static sleeper.core.properties.instance.AthenaProperty.SPILL_BUCKET_AGE_OFF_IN_DAYS;
import static sleeper.core.properties.instance.CommonProperty.ACCOUNT;
import static sleeper.core.properties.instance.CommonProperty.REGION;
@@ -76,15 +77,9 @@ public AthenaStack(
.removalPolicy(RemovalPolicy.DESTROY)
.build();
- AutoDeleteS3Objects.autoDeleteForBucket(this, customResourcesJar, instanceProperties, spillBucket);
+ AutoDeleteS3Objects.autoDeleteForBucket(this, instanceProperties, coreStacks, customResourcesJar, spillBucket, bucketName);
- Key spillMasterKey = Key.Builder.create(this, "SpillMasterKey")
- .description("Master key used by Sleeper to generate data keys. The data keys created are used to " +
- "encrypt spilled data to S3 when communicating with Amazon Athena.")
- .enableKeyRotation(true)
- .removalPolicy(RemovalPolicy.DESTROY)
- .pendingWindow(Duration.days(7))
- .build();
+ IKey spillMasterKey = createSpillMasterKey(this, instanceProperties);
Map env = Utils.createDefaultEnvironment(instanceProperties);
env.put("spill_bucket", spillBucket.getBucketName());
@@ -117,7 +112,7 @@ public AthenaStack(
.build();
for (String className : handlerClasses) {
- IFunction handler = createConnector(className, instanceProperties, jarCode, env, memory, timeout);
+ IFunction handler = createConnector(className, instanceProperties, coreStacks, jarCode, env, memory, timeout);
jarsBucket.grantRead(handler);
@@ -141,7 +136,23 @@ public AthenaStack(
Utils.addStackTagIfSet(this, instanceProperties);
}
- private IFunction createConnector(String className, InstanceProperties instanceProperties, LambdaCode jar, Map env, Integer memory, Integer timeout) {
+ private static IKey createSpillMasterKey(Construct scope, InstanceProperties instanceProperties) {
+ String spillKeyArn = instanceProperties.get(ATHENA_SPILL_MASTER_KEY_ARN);
+ if (spillKeyArn == null) {
+ return Key.Builder.create(scope, "SpillMasterKey")
+ .description("Key used to encrypt data in the Athena spill bucket for Sleeper.")
+ .enableKeyRotation(true)
+ .removalPolicy(RemovalPolicy.DESTROY)
+ .pendingWindow(Duration.days(7))
+ .build();
+ } else {
+ return Key.fromKeyArn(scope, "SpillMasterKey", spillKeyArn);
+ }
+ }
+
+ private IFunction createConnector(
+ String className, InstanceProperties instanceProperties, CoreStacks coreStacks,
+ LambdaCode jar, Map env, Integer memory, Integer timeout) {
String instanceId = Utils.cleanInstanceId(instanceProperties);
String simpleClassName = getSimpleClassName(className);
@@ -152,7 +163,7 @@ private IFunction createConnector(String className, InstanceProperties instanceP
.memorySize(memory)
.timeout(Duration.seconds(timeout))
.runtime(Runtime.JAVA_11)
- .logGroup(createLambdaLogGroup(this, simpleClassName + "AthenaCompositeHandlerLogGroup", functionName, instanceProperties))
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName))
.handler(className)
.environment(env));
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/CompactionStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/CompactionStack.java
index 80ade9406d..e99e4116c8 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/CompactionStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/CompactionStack.java
@@ -64,9 +64,8 @@
import software.amazon.awscdk.services.iam.InstanceProfile;
import software.amazon.awscdk.services.iam.ManagedPolicy;
import software.amazon.awscdk.services.iam.PolicyStatement;
-import software.amazon.awscdk.services.iam.Role;
+import software.amazon.awscdk.services.lambda.CfnPermission;
import software.amazon.awscdk.services.lambda.IFunction;
-import software.amazon.awscdk.services.lambda.Permission;
import software.amazon.awscdk.services.lambda.eventsources.SqsEventSource;
import software.amazon.awscdk.services.s3.Bucket;
import software.amazon.awscdk.services.s3.IBucket;
@@ -74,6 +73,7 @@
import software.amazon.awscdk.services.sqs.DeadLetterQueue;
import software.amazon.awscdk.services.sqs.Queue;
import software.constructs.Construct;
+import software.constructs.IDependable;
import sleeper.cdk.jars.BuiltJar;
import sleeper.cdk.jars.BuiltJars;
@@ -96,7 +96,6 @@
import java.util.stream.Collectors;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.cdk.util.Utils.shouldDeployPaused;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.COMPACTION_AUTO_SCALING_GROUP;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.COMPACTION_CLUSTER;
@@ -281,7 +280,7 @@ private void lambdaToCreateCompactionJobsBatchedViaSQS(
.handler("sleeper.compaction.job.creation.lambda.CreateCompactionJobsTriggerLambda::handleRequest")
.environment(environmentVariables)
.reservedConcurrentExecutions(1)
- .logGroup(createLambdaLogGroup(this, "CompactionJobsCreationTriggerLogGroup", triggerFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(triggerFunctionName)));
IFunction handlerFunction = jobCreatorJar.buildFunction(this, "CompactionJobsCreationHandler", builder -> builder
.functionName(functionName)
@@ -292,7 +291,7 @@ private void lambdaToCreateCompactionJobsBatchedViaSQS(
.handler("sleeper.compaction.job.creation.lambda.CreateCompactionJobsLambda::handleRequest")
.environment(environmentVariables)
.reservedConcurrentExecutions(instanceProperties.getInt(COMPACTION_JOB_CREATION_LAMBDA_CONCURRENCY_RESERVED))
- .logGroup(createLambdaLogGroup(this, "CompactionJobsCreationHandlerLogGroup", functionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName)));
// Send messages from the trigger function to the handler function
Queue jobCreationQueue = sqsQueueForCompactionJobCreation(coreStacks, topic, errorMetrics);
@@ -399,9 +398,8 @@ private void ecsClusterForCompactionTasks(
FargateTaskDefinition fargateTaskDefinition = compactionFargateTaskDefinition();
String fargateTaskDefinitionFamily = fargateTaskDefinition.getFamily();
instanceProperties.set(COMPACTION_TASK_FARGATE_DEFINITION_FAMILY, fargateTaskDefinitionFamily);
- LogDriver logDriver = Utils.createECSContainerLogDriver(this, instanceProperties, "FargateCompactionTasks");
- ContainerDefinitionOptions fargateContainerDefinitionOptions = createFargateContainerDefinition(containerImage,
- environmentVariables, instanceProperties, logDriver);
+ ContainerDefinitionOptions fargateContainerDefinitionOptions = createFargateContainerDefinition(
+ coreStacks, containerImage, environmentVariables, instanceProperties);
fargateTaskDefinition.addContainer(ContainerConstants.COMPACTION_CONTAINER_NAME,
fargateContainerDefinitionOptions);
grantPermissions.accept(fargateTaskDefinition);
@@ -409,9 +407,8 @@ private void ecsClusterForCompactionTasks(
Ec2TaskDefinition ec2TaskDefinition = compactionEC2TaskDefinition();
String ec2TaskDefinitionFamily = ec2TaskDefinition.getFamily();
instanceProperties.set(COMPACTION_TASK_EC2_DEFINITION_FAMILY, ec2TaskDefinitionFamily);
- LogDriver logDriver = Utils.createECSContainerLogDriver(this, instanceProperties, "EC2CompactionTasks");
- ContainerDefinitionOptions ec2ContainerDefinitionOptions = createEC2ContainerDefinition(containerImage,
- environmentVariables, instanceProperties, logDriver);
+ ContainerDefinitionOptions ec2ContainerDefinitionOptions = createEC2ContainerDefinition(
+ coreStacks, containerImage, environmentVariables, instanceProperties);
ec2TaskDefinition.addContainer(ContainerConstants.COMPACTION_CONTAINER_NAME, ec2ContainerDefinitionOptions);
if (instanceProperties.getBoolean(COMPACTION_GPU_ENABLED)) {
@@ -446,11 +443,13 @@ private void addEC2CapacityProvider(
.build());
IFunction customTermination = lambdaForCustomTerminationPolicy(coreStacks, taskCreatorJar);
- customTermination.addPermission("AutoscalingCall", Permission.builder()
+
+ IDependable autoScalingPermission = CfnPermission.Builder.create(this, "AutoscalingCall")
.action("lambda:InvokeFunction")
- .principal(Role.fromRoleArn(this, "compaction_role_arn", "arn:aws:iam::" + instanceProperties.get(ACCOUNT)
- + ":role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling"))
- .build());
+ .principal("arn:aws:iam::" + instanceProperties.get(ACCOUNT)
+ + ":role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling")
+ .functionName(customTermination.getFunctionArn())
+ .build();
SecurityGroup scalingSecurityGroup = SecurityGroup.Builder.create(this, "CompactionScalingDefaultSG")
.vpc(vpc)
@@ -489,6 +488,7 @@ private void addEC2CapacityProvider(
.terminationPolicies(List.of(TerminationPolicy.CUSTOM_LAMBDA_FUNCTION))
.terminationPolicyCustomLambdaFunctionArn(customTermination.getFunctionArn())
.build();
+ ec2scalingGroup.getNode().addDependency(autoScalingPermission);
AsgCapacityProvider ec2Provider = AsgCapacityProvider.Builder
.create(this, "CompactionCapacityProvider")
@@ -564,7 +564,7 @@ private Ec2TaskDefinition compactionEC2TaskDefinition() {
}
private ContainerDefinitionOptions createFargateContainerDefinition(
- ContainerImage image, Map environment, InstanceProperties instanceProperties, LogDriver logDriver) {
+ CoreStacks coreStacks, ContainerImage image, Map environment, InstanceProperties instanceProperties) {
String architecture = instanceProperties.get(COMPACTION_TASK_CPU_ARCHITECTURE).toUpperCase(Locale.ROOT);
CompactionTaskRequirements requirements = CompactionTaskRequirements.getArchRequirements(architecture, instanceProperties);
return ContainerDefinitionOptions.builder()
@@ -572,11 +572,12 @@ private ContainerDefinitionOptions createFargateContainerDefinition(
.environment(environment)
.cpu(requirements.getCpu())
.memoryLimitMiB(requirements.getMemoryLimitMiB())
- .logging(Utils.createECSContainerLogDriver(this, instanceProperties, "FargateCompactionTasks"))
+ .logging(Utils.createECSContainerLogDriver(coreStacks, "FargateCompactionTasks"))
.build();
}
- private ContainerDefinitionOptions createEC2ContainerDefinition(ContainerImage image, Map environment, InstanceProperties instanceProperties, LogDriver logDriver) {
+ private ContainerDefinitionOptions createEC2ContainerDefinition(
+ CoreStacks coreStacks, ContainerImage image, Map environment, InstanceProperties instanceProperties) {
String architecture = instanceProperties.get(COMPACTION_TASK_CPU_ARCHITECTURE).toUpperCase(Locale.ROOT);
CompactionTaskRequirements requirements = CompactionTaskRequirements.getArchRequirements(architecture, instanceProperties);
return ContainerDefinitionOptions.builder()
@@ -587,7 +588,7 @@ private ContainerDefinitionOptions createEC2ContainerDefinition(ContainerImage i
// container allocation failing when we need almost entire resources
// of machine
.memoryLimitMiB((int) (requirements.getMemoryLimitMiB() * 0.95))
- .logging(Utils.createECSContainerLogDriver(this, instanceProperties, "EC2CompactionTasks"))
+ .logging(Utils.createECSContainerLogDriver(coreStacks, "EC2CompactionTasks"))
.build();
}
@@ -623,7 +624,7 @@ private IFunction lambdaForCustomTerminationPolicy(CoreStacks coreStacks, Lambda
.description("Custom termination policy for ECS auto scaling group. Only terminate empty instances.")
.environment(environmentVariables)
.handler("sleeper.compaction.task.creation.SafeTerminationLambda::handleRequest")
- .logGroup(createLambdaLogGroup(this, "CompactionTerminatorLogGroup", functionName, instanceProperties))
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName))
.memorySize(512)
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
.timeout(Duration.seconds(10)));
@@ -656,7 +657,7 @@ private void lambdaToCreateCompactionTasks(
.handler("sleeper.compaction.task.creation.RunCompactionTasksLambda::eventHandler")
.environment(Utils.createDefaultEnvironment(instanceProperties))
.reservedConcurrentExecutions(1)
- .logGroup(createLambdaLogGroup(this, "CompactionTasksCreatorLogGroup", functionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName)));
// Grant this function permission to read from the S3 bucket
coreStacks.grantReadInstanceConfig(handler);
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/ConfigBucketStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/ConfigBucketStack.java
index e1a5a4a558..5633a9f9b5 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/ConfigBucketStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/ConfigBucketStack.java
@@ -40,12 +40,13 @@ public class ConfigBucketStack extends NestedStack {
private final IBucket configBucket;
public ConfigBucketStack(
- Construct scope, String id, InstanceProperties instanceProperties, ManagedPoliciesStack policiesStack, BuiltJars jars) {
+ Construct scope, String id, InstanceProperties instanceProperties,
+ LoggingStack loggingStack, ManagedPoliciesStack policiesStack, BuiltJars jars) {
super(scope, id);
-
+ String bucketName = String.join("-", "sleeper",
+ Utils.cleanInstanceId(instanceProperties), "config");
configBucket = Bucket.Builder.create(this, "ConfigBucket")
- .bucketName(String.join("-", "sleeper",
- Utils.cleanInstanceId(instanceProperties), "config"))
+ .bucketName(bucketName)
.versioned(false)
.encryption(BucketEncryption.S3_MANAGED)
.blockPublicAccess(BlockPublicAccess.BLOCK_ALL)
@@ -54,7 +55,7 @@ public ConfigBucketStack(
instanceProperties.set(CONFIG_BUCKET, configBucket.getBucketName());
- AutoDeleteS3Objects.autoDeleteForBucket(this, jars, instanceProperties, configBucket);
+ AutoDeleteS3Objects.autoDeleteForBucket(this, instanceProperties, loggingStack, jars, configBucket, bucketName);
configBucket.grantRead(policiesStack.getDirectIngestPolicyForGrants());
configBucket.grantRead(policiesStack.getIngestByQueuePolicyForGrants());
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/CoreStacks.java b/java/cdk/src/main/java/sleeper/cdk/stack/CoreStacks.java
index 4a0b552472..54d393731f 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/CoreStacks.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/CoreStacks.java
@@ -21,6 +21,7 @@
import software.amazon.awscdk.services.iam.IRole;
import software.amazon.awscdk.services.iam.ManagedPolicy;
import software.amazon.awscdk.services.lambda.IFunction;
+import software.amazon.awscdk.services.logs.ILogGroup;
import software.amazon.awscdk.services.sqs.IQueue;
import javax.annotation.Nullable;
@@ -29,6 +30,7 @@
public class CoreStacks {
+ private final LoggingStack loggingStack;
private final ConfigBucketStack configBucketStack;
private final TableIndexStack tableIndexStack;
private final ManagedPoliciesStack policiesStack;
@@ -38,11 +40,12 @@ public class CoreStacks {
private final IngestStatusStoreResources ingestStatusStore;
private final CompactionStatusStoreResources compactionStatusStore;
- public CoreStacks(ConfigBucketStack configBucketStack, TableIndexStack tableIndexStack,
+ public CoreStacks(LoggingStack loggingStack, ConfigBucketStack configBucketStack, TableIndexStack tableIndexStack,
ManagedPoliciesStack policiesStack, StateStoreStacks stateStoreStacks, TableDataStack dataStack,
StateStoreCommitterStack stateStoreCommitterStack,
IngestStatusStoreResources ingestStatusStore,
CompactionStatusStoreResources compactionStatusStore) {
+ this.loggingStack = loggingStack;
this.configBucketStack = configBucketStack;
this.tableIndexStack = tableIndexStack;
this.policiesStack = policiesStack;
@@ -53,6 +56,22 @@ public CoreStacks(ConfigBucketStack configBucketStack, TableIndexStack tableInde
this.compactionStatusStore = compactionStatusStore;
}
+ public ILogGroup getLogGroupByFunctionName(String functionName) {
+ return loggingStack.getLogGroupByFunctionName(functionName);
+ }
+
+ public ILogGroup getProviderLogGroupByFunctionName(String functionName) {
+ return loggingStack.getProviderLogGroupByFunctionName(functionName);
+ }
+
+ public ILogGroup getLogGroupByECSLogDriverId(String id) {
+ return loggingStack.getLogGroupByECSLogDriverId(id);
+ }
+
+ public ILogGroup getLogGroupByStateMachineId(String id) {
+ return loggingStack.getLogGroupByStateMachineId(id);
+ }
+
public void grantReadInstanceConfig(IGrantable grantee) {
configBucketStack.grantRead(grantee);
}
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/DynamoDBStateStoreStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/DynamoDBStateStoreStack.java
index 896f5caee9..01fc1b0ae0 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/DynamoDBStateStoreStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/DynamoDBStateStoreStack.java
@@ -29,7 +29,7 @@
import sleeper.statestore.dynamodb.DynamoDBStateStore;
import static sleeper.cdk.util.Utils.removalPolicy;
-import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.ACTIVE_FILES_TABLELENAME;
+import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.ACTIVE_FILES_TABLENAME;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.FILE_REFERENCE_COUNT_TABLENAME;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.PARTITION_TABLENAME;
import static sleeper.core.properties.instance.CommonProperty.DYNAMO_STATE_STORE_POINT_IN_TIME_RECOVERY;
@@ -65,7 +65,7 @@ public DynamoDBStateStoreStack(
.sortKey(sortKeyActiveFileReferenceTable)
.pointInTimeRecovery(instanceProperties.getBoolean(DYNAMO_STATE_STORE_POINT_IN_TIME_RECOVERY))
.build();
- instanceProperties.set(ACTIVE_FILES_TABLELENAME, activeFilesTable.getTableName());
+ instanceProperties.set(ACTIVE_FILES_TABLENAME, activeFilesTable.getTableName());
// DynamoDB table for file reference counts
Attribute partitionKeyFileReferenceCountTable = Attribute.builder()
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/GarbageCollectorStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/GarbageCollectorStack.java
index 25bdd5cdbf..96e474d9e4 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/GarbageCollectorStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/GarbageCollectorStack.java
@@ -41,7 +41,6 @@
import java.util.List;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.cdk.util.Utils.shouldDeployPaused;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.GARBAGE_COLLECTOR_CLOUDWATCH_RULE;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.GARBAGE_COLLECTOR_DLQ_ARN;
@@ -92,7 +91,7 @@ public GarbageCollectorStack(
.reservedConcurrentExecutions(1)
.memorySize(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_TIMEOUT_IN_SECONDS)))
- .logGroup(createLambdaLogGroup(this, "GarbageCollectorTriggerLogGroup", triggerFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(triggerFunctionName)));
IFunction handlerFunction = gcJar.buildFunction(this, "GarbageCollectorLambda", builder -> builder
.functionName(functionName)
.description("Scan the state store looking for files that need deleting and delete them")
@@ -102,7 +101,7 @@ public GarbageCollectorStack(
.handler("sleeper.garbagecollector.GarbageCollectorLambda::handleRequest")
.environment(Utils.createDefaultEnvironment(instanceProperties))
.reservedConcurrentExecutions(instanceProperties.getInt(GARBAGE_COLLECTOR_LAMBDA_CONCURRENCY_RESERVED))
- .logGroup(createLambdaLogGroup(this, "GarbageCollectorLambdaLogGroup", functionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName)));
instanceProperties.set(GARBAGE_COLLECTOR_LAMBDA_FUNCTION, triggerFunction.getFunctionName());
// Grant this function permission delete files from the data bucket and
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/IngestBatcherStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/IngestBatcherStack.java
index 03915e2715..96d6565ba2 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/IngestBatcherStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/IngestBatcherStack.java
@@ -50,7 +50,6 @@
import java.util.Map;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.cdk.util.Utils.removalPolicy;
import static sleeper.cdk.util.Utils.shouldDeployPaused;
import static sleeper.core.properties.instance.BatcherProperty.INGEST_BATCHER_JOB_CREATION_LAMBDA_PERIOD_IN_MINUTES;
@@ -145,7 +144,7 @@ public IngestBatcherStack(
.timeout(Duration.seconds(instanceProperties.getInt(INGEST_BATCHER_SUBMITTER_TIMEOUT_IN_SECONDS)))
.handler("sleeper.ingest.batcher.submitter.IngestBatcherSubmitterLambda::handleRequest")
.environment(environmentVariables)
- .logGroup(createLambdaLogGroup(this, "SubmitToIngestBatcherLogGroup", submitterName, instanceProperties))
+ .logGroup(coreStacks.getLogGroupByFunctionName(submitterName))
.events(List.of(new SqsEventSource(submitQueue))));
instanceProperties.set(INGEST_BATCHER_SUBMIT_REQUEST_FUNCTION, submitterLambda.getFunctionName());
@@ -163,7 +162,7 @@ public IngestBatcherStack(
.handler("sleeper.ingest.batcher.job.creator.IngestBatcherJobCreatorLambda::eventHandler")
.environment(environmentVariables)
.reservedConcurrentExecutions(1)
- .logGroup(createLambdaLogGroup(this, "IngestBatcherJobCreationLogGroup", jobCreatorName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(jobCreatorName)));
instanceProperties.set(INGEST_BATCHER_JOB_CREATION_FUNCTION, jobCreatorLambda.getFunctionName());
ingestRequestsTable.grantReadWriteData(jobCreatorLambda);
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/IngestStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/IngestStack.java
index 3c9004f28d..ad15796d94 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/IngestStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/IngestStack.java
@@ -58,7 +58,6 @@
import java.util.Objects;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.cdk.util.Utils.shouldDeployPaused;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.INGEST_CLOUDWATCH_RULE;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.INGEST_CLUSTER;
@@ -213,7 +212,7 @@ private Cluster ecsClusterForIngestTasks(
ContainerDefinitionOptions containerDefinitionOptions = ContainerDefinitionOptions.builder()
.image(containerImage)
- .logging(Utils.createECSContainerLogDriver(this, instanceProperties, "IngestTasks"))
+ .logging(Utils.createECSContainerLogDriver(coreStacks, "IngestTasks"))
.environment(Utils.createDefaultEnvironment(instanceProperties))
.build();
taskDefinition.addContainer("IngestContainer", containerDefinitionOptions);
@@ -257,7 +256,7 @@ private void lambdaToCreateIngestTasks(CoreStacks coreStacks, Queue ingestJobQue
.handler("sleeper.ingest.starter.RunIngestTasksLambda::eventHandler")
.environment(Utils.createDefaultEnvironment(instanceProperties))
.reservedConcurrentExecutions(1)
- .logGroup(createLambdaLogGroup(this, "IngestTasksCreatorLogGroup", functionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName)));
// Grant this function permission to read from the S3 bucket
coreStacks.grantReadInstanceConfig(handler);
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/KeepLambdaWarmStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/KeepLambdaWarmStack.java
index eafd31076f..e5ed38b44f 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/KeepLambdaWarmStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/KeepLambdaWarmStack.java
@@ -37,7 +37,6 @@
import java.util.Collections;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.cdk.util.Utils.shouldDeployPaused;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.QUERY_WARM_LAMBDA_CLOUDWATCH_RULE;
import static sleeper.core.properties.instance.CommonProperty.ID;
@@ -76,7 +75,7 @@ public KeepLambdaWarmStack(Construct scope,
.handler("sleeper.query.lambda.WarmQueryExecutorLambda::handleRequest")
.environment(Utils.createDefaultEnvironment(instanceProperties))
.reservedConcurrentExecutions(1)
- .logGroup(createLambdaLogGroup(this, id + "LogGroup", functionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName)));
// Cloudwatch rule to trigger this lambda
Rule rule = Rule.Builder
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/LoggingStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/LoggingStack.java
new file mode 100644
index 0000000000..d8209f9759
--- /dev/null
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/LoggingStack.java
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.cdk.stack;
+
+import software.amazon.awscdk.NestedStack;
+import software.amazon.awscdk.services.logs.ILogGroup;
+import software.amazon.awscdk.services.logs.LogGroup;
+import software.constructs.Construct;
+
+import sleeper.cdk.util.Utils;
+import sleeper.core.properties.instance.InstanceProperties;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import static sleeper.core.properties.instance.CommonProperty.LOG_RETENTION_IN_DAYS;
+
+public class LoggingStack extends NestedStack {
+
+ private final Map<String, ILogGroup> logGroupByName = new HashMap<>();
+ private final InstanceProperties instanceProperties;
+
+ public LoggingStack(Construct scope, String id, InstanceProperties instanceProperties) {
+ super(scope, id);
+ this.instanceProperties = instanceProperties;
+
+ // Accessed directly by getter on this class
+ createLogGroup("vpc-check");
+ createLogGroup("vpc-check-provider");
+ createLogGroup("config-autodelete");
+ createLogGroup("config-autodelete-provider");
+ createLogGroup("table-data-autodelete");
+ createLogGroup("table-data-autodelete-provider");
+ createLogGroup("statestore-committer");
+
+ // Accessed via CoreStacks getters
+ createLogGroup("properties-writer");
+ createLogGroup("properties-writer-provider");
+ createLogGroup("state-snapshot-creation-trigger");
+ createLogGroup("state-snapshot-creation");
+ createLogGroup("state-snapshot-deletion-trigger");
+ createLogGroup("state-snapshot-deletion");
+ createLogGroup("state-transaction-deletion-trigger");
+ createLogGroup("state-transaction-deletion");
+ createLogGroup("metrics-trigger");
+ createLogGroup("metrics-publisher");
+ createLogGroup("bulk-import-EMRServerless-start");
+ createLogGroup("bulk-import-NonPersistentEMR-start");
+ createLogGroup("bulk-import-PersistentEMR-start");
+ createLogGroup("bulk-import-eks-starter");
+ createStateMachineLogGroup("EksBulkImportStateMachine");
+ createLogGroup("bulk-import-autodelete");
+ createLogGroup("bulk-import-autodelete-provider");
+ createLogGroup("IngestTasks");
+ createLogGroup("ingest-create-tasks");
+ createLogGroup("ingest-batcher-submit-files");
+ createLogGroup("ingest-batcher-create-jobs");
+ createLogGroup("partition-splitting-trigger");
+ createLogGroup("partition-splitting-find-to-split");
+ createLogGroup("partition-splitting-handler");
+ createLogGroup("FargateCompactionTasks");
+ createLogGroup("EC2CompactionTasks");
+ createLogGroup("compaction-job-creation-trigger");
+ createLogGroup("compaction-job-creation-handler");
+ createLogGroup("compaction-tasks-creator");
+ createLogGroup("compaction-custom-termination");
+ createLogGroup("garbage-collector-trigger");
+ createLogGroup("garbage-collector");
+ createLogGroup("query-executor");
+ createLogGroup("query-leaf-partition");
+ createLogGroup("query-websocket-handler");
+ createLogGroup("query-results-autodelete");
+ createLogGroup("query-results-autodelete-provider");
+ createLogGroup("query-keep-warm");
+ createLogGroup("Simple-athena-handler");
+ createLogGroup("IteratorApplying-athena-handler");
+ createLogGroup("spill-bucket-autodelete");
+ createLogGroup("spill-bucket-autodelete-provider");
+ }
+
+ public ILogGroup getLogGroupByFunctionName(String functionName) {
+ return getLogGroupByNameWithPrefixes(functionName);
+ }
+
+ public ILogGroup getProviderLogGroupByFunctionName(String functionName) {
+ return getLogGroupByNameWithPrefixes(functionName + "-provider");
+ }
+
+ public ILogGroup getLogGroupByECSLogDriverId(String id) {
+ return getLogGroupByNameWithPrefixes(addNamePrefixes(id));
+ }
+
+ public ILogGroup getLogGroupByStateMachineId(String id) {
+ return getLogGroupByNameWithPrefixes(addStateMachineNamePrefixes(id));
+ }
+
+ private ILogGroup getLogGroupByNameWithPrefixes(String nameWithPrefixes) {
+ return Objects.requireNonNull(logGroupByName.get(nameWithPrefixes), "No log group found: " + nameWithPrefixes);
+ }
+
+ private void createLogGroup(String shortName) {
+ createLogGroup(shortName, addNamePrefixes(shortName));
+ }
+
+ private void createStateMachineLogGroup(String shortName) {
+ createLogGroup(shortName, addStateMachineNamePrefixes(shortName));
+ }
+
+ private void createLogGroup(String shortName, String nameWithPrefixes) {
+ logGroupByName.put(nameWithPrefixes, LogGroup.Builder.create(this, shortName)
+ .logGroupName(nameWithPrefixes)
+ .retention(Utils.getRetentionDays(instanceProperties.getInt(LOG_RETENTION_IN_DAYS)))
+ .build());
+ }
+
+ private String addStateMachineNamePrefixes(String shortName) {
+ return "/aws/vendedlogs/states/" + addNamePrefixes(shortName);
+ }
+
+ private String addNamePrefixes(String shortName) {
+ return String.join("-", "sleeper", Utils.cleanInstanceId(instanceProperties), shortName);
+ }
+}
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/PartitionSplittingStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/PartitionSplittingStack.java
index eaa990a76a..03628dc8e5 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/PartitionSplittingStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/PartitionSplittingStack.java
@@ -44,7 +44,6 @@
import java.util.Map;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.cdk.util.Utils.shouldDeployPaused;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.FIND_PARTITIONS_TO_SPLIT_DLQ_ARN;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.FIND_PARTITIONS_TO_SPLIT_DLQ_URL;
@@ -195,7 +194,7 @@ private void createTriggerFunction(InstanceProperties instanceProperties, Lambda
.handler("sleeper.splitter.lambda.FindPartitionsToSplitTriggerLambda::handleRequest")
.environment(environmentVariables)
.reservedConcurrentExecutions(1)
- .logGroup(createLambdaLogGroup(this, "FindPartitionsToSplitTriggerLogGroup", triggerFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(triggerFunctionName)));
// Cloudwatch rule to trigger this lambda
Rule rule = Rule.Builder
.create(this, "FindPartitionsToSplitPeriodicTrigger")
@@ -225,7 +224,7 @@ private void createFindPartitionsToSplitFunction(InstanceProperties instanceProp
.handler("sleeper.splitter.lambda.FindPartitionsToSplitLambda::handleRequest")
.environment(environmentVariables)
.reservedConcurrentExecutions(instanceProperties.getInt(FIND_PARTITIONS_TO_SPLIT_LAMBDA_CONCURRENCY_RESERVED))
- .logGroup(createLambdaLogGroup(this, "FindPartitionsToSplitLogGroup", functionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName)));
coreStacks.grantReadTablesMetadata(findPartitionsToSplitLambda);
partitionSplittingJobQueue.grantSendMessages(findPartitionsToSplitLambda);
@@ -251,7 +250,7 @@ private void createSplitPartitionFunction(InstanceProperties instanceProperties,
.reservedConcurrentExecutions(concurrency)
.handler("sleeper.splitter.lambda.SplitPartitionLambda::handleRequest")
.environment(environmentVariables)
- .logGroup(createLambdaLogGroup(this, "SplitPartitionLogGroup", splitFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(splitFunctionName)));
coreStacks.grantSplitPartitions(splitPartitionLambda);
splitPartitionLambda.addEventSource(SqsEventSource.Builder.create(partitionSplittingJobQueue)
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/PropertiesStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/PropertiesStack.java
index b03190b93d..ec4855d26a 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/PropertiesStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/PropertiesStack.java
@@ -32,8 +32,6 @@
import java.util.HashMap;
-import static sleeper.cdk.util.Utils.createCustomResourceProviderLogGroup;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.core.properties.instance.CommonProperty.JARS_BUCKET;
/**
@@ -61,14 +59,14 @@ public PropertiesStack(
.memorySize(2048)
.environment(Utils.createDefaultEnvironment(instanceProperties))
.description("Lambda for writing instance properties to S3 upon initialisation and teardown")
- .logGroup(createLambdaLogGroup(this, "PropertiesWriterLambdaLogGroup", functionName, instanceProperties))
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName))
.runtime(Runtime.JAVA_11));
coreStacks.grantWriteInstanceConfig(propertiesWriterLambda);
Provider propertiesWriterProvider = Provider.Builder.create(this, "PropertiesWriterProvider")
.onEventHandler(propertiesWriterLambda)
- .logGroup(createCustomResourceProviderLogGroup(this, "PropertiesWriterProviderLogGroup", functionName, instanceProperties))
+ .logGroup(coreStacks.getProviderLogGroupByFunctionName(functionName))
.build();
CustomResource.Builder.create(this, "InstanceProperties")
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/QueryStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/QueryStack.java
index 943f0a9d40..38f449e1a2 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/QueryStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/QueryStack.java
@@ -63,7 +63,6 @@
import java.util.Objects;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.cdk.util.Utils.removalPolicy;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.QUERY_TRACKER_TABLE_NAME;
import static sleeper.core.properties.instance.CommonProperty.ID;
@@ -131,14 +130,16 @@ public QueryStack(Construct scope,
* Creates a Lambda Function.
*
* @param id of the function to be created
- * @param queryJar the jar containing the code for the Lambda
+ * @param coreStacks the core stacks
* @param instanceProperties containing configuration details
+ * @param queryJar the jar containing the code for the Lambda
* @param functionName the name of the function
* @param handler the path for the method be be used as the entry point for the Lambda
* @param description a description for the function
* @return an IFunction
*/
- private IFunction createFunction(String id, LambdaCode queryJar, InstanceProperties instanceProperties,
+ private IFunction createFunction(
+ String id, CoreStacks coreStacks, InstanceProperties instanceProperties, LambdaCode queryJar,
String functionName, String handler, String description) {
return queryJar.buildFunction(this, id, builder -> builder
.functionName(functionName)
@@ -148,7 +149,7 @@ private IFunction createFunction(String id, LambdaCode queryJar, InstancePropert
.timeout(Duration.seconds(instanceProperties.getInt(QUERY_PROCESSOR_LAMBDA_TIMEOUT_IN_SECONDS)))
.handler(handler)
.environment(Utils.createDefaultEnvironment(instanceProperties))
- .logGroup(createLambdaLogGroup(this, id + "LogGroup", functionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName)));
}
/***
@@ -165,7 +166,7 @@ private IFunction setupQueryExecutorLambda(CoreStacks coreStacks, QueryQueueStac
IBucket jarsBucket, ITable queryTrackingTable) {
String functionName = String.join("-", "sleeper",
Utils.cleanInstanceId(instanceProperties), "query-executor");
- IFunction lambda = createFunction("QueryExecutorLambda", queryJar, instanceProperties, functionName,
+ IFunction lambda = createFunction("QueryExecutorLambda", coreStacks, instanceProperties, queryJar, functionName,
"sleeper.query.lambda.SqsQueryProcessorLambda::handleRequest",
"When a query arrives on the query SQS queue, this lambda is invoked to look for leaf partition queries");
@@ -212,10 +213,10 @@ private IFunction setupLeafPartitionQueryQueueAndLambda(
IBucket jarsBucket, ITable queryTrackingTable, List errorMetrics) {
Queue leafPartitionQueryQueue = setupLeafPartitionQueryQueue(instanceProperties, topic, errorMetrics);
Queue queryResultsQueue = setupResultsQueue(instanceProperties);
- IBucket queryResultsBucket = setupResultsBucket(instanceProperties, customResourcesJar);
+ IBucket queryResultsBucket = setupResultsBucket(instanceProperties, coreStacks, customResourcesJar);
String leafQueryFunctionName = String.join("-", "sleeper",
Utils.cleanInstanceId(instanceProperties), "query-leaf-partition");
- IFunction lambda = createFunction("QueryLeafPartitionExecutorLambda", queryJar, instanceProperties, leafQueryFunctionName,
+ IFunction lambda = createFunction("QueryLeafPartitionExecutorLambda", coreStacks, instanceProperties, queryJar, leafQueryFunctionName,
"sleeper.query.lambda.SqsLeafPartitionQueryLambda::handleRequest",
"When a query arrives on the query SQS queue, this lambda is invoked to execute the query");
@@ -346,12 +347,13 @@ private Queue setupResultsQueue(InstanceProperties instanceProperties) {
* @param customResourcesJar the jar for deploying custom CDK resources
* @return the bucket created
*/
- private IBucket setupResultsBucket(InstanceProperties instanceProperties, LambdaCode customResourcesJar) {
+ private IBucket setupResultsBucket(InstanceProperties instanceProperties, CoreStacks coreStacks, LambdaCode customResourcesJar) {
RemovalPolicy removalPolicy = removalPolicy(instanceProperties);
+ String bucketName = String.join("-", "sleeper",
+ Utils.cleanInstanceId(instanceProperties), "query-results");
Bucket resultsBucket = Bucket.Builder
.create(this, "QueryResultsBucket")
- .bucketName(String.join("-", "sleeper",
- Utils.cleanInstanceId(instanceProperties), "query-results"))
+ .bucketName(bucketName)
.versioned(false)
.blockPublicAccess(BlockPublicAccess.BLOCK_ALL)
.encryption(BucketEncryption.S3_MANAGED)
@@ -362,7 +364,7 @@ private IBucket setupResultsBucket(InstanceProperties instanceProperties, Lambda
instanceProperties.set(CdkDefinedInstanceProperty.QUERY_RESULTS_BUCKET, resultsBucket.getBucketName());
if (removalPolicy == RemovalPolicy.DESTROY) {
- AutoDeleteS3Objects.autoDeleteForBucket(this, customResourcesJar, instanceProperties, resultsBucket);
+ AutoDeleteS3Objects.autoDeleteForBucket(this, instanceProperties, coreStacks, customResourcesJar, resultsBucket, bucketName);
}
return resultsBucket;
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/StateStoreCommitterStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/StateStoreCommitterStack.java
index 7b5db6e584..d6b5285da4 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/StateStoreCommitterStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/StateStoreCommitterStack.java
@@ -23,7 +23,7 @@
import software.amazon.awscdk.services.iam.PolicyStatement;
import software.amazon.awscdk.services.lambda.IFunction;
import software.amazon.awscdk.services.lambda.eventsources.SqsEventSource;
-import software.amazon.awscdk.services.logs.LogGroup;
+import software.amazon.awscdk.services.logs.ILogGroup;
import software.amazon.awscdk.services.s3.Bucket;
import software.amazon.awscdk.services.s3.IBucket;
import software.amazon.awscdk.services.sns.Topic;
@@ -43,7 +43,6 @@
import java.util.Map;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.STATESTORE_COMMITTER_DLQ_ARN;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.STATESTORE_COMMITTER_DLQ_URL;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.STATESTORE_COMMITTER_EVENT_SOURCE_ID;
@@ -61,11 +60,13 @@ public class StateStoreCommitterStack extends NestedStack {
private final InstanceProperties instanceProperties;
private final Queue commitQueue;
+ @SuppressWarnings("checkstyle:ParameterNumberCheck")
public StateStoreCommitterStack(
Construct scope,
String id,
InstanceProperties instanceProperties,
BuiltJars jars,
+ LoggingStack loggingStack,
ConfigBucketStack configBucketStack,
TableIndexStack tableIndexStack,
StateStoreStacks stateStoreStacks,
@@ -80,7 +81,8 @@ public StateStoreCommitterStack(
LambdaCode committerJar = jars.lambdaCode(BuiltJar.STATESTORE, jarsBucket);
commitQueue = sqsQueueForStateStoreCommitter(policiesStack, topic, errorMetrics);
- lambdaToCommitStateStoreUpdates(policiesStack, committerJar,
+ lambdaToCommitStateStoreUpdates(
+ loggingStack, policiesStack, committerJar,
configBucketStack, tableIndexStack, stateStoreStacks,
compactionStatusStore, ingestStatusStore);
}
@@ -119,7 +121,7 @@ private Queue sqsQueueForStateStoreCommitter(ManagedPoliciesStack policiesStack,
}
private void lambdaToCommitStateStoreUpdates(
- ManagedPoliciesStack policiesStack, LambdaCode committerJar,
+ LoggingStack loggingStack, ManagedPoliciesStack policiesStack, LambdaCode committerJar,
ConfigBucketStack configBucketStack, TableIndexStack tableIndexStack, StateStoreStacks stateStoreStacks,
CompactionStatusStoreResources compactionStatusStore,
IngestStatusStoreResources ingestStatusStore) {
@@ -127,7 +129,7 @@ private void lambdaToCommitStateStoreUpdates(
String functionName = String.join("-", "sleeper",
Utils.cleanInstanceId(instanceProperties), "statestore-committer");
- LogGroup logGroup = createLambdaLogGroup(this, "StateStoreCommitterLogGroup", functionName, instanceProperties);
+ ILogGroup logGroup = loggingStack.getLogGroupByFunctionName(functionName);
instanceProperties.set(STATESTORE_COMMITTER_LOG_GROUP, logGroup.getLogGroupName());
IFunction handlerFunction = committerJar.buildFunction(this, "StateStoreCommitter", builder -> builder
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/TableDataStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/TableDataStack.java
index 4d34c04462..c6219f6a1f 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/TableDataStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/TableDataStack.java
@@ -38,15 +38,17 @@ public class TableDataStack extends NestedStack {
private final IBucket dataBucket;
public TableDataStack(
- Construct scope, String id, InstanceProperties instanceProperties, ManagedPoliciesStack policiesStack, BuiltJars jars) {
+ Construct scope, String id, InstanceProperties instanceProperties,
+ LoggingStack loggingStack, ManagedPoliciesStack policiesStack, BuiltJars jars) {
super(scope, id);
RemovalPolicy removalPolicy = removalPolicy(instanceProperties);
+ String bucketName = String.join("-", "sleeper",
+ Utils.cleanInstanceId(instanceProperties), "table-data");
dataBucket = Bucket.Builder
.create(this, "TableDataBucket")
- .bucketName(String.join("-", "sleeper",
- Utils.cleanInstanceId(instanceProperties), "table-data"))
+ .bucketName(bucketName)
.versioned(false)
.blockPublicAccess(BlockPublicAccess.BLOCK_ALL)
.encryption(BucketEncryption.S3_MANAGED)
@@ -54,7 +56,7 @@ public TableDataStack(
.build();
if (removalPolicy == RemovalPolicy.DESTROY) {
- AutoDeleteS3Objects.autoDeleteForBucket(this, jars, instanceProperties, dataBucket);
+ AutoDeleteS3Objects.autoDeleteForBucket(this, instanceProperties, loggingStack, jars, dataBucket, bucketName);
}
instanceProperties.set(DATA_BUCKET, dataBucket.getBucketName());
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/TableMetricsStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/TableMetricsStack.java
index 84e78f1053..6db39354a8 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/TableMetricsStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/TableMetricsStack.java
@@ -45,7 +45,6 @@
import java.util.List;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.cdk.util.Utils.shouldDeployPaused;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.TABLE_METRICS_DLQ_ARN;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.TABLE_METRICS_DLQ_URL;
@@ -80,7 +79,7 @@ public TableMetricsStack(
.reservedConcurrentExecutions(1)
.memorySize(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_TIMEOUT_IN_SECONDS)))
- .logGroup(createLambdaLogGroup(this, "MetricsTriggerLogGroup", triggerFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(triggerFunctionName)));
IFunction tableMetricsPublisher = metricsJar.buildFunction(this, "MetricsPublisher", builder -> builder
.functionName(publishFunctionName)
.description("Generates metrics for a Sleeper table based on info in its state store, and publishes them to CloudWatch")
@@ -90,7 +89,7 @@ public TableMetricsStack(
.reservedConcurrentExecutions(instanceProperties.getInt(METRICS_LAMBDA_CONCURRENCY_RESERVED))
.memorySize(1024)
.timeout(Duration.minutes(1))
- .logGroup(createLambdaLogGroup(this, "MetricsPublisherLogGroup", publishFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(publishFunctionName)));
instanceProperties.set(TABLE_METRICS_LAMBDA_FUNCTION, tableMetricsTrigger.getFunctionName());
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/TransactionLogSnapshotStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/TransactionLogSnapshotStack.java
index add6925423..3fa0e2b305 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/TransactionLogSnapshotStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/TransactionLogSnapshotStack.java
@@ -41,7 +41,6 @@
import java.util.List;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.cdk.util.Utils.shouldDeployPaused;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.TRANSACTION_LOG_SNAPSHOT_CREATION_DLQ_ARN;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.TRANSACTION_LOG_SNAPSHOT_CREATION_DLQ_URL;
@@ -94,7 +93,7 @@ private void createSnapshotCreationLambda(InstanceProperties instanceProperties,
.reservedConcurrentExecutions(1)
.memorySize(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_TIMEOUT_IN_SECONDS)))
- .logGroup(createLambdaLogGroup(this, "TransactionLogSnapshotCreationTriggerLogGroup", triggerFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(triggerFunctionName)));
IFunction snapshotCreationLambda = statestoreJar.buildFunction(this, "TransactionLogSnapshotCreation", builder -> builder
.functionName(creationFunctionName)
.description("Creates transaction log snapshots for tables")
@@ -104,7 +103,7 @@ private void createSnapshotCreationLambda(InstanceProperties instanceProperties,
.reservedConcurrentExecutions(instanceProperties.getInt(TRANSACTION_LOG_SNAPSHOT_CREATION_LAMBDA_CONCURRENCY_RESERVED))
.memorySize(1024)
.timeout(Duration.minutes(1))
- .logGroup(createLambdaLogGroup(this, "TransactionLogSnapshotCreationLogGroup", creationFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(creationFunctionName)));
Rule rule = Rule.Builder.create(this, "TransactionLogSnapshotCreationSchedule")
.ruleName(SleeperScheduleRule.TRANSACTION_LOG_SNAPSHOT_CREATION.buildRuleName(instanceProperties))
@@ -164,7 +163,7 @@ private void createSnapshotDeletionLambda(InstanceProperties instanceProperties,
.reservedConcurrentExecutions(1)
.memorySize(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_TIMEOUT_IN_SECONDS)))
- .logGroup(createLambdaLogGroup(this, "TransactionLogSnapshotDeletionTriggerLogGroup", triggerFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(triggerFunctionName)));
IFunction snapshotDeletionLambda = statestoreJar.buildFunction(this, "TransactionLogSnapshotDeletion", builder -> builder
.functionName(deletionFunctionName)
.description("Deletes old transaction log snapshots for tables")
@@ -174,7 +173,7 @@ private void createSnapshotDeletionLambda(InstanceProperties instanceProperties,
.reservedConcurrentExecutions(instanceProperties.getInt(TRANSACTION_LOG_SNAPSHOT_DELETION_LAMBDA_CONCURRENCY_RESERVED))
.memorySize(1024)
.timeout(Duration.minutes(1))
- .logGroup(createLambdaLogGroup(this, "TransactionLogSnapshotDeletionLogGroup", deletionFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(deletionFunctionName)));
Rule rule = Rule.Builder.create(this, "TransactionLogSnapshotDeletionSchedule")
.ruleName(SleeperScheduleRule.TRANSACTION_LOG_SNAPSHOT_DELETION.buildRuleName(instanceProperties))
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/TransactionLogTransactionStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/TransactionLogTransactionStack.java
index cd696e33ee..8be9944d68 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/TransactionLogTransactionStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/TransactionLogTransactionStack.java
@@ -41,7 +41,6 @@
import java.util.List;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.cdk.util.Utils.shouldDeployPaused;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.TRANSACTION_LOG_TRANSACTION_DELETION_DLQ_ARN;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.TRANSACTION_LOG_TRANSACTION_DELETION_DLQ_URL;
@@ -84,7 +83,7 @@ private void createTransactionDeletionLambda(InstanceProperties instanceProperti
.reservedConcurrentExecutions(1)
.memorySize(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_MEMORY_IN_MB))
.timeout(Duration.seconds(instanceProperties.getInt(TABLE_BATCHING_LAMBDAS_TIMEOUT_IN_SECONDS)))
- .logGroup(createLambdaLogGroup(this, "TransactionLogTransactionDeletionTriggerLogGroup", triggerFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(triggerFunctionName)));
IFunction transactionDeletionLambda = statestoreJar.buildFunction(this, "TransactionLogTransactionDeletion", builder -> builder
.functionName(deletionFunctionName)
.description("Deletes old transaction log transactions for tables")
@@ -94,7 +93,7 @@ private void createTransactionDeletionLambda(InstanceProperties instanceProperti
.reservedConcurrentExecutions(instanceProperties.getInt(TRANSACTION_LOG_TRANSACTION_DELETION_LAMBDA_CONCURRENCY_RESERVED))
.memorySize(1024)
.timeout(Duration.minutes(1))
- .logGroup(createLambdaLogGroup(this, "TransactionLogTransactionDeletionLogGroup", deletionFunctionName, instanceProperties)));
+ .logGroup(coreStacks.getLogGroupByFunctionName(deletionFunctionName)));
Rule rule = Rule.Builder.create(this, "TransactionLogTransactionDeletionSchedule")
.ruleName(SleeperScheduleRule.TRANSACTION_LOG_TRANSACTION_DELETION.buildRuleName(instanceProperties))
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/VpcStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/VpcStack.java
index 19fd2e4fa9..f7dc0f66fe 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/VpcStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/VpcStack.java
@@ -41,8 +41,6 @@
import java.util.HashMap;
import java.util.Map;
-import static sleeper.cdk.util.Utils.createCustomResourceProviderLogGroup;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.core.properties.instance.CommonProperty.REGION;
import static sleeper.core.properties.instance.CommonProperty.VPC_ENDPOINT_CHECK;
import static sleeper.core.properties.instance.CommonProperty.VPC_ID;
@@ -50,7 +48,7 @@
public class VpcStack extends NestedStack {
private static final Logger LOGGER = LoggerFactory.getLogger(VpcStack.class);
- public VpcStack(Construct scope, String id, InstanceProperties instanceProperties, BuiltJars jars) {
+ public VpcStack(Construct scope, String id, InstanceProperties instanceProperties, BuiltJars jars, LoggingStack logging) {
super(scope, id);
if (!instanceProperties.getBoolean(VPC_ENDPOINT_CHECK)) {
@@ -71,7 +69,7 @@ public VpcStack(Construct scope, String id, InstanceProperties instancePropertie
.handler("sleeper.cdk.custom.VpcCheckLambda::handleEvent")
.memorySize(2048)
.description("Lambda for checking the VPC has an associated S3 endpoint")
- .logGroup(createLambdaLogGroup(this, "VpcCheckLambdaLogGroup", functionName, instanceProperties))
+ .logGroup(logging.getLogGroupByFunctionName(functionName))
.runtime(Runtime.JAVA_11));
vpcCheckLambda.addToRolePolicy(new PolicyStatement(new PolicyStatementProps.Builder()
@@ -84,7 +82,7 @@ public VpcStack(Construct scope, String id, InstanceProperties instancePropertie
Provider provider = new Provider(this, "VpcCustomResourceProvider",
ProviderProps.builder()
.onEventHandler(vpcCheckLambda)
- .logGroup(createCustomResourceProviderLogGroup(this, "VpcCustomResourceProviderLogGroup", functionName, instanceProperties))
+ .logGroup(logging.getProviderLogGroupByFunctionName(functionName))
.build());
// Custom resource to check whether VPC is valid
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/WebSocketQueryStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/WebSocketQueryStack.java
index 4d078d9576..85c2020c59 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/WebSocketQueryStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/WebSocketQueryStack.java
@@ -48,8 +48,6 @@
import java.util.Collections;
import java.util.Map;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
-
public final class WebSocketQueryStack extends NestedStack {
private CfnApi webSocketApi;
@@ -86,7 +84,7 @@ protected void setupWebSocketApi(InstanceProperties instanceProperties, LambdaCo
.handler("sleeper.query.lambda.WebSocketQueryProcessorLambda::handleRequest")
.environment(env)
.memorySize(256)
- .logGroup(createLambdaLogGroup(this, "WebSocketApiHandlerLogGroup", functionName, instanceProperties))
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName))
.timeout(Duration.seconds(29))
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11));
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/BulkImportBucketStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/BulkImportBucketStack.java
index a090314a4a..f3e4233d51 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/BulkImportBucketStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/BulkImportBucketStack.java
@@ -36,9 +36,10 @@ public class BulkImportBucketStack extends NestedStack {
public BulkImportBucketStack(Construct scope, String id, InstanceProperties instanceProperties, CoreStacks coreStacks, BuiltJars jars) {
super(scope, id);
+ String bucketName = String.join("-", "sleeper",
+ Utils.cleanInstanceId(instanceProperties), "bulk-import");
importBucket = Bucket.Builder.create(this, "BulkImportBucket")
- .bucketName(String.join("-", "sleeper",
- Utils.cleanInstanceId(instanceProperties), "bulk-import"))
+ .bucketName(bucketName)
.blockPublicAccess(BlockPublicAccess.BLOCK_ALL)
.versioned(false)
.removalPolicy(RemovalPolicy.DESTROY)
@@ -46,7 +47,7 @@ public BulkImportBucketStack(Construct scope, String id, InstanceProperties inst
.build();
importBucket.grantWrite(coreStacks.getIngestByQueuePolicyForGrants());
instanceProperties.set(BULK_IMPORT_BUCKET, importBucket.getBucketName());
- AutoDeleteS3Objects.autoDeleteForBucket(this, jars, instanceProperties, importBucket);
+ AutoDeleteS3Objects.autoDeleteForBucket(this, instanceProperties, coreStacks, jars, importBucket, bucketName);
}
public IBucket getImportBucket() {
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/CommonEmrBulkImportHelper.java b/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/CommonEmrBulkImportHelper.java
index 43ac491e9f..18b72c9511 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/CommonEmrBulkImportHelper.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/CommonEmrBulkImportHelper.java
@@ -43,7 +43,6 @@
import java.util.stream.Collectors;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
import static sleeper.core.properties.instance.CommonProperty.JARS_BUCKET;
public class CommonEmrBulkImportHelper {
@@ -127,7 +126,7 @@ public IFunction createJobStarterFunction(
.environment(env)
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
.handler("sleeper.bulkimport.starter.BulkImportStarterLambda")
- .logGroup(createLambdaLogGroup(scope, "BulkImport" + platform + "JobStarterLogGroup", functionName, instanceProperties))
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName))
.events(Lists.newArrayList(SqsEventSource.Builder.create(jobQueue).batchSize(1).build())));
coreStacks.grantValidateBulkImport(function.getRole());
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/CommonEmrBulkImportStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/CommonEmrBulkImportStack.java
index 30344b777b..d354f350eb 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/CommonEmrBulkImportStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/CommonEmrBulkImportStack.java
@@ -20,10 +20,10 @@
import com.google.gson.Gson;
import com.google.gson.stream.JsonReader;
import software.amazon.awscdk.CfnJson;
-import software.amazon.awscdk.CfnJsonProps;
+import software.amazon.awscdk.Duration;
import software.amazon.awscdk.NestedStack;
+import software.amazon.awscdk.RemovalPolicy;
import software.amazon.awscdk.services.emr.CfnSecurityConfiguration;
-import software.amazon.awscdk.services.emr.CfnSecurityConfigurationProps;
import software.amazon.awscdk.services.iam.CfnInstanceProfile;
import software.amazon.awscdk.services.iam.CfnInstanceProfileProps;
import software.amazon.awscdk.services.iam.Effect;
@@ -35,6 +35,8 @@
import software.amazon.awscdk.services.iam.Role;
import software.amazon.awscdk.services.iam.RoleProps;
import software.amazon.awscdk.services.iam.ServicePrincipal;
+import software.amazon.awscdk.services.kms.IKey;
+import software.amazon.awscdk.services.kms.Key;
import software.amazon.awscdk.services.s3.Bucket;
import software.amazon.awscdk.services.s3.IBucket;
import software.constructs.Construct;
@@ -57,8 +59,14 @@
import static sleeper.core.properties.instance.CommonProperty.REGION;
import static sleeper.core.properties.instance.CommonProperty.SUBNETS;
import static sleeper.core.properties.instance.CommonProperty.VPC_ID;
+import static sleeper.core.properties.instance.EMRProperty.BULK_IMPORT_EMR_EBS_ENCRYPTION_KEY_ARN;
public class CommonEmrBulkImportStack extends NestedStack {
+
+ private static final String[] KMS_GRANTS = new String[]{
+ "kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey",
+ "kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"};
+
private final IRole ec2Role;
private final IRole emrRole;
private final CfnSecurityConfiguration securityConfiguration;
@@ -67,15 +75,16 @@ public CommonEmrBulkImportStack(
Construct scope, String id, InstanceProperties instanceProperties,
CoreStacks coreStacks, BulkImportBucketStack importBucketStack) {
super(scope, id);
+ IKey ebsKey = createEbsEncryptionKey(this, instanceProperties);
ec2Role = createEc2Role(this, instanceProperties,
- importBucketStack.getImportBucket(), coreStacks);
- emrRole = createEmrRole(this, instanceProperties, ec2Role);
- securityConfiguration = createSecurityConfiguration(this, instanceProperties);
+ importBucketStack.getImportBucket(), coreStacks, ebsKey);
+ emrRole = createEmrRole(this, instanceProperties, ec2Role, ebsKey);
+ securityConfiguration = createSecurityConfiguration(this, instanceProperties, ebsKey);
}
private static IRole createEc2Role(
Construct scope, InstanceProperties instanceProperties, IBucket importBucket,
- CoreStacks coreStacks) {
+ CoreStacks coreStacks, IKey ebsKey) {
// The EC2 Role is the role assumed by the EC2 instances and is the one
// we need to grant accesses to.
@@ -85,6 +94,7 @@ private static IRole createEc2Role(
.assumedBy(new ServicePrincipal("ec2.amazonaws.com"))
.build());
coreStacks.grantIngest(role);
+ ebsKey.grant(role, KMS_GRANTS);
// The role needs to be able to access the user's jars
IBucket jarsBucket = Bucket.fromBucketName(scope, "JarsBucket", instanceProperties.get(JARS_BUCKET));
@@ -127,7 +137,7 @@ private static IRole createEc2Role(
return role;
}
- private static IRole createEmrRole(Construct scope, InstanceProperties instanceProperties, IRole ec2Role) {
+ private static IRole createEmrRole(Construct scope, InstanceProperties instanceProperties, IRole ec2Role, IKey ebsKey) {
String instanceId = Utils.cleanInstanceId(instanceProperties);
String region = instanceProperties.get(REGION);
String account = instanceProperties.get(ACCOUNT);
@@ -184,28 +194,54 @@ private static IRole createEmrRole(Construct scope, InstanceProperties instanceP
.managedPolicies(Lists.newArrayList(emrManagedPolicy, customEmrManagedPolicy))
.assumedBy(new ServicePrincipal("elasticmapreduce.amazonaws.com"))
.build());
+ ebsKey.grant(role, KMS_GRANTS);
instanceProperties.set(BULK_IMPORT_EMR_CLUSTER_ROLE_NAME, role.getRoleName());
return role;
}
- private static CfnSecurityConfiguration createSecurityConfiguration(Construct scope, InstanceProperties instanceProperties) {
+ private static CfnSecurityConfiguration createSecurityConfiguration(Construct scope, InstanceProperties instanceProperties, IKey ebsKey) {
// See https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-create-security-configuration.html
- String jsonSecurityConf = "{\n" +
- " \"InstanceMetadataServiceConfiguration\" : {\n" +
- " \"MinimumInstanceMetadataServiceVersion\": 2,\n" +
- " \"HttpPutResponseHopLimit\": 1\n" +
- " }\n" +
- "}";
- CfnJsonProps jsonProps = CfnJsonProps.builder().value(jsonSecurityConf).build();
- CfnJson jsonObject = new CfnJson(scope, "EMRSecurityConfigurationJSONObject", jsonProps);
- CfnSecurityConfigurationProps securityConfigurationProps = CfnSecurityConfigurationProps.builder()
+ CfnJson jsonObject = CfnJson.Builder.create(scope, "EMRSecurityConfigurationJSONObject")
+ .value("{\n" +
+ " \"InstanceMetadataServiceConfiguration\": {\n" +
+ " \"MinimumInstanceMetadataServiceVersion\": 2,\n" +
+ " \"HttpPutResponseHopLimit\": 1\n" +
+ " },\n" +
+ " \"EncryptionConfiguration\": {\n" +
+ " \"EnableInTransitEncryption\": false,\n" +
+ " \"EnableAtRestEncryption\": true,\n" +
+ " \"AtRestEncryptionConfiguration\": {\n" +
+ " \"LocalDiskEncryptionConfiguration\": {\n" +
+ " \"EnableEbsEncryption\": true,\n" +
+ " \"EncryptionKeyProviderType\": \"AwsKms\",\n" +
+ " \"AwsKmsKey\": \"" + ebsKey.getKeyArn() + "\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}")
+ .build();
+ CfnSecurityConfiguration conf = CfnSecurityConfiguration.Builder.create(scope, "EMRSecurityConfiguration")
.name(String.join("-", "sleeper",
Utils.cleanInstanceId(instanceProperties), "EMRSecurityConfigurationProps"))
.securityConfiguration(jsonObject)
.build();
- instanceProperties.set(CdkDefinedInstanceProperty.BULK_IMPORT_EMR_SECURITY_CONF_NAME, securityConfigurationProps.getName());
- return new CfnSecurityConfiguration(scope, "EMRSecurityConfiguration", securityConfigurationProps);
+ instanceProperties.set(CdkDefinedInstanceProperty.BULK_IMPORT_EMR_SECURITY_CONF_NAME, conf.getName());
+ return conf;
+ }
+
+ private static IKey createEbsEncryptionKey(Construct scope, InstanceProperties instanceProperties) {
+ String ebsKeyArn = instanceProperties.get(BULK_IMPORT_EMR_EBS_ENCRYPTION_KEY_ARN);
+ if (ebsKeyArn == null) {
+ return Key.Builder.create(scope, "EbsKey")
+ .description("Key used to encrypt data at rest in the local filesystem in AWS EMR for Sleeper.")
+ .enableKeyRotation(true)
+ .removalPolicy(RemovalPolicy.DESTROY)
+ .pendingWindow(Duration.days(7))
+ .build();
+ } else {
+ return Key.fromKeyArn(scope, "EbsKey", ebsKeyArn);
+ }
}
public IRole getEc2Role() {
diff --git a/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/EksBulkImportStack.java b/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/EksBulkImportStack.java
index 1552c5f97c..05226062f3 100644
--- a/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/EksBulkImportStack.java
+++ b/java/cdk/src/main/java/sleeper/cdk/stack/bulkimport/EksBulkImportStack.java
@@ -77,7 +77,7 @@
import java.util.function.Function;
import static sleeper.cdk.util.Utils.createAlarmForDlq;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
+import static sleeper.cdk.util.Utils.createStateMachineLogOptions;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.BULK_IMPORT_EKS_JOB_QUEUE_ARN;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.BULK_IMPORT_EKS_JOB_QUEUE_URL;
import static sleeper.core.properties.instance.CommonProperty.ACCOUNT;
@@ -145,7 +145,7 @@ public EksBulkImportStack(
.environment(env)
.runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
.handler("sleeper.bulkimport.starter.BulkImportStarterLambda")
- .logGroup(createLambdaLogGroup(this, "BulkImportEKSJobStarterLogGroup", functionName, instanceProperties))
+ .logGroup(coreStacks.getLogGroupByFunctionName(functionName))
.events(Lists.newArrayList(SqsEventSource.Builder.create(bulkImportJobQueue).batchSize(1).build())));
configureJobStarterFunction(bulkImportJobStarter);
@@ -197,7 +197,7 @@ public EksBulkImportStack(
.forEach(sa -> sa.getNode().addDependency(namespace));
coreStacks.grantIngest(sparkServiceAccount.getRole());
- StateMachine stateMachine = createStateMachine(bulkImportCluster, instanceProperties, errorsTopic);
+ StateMachine stateMachine = createStateMachine(bulkImportCluster, instanceProperties, coreStacks, errorsTopic);
instanceProperties.set(CdkDefinedInstanceProperty.BULK_IMPORT_EKS_STATE_MACHINE_ARN, stateMachine.getStateMachineArn());
bulkImportCluster.getAwsAuth().addRoleMapping(stateMachine.getRole(), AwsAuthMapping.builder()
@@ -222,7 +222,7 @@ private static void configureJobStarterFunction(IFunction bulkImportJobStarter)
.build());
}
- private StateMachine createStateMachine(Cluster cluster, InstanceProperties instanceProperties, Topic errorsTopic) {
+ private StateMachine createStateMachine(Cluster cluster, InstanceProperties instanceProperties, CoreStacks coreStacks, Topic errorsTopic) {
String imageName = instanceProperties.get(ACCOUNT) +
".dkr.ecr." +
instanceProperties.get(REGION) +
@@ -267,6 +267,7 @@ private StateMachine createStateMachine(Cluster cluster, InstanceProperties inst
.stateJson(deleteJobState).build()))
.otherwise(createErrorMessage.next(publishError).next(Fail.Builder
.create(this, "FailedJobState").cause("Spark job failed").build())))))
+ .logs(createStateMachineLogOptions(coreStacks, "EksBulkImportStateMachine"))
.build();
}
diff --git a/java/cdk/src/main/java/sleeper/cdk/util/AutoDeleteS3Objects.java b/java/cdk/src/main/java/sleeper/cdk/util/AutoDeleteS3Objects.java
index 0119746859..38266020f5 100644
--- a/java/cdk/src/main/java/sleeper/cdk/util/AutoDeleteS3Objects.java
+++ b/java/cdk/src/main/java/sleeper/cdk/util/AutoDeleteS3Objects.java
@@ -16,9 +16,11 @@
package sleeper.cdk.util;
import software.amazon.awscdk.CustomResource;
+import software.amazon.awscdk.Duration;
import software.amazon.awscdk.customresources.Provider;
import software.amazon.awscdk.services.lambda.IFunction;
import software.amazon.awscdk.services.lambda.Runtime;
+import software.amazon.awscdk.services.logs.ILogGroup;
import software.amazon.awscdk.services.s3.Bucket;
import software.amazon.awscdk.services.s3.IBucket;
import software.constructs.Construct;
@@ -26,28 +28,59 @@
import sleeper.cdk.jars.BuiltJar;
import sleeper.cdk.jars.BuiltJars;
import sleeper.cdk.jars.LambdaCode;
+import sleeper.cdk.stack.CoreStacks;
+import sleeper.cdk.stack.LoggingStack;
import sleeper.core.properties.instance.InstanceProperties;
import java.util.Map;
-
-import static sleeper.cdk.util.Utils.createCustomResourceProviderLogGroup;
-import static sleeper.cdk.util.Utils.createLambdaLogGroup;
+import java.util.function.Function;
public class AutoDeleteS3Objects {
private AutoDeleteS3Objects() {
}
- public static void autoDeleteForBucket(Construct scope, BuiltJars jars, InstanceProperties instanceProperties, IBucket bucket) {
+ public static void autoDeleteForBucket(
+ Construct scope, InstanceProperties instanceProperties, CoreStacks coreStacks, BuiltJars jars,
+ IBucket bucket, String bucketName) {
+ autoDeleteForBucket(scope, instanceProperties, jars, bucket, bucketName, coreStacks::getLogGroupByFunctionName, coreStacks::getProviderLogGroupByFunctionName);
+ }
+
+ public static void autoDeleteForBucket(
+ Construct scope, InstanceProperties instanceProperties, LoggingStack logging, BuiltJars jars,
+ IBucket bucket, String bucketName) {
+ autoDeleteForBucket(scope, instanceProperties, jars, bucket, bucketName, logging::getLogGroupByFunctionName, logging::getProviderLogGroupByFunctionName);
+ }
+
+ public static void autoDeleteForBucket(
+ Construct scope, InstanceProperties instanceProperties, LoggingStack logging, LambdaCode customResourcesJar,
+ IBucket bucket, String bucketName) {
+ autoDeleteForBucket(scope, instanceProperties, customResourcesJar, bucket, bucketName, logging::getLogGroupByFunctionName, logging::getProviderLogGroupByFunctionName);
+ }
+
+ public static void autoDeleteForBucket(
+ Construct scope, InstanceProperties instanceProperties, CoreStacks coreStacks, LambdaCode customResourcesJar,
+ IBucket bucket, String bucketName) {
+ autoDeleteForBucket(scope, instanceProperties, customResourcesJar, bucket, bucketName, coreStacks::getLogGroupByFunctionName, coreStacks::getProviderLogGroupByFunctionName);
+ }
+
+ public static void autoDeleteForBucket(
+ Construct scope, InstanceProperties instanceProperties, BuiltJars jars, IBucket bucket, String bucketName,
+ Function getLogGroupByFunctionName,
+ Function getProviderLogGroupByFunctionName) {
IBucket jarsBucket = Bucket.fromBucketName(scope, "JarsBucket", jars.bucketName());
LambdaCode jar = jars.lambdaCode(BuiltJar.CUSTOM_RESOURCES, jarsBucket);
- autoDeleteForBucket(scope, jar, instanceProperties, bucket);
+ autoDeleteForBucket(scope, instanceProperties, jar, bucket, bucketName, getLogGroupByFunctionName, getProviderLogGroupByFunctionName);
}
- public static void autoDeleteForBucket(Construct scope, LambdaCode customResourcesJar, InstanceProperties instanceProperties, IBucket bucket) {
+ public static void autoDeleteForBucket(
+ Construct scope, InstanceProperties instanceProperties, LambdaCode customResourcesJar,
+ IBucket bucket, String bucketName,
+ Function getLogGroupByFunctionName,
+ Function getProviderLogGroupByFunctionName) {
String id = bucket.getNode().getId() + "-AutoDelete";
- String functionName = bucket.getBucketName() + "-autodelete";
+ String functionName = bucketName + "-autodelete";
IFunction lambda = customResourcesJar.buildFunction(scope, id + "Lambda", builder -> builder
.functionName(functionName)
@@ -55,20 +88,21 @@ public static void autoDeleteForBucket(Construct scope, LambdaCode customResourc
.memorySize(2048)
.environment(Utils.createDefaultEnvironmentNoConfigBucket(instanceProperties))
.description("Lambda for auto-deleting S3 objects")
- .logGroup(createLambdaLogGroup(scope, id + "LambdaLogGroup", functionName, instanceProperties))
- .runtime(Runtime.JAVA_11));
+ .logGroup(getLogGroupByFunctionName.apply(functionName))
+ .runtime(Runtime.JAVA_11)
+ .timeout(Duration.minutes(10)));
bucket.grantRead(lambda);
bucket.grantDelete(lambda);
Provider propertiesWriterProvider = Provider.Builder.create(scope, id + "Provider")
.onEventHandler(lambda)
- .logGroup(createCustomResourceProviderLogGroup(scope, id + "ProviderLogGroup", functionName, instanceProperties))
+ .logGroup(getProviderLogGroupByFunctionName.apply(functionName))
.build();
CustomResource.Builder.create(scope, id)
.resourceType("Custom::AutoDeleteS3Objects")
- .properties(Map.of("bucket", bucket.getBucketName()))
+ .properties(Map.of("bucket", bucketName))
.serviceToken(propertiesWriterProvider.getServiceToken())
.build();
}
diff --git a/java/cdk/src/main/java/sleeper/cdk/util/NewInstanceValidator.java b/java/cdk/src/main/java/sleeper/cdk/util/NewInstanceValidator.java
index e892a7c01f..2cd6059615 100644
--- a/java/cdk/src/main/java/sleeper/cdk/util/NewInstanceValidator.java
+++ b/java/cdk/src/main/java/sleeper/cdk/util/NewInstanceValidator.java
@@ -16,79 +16,83 @@
package sleeper.cdk.util;
-import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
-import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException;
-import com.amazonaws.services.s3.AmazonS3;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
import sleeper.core.properties.instance.InstanceProperties;
import java.nio.file.Path;
-import java.util.Arrays;
-import java.util.List;
import static sleeper.core.properties.instance.CommonProperty.ID;
-import static sleeper.core.properties.local.LoadLocalProperties.loadTablesFromInstancePropertiesFile;
-import static sleeper.core.properties.table.TableProperty.STATESTORE_CLASSNAME;
-import static sleeper.core.properties.table.TableProperty.TABLE_NAME;
class NewInstanceValidator {
- private final AmazonS3 amazonS3;
- private final AmazonDynamoDB amazonDynamoDB;
+ private final S3Client s3Client;
+ private final DynamoDbClient dynamoClient;
- NewInstanceValidator(AmazonS3 amazonS3, AmazonDynamoDB amazonDynamoDB) {
- this.amazonS3 = amazonS3;
- this.amazonDynamoDB = amazonDynamoDB;
+ NewInstanceValidator(S3Client s3Client, DynamoDbClient dynamoClient) {
+ this.s3Client = s3Client;
+ this.dynamoClient = dynamoClient;
}
void validate(InstanceProperties instanceProperties, Path instancePropertyPath) {
checkQueryResultsBucketDoesNotExist(instanceProperties);
- checkTableConfiguration(instanceProperties, instancePropertyPath);
+ checkDataBucketDoesNotExist(instanceProperties);
+ checkDynamoDBStateStoreDoesNotExist(instanceProperties);
+ checkS3StateStoreDoesNotExist(instanceProperties);
+ checkTransactionLogStateStoreDoesNotExist(instanceProperties);
}
private void checkQueryResultsBucketDoesNotExist(InstanceProperties instanceProperties) {
- String instanceName = instanceProperties.get(ID);
- String bucketName = String.join("-", "sleeper", instanceName, "query-results");
-
- if (amazonS3.doesBucketExistV2(bucketName)) {
+ String bucketName = String.join("-", "sleeper", instanceProperties.get(ID), "query", "results");
+ if (doesBucketExist(bucketName)) {
throw new IllegalArgumentException("Sleeper query results bucket exists: " + bucketName);
}
}
- private void checkTableConfiguration(InstanceProperties instanceProperties, Path instancePropertyPath) {
- String instanceName = instanceProperties.get(ID);
-
- loadTablesFromInstancePropertiesFile(instanceProperties, instancePropertyPath).forEach(tableProperties -> {
- String tableName = tableProperties.get(TABLE_NAME);
+ private void checkDataBucketDoesNotExist(InstanceProperties instanceProperties) {
+ String bucketName = String.join("-", "sleeper", instanceProperties.get(ID), "table", "data");
+ if (doesBucketExist(bucketName)) {
+ throw new IllegalArgumentException("Sleeper data bucket exists: " + bucketName);
+ }
+ }
- checkBucketExistsForTable(instanceName, tableName);
+ private void checkDynamoDBStateStoreDoesNotExist(InstanceProperties instanceProperties) {
+ String dynamodbTableName = String.join("-", "sleeper", instanceProperties.get(ID), "partitions");
+ if (doesDynamoTableExist(dynamodbTableName)) {
+ throw new IllegalArgumentException("Sleeper state store table exists: " + dynamodbTableName);
+ }
+ }
- if ("sleeper.statestore.dynamodb.DynamoDBStateStore".equalsIgnoreCase(tableProperties.get(STATESTORE_CLASSNAME))) {
- checkDynamoDBConfigurationExistsForTable(instanceName, tableName);
- }
- });
+ private void checkS3StateStoreDoesNotExist(InstanceProperties instanceProperties) {
+ String dynamodbTableName = String.join("-", "sleeper", instanceProperties.get(ID), "table", "revisions");
+ if (doesDynamoTableExist(dynamodbTableName)) {
+ throw new IllegalArgumentException("Sleeper state store table exists: " + dynamodbTableName);
+ }
}
- private void checkDynamoDBConfigurationExistsForTable(String instanceName, String tableName) {
- List tableTypes = Arrays.asList("active-files", "gc-files", "partitions");
- tableTypes.forEach(tableType -> {
- String dynamodbTableName = String.join("-", "sleeper", instanceName, "table", tableName, tableType);
- if (doesDynamoTableExist(dynamodbTableName)) {
- throw new IllegalArgumentException("Sleeper DynamoDBTable exists: " + dynamodbTableName);
- }
- });
+ private void checkTransactionLogStateStoreDoesNotExist(InstanceProperties instanceProperties) {
+ String dynamodbTableName = String.join("-", "sleeper", instanceProperties.get(ID), "partition", "transaction", "log");
+ if (doesDynamoTableExist(dynamodbTableName)) {
+ throw new IllegalArgumentException("Sleeper state store table exists: " + dynamodbTableName);
+ }
}
- private void checkBucketExistsForTable(String instanceName, String tableName) {
- String bucketName = String.join("-", "sleeper", instanceName, "table", tableName);
- if (amazonS3.doesBucketExistV2(bucketName)) {
- throw new IllegalArgumentException("Sleeper table bucket exists: " + bucketName);
+ private boolean doesBucketExist(String bucketName) {
+ try {
+ s3Client.headBucket(builder -> builder.bucket(bucketName));
+ return true;
+ } catch (NoSuchBucketException e) {
+ return false;
}
}
private boolean doesDynamoTableExist(String name) {
boolean tableExists = true;
+
try {
- amazonDynamoDB.describeTable(name);
+ dynamoClient.describeTable(builder -> builder.tableName(name));
} catch (ResourceNotFoundException e) {
tableExists = false;
}
diff --git a/java/cdk/src/main/java/sleeper/cdk/util/Utils.java b/java/cdk/src/main/java/sleeper/cdk/util/Utils.java
index 7246b2252c..8575c903c0 100644
--- a/java/cdk/src/main/java/sleeper/cdk/util/Utils.java
+++ b/java/cdk/src/main/java/sleeper/cdk/util/Utils.java
@@ -15,8 +15,6 @@
*/
package sleeper.cdk.util;
-import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.internal.BucketNameUtils;
import software.amazon.awscdk.Duration;
import software.amazon.awscdk.RemovalPolicy;
@@ -34,12 +32,17 @@
import software.amazon.awscdk.services.iam.ManagedPolicy;
import software.amazon.awscdk.services.iam.PolicyStatement;
import software.amazon.awscdk.services.lambda.IFunction;
-import software.amazon.awscdk.services.logs.LogGroup;
+import software.amazon.awscdk.services.logs.ILogGroup;
import software.amazon.awscdk.services.logs.RetentionDays;
import software.amazon.awscdk.services.sns.Topic;
import software.amazon.awscdk.services.sqs.Queue;
+import software.amazon.awscdk.services.stepfunctions.LogLevel;
+import software.amazon.awscdk.services.stepfunctions.LogOptions;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+import software.amazon.awssdk.services.s3.S3Client;
import software.constructs.Construct;
+import sleeper.cdk.stack.CoreStacks;
import sleeper.core.SleeperVersion;
import sleeper.core.properties.instance.CdkDefinedInstanceProperty;
import sleeper.core.properties.instance.InstanceProperties;
@@ -58,11 +61,9 @@
import java.util.regex.Pattern;
import java.util.stream.Stream;
-import static java.lang.String.format;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.CONFIG_BUCKET;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.VERSION;
import static sleeper.core.properties.instance.CommonProperty.ID;
-import static sleeper.core.properties.instance.CommonProperty.LOG_RETENTION_IN_DAYS;
import static sleeper.core.properties.instance.CommonProperty.RETAIN_INFRA_AFTER_DESTROY;
import static sleeper.core.properties.instance.CommonProperty.STACK_TAG_NAME;
import static sleeper.core.properties.instance.DashboardProperty.DASHBOARD_TIME_WINDOW_MINUTES;
@@ -124,55 +125,31 @@ private static String createToolOptions(InstanceProperties instanceProperties) {
* @return the cleaned up instance ID
*/
public static String cleanInstanceId(InstanceProperties properties) {
- return properties.get(ID)
- .toLowerCase(Locale.ROOT)
- .replace(".", "-");
- }
-
- /**
- * Configures a log group with the specified number of days. Valid values are taken from
- * here.
- * A value of -1 represents an infinite number of days.
- *
- * @param numberOfDays number of days you want to retain the logs
- * @return The RetentionDays equivalent
- */
- public static LogGroup createLogGroupWithRetentionDays(Construct scope, String id, int numberOfDays) {
- return LogGroup.Builder.create(scope, id)
- .retention(getRetentionDays(numberOfDays))
- .build();
+ return cleanInstanceId(properties.get(ID));
}
- public static LogGroup createLambdaLogGroup(
- Construct scope, String id, String functionName, InstanceProperties instanceProperties) {
- return LogGroup.Builder.create(scope, id)
- .logGroupName(functionName)
- .retention(getRetentionDays(instanceProperties.getInt(LOG_RETENTION_IN_DAYS)))
- .build();
+ public static String cleanInstanceId(String instanceId) {
+ return instanceId.toLowerCase(Locale.ROOT)
+ .replace(".", "-");
}
- public static LogGroup createCustomResourceProviderLogGroup(
- Construct scope, String id, String functionName, InstanceProperties instanceProperties) {
- return LogGroup.Builder.create(scope, id)
- .logGroupName(functionName + "-provider")
- .retention(getRetentionDays(instanceProperties.getInt(LOG_RETENTION_IN_DAYS)))
- .build();
+ public static LogDriver createECSContainerLogDriver(CoreStacks coreStacks, String id) {
+ ILogGroup logGroup = coreStacks.getLogGroupByECSLogDriverId(id);
+ return LogDriver.awsLogs(AwsLogDriverProps.builder()
+ .streamPrefix(logGroup.getLogGroupName())
+ .logGroup(logGroup)
+ .build());
}
- public static LogDriver createECSContainerLogDriver(Construct scope, InstanceProperties instanceProperties, String id) {
- String logGroupName = String.join("-", "sleeper", cleanInstanceId(instanceProperties), id);
- AwsLogDriverProps logDriverProps = AwsLogDriverProps.builder()
- .streamPrefix(logGroupName)
- .logGroup(LogGroup.Builder.create(scope, id)
- .logGroupName(logGroupName)
- .retention(getRetentionDays(instanceProperties.getInt(LOG_RETENTION_IN_DAYS)))
- .build())
+ public static LogOptions createStateMachineLogOptions(CoreStacks coreStacks, String id) {
+ return LogOptions.builder()
+ .destination(coreStacks.getLogGroupByStateMachineId(id))
+ .level(LogLevel.ALL)
+ .includeExecutionData(true)
.build();
- return LogDriver.awsLogs(logDriverProps);
}
- private static RetentionDays getRetentionDays(int numberOfDays) {
+ public static RetentionDays getRetentionDays(int numberOfDays) {
switch (numberOfDays) {
case -1:
return RetentionDays.INFINITE;
@@ -232,8 +209,9 @@ public static T loadInstanceProperties(
}
}
if ("true".equalsIgnoreCase(tryGetContext.apply("newinstance"))) {
- new NewInstanceValidator(AmazonS3ClientBuilder.defaultClient(),
- AmazonDynamoDBClientBuilder.defaultClient()).validate(properties, propertiesFile);
+ try (S3Client s3Client = S3Client.create(); DynamoDbClient dynamoClient = DynamoDbClient.create()) {
+ new NewInstanceValidator(s3Client, dynamoClient).validate(properties, propertiesFile);
+ }
}
String deployedVersion = properties.get(VERSION);
String localVersion = SleeperVersion.getVersion();
@@ -242,7 +220,7 @@ public static T loadInstanceProperties(
if (!"true".equalsIgnoreCase(tryGetContext.apply("skipVersionCheck"))
&& deployedVersion != null
&& !localVersion.equals(deployedVersion)) {
- throw new MismatchedVersionException(format("Local version %s does not match deployed version %s. " +
+ throw new MismatchedVersionException(String.format("Local version %s does not match deployed version %s. " +
"Please upgrade/downgrade to make these match",
localVersion, deployedVersion));
}
diff --git a/java/cdk/src/test/java/sleeper/cdk/jars/BuiltJarsIT.java b/java/cdk/src/test/java/sleeper/cdk/jars/BuiltJarsIT.java
index 0584462f2b..c4f045cf79 100644
--- a/java/cdk/src/test/java/sleeper/cdk/jars/BuiltJarsIT.java
+++ b/java/cdk/src/test/java/sleeper/cdk/jars/BuiltJarsIT.java
@@ -15,43 +15,26 @@
*/
package sleeper.cdk.jars;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.s3.model.BucketVersioningConfiguration;
-import com.amazonaws.services.s3.model.CreateBucketRequest;
-import com.amazonaws.services.s3.model.SetBucketVersioningConfigurationRequest;
import org.junit.jupiter.api.Test;
-import org.testcontainers.containers.localstack.LocalStackContainer;
-import org.testcontainers.junit.jupiter.Container;
-import org.testcontainers.junit.jupiter.Testcontainers;
-import org.testcontainers.utility.DockerImageName;
+import software.amazon.awssdk.services.s3.model.BucketVersioningStatus;
-import sleeper.core.CommonTestConstants;
+import sleeper.cdk.testutils.LocalStackTestBase;
import java.util.UUID;
-import static com.amazonaws.services.s3.model.BucketVersioningConfiguration.ENABLED;
import static org.assertj.core.api.Assertions.assertThat;
-import static sleeper.configuration.testutils.LocalStackAwsV1ClientHelper.buildAwsV1Client;
-@Testcontainers
-public class BuiltJarsIT {
-
- @Container
- public static LocalStackContainer localStackContainer = new LocalStackContainer(DockerImageName.parse(CommonTestConstants.LOCALSTACK_DOCKER_IMAGE))
- .withServices(LocalStackContainer.Service.S3);
-
- protected final AmazonS3 s3 = buildAwsV1Client(localStackContainer, LocalStackContainer.Service.S3, AmazonS3ClientBuilder.standard());
+public class BuiltJarsIT extends LocalStackTestBase {
private final String bucketName = UUID.randomUUID().toString();
- private final BuiltJars builtJars = new BuiltJars(s3, bucketName);
+ private final BuiltJars builtJars = new BuiltJars(s3Client, bucketName);
@Test
void shouldGetLatestVersionOfAJar() {
- s3.createBucket(new CreateBucketRequest(bucketName));
- s3.setBucketVersioningConfiguration(new SetBucketVersioningConfigurationRequest(bucketName,
- new BucketVersioningConfiguration(ENABLED)));
- String versionId = s3.putObject(bucketName, "test.jar", "data").getVersionId();
+ createBucket(bucketName);
+ s3Client.putBucketVersioning(put -> put.bucket(bucketName)
+ .versioningConfiguration(config -> config.status(BucketVersioningStatus.ENABLED)));
+ String versionId = putObject(bucketName, "test.jar", "data").versionId();
assertThat(builtJars.getLatestVersionId(BuiltJar.fromFormat("test.jar")))
.isEqualTo(versionId);
diff --git a/java/cdk/src/test/java/sleeper/cdk/testutils/LocalStackTestBase.java b/java/cdk/src/test/java/sleeper/cdk/testutils/LocalStackTestBase.java
new file mode 100644
index 0000000000..48a35e14df
--- /dev/null
+++ b/java/cdk/src/test/java/sleeper/cdk/testutils/LocalStackTestBase.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2022-2024 Crown Copyright
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package sleeper.cdk.testutils;
+
+import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
+import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.AmazonS3ClientBuilder;
+import org.testcontainers.containers.localstack.LocalStackContainer;
+import org.testcontainers.junit.jupiter.Container;
+import org.testcontainers.junit.jupiter.Testcontainers;
+import org.testcontainers.utility.DockerImageName;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.PutObjectResponse;
+import software.amazon.awssdk.services.s3.model.S3Object;
+
+import sleeper.core.CommonTestConstants;
+
+import java.util.List;
+
+import static java.util.stream.Collectors.toUnmodifiableList;
+import static sleeper.configuration.testutils.LocalStackAwsV1ClientHelper.buildAwsV1Client;
+
+@Testcontainers
+public abstract class LocalStackTestBase {
+
+ @Container
+ public static LocalStackContainer localStackContainer = new LocalStackContainer(DockerImageName.parse(CommonTestConstants.LOCALSTACK_DOCKER_IMAGE))
+ .withServices(LocalStackContainer.Service.S3, LocalStackContainer.Service.DYNAMODB);
+
+ protected final S3Client s3Client = buildAwsV2Client(localStackContainer, LocalStackContainer.Service.S3, S3Client.builder());
+ protected final AmazonS3 s3ClientV1 = buildAwsV1Client(localStackContainer, LocalStackContainer.Service.S3, AmazonS3ClientBuilder.standard());
+ protected final DynamoDbClient dynamoClient = buildAwsV2Client(localStackContainer, LocalStackContainer.Service.DYNAMODB, DynamoDbClient.builder());
+ protected final AmazonDynamoDB dynamoClientV1 = buildAwsV1Client(localStackContainer, LocalStackContainer.Service.DYNAMODB, AmazonDynamoDBClientBuilder.standard());
+
+ private static , T> T buildAwsV2Client(LocalStackContainer localStackContainer, LocalStackContainer.Service service, B builder) {
+ return builder
+ .endpointOverride(localStackContainer.getEndpointOverride(service))
+ .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create(
+ localStackContainer.getAccessKey(), localStackContainer.getSecretKey())))
+ .region(Region.of(localStackContainer.getRegion()))
+ .build();
+ }
+
+ protected void createBucket(String bucketName) {
+ s3Client.createBucket(builder -> builder.bucket(bucketName));
+ }
+
+ protected PutObjectResponse putObject(String bucketName, String key, String content) {
+ return s3Client.putObject(builder -> builder.bucket(bucketName).key(key),
+ RequestBody.fromString(content));
+ }
+
+ protected List listObjectKeys(String bucketName) {
+ return s3Client.listObjectsV2Paginator(builder -> builder.bucket(bucketName))
+ .contents().stream().map(S3Object::key)
+ .collect(toUnmodifiableList());
+ }
+}
diff --git a/java/cdk/src/test/java/sleeper/cdk/util/NewInstanceValidatorIT.java b/java/cdk/src/test/java/sleeper/cdk/util/NewInstanceValidatorIT.java
index 174cb26cc7..02365d8cd9 100644
--- a/java/cdk/src/test/java/sleeper/cdk/util/NewInstanceValidatorIT.java
+++ b/java/cdk/src/test/java/sleeper/cdk/util/NewInstanceValidatorIT.java
@@ -16,70 +16,38 @@
package sleeper.cdk.util;
-import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
-import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
-import com.amazonaws.services.dynamodbv2.model.AttributeDefinition;
-import com.amazonaws.services.dynamodbv2.model.BillingMode;
-import com.amazonaws.services.dynamodbv2.model.CreateTableRequest;
-import com.amazonaws.services.dynamodbv2.model.KeySchemaElement;
-import com.amazonaws.services.dynamodbv2.model.KeyType;
-import com.amazonaws.services.dynamodbv2.model.ScalarAttributeType;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
-import org.testcontainers.containers.localstack.LocalStackContainer;
-import org.testcontainers.junit.jupiter.Container;
-import org.testcontainers.junit.jupiter.Testcontainers;
-import org.testcontainers.utility.DockerImageName;
-import sleeper.core.CommonTestConstants;
+import sleeper.cdk.testutils.LocalStackTestBase;
import sleeper.core.properties.instance.InstanceProperties;
+import sleeper.statestore.dynamodb.DynamoDBStateStore;
+import sleeper.statestore.dynamodb.DynamoDBStateStoreCreator;
import sleeper.statestore.s3.S3StateStore;
+import sleeper.statestore.s3.S3StateStoreCreator;
+import sleeper.statestore.transactionlog.DynamoDBTransactionLogStateStore;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.List;
import static org.assertj.core.api.Assertions.assertThatCode;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static sleeper.cdk.util.ValidatorTestHelper.setupTablesPropertiesFile;
-import static sleeper.configuration.testutils.LocalStackAwsV1ClientHelper.buildAwsV1Client;
-import static sleeper.core.properties.instance.CommonProperty.ID;
+import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.DATA_BUCKET;
+import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.QUERY_RESULTS_BUCKET;
+import static sleeper.core.properties.testutils.InstancePropertiesTestHelper.createTestInstanceProperties;
-@Testcontainers
-class NewInstanceValidatorIT {
- @Container
- public static LocalStackContainer localStackContainer = new LocalStackContainer(DockerImageName.parse(CommonTestConstants.LOCALSTACK_DOCKER_IMAGE))
- .withServices(LocalStackContainer.Service.S3, LocalStackContainer.Service.DYNAMODB);
+class NewInstanceValidatorIT extends LocalStackTestBase {
@TempDir
public Path temporaryFolder;
- private static AmazonS3 amazonS3;
- private static AmazonDynamoDB amazonDynamoDB;
- private NewInstanceValidator newInstanceValidator;
- private final InstanceProperties instanceProperties = new InstanceProperties();
-
- @BeforeAll
- public static void setup() {
- amazonS3 = getS3Client();
- amazonDynamoDB = createDynamoClient();
- }
-
- @BeforeEach
- public void setUp() {
- newInstanceValidator = new NewInstanceValidator(amazonS3, amazonDynamoDB);
- }
+ private final InstanceProperties instanceProperties = createTestInstanceProperties();
@Test
void shouldNotThrowAnErrorWhenNoBucketsOrTablesExist() throws IOException {
// Given
- instanceProperties.set(ID, "valid-id");
setupTablesPropertiesFile(temporaryFolder, "example-table", "sleeper.statestore.dynamodb.DynamoDBStateStore");
// When / Then
@@ -88,105 +56,68 @@ void shouldNotThrowAnErrorWhenNoBucketsOrTablesExist() throws IOException {
}
@Test
- void shouldThrowAnErrorWhenABucketExistsWithSameNameAsTable() throws IOException {
+ void shouldThrowAnErrorWhenDataBucketExists() throws IOException {
// Given
- String bucketName = String.join("-", "sleeper", "valid-id", "table", "example-table");
- instanceProperties.set(ID, "valid-id");
setupTablesPropertiesFile(temporaryFolder, "example-table", "sleeper.statestore.dynamodb.DynamoDBStateStore");
- amazonS3.createBucket(bucketName);
+ createBucket(instanceProperties.get(DATA_BUCKET));
// When / Then
assertThatThrownBy(this::validate)
.isInstanceOf(IllegalArgumentException.class)
- .hasMessage("Sleeper table bucket exists: sleeper-valid-id-table-example-table");
- amazonS3.deleteBucket(bucketName);
+ .hasMessage("Sleeper data bucket exists: " + instanceProperties.get(DATA_BUCKET));
}
@Test
void shouldThrowAnErrorWhenTheQueryResultsBucketExists() throws IOException {
// Given
- String bucketName = String.join("-", "sleeper", "valid-id", "query-results");
- instanceProperties.set(ID, "valid-id");
- setupTablesPropertiesFile(temporaryFolder, "example-table", "sleeper.statestore.dynamodb.DynamoDBStateStore");
- amazonS3.createBucket(bucketName);
+ setupTablesPropertiesFile(temporaryFolder, "example-table", DynamoDBStateStore.class.getName());
+ createBucket(instanceProperties.get(QUERY_RESULTS_BUCKET));
// When / Then
assertThatThrownBy(this::validate)
.isInstanceOf(IllegalArgumentException.class)
- .hasMessage("Sleeper query results bucket exists: " + bucketName);
- amazonS3.deleteBucket(bucketName);
- }
-
- @Test
- void shouldThrowAnErrorWhenDynamoTableExistsWithSameNameAsTableActiveFiles() throws IOException {
- checkErrorIsThrownWhenTableExists("sleeper-valid-id-table-example-table-active-files");
+ .hasMessage("Sleeper query results bucket exists: " + instanceProperties.get(QUERY_RESULTS_BUCKET));
}
@Test
- void shouldThrowAnErrorWhenADynamoTableExistsWithSameNameAsTableGCFiles() throws IOException {
- checkErrorIsThrownWhenTableExists("sleeper-valid-id-table-example-table-gc-files");
- }
+ void shouldThrowAnErrorWhenDynamoStateStoreExists() throws IOException {
+ // Given
+ new DynamoDBStateStoreCreator(instanceProperties, dynamoClientV1).create();
+ setupTablesPropertiesFile(temporaryFolder, "example-table", DynamoDBStateStore.class.getName());
- @Test
- void shouldThrowAnErrorWhenADynamoTableExistsWithSameNameAsTablePartitions() throws IOException {
- checkErrorIsThrownWhenTableExists("sleeper-valid-id-table-example-table-partitions");
+ // When
+ assertThatThrownBy(this::validate)
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessageStartingWith("Sleeper state store table exists: ");
}
@Test
- void shouldNotThrowAnErrorWhenTableExistsButUsingS3StateStore() throws IOException {
+ void shouldThrowAnErrorWhenS3StateStoreExists() throws IOException {
// Given
- String dynamoTable = "sleeper-valid-id-table-example-table-partitions";
- instanceProperties.set(ID, "valid-id");
- setupTablesPropertiesFile(temporaryFolder, "example-table", "sleeper.statestore.s3.S3StateStore");
- createDynamoTable(dynamoTable);
+ new S3StateStoreCreator(instanceProperties, dynamoClientV1).create();
+ setupTablesPropertiesFile(temporaryFolder, "example-table", S3StateStore.class.getName());
// When
- assertThatCode(this::validate)
- .doesNotThrowAnyException();
- amazonDynamoDB.deleteTable(dynamoTable);
+ assertThatThrownBy(this::validate)
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessageStartingWith("Sleeper state store table exists: ");
}
- private void checkErrorIsThrownWhenTableExists(String dynamoTable) throws IOException {
+ @Test
+ void shouldThrowAnErrorWhenTransactionLogStateStoreExists() throws IOException {
// Given
- instanceProperties.set(ID, "valid-id");
- setupTablesPropertiesFile(temporaryFolder, "example-table", "sleeper.statestore.dynamodb.DynamoDBStateStore");
- createDynamoTable(dynamoTable);
+ new DynamoDBStateStoreCreator(instanceProperties, dynamoClientV1).create();
+ setupTablesPropertiesFile(temporaryFolder, "example-table", DynamoDBTransactionLogStateStore.class.getName());
// When
assertThatThrownBy(this::validate)
.isInstanceOf(IllegalArgumentException.class)
- .hasMessage("Sleeper DynamoDBTable exists: " + dynamoTable);
- amazonDynamoDB.deleteTable(dynamoTable);
+ .hasMessageStartingWith("Sleeper state store table exists: ");
}
private void validate() throws IOException {
Path instancePropertiesPath = temporaryFolder.resolve("instance.properties");
Files.writeString(instancePropertiesPath, instanceProperties.saveAsString());
- newInstanceValidator.validate(instanceProperties, instancePropertiesPath);
- }
-
- private static AmazonS3 getS3Client() {
- return buildAwsV1Client(localStackContainer, LocalStackContainer.Service.S3, AmazonS3ClientBuilder.standard());
- }
-
- protected static AmazonDynamoDB createDynamoClient() {
- return buildAwsV1Client(localStackContainer, LocalStackContainer.Service.DYNAMODB, AmazonDynamoDBClient.builder());
- }
-
- private void createDynamoTable(String tableName) {
- // These attributes are for the S3 state store, but for these tests it
- // doesn't matter if the attributes are correct for the DynamoDB state
- // store as we just need the table to exist.
- List attributeDefinitions = new ArrayList<>();
- attributeDefinitions.add(new AttributeDefinition(S3StateStore.REVISION_ID_KEY, ScalarAttributeType.S));
- List keySchemaElements = new ArrayList<>();
- keySchemaElements.add(new KeySchemaElement(S3StateStore.REVISION_ID_KEY, KeyType.HASH));
- CreateTableRequest request = new CreateTableRequest()
- .withTableName(tableName)
- .withAttributeDefinitions(attributeDefinitions)
- .withKeySchema(keySchemaElements)
- .withBillingMode(BillingMode.PAY_PER_REQUEST);
- amazonDynamoDB.createTable(request);
-
+ new NewInstanceValidator(s3Client, dynamoClient).validate(instanceProperties, instancePropertiesPath);
}
}
diff --git a/java/clients/pom.xml b/java/clients/pom.xml
index 089c21f276..cac349d0af 100644
--- a/java/clients/pom.xml
+++ b/java/clients/pom.xml
@@ -38,52 +38,43 @@
software.amazon.awssdk
- sts
- ${aws-java-sdk-v2.version}
+ s3
software.amazon.awssdk
- ecr
- ${aws-java-sdk-v2.version}
+ sqs
- com.amazonaws
- aws-java-sdk-ecs
- ${aws-java-sdk.version}
+ software.amazon.awssdk
+ sts
software.amazon.awssdk
cloudwatchevents
- ${aws-java-sdk-v2.version}
software.amazon.awssdk
- s3
+ cloudwatchlogs
software.amazon.awssdk
- lambda
- ${aws-java-sdk-v2.version}
+ ecr
software.amazon.awssdk
- cloudwatchlogs
- ${aws-java-sdk-v2.version}
+ cloudformation
software.amazon.awssdk
- cloudformation
- ${aws-java-sdk-v2.version}
+ lambda
software.amazon.awssdk
emr
- ${aws-java-sdk-v2.version}
software.amazon.awssdk
emrserverless
- ${aws-java-sdk-v2.version}
org.java-websocket
diff --git a/java/clients/src/main/java/sleeper/clients/QueryLambdaClient.java b/java/clients/src/main/java/sleeper/clients/QueryLambdaClient.java
index 408d1181c8..2d15f1a266 100644
--- a/java/clients/src/main/java/sleeper/clients/QueryLambdaClient.java
+++ b/java/clients/src/main/java/sleeper/clients/QueryLambdaClient.java
@@ -19,8 +19,7 @@
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.sqs.AmazonSQS;
-import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
+import software.amazon.awssdk.services.sqs.SqsClient;
import sleeper.configuration.properties.S3InstanceProperties;
import sleeper.configuration.properties.S3TableProperties;
@@ -50,13 +49,13 @@
* execute the query.
*/
public class QueryLambdaClient extends QueryCommandLineClient {
- private final AmazonSQS sqsClient;
+ private final SqsClient sqsClient;
private final DynamoDBQueryTracker queryTracker;
private Map resultsPublisherConfig;
private final String queryQueueUrl;
private final QuerySerDe querySerDe;
- public QueryLambdaClient(AmazonS3 s3Client, AmazonDynamoDB dynamoDBClient, AmazonSQS sqsClient, InstanceProperties instanceProperties) {
+ public QueryLambdaClient(AmazonS3 s3Client, AmazonDynamoDB dynamoDBClient, SqsClient sqsClient, InstanceProperties instanceProperties) {
super(s3Client, dynamoDBClient, instanceProperties);
this.sqsClient = sqsClient;
this.queryTracker = new DynamoDBQueryTracker(instanceProperties, dynamoDBClient);
@@ -129,8 +128,9 @@ protected void runQueries(TableProperties tableProperties) throws InterruptedExc
}
public void submitQuery(Query query) {
- sqsClient.sendMessage(queryQueueUrl, querySerDe.toJson(
- query.withResultsPublisherConfig(resultsPublisherConfig)));
+ sqsClient.sendMessage(request -> request.queueUrl(queryQueueUrl)
+ .messageBody(querySerDe.toJson(
+ query.withResultsPublisherConfig(resultsPublisherConfig))));
}
public static void main(String[] args) throws StateStoreException, InterruptedException {
@@ -139,16 +139,14 @@ public static void main(String[] args) throws StateStoreException, InterruptedEx
}
AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
- AmazonSQS sqsClient = AmazonSQSClientBuilder.defaultClient();
AmazonDynamoDB dynamoDBClient = AmazonDynamoDBClientBuilder.defaultClient();
- try {
+ try (SqsClient sqsClient = SqsClient.create()) {
InstanceProperties instanceProperties = S3InstanceProperties.loadGivenInstanceId(s3Client, args[0]);
QueryLambdaClient queryLambdaClient = new QueryLambdaClient(s3Client, dynamoDBClient, sqsClient, instanceProperties);
queryLambdaClient.run();
} finally {
s3Client.shutdown();
- sqsClient.shutdown();
dynamoDBClient.shutdown();
}
}
diff --git a/java/clients/src/main/java/sleeper/clients/deploy/RestartTasks.java b/java/clients/src/main/java/sleeper/clients/deploy/RestartTasks.java
index 292fa8e1bc..156b4ad8d1 100644
--- a/java/clients/src/main/java/sleeper/clients/deploy/RestartTasks.java
+++ b/java/clients/src/main/java/sleeper/clients/deploy/RestartTasks.java
@@ -16,11 +16,9 @@
package sleeper.clients.deploy;
-import com.amazonaws.services.ecs.AmazonECS;
-import com.amazonaws.services.ecs.model.ListTasksRequest;
-import com.amazonaws.services.ecs.model.StopTaskRequest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.ecs.EcsClient;
import software.amazon.awssdk.services.lambda.LambdaClient;
import sleeper.core.properties.instance.InstanceProperties;
@@ -34,7 +32,7 @@
public class RestartTasks {
private static final Logger LOGGER = LoggerFactory.getLogger(RestartTasks.class);
- private final AmazonECS ecs;
+ private final EcsClient ecs;
private final LambdaClient lambda;
private final InstanceProperties properties;
private final boolean skip;
@@ -70,12 +68,12 @@ private void restartTasks(InstanceProperty clusterProperty, InstanceProperty lam
}
private void stopTasksInCluster(String cluster) {
- ecs.listTasks(new ListTasksRequest().withCluster(cluster)).getTaskArns()
- .forEach(task -> ecs.stopTask(new StopTaskRequest().withTask(task).withCluster(cluster)));
+ ecs.listTasks(builder -> builder.cluster(cluster)).taskArns()
+ .forEach(task -> ecs.stopTask(builder -> builder.cluster(cluster).task(task)));
}
public static final class Builder {
- private AmazonECS ecs;
+ private EcsClient ecs;
private LambdaClient lambda;
private InstanceProperties properties;
private boolean skip;
@@ -83,7 +81,7 @@ public static final class Builder {
private Builder() {
}
- public Builder ecs(AmazonECS ecs) {
+ public Builder ecs(EcsClient ecs) {
this.ecs = ecs;
return this;
}
diff --git a/java/clients/src/main/java/sleeper/clients/docker/DeployDockerInstance.java b/java/clients/src/main/java/sleeper/clients/docker/DeployDockerInstance.java
index ab1f507561..f144eba83a 100644
--- a/java/clients/src/main/java/sleeper/clients/docker/DeployDockerInstance.java
+++ b/java/clients/src/main/java/sleeper/clients/docker/DeployDockerInstance.java
@@ -20,10 +20,9 @@
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.sqs.AmazonSQS;
-import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
import org.apache.hadoop.conf.Configuration;
import org.eclipse.jetty.io.RuntimeIOException;
+import software.amazon.awssdk.services.sqs.SqsClient;
import sleeper.clients.deploy.PopulateInstanceProperties;
import sleeper.clients.docker.stack.CompactionDockerStack;
@@ -45,10 +44,9 @@
import java.util.Objects;
import java.util.function.Consumer;
+import static sleeper.clients.util.AwsV2ClientHelper.buildAwsV2Client;
import static sleeper.configuration.utils.AwsV1ClientHelper.buildAwsV1Client;
-import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.COMPACTION_JOB_QUEUE_URL;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.CONFIG_BUCKET;
-import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.INGEST_JOB_QUEUE_URL;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.QUERY_RESULTS_BUCKET;
import static sleeper.core.properties.instance.CommonProperty.ACCOUNT;
import static sleeper.core.properties.instance.CommonProperty.ID;
@@ -63,7 +61,7 @@
public class DeployDockerInstance {
private final AmazonS3 s3Client;
private final AmazonDynamoDB dynamoDB;
- private final AmazonSQS sqsClient;
+ private final SqsClient sqsClient;
private final Configuration configuration;
private final Consumer extraTableProperties;
@@ -89,15 +87,13 @@ public static void main(String[] args) throws Exception {
String instanceId = args[0];
AmazonS3 s3Client = buildAwsV1Client(AmazonS3ClientBuilder.standard());
AmazonDynamoDB dynamoDB = buildAwsV1Client(AmazonDynamoDBClientBuilder.standard());
- AmazonSQS sqsClient = buildAwsV1Client(AmazonSQSClientBuilder.standard());
- try {
+ try (SqsClient sqsClient = buildAwsV2Client(SqsClient.builder())) {
DeployDockerInstance.builder().s3Client(s3Client).dynamoDB(dynamoDB).sqsClient(sqsClient)
.configuration(getConfigurationForClient()).build()
.deploy(instanceId);
} finally {
s3Client.shutdown();
dynamoDB.shutdown();
- sqsClient.shutdown();
}
}
@@ -114,6 +110,8 @@ public void deploy(InstanceProperties instanceProperties, List
ConfigurationDockerStack.from(instanceProperties, s3Client).deploy();
TableDockerStack.from(instanceProperties, s3Client, dynamoDB).deploy();
+ IngestDockerStack.from(instanceProperties, dynamoDB, sqsClient).deploy();
+ CompactionDockerStack.from(instanceProperties, dynamoDB, sqsClient).deploy();
S3InstanceProperties.saveToS3(s3Client, instanceProperties);
@@ -124,9 +122,6 @@ public void deploy(InstanceProperties instanceProperties, List
throw new RuntimeIOException(e);
}
}
-
- IngestDockerStack.from(instanceProperties, dynamoDB, sqsClient).deploy();
- CompactionDockerStack.from(instanceProperties, dynamoDB, sqsClient).deploy();
}
private static void setForcedInstanceProperties(InstanceProperties instanceProperties) {
@@ -137,8 +132,6 @@ private static void setForcedInstanceProperties(InstanceProperties instancePrope
instanceProperties.set(VPC_ID, "test-vpc");
instanceProperties.set(SUBNETS, "test-subnet");
instanceProperties.set(REGION, "us-east-1");
- instanceProperties.set(INGEST_JOB_QUEUE_URL, "sleeper-" + instanceId + "-IngestJobQ");
- instanceProperties.set(COMPACTION_JOB_QUEUE_URL, "sleeper-" + instanceId + "-CompactionJobQ");
instanceProperties.set(QUERY_RESULTS_BUCKET, "sleeper-" + instanceId + "-query-results");
instanceProperties.set(DEFAULT_ASYNC_COMMIT_BEHAVIOUR, DefaultAsyncCommitBehaviour.DISABLED.toString());
}
@@ -153,7 +146,7 @@ private static TableProperties generateTableProperties(InstanceProperties instan
public static final class Builder {
private AmazonS3 s3Client;
private AmazonDynamoDB dynamoDB;
- private AmazonSQS sqsClient;
+ private SqsClient sqsClient;
private Configuration configuration;
private Consumer extraTableProperties = tableProperties -> {
};
@@ -171,7 +164,7 @@ public Builder dynamoDB(AmazonDynamoDB dynamoDB) {
return this;
}
- public Builder sqsClient(AmazonSQS sqsClient) {
+ public Builder sqsClient(SqsClient sqsClient) {
this.sqsClient = sqsClient;
return this;
}
diff --git a/java/clients/src/main/java/sleeper/clients/docker/SendFilesToIngest.java b/java/clients/src/main/java/sleeper/clients/docker/SendFilesToIngest.java
index 4a15d2c9a7..ef1fe85198 100644
--- a/java/clients/src/main/java/sleeper/clients/docker/SendFilesToIngest.java
+++ b/java/clients/src/main/java/sleeper/clients/docker/SendFilesToIngest.java
@@ -18,9 +18,8 @@
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.sqs.AmazonSQS;
-import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import software.amazon.awssdk.services.sqs.SqsClient;
import sleeper.configuration.properties.S3InstanceProperties;
import sleeper.core.properties.instance.InstanceProperties;
@@ -33,6 +32,7 @@
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import static sleeper.clients.util.AwsV2ClientHelper.buildAwsV2Client;
import static sleeper.configuration.utils.AwsV1ClientHelper.buildAwsV1Client;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.DATA_BUCKET;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.INGEST_JOB_QUEUE_URL;
@@ -53,18 +53,16 @@ public static void main(String[] args) {
.filter(Files::isRegularFile)
.collect(Collectors.toList());
AmazonS3 s3Client = buildAwsV1Client(AmazonS3ClientBuilder.standard());
- AmazonSQS sqsClient = buildAwsV1Client(AmazonSQSClientBuilder.standard());
- try {
+ try (SqsClient sqsClient = buildAwsV2Client(SqsClient.builder())) {
InstanceProperties properties = S3InstanceProperties.loadGivenInstanceId(s3Client, instanceId);
uploadFilesAndSendJob(properties, tableName, filePaths, s3Client, sqsClient);
} finally {
s3Client.shutdown();
- sqsClient.shutdown();
}
}
public static void uploadFilesAndSendJob(
- InstanceProperties properties, String tableName, List filePaths, AmazonS3 s3Client, AmazonSQS sqsClient) {
+ InstanceProperties properties, String tableName, List filePaths, AmazonS3 s3Client, SqsClient sqsClient) {
uploadFiles(properties, filePaths, s3Client);
sendJobForFiles(properties, tableName, filePaths, sqsClient);
}
@@ -74,13 +72,15 @@ public static void uploadFiles(InstanceProperties properties, List filePat
"ingest/" + filePath.getFileName().toString(), filePath.toFile()));
}
- public static void sendJobForFiles(InstanceProperties properties, String tableName, List filePaths, AmazonSQS sqsClient) {
+ public static void sendJobForFiles(InstanceProperties properties, String tableName, List filePaths, SqsClient sqsClient) {
IngestJob job = IngestJob.builder()
.files(filePaths.stream()
.map(filePath -> properties.get(DATA_BUCKET) + "/ingest/" + filePath.getFileName().toString())
.collect(Collectors.toList()))
.tableName(tableName)
.build();
- sqsClient.sendMessage(properties.get(INGEST_JOB_QUEUE_URL), new IngestJobSerDe().toJson(job));
+ sqsClient.sendMessage(request -> request
+ .queueUrl(properties.get(INGEST_JOB_QUEUE_URL))
+ .messageBody(new IngestJobSerDe().toJson(job)));
}
}
diff --git a/java/clients/src/main/java/sleeper/clients/docker/TearDownDockerInstance.java b/java/clients/src/main/java/sleeper/clients/docker/TearDownDockerInstance.java
index 6b7f09ec16..794843e4bf 100644
--- a/java/clients/src/main/java/sleeper/clients/docker/TearDownDockerInstance.java
+++ b/java/clients/src/main/java/sleeper/clients/docker/TearDownDockerInstance.java
@@ -20,8 +20,7 @@
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.sqs.AmazonSQS;
-import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
+import software.amazon.awssdk.services.sqs.SqsClient;
import sleeper.clients.docker.stack.CompactionDockerStack;
import sleeper.clients.docker.stack.ConfigurationDockerStack;
@@ -30,6 +29,7 @@
import sleeper.configuration.properties.S3InstanceProperties;
import sleeper.core.properties.instance.InstanceProperties;
+import static sleeper.clients.util.AwsV2ClientHelper.buildAwsV2Client;
import static sleeper.configuration.utils.AwsV1ClientHelper.buildAwsV1Client;
public class TearDownDockerInstance {
@@ -46,18 +46,16 @@ public static void main(String[] args) {
String instanceId = args[0];
AmazonS3 s3Client = buildAwsV1Client(AmazonS3ClientBuilder.standard());
AmazonDynamoDB dynamoDBClient = buildAwsV1Client(AmazonDynamoDBClientBuilder.standard());
- AmazonSQS sqsClient = buildAwsV1Client(AmazonSQSClientBuilder.standard());
- try {
+ try (SqsClient sqsClient = buildAwsV2Client(SqsClient.builder())) {
tearDown(instanceId, s3Client, dynamoDBClient, sqsClient);
} finally {
s3Client.shutdown();
dynamoDBClient.shutdown();
- sqsClient.shutdown();
}
}
- public static void tearDown(String instanceId, AmazonS3 s3Client, AmazonDynamoDB dynamoDB, AmazonSQS sqsClient) {
+ public static void tearDown(String instanceId, AmazonS3 s3Client, AmazonDynamoDB dynamoDB, SqsClient sqsClient) {
InstanceProperties instanceProperties = S3InstanceProperties.loadGivenInstanceId(s3Client, instanceId);
ConfigurationDockerStack.from(instanceProperties, s3Client).tearDown();
diff --git a/java/clients/src/main/java/sleeper/clients/docker/stack/CompactionDockerStack.java b/java/clients/src/main/java/sleeper/clients/docker/stack/CompactionDockerStack.java
index 42b3782e11..025ccdd3b5 100644
--- a/java/clients/src/main/java/sleeper/clients/docker/stack/CompactionDockerStack.java
+++ b/java/clients/src/main/java/sleeper/clients/docker/stack/CompactionDockerStack.java
@@ -17,17 +17,18 @@
package sleeper.clients.docker.stack;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
-import com.amazonaws.services.sqs.AmazonSQS;
+import software.amazon.awssdk.services.sqs.SqsClient;
import sleeper.compaction.status.store.job.DynamoDBCompactionJobStatusStoreCreator;
import sleeper.compaction.status.store.task.DynamoDBCompactionTaskStatusStoreCreator;
import sleeper.core.properties.instance.InstanceProperties;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.COMPACTION_JOB_QUEUE_URL;
+import static sleeper.core.properties.instance.CommonProperty.ID;
public class CompactionDockerStack implements DockerStack {
private final InstanceProperties instanceProperties;
- private final AmazonSQS sqsClient;
+ private final SqsClient sqsClient;
private final AmazonDynamoDB dynamoDB;
private CompactionDockerStack(Builder builder) {
@@ -36,7 +37,7 @@ private CompactionDockerStack(Builder builder) {
dynamoDB = builder.dynamoDB;
}
- public static CompactionDockerStack from(InstanceProperties instanceProperties, AmazonDynamoDB dynamoDB, AmazonSQS sqsClient) {
+ public static CompactionDockerStack from(InstanceProperties instanceProperties, AmazonDynamoDB dynamoDB, SqsClient sqsClient) {
return builder().instanceProperties(instanceProperties).dynamoDB(dynamoDB).sqsClient(sqsClient)
.build();
}
@@ -44,14 +45,16 @@ public static CompactionDockerStack from(InstanceProperties instanceProperties,
public void deploy() {
DynamoDBCompactionJobStatusStoreCreator.create(instanceProperties, dynamoDB);
DynamoDBCompactionTaskStatusStoreCreator.create(instanceProperties, dynamoDB);
- sqsClient.createQueue(instanceProperties.get(COMPACTION_JOB_QUEUE_URL));
+ String queueName = "sleeper-" + instanceProperties.get(ID) + "-CompactionJobQ";
+ String queueUrl = sqsClient.createQueue(request -> request.queueName(queueName)).queueUrl();
+ instanceProperties.set(COMPACTION_JOB_QUEUE_URL, queueUrl);
}
@Override
public void tearDown() {
DynamoDBCompactionJobStatusStoreCreator.tearDown(instanceProperties, dynamoDB);
DynamoDBCompactionTaskStatusStoreCreator.tearDown(instanceProperties, dynamoDB);
- sqsClient.deleteQueue(instanceProperties.get(COMPACTION_JOB_QUEUE_URL));
+ sqsClient.deleteQueue(request -> request.queueUrl(instanceProperties.get(COMPACTION_JOB_QUEUE_URL)));
}
public static Builder builder() {
@@ -60,7 +63,7 @@ public static Builder builder() {
public static final class Builder {
private InstanceProperties instanceProperties;
- private AmazonSQS sqsClient;
+ private SqsClient sqsClient;
private AmazonDynamoDB dynamoDB;
private Builder() {
@@ -71,7 +74,7 @@ public Builder instanceProperties(InstanceProperties instanceProperties) {
return this;
}
- public Builder sqsClient(AmazonSQS sqsClient) {
+ public Builder sqsClient(SqsClient sqsClient) {
this.sqsClient = sqsClient;
return this;
}
diff --git a/java/clients/src/main/java/sleeper/clients/docker/stack/IngestDockerStack.java b/java/clients/src/main/java/sleeper/clients/docker/stack/IngestDockerStack.java
index 0f4bbef3da..6b0fad5027 100644
--- a/java/clients/src/main/java/sleeper/clients/docker/stack/IngestDockerStack.java
+++ b/java/clients/src/main/java/sleeper/clients/docker/stack/IngestDockerStack.java
@@ -17,17 +17,18 @@
package sleeper.clients.docker.stack;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
-import com.amazonaws.services.sqs.AmazonSQS;
+import software.amazon.awssdk.services.sqs.SqsClient;
import sleeper.core.properties.instance.InstanceProperties;
import sleeper.ingest.status.store.job.DynamoDBIngestJobStatusStoreCreator;
import sleeper.ingest.status.store.task.DynamoDBIngestTaskStatusStoreCreator;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.INGEST_JOB_QUEUE_URL;
+import static sleeper.core.properties.instance.CommonProperty.ID;
public class IngestDockerStack implements DockerStack {
private final InstanceProperties instanceProperties;
- private final AmazonSQS sqsClient;
+ private final SqsClient sqsClient;
private final AmazonDynamoDB dynamoDB;
private IngestDockerStack(Builder builder) {
@@ -42,7 +43,7 @@ public static Builder builder() {
public static IngestDockerStack from(
InstanceProperties instanceProperties,
- AmazonDynamoDB dynamoDB, AmazonSQS sqsClient) {
+ AmazonDynamoDB dynamoDB, SqsClient sqsClient) {
return builder().instanceProperties(instanceProperties)
.dynamoDB(dynamoDB).sqsClient(sqsClient)
.build();
@@ -51,18 +52,20 @@ public static IngestDockerStack from(
public void deploy() {
DynamoDBIngestJobStatusStoreCreator.create(instanceProperties, dynamoDB);
DynamoDBIngestTaskStatusStoreCreator.create(instanceProperties, dynamoDB);
- sqsClient.createQueue(instanceProperties.get(INGEST_JOB_QUEUE_URL));
+ String queueName = "sleeper-" + instanceProperties.get(ID) + "-IngestJobQ";
+ String queueUrl = sqsClient.createQueue(request -> request.queueName(queueName)).queueUrl();
+ instanceProperties.set(INGEST_JOB_QUEUE_URL, queueUrl);
}
public void tearDown() {
DynamoDBIngestJobStatusStoreCreator.tearDown(instanceProperties, dynamoDB);
DynamoDBIngestTaskStatusStoreCreator.tearDown(instanceProperties, dynamoDB);
- sqsClient.deleteQueue(instanceProperties.get(INGEST_JOB_QUEUE_URL));
+ sqsClient.deleteQueue(request -> request.queueUrl(instanceProperties.get(INGEST_JOB_QUEUE_URL)));
}
public static final class Builder {
private InstanceProperties instanceProperties;
- private AmazonSQS sqsClient;
+ private SqsClient sqsClient;
private AmazonDynamoDB dynamoDB;
public Builder() {
@@ -73,7 +76,7 @@ public Builder instanceProperties(InstanceProperties instanceProperties) {
return this;
}
- public Builder sqsClient(AmazonSQS sqsClient) {
+ public Builder sqsClient(SqsClient sqsClient) {
this.sqsClient = sqsClient;
return this;
}
diff --git a/java/clients/src/main/java/sleeper/clients/docker/stack/TableDockerStack.java b/java/clients/src/main/java/sleeper/clients/docker/stack/TableDockerStack.java
index d112c71e15..8d5bc5ef77 100644
--- a/java/clients/src/main/java/sleeper/clients/docker/stack/TableDockerStack.java
+++ b/java/clients/src/main/java/sleeper/clients/docker/stack/TableDockerStack.java
@@ -28,7 +28,7 @@
import java.util.Locale;
import static sleeper.clients.docker.Utils.tearDownBucket;
-import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.ACTIVE_FILES_TABLELENAME;
+import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.ACTIVE_FILES_TABLENAME;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.DATA_BUCKET;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.FILE_REFERENCE_COUNT_TABLENAME;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.PARTITION_TABLENAME;
@@ -74,7 +74,7 @@ public void deploy() {
instanceProperties.set(TABLE_ONLINE_INDEX_DYNAMO_TABLENAME, String.join("-", "sleeper", instanceId, "table-index-online-by-name"));
instanceProperties.set(TABLE_ID_INDEX_DYNAMO_TABLENAME, String.join("-", "sleeper", instanceId, "table-index-by-id"));
DynamoDBTableIndexCreator.create(dynamoDB, instanceProperties);
- instanceProperties.set(ACTIVE_FILES_TABLELENAME, String.join("-", "sleeper", instanceId, "active-files"));
+ instanceProperties.set(ACTIVE_FILES_TABLENAME, String.join("-", "sleeper", instanceId, "active-files"));
instanceProperties.set(FILE_REFERENCE_COUNT_TABLENAME, String.join("-", "sleeper", instanceId, "file-refs"));
instanceProperties.set(PARTITION_TABLENAME, String.join("-", "sleeper", instanceId, "partitions"));
new DynamoDBStateStoreCreator(instanceProperties, dynamoDB).create();
@@ -88,7 +88,7 @@ public void deploy() {
}
public void tearDown() {
- dynamoDB.deleteTable(instanceProperties.get(ACTIVE_FILES_TABLELENAME));
+ dynamoDB.deleteTable(instanceProperties.get(ACTIVE_FILES_TABLENAME));
dynamoDB.deleteTable(instanceProperties.get(PARTITION_TABLENAME));
dynamoDB.deleteTable(instanceProperties.get(REVISION_TABLENAME));
dynamoDB.deleteTable(instanceProperties.get(TRANSACTION_LOG_FILES_TABLENAME));
diff --git a/java/clients/src/main/java/sleeper/clients/status/report/DeadLettersStatusReport.java b/java/clients/src/main/java/sleeper/clients/status/report/DeadLettersStatusReport.java
index aa43c12b9e..0939d5fee0 100644
--- a/java/clients/src/main/java/sleeper/clients/status/report/DeadLettersStatusReport.java
+++ b/java/clients/src/main/java/sleeper/clients/status/report/DeadLettersStatusReport.java
@@ -21,9 +21,9 @@
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.sqs.AmazonSQS;
import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
-import com.amazonaws.services.sqs.model.Message;
-import com.amazonaws.services.sqs.model.ReceiveMessageRequest;
-import com.amazonaws.services.sqs.model.ReceiveMessageResult;
+import software.amazon.awssdk.services.sqs.SqsClient;
+import software.amazon.awssdk.services.sqs.model.Message;
+import software.amazon.awssdk.services.sqs.model.ReceiveMessageResponse;
import sleeper.compaction.job.CompactionJobSerDe;
import sleeper.configuration.properties.S3InstanceProperties;
@@ -37,6 +37,7 @@
import java.io.IOException;
import java.util.function.Function;
+import static sleeper.clients.util.AwsV2ClientHelper.buildAwsV2Client;
import static sleeper.configuration.utils.AwsV1ClientHelper.buildAwsV1Client;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.COMPACTION_JOB_DLQ_URL;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.INGEST_JOB_DLQ_URL;
@@ -49,13 +50,16 @@
*/
public class DeadLettersStatusReport {
private final InstanceProperties instanceProperties;
- private final AmazonSQS sqsClient;
+ private final SqsClient sqsClient;
+ private final QueueMessageCount.Client messageCount;
private final TablePropertiesProvider tablePropertiesProvider;
- public DeadLettersStatusReport(AmazonSQS sqsClient,
+ public DeadLettersStatusReport(SqsClient sqsClient,
+ QueueMessageCount.Client messageCount,
InstanceProperties instanceProperties,
TablePropertiesProvider tablePropertiesProvider) {
this.sqsClient = sqsClient;
+ this.messageCount = messageCount;
this.instanceProperties = instanceProperties;
this.tablePropertiesProvider = tablePropertiesProvider;
}
@@ -80,17 +84,14 @@ private void printStats(String queueUrl, String description, Function 0) {
- ReceiveMessageRequest receiveMessageRequest = new ReceiveMessageRequest()
- .withQueueUrl(queueUrl)
- .withMaxNumberOfMessages(10)
- .withVisibilityTimeout(1);
- ReceiveMessageResult result = sqsClient.receiveMessage(receiveMessageRequest);
- for (Message message : result.getMessages()) {
- System.out.println(decoder.apply(message.getBody()));
+ ReceiveMessageResponse response = sqsClient.receiveMessage(request -> request
+ .queueUrl(queueUrl).maxNumberOfMessages(10).visibilityTimeout(1));
+ for (Message message : response.messages()) {
+ System.out.println(decoder.apply(message.body()));
}
}
}
@@ -101,17 +102,18 @@ public static void main(String[] args) {
}
AmazonS3 s3Client = buildAwsV1Client(AmazonS3ClientBuilder.standard());
AmazonDynamoDB dynamoDBClient = buildAwsV1Client(AmazonDynamoDBClientBuilder.standard());
- AmazonSQS sqsClient = buildAwsV1Client(AmazonSQSClientBuilder.standard());
+ AmazonSQS sqsClientV1 = buildAwsV1Client(AmazonSQSClientBuilder.standard());
- try {
+ try (SqsClient sqsClient = buildAwsV2Client(SqsClient.builder())) {
InstanceProperties instanceProperties = S3InstanceProperties.loadGivenInstanceId(s3Client, args[0]);
TablePropertiesProvider tablePropertiesProvider = S3TableProperties.createProvider(instanceProperties, s3Client, dynamoDBClient);
- DeadLettersStatusReport statusReport = new DeadLettersStatusReport(sqsClient, instanceProperties, tablePropertiesProvider);
+ DeadLettersStatusReport statusReport = new DeadLettersStatusReport(
+ sqsClient, QueueMessageCount.withSqsClient(sqsClientV1), instanceProperties, tablePropertiesProvider);
statusReport.run();
} finally {
s3Client.shutdown();
dynamoDBClient.shutdown();
- sqsClient.shutdown();
+ sqsClientV1.shutdown();
}
}
}
diff --git a/java/clients/src/main/java/sleeper/clients/status/report/RetryMessages.java b/java/clients/src/main/java/sleeper/clients/status/report/RetryMessages.java
index d8666d887a..8b1398c443 100644
--- a/java/clients/src/main/java/sleeper/clients/status/report/RetryMessages.java
+++ b/java/clients/src/main/java/sleeper/clients/status/report/RetryMessages.java
@@ -17,14 +17,11 @@
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.sqs.AmazonSQS;
-import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
-import com.amazonaws.services.sqs.model.Message;
-import com.amazonaws.services.sqs.model.ReceiveMessageRequest;
-import com.amazonaws.services.sqs.model.ReceiveMessageResult;
-import com.amazonaws.services.sqs.model.SendMessageRequest;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
+import software.amazon.awssdk.services.sqs.SqsClient;
+import software.amazon.awssdk.services.sqs.model.Message;
+import software.amazon.awssdk.services.sqs.model.ReceiveMessageResponse;
import sleeper.configuration.properties.S3InstanceProperties;
import sleeper.core.properties.instance.InstanceProperties;
@@ -32,6 +29,7 @@
import java.util.HashSet;
import java.util.Set;
+import static sleeper.clients.util.AwsV2ClientHelper.buildAwsV2Client;
import static sleeper.configuration.utils.AwsV1ClientHelper.buildAwsV1Client;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.COMPACTION_JOB_DLQ_URL;
import static sleeper.core.properties.instance.CdkDefinedInstanceProperty.COMPACTION_JOB_QUEUE_URL;
@@ -46,11 +44,11 @@
*/
public class RetryMessages {
private final InstanceProperties instanceProperties;
- private final AmazonSQS sqsClient;
+ private final SqsClient sqsClient;
private final String stack;
private final int maxMessages;
- public RetryMessages(InstanceProperties instanceProperties, AmazonSQS sqsClient, String stack, int maxMessages) {
+ public RetryMessages(InstanceProperties instanceProperties, SqsClient sqsClient, String stack, int maxMessages) {
this.instanceProperties = instanceProperties;
this.sqsClient = sqsClient;
this.stack = stack;
@@ -64,21 +62,16 @@ public void run() {
int count = 0;
while (count < maxMessages) {
- ReceiveMessageRequest receiveMessageRequest = new ReceiveMessageRequest(deadLetterUrl)
- .withMaxNumberOfMessages(Math.min(maxMessages, 10))
- .withWaitTimeSeconds(1); // Must be >= 0 and <= 20
- ReceiveMessageResult receiveMessageResult = sqsClient.receiveMessage(receiveMessageRequest);
- if (receiveMessageResult.getMessages().isEmpty()) {
+ ReceiveMessageResponse response = sqsClient.receiveMessage(request -> request
+ .queueUrl(deadLetterUrl).maxNumberOfMessages(Math.min(maxMessages, 10)).waitTimeSeconds(1));
+ if (response.messages().isEmpty()) {
System.out.println("Received no messages, terminating");
break;
}
- System.out.println("Received " + receiveMessageResult.getMessages().size() + " messages");
- for (Message message : receiveMessageResult.getMessages()) {
- System.out.println("Received message with id " + message.getMessageId());
- SendMessageRequest sendMessageRequest = new SendMessageRequest()
- .withQueueUrl(originalQueueUrl)
- .withMessageBody(message.getBody());
- sqsClient.sendMessage(sendMessageRequest);
+ System.out.println("Received " + response.messages().size() + " messages");
+ for (Message message : response.messages()) {
+ System.out.println("Received message with id " + message.messageId());
+ sqsClient.sendMessage(request -> request.queueUrl(originalQueueUrl).messageBody(message.body()));
System.out.println("Sent message back to original queue");
count++;
}
@@ -121,12 +114,9 @@ public static void main(String[] args) {
s3Client.shutdown();
}
- AmazonSQS sqsClient = buildAwsV1Client(AmazonSQSClientBuilder.standard());
- try {
+ try (SqsClient sqsClient = buildAwsV2Client(SqsClient.builder())) {
RetryMessages retryMessages = new RetryMessages(instanceProperties, sqsClient, stack, maxMessages);
retryMessages.run();
- } finally {
- sqsClient.shutdown();
}
}
}
diff --git a/java/clients/src/main/java/sleeper/clients/status/report/StatusReport.java b/java/clients/src/main/java/sleeper/clients/status/report/StatusReport.java
index 5e7ffb6f38..2d24aa77d2 100644
--- a/java/clients/src/main/java/sleeper/clients/status/report/StatusReport.java
+++ b/java/clients/src/main/java/sleeper/clients/status/report/StatusReport.java
@@ -22,6 +22,7 @@
import com.amazonaws.services.sqs.AmazonSQS;
import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
import org.apache.hadoop.conf.Configuration;
+import software.amazon.awssdk.services.sqs.SqsClient;
import sleeper.clients.status.report.compaction.job.StandardCompactionJobStatusReporter;
import sleeper.clients.status.report.compaction.task.CompactionTaskQuery;
@@ -40,7 +41,9 @@
import sleeper.core.statestore.StateStore;
import sleeper.core.statestore.StateStoreException;
import sleeper.statestore.StateStoreFactory;
+import sleeper.task.common.QueueMessageCount;
+import static sleeper.clients.util.AwsV2ClientHelper.buildAwsV2Client;
import static sleeper.clients.util.ClientUtils.optionalArgument;
import static sleeper.configuration.utils.AwsV1ClientHelper.buildAwsV1Client;
@@ -55,14 +58,15 @@ public class StatusReport {
private final StateStore stateStore;
private final CompactionJobStatusStore compactionStatusStore;
private final CompactionTaskStatusStore compactionTaskStatusStore;
- private final AmazonSQS sqsClient;
+ private final SqsClient sqsClient;
+ private final QueueMessageCount.Client messageCount;
private final TablePropertiesProvider tablePropertiesProvider;
public StatusReport(
InstanceProperties instanceProperties, TableProperties tableProperties,
boolean verbose, StateStore stateStore,
CompactionJobStatusStore compactionStatusStore, CompactionTaskStatusStore compactionTaskStatusStore,
- AmazonSQS sqsClient, TablePropertiesProvider tablePropertiesProvider) {
+ SqsClient sqsClient, QueueMessageCount.Client messageCount, TablePropertiesProvider tablePropertiesProvider) {
this.instanceProperties = instanceProperties;
this.tableProperties = tableProperties;
this.verbose = verbose;
@@ -70,6 +74,7 @@ public StatusReport(
this.compactionStatusStore = compactionStatusStore;
this.compactionTaskStatusStore = compactionTaskStatusStore;
this.sqsClient = sqsClient;
+ this.messageCount = messageCount;
this.tablePropertiesProvider = tablePropertiesProvider;
}
@@ -93,7 +98,7 @@ private void run() throws StateStoreException {
CompactionTaskQuery.UNFINISHED).run();
// Dead letters
- new DeadLettersStatusReport(sqsClient, instanceProperties, tablePropertiesProvider).run();
+ new DeadLettersStatusReport(sqsClient, messageCount, instanceProperties, tablePropertiesProvider).run();
}
public static void main(String[] args) throws StateStoreException {
@@ -108,8 +113,8 @@ public static void main(String[] args) throws StateStoreException {
AmazonS3 s3Client = buildAwsV1Client(AmazonS3ClientBuilder.standard());
AmazonDynamoDB dynamoDBClient = buildAwsV1Client(AmazonDynamoDBClientBuilder.standard());
- AmazonSQS sqsClient = buildAwsV1Client(AmazonSQSClientBuilder.standard());
- try {
+ AmazonSQS sqsClientV1 = buildAwsV1Client(AmazonSQSClientBuilder.standard());
+ try (SqsClient sqsClient = buildAwsV2Client(SqsClient.builder())) {
InstanceProperties instanceProperties = S3InstanceProperties.loadGivenInstanceId(s3Client, instanceId);
TablePropertiesProvider tablePropertiesProvider = S3TableProperties.createProvider(instanceProperties, s3Client, dynamoDBClient);
TableProperties tableProperties = tablePropertiesProvider.getByName(tableName);
@@ -121,12 +126,12 @@ public static void main(String[] args) throws StateStoreException {
StatusReport statusReport = new StatusReport(
instanceProperties, tableProperties, verbose,
stateStore, compactionStatusStore, compactionTaskStatusStore,
- sqsClient, tablePropertiesProvider);
+ sqsClient, QueueMessageCount.withSqsClient(sqsClientV1), tablePropertiesProvider);
statusReport.run();
} finally {
s3Client.shutdown();
dynamoDBClient.shutdown();
- sqsClient.shutdown();
+ sqsClientV1.shutdown();
}
}
}
diff --git a/java/clients/src/main/java/sleeper/clients/teardown/ShutdownSystemProcesses.java b/java/clients/src/main/java/sleeper/clients/teardown/ShutdownSystemProcesses.java
index 69a275f6f0..2c3086fc13 100644
--- a/java/clients/src/main/java/sleeper/clients/teardown/ShutdownSystemProcesses.java
+++ b/java/clients/src/main/java/sleeper/clients/teardown/ShutdownSystemProcesses.java
@@ -15,13 +15,10 @@
*/
package sleeper.clients.teardown;
-import com.amazonaws.services.ecs.AmazonECS;
-import com.amazonaws.services.ecs.model.ListTasksRequest;
-import com.amazonaws.services.ecs.model.ListTasksResult;
-import com.amazonaws.services.ecs.model.StopTaskRequest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.cloudwatchevents.CloudWatchEventsClient;
+import software.amazon.awssdk.services.ecs.EcsClient;
import software.amazon.awssdk.services.emr.EmrClient;
import software.amazon.awssdk.services.emr.model.ListClustersResponse;
import software.amazon.awssdk.services.emrserverless.EmrServerlessClient;
@@ -32,6 +29,7 @@
import sleeper.core.properties.SleeperProperty;
import sleeper.core.properties.instance.InstanceProperties;
import sleeper.core.util.StaticRateLimit;
+import sleeper.core.util.ThreadSleep;
import java.util.List;
import java.util.function.Consumer;
@@ -46,24 +44,27 @@ public class ShutdownSystemProcesses {
private static final Logger LOGGER = LoggerFactory.getLogger(ShutdownSystemProcesses.class);
private final CloudWatchEventsClient cloudWatch;
- private final AmazonECS ecs;
+ private final EcsClient ecs;
private final EmrClient emrClient;
private final EmrServerlessClient emrServerlessClient;
private final StaticRateLimit listActiveClustersLimit;
+ private final ThreadSleep threadSleep;
public ShutdownSystemProcesses(TearDownClients clients) {
- this(clients.getCloudWatch(), clients.getEcs(), clients.getEmr(), clients.getEmrServerless(), EmrUtils.LIST_ACTIVE_CLUSTERS_LIMIT);
+ this(clients.getCloudWatch(), clients.getEcs(), clients.getEmr(), clients.getEmrServerless(), EmrUtils.LIST_ACTIVE_CLUSTERS_LIMIT, Thread::sleep);
}
public ShutdownSystemProcesses(
- CloudWatchEventsClient cloudWatch, AmazonECS ecs,
+ CloudWatchEventsClient cloudWatch, EcsClient ecs,
EmrClient emrClient, EmrServerlessClient emrServerlessClient,
- StaticRateLimit listActiveClustersLimit) {
+ StaticRateLimit listActiveClustersLimit,
+ ThreadSleep threadSleep) {
this.cloudWatch = cloudWatch;
this.ecs = ecs;
this.emrClient = emrClient;
this.emrServerlessClient = emrServerlessClient;
this.listActiveClustersLimit = listActiveClustersLimit;
+ this.threadSleep = threadSleep;
}
public void shutdown(InstanceProperties instanceProperties, List extraECSClusters) throws InterruptedException {
@@ -82,40 +83,36 @@ private void stopECSTasks(InstanceProperties instanceProperties, List ex
}
private void stopEMRClusters(InstanceProperties properties) throws InterruptedException {
- new TerminateEMRClusters(emrClient, properties.get(ID), listActiveClustersLimit).run();
+ new TerminateEMRClusters(emrClient, properties.get(ID), listActiveClustersLimit, threadSleep).run();
}
private void stopEMRServerlessApplication(InstanceProperties properties) throws InterruptedException {
new TerminateEMRServerlessApplications(emrServerlessClient, properties).run();
}
- public static void stopTasks(AmazonECS ecs, SleeperProperties properties, T property) {
+ public static void stopTasks(EcsClient ecs, SleeperProperties properties, T property) {
if (!properties.isSet(property)) {
return;
}
stopTasks(ecs, properties.get(property));
}
- private static void stopTasks(AmazonECS ecs, String clusterName) {
+ private static void stopTasks(EcsClient ecs, String clusterName) {
LOGGER.info("Stopping tasks for ECS cluster {}", clusterName);
forEachTaskArn(ecs, clusterName, taskArn -> {
// Rate limit for ECS StopTask is 100 burst, 40 sustained:
// https://docs.aws.amazon.com/AmazonECS/latest/APIReference/request-throttling.html
sleepForSustainedRatePerSecond(30);
- ecs.stopTask(new StopTaskRequest().withCluster(clusterName).withTask(taskArn)
- .withReason("Cleaning up before cdk destroy"));
+ ecs.stopTask(builder -> builder.cluster(clusterName).task(taskArn)
+ .reason("Cleaning up before cdk destroy"));
});
}
- private static void forEachTaskArn(AmazonECS ecs, String clusterName, Consumer consumer) {
- String nextToken = null;
- do {
- ListTasksResult result = ecs.listTasks(
- new ListTasksRequest().withCluster(clusterName).withNextToken(nextToken));
-
- LOGGER.info("Found {} tasks", result.getTaskArns().size());
- result.getTaskArns().forEach(consumer);
- nextToken = result.getNextToken();
- } while (nextToken != null);
+ private static void forEachTaskArn(EcsClient ecs, String clusterName, Consumer consumer) {
+ ecs.listTasksPaginator(builder -> builder.cluster(clusterName))
+ .stream()
+ .peek(response -> LOGGER.info("Found {} tasks", response.taskArns().size()))
+ .flatMap(response -> response.taskArns().stream())
+ .forEach(consumer);
}
}
diff --git a/java/clients/src/main/java/sleeper/clients/teardown/TearDownClients.java b/java/clients/src/main/java/sleeper/clients/teardown/TearDownClients.java
index 66eecf20e8..be3bf0e440 100644
--- a/java/clients/src/main/java/sleeper/clients/teardown/TearDownClients.java
+++ b/java/clients/src/main/java/sleeper/clients/teardown/TearDownClients.java
@@ -16,13 +16,12 @@
package sleeper.clients.teardown;
-import com.amazonaws.services.ecs.AmazonECS;
-import com.amazonaws.services.ecs.AmazonECSClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import software.amazon.awssdk.services.cloudformation.CloudFormationClient;
import software.amazon.awssdk.services.cloudwatchevents.CloudWatchEventsClient;
import software.amazon.awssdk.services.ecr.EcrClient;
+import software.amazon.awssdk.services.ecs.EcsClient;
import software.amazon.awssdk.services.emr.EmrClient;
import software.amazon.awssdk.services.emrserverless.EmrServerlessClient;
import software.amazon.awssdk.services.s3.S3Client;
@@ -35,7 +34,7 @@ public class TearDownClients {
private final AmazonS3 s3;
private final S3Client s3v2;
private final CloudWatchEventsClient cloudWatch;
- private final AmazonECS ecs;
+ private final EcsClient ecs;
private final EcrClient ecr;
private final EmrClient emr;
private final EmrServerlessClient emrServerless;
@@ -54,10 +53,10 @@ private TearDownClients(Builder builder) {
public static void withDefaults(TearDownOperation operation) throws IOException, InterruptedException {
AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
- AmazonECS ecsClient = AmazonECSClientBuilder.defaultClient();
try (S3Client s3v2Client = S3Client.create();
CloudWatchEventsClient cloudWatchClient = CloudWatchEventsClient.create();
EcrClient ecrClient = EcrClient.create();
+ EcsClient ecsClient = EcsClient.create();
EmrClient emrClient = EmrClient.create();
EmrServerlessClient emrServerless = EmrServerlessClient.create();
CloudFormationClient cloudFormationClient = CloudFormationClient.create()) {
@@ -74,7 +73,6 @@ public static void withDefaults(TearDownOperation operation) throws IOException,
operation.tearDown(clients);
} finally {
s3Client.shutdown();
- ecsClient.shutdown();
}
}
@@ -94,7 +92,7 @@ public CloudWatchEventsClient getCloudWatch() {
return cloudWatch;
}
- public AmazonECS getEcs() {
+ public EcsClient getEcs() {
return ecs;
}
@@ -118,7 +116,7 @@ public static final class Builder {
private AmazonS3 s3;
private S3Client s3v2;
private CloudWatchEventsClient cloudWatch;
- private AmazonECS ecs;
+ private EcsClient ecs;
private EcrClient ecr;
private EmrClient emr;
private EmrServerlessClient emrServerless;
@@ -142,7 +140,7 @@ public Builder cloudWatch(CloudWatchEventsClient cloudWatch) {
return this;
}
- public Builder ecs(AmazonECS ecs) {
+ public Builder ecs(EcsClient ecs) {
this.ecs = ecs;
return this;
}
diff --git a/java/clients/src/main/java/sleeper/clients/teardown/TearDownInstance.java b/java/clients/src/main/java/sleeper/clients/teardown/TearDownInstance.java
index 3b35a7a6b3..560ac767b6 100644
--- a/java/clients/src/main/java/sleeper/clients/teardown/TearDownInstance.java
+++ b/java/clients/src/main/java/sleeper/clients/teardown/TearDownInstance.java
@@ -126,7 +126,8 @@ public static Builder builder() {
private static InstanceProperties loadInstancePropertiesOrGenerateDefaults(AmazonS3 s3, String instanceId, Path scriptsDir) {
if (instanceId == null) {
- InstanceProperties instanceProperties = LoadLocalProperties.loadInstancePropertiesFromDirectory(scriptsDir.resolve("generated"));
+ InstanceProperties instanceProperties = LoadLocalProperties
+ .loadInstancePropertiesNoValidationFromDirectory(scriptsDir.resolve("generated"));
instanceId = instanceProperties.get(ID);
}
return loadInstancePropertiesOrGenerateDefaults(s3, instanceId);
@@ -135,7 +136,7 @@ private static InstanceProperties loadInstancePropertiesOrGenerateDefaults(Amazo
public static InstanceProperties loadInstancePropertiesOrGenerateDefaults(AmazonS3 s3, String instanceId) {
LOGGER.info("Loading configuration for instance {}", instanceId);
try {
- return S3InstanceProperties.loadGivenInstanceId(s3, instanceId);
+ return S3InstanceProperties.loadGivenInstanceIdNoValidation(s3, instanceId);
} catch (AmazonS3Exception e) {
LOGGER.info("Failed to download configuration, using default properties");
return PopulateInstanceProperties.generateTearDownDefaultsFromInstanceId(instanceId);
diff --git a/java/clients/src/main/java/sleeper/clients/teardown/TerminateEMRClusters.java b/java/clients/src/main/java/sleeper/clients/teardown/TerminateEMRClusters.java
index 636269d052..811283bf8a 100644
--- a/java/clients/src/main/java/sleeper/clients/teardown/TerminateEMRClusters.java
+++ b/java/clients/src/main/java/sleeper/clients/teardown/TerminateEMRClusters.java
@@ -24,6 +24,7 @@
import sleeper.core.util.PollWithRetries;
import sleeper.core.util.StaticRateLimit;
+import sleeper.core.util.ThreadSleep;
import java.time.Duration;
import java.util.List;
@@ -42,11 +43,13 @@ public class TerminateEMRClusters {
private final EmrClient emrClient;
private final String clusterPrefix;
private final StaticRateLimit listActiveClustersLimit;
+ private final ThreadSleep threadSleep;
- public TerminateEMRClusters(EmrClient emrClient, String instanceId, StaticRateLimit listActiveClustersLimit) {
+ public TerminateEMRClusters(EmrClient emrClient, String instanceId, StaticRateLimit listActiveClustersLimit, ThreadSleep threadSleep) {
this.emrClient = emrClient;
this.clusterPrefix = "sleeper-" + instanceId + "-";
this.listActiveClustersLimit = listActiveClustersLimit;
+ this.threadSleep = threadSleep;
}
public void run() throws InterruptedException {
@@ -75,7 +78,7 @@ private void terminateClusters(List clusters) {
LOGGER.info("Terminated {} clusters out of {}", endIndex, clusters.size());
// Sustained limit of 0.5 calls per second
// See https://docs.aws.amazon.com/general/latest/gr/emr.html
- sleepForSustainedRatePerSecond(0.2);
+ sleepForSustainedRatePerSecond(0.2, threadSleep);
}
}
@@ -103,7 +106,7 @@ public static void main(String[] args) throws InterruptedException {
String instanceId = args[0];
try (EmrClient emrClient = EmrClient.create()) {
- TerminateEMRClusters terminateClusters = new TerminateEMRClusters(emrClient, instanceId, StaticRateLimit.none());
+ TerminateEMRClusters terminateClusters = new TerminateEMRClusters(emrClient, instanceId, StaticRateLimit.none(), Thread::sleep);
terminateClusters.run();
}
}
diff --git a/java/clients/src/main/java/sleeper/clients/util/EstimateSplitPoints.java b/java/clients/src/main/java/sleeper/clients/util/EstimateSplitPoints.java
index aa99c74f48..5599782d96 100644
--- a/java/clients/src/main/java/sleeper/clients/util/EstimateSplitPoints.java
+++ b/java/clients/src/main/java/sleeper/clients/util/EstimateSplitPoints.java
@@ -22,15 +22,17 @@
import sleeper.core.schema.Field;
import sleeper.core.schema.Schema;
import sleeper.core.schema.type.ByteArrayType;
+import sleeper.sketches.Sketches;
import java.util.Arrays;
import java.util.Collections;
-import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.SortedSet;
import java.util.TreeSet;
-import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static java.util.stream.Collectors.toList;
public class EstimateSplitPoints {
private final Field rowKey1;
@@ -58,7 +60,7 @@ public List