Add cluster_elb_service_role IAM policy to allow creation of ELB service-linked role #72

Merged
merged 2 commits on Aug 23, 2020
2 changes: 2 additions & 0 deletions .gitignore
@@ -11,5 +11,7 @@
**/.build-harness
**/build-harness

**/pkg

# Rendered yaml config
**/configmap-auth.yaml
2 changes: 1 addition & 1 deletion examples/complete/fixtures.us-east-2.tfvars
@@ -8,7 +8,7 @@ stage = "test"

name = "eks"

kubernetes_version = "1.15"
kubernetes_version = "1.17"

oidc_provider_enabled = true

6 changes: 3 additions & 3 deletions examples/complete/main.tf
@@ -27,7 +27,7 @@ locals {
}

module "vpc" {
source = "git::https:/cloudposse/terraform-aws-vpc.git?ref=tags/0.8.1"
source = "git::https:/cloudposse/terraform-aws-vpc.git?ref=tags/0.16.1"
namespace = var.namespace
stage = var.stage
name = var.name
@@ -37,7 +37,7 @@ module "vpc" {
}

module "subnets" {
source = "git::https:/cloudposse/terraform-aws-dynamic-subnets.git?ref=tags/0.19.0"
source = "git::https:/cloudposse/terraform-aws-dynamic-subnets.git?ref=tags/0.27.0"
availability_zones = var.availability_zones
namespace = var.namespace
stage = var.stage
@@ -81,7 +81,7 @@ data "null_data_source" "wait_for_cluster_and_kubernetes_configmap" {
}

module "eks_node_group" {
source = "git::https:/cloudposse/terraform-aws-eks-node-group.git?ref=tags/0.4.0"
source = "git::https:/cloudposse/terraform-aws-eks-node-group.git?ref=tags/0.7.1"
namespace = var.namespace
stage = var.stage
name = var.name
2 changes: 1 addition & 1 deletion examples/complete/variables.tf
@@ -43,7 +43,7 @@ variable "tags" {

variable "kubernetes_version" {
type = string
default = "1.15"
default = "1.17"
description = "Desired Kubernetes master version. If you do not specify a value, the latest available version is used"
}

2 changes: 1 addition & 1 deletion examples/complete/versions.tf
@@ -1,5 +1,5 @@
terraform {
required_version = "~> 0.12.0"
required_version = ">= 0.12.0"

required_providers {
aws = "~> 2.0"
59 changes: 59 additions & 0 deletions iam.tf
@@ -0,0 +1,59 @@
data "aws_iam_policy_document" "assume_role" {
count = var.enabled ? 1 : 0

statement {
effect = "Allow"
actions = ["sts:AssumeRole"]

principals {
type = "Service"
identifiers = ["eks.amazonaws.com"]
}
}
}

resource "aws_iam_role" "default" {
count = var.enabled ? 1 : 0
name = module.label.id
assume_role_policy = join("", data.aws_iam_policy_document.assume_role.*.json)
tags = module.label.tags
}

resource "aws_iam_role_policy_attachment" "amazon_eks_cluster_policy" {
count = var.enabled ? 1 : 0
policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", join("", data.aws_partition.current.*.partition))
role = join("", aws_iam_role.default.*.name)
}

resource "aws_iam_role_policy_attachment" "amazon_eks_service_policy" {
count = var.enabled ? 1 : 0
policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", join("", data.aws_partition.current.*.partition))
role = join("", aws_iam_role.default.*.name)
}

# The AmazonEKSClusterPolicy managed policy doesn't contain all of the permissions necessary to create
# the ELB service-linked role required during load balancer provisioning by Kubernetes.
# Because of that, on a new AWS account (where load balancers have not been provisioned yet), `nginx-ingress` fails to provision a load balancer.

data "aws_iam_policy_document" "cluster_elb_service_role" {
count = var.enabled ? 1 : 0

statement {
effect = "Allow"
actions = [
"ec2:DescribeAccountAttributes",
"ec2:DescribeAddresses",
"ec2:DescribeInternetGateways",
"elasticloadbalancing:SetIpAddressType",
"elasticloadbalancing:SetSubnets"
]
resources = ["*"]
}
}

resource "aws_iam_role_policy" "cluster_elb_service_role" {
count = var.enabled ? 1 : 0
name = module.label.id
role = join("", aws_iam_role.default.*.name)
policy = join("", data.aws_iam_policy_document.cluster_elb_service_role.*.json)
}
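
For context (not part of this PR's diff): the two cluster_elb_service_role resources above are roughly equivalent to the single jsonencode-based inline policy sketched below. The PR keeps the aws_iam_policy_document data source; this alternative form is shown only to make the rendered permissions explicit.

# Illustrative sketch only, not part of this PR: the same inline policy expressed
# with jsonencode instead of the aws_iam_policy_document data source above.
resource "aws_iam_role_policy" "cluster_elb_service_role" {
  count = var.enabled ? 1 : 0
  name  = module.label.id
  role  = join("", aws_iam_role.default.*.name)

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect = "Allow"
      Action = [
        "ec2:DescribeAccountAttributes",
        "ec2:DescribeAddresses",
        "ec2:DescribeInternetGateways",
        "elasticloadbalancing:SetIpAddressType",
        "elasticloadbalancing:SetSubnets",
      ]
      Resource = "*"
    }]
  })
}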
85 changes: 0 additions & 85 deletions main.tf
@@ -21,91 +21,6 @@ data "aws_partition" "current" {
count = var.enabled ? 1 : 0
}

data "aws_iam_policy_document" "assume_role" {
count = var.enabled ? 1 : 0

statement {
effect = "Allow"
actions = ["sts:AssumeRole"]

principals {
type = "Service"
identifiers = ["eks.amazonaws.com"]
}
}
}

resource "aws_iam_role" "default" {
count = var.enabled ? 1 : 0
name = module.label.id
assume_role_policy = join("", data.aws_iam_policy_document.assume_role.*.json)
tags = module.label.tags
}

resource "aws_iam_role_policy_attachment" "amazon_eks_cluster_policy" {
count = var.enabled ? 1 : 0
policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", join("", data.aws_partition.current.*.partition))
role = join("", aws_iam_role.default.*.name)
}

resource "aws_iam_role_policy_attachment" "amazon_eks_service_policy" {
count = var.enabled ? 1 : 0
policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", join("", data.aws_partition.current.*.partition))
role = join("", aws_iam_role.default.*.name)
}

resource "aws_security_group" "default" {
count = var.enabled ? 1 : 0
name = module.label.id
description = "Security Group for EKS cluster"
vpc_id = var.vpc_id
tags = module.label.tags
}

resource "aws_security_group_rule" "egress" {
count = var.enabled ? 1 : 0
description = "Allow all egress traffic"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = join("", aws_security_group.default.*.id)
type = "egress"
}

resource "aws_security_group_rule" "ingress_workers" {
count = var.enabled ? length(var.workers_security_group_ids) : 0
description = "Allow the cluster to receive communication from the worker nodes"
from_port = 0
to_port = 65535
protocol = "-1"
source_security_group_id = var.workers_security_group_ids[count.index]
security_group_id = join("", aws_security_group.default.*.id)
type = "ingress"
}

resource "aws_security_group_rule" "ingress_security_groups" {
count = var.enabled ? length(var.allowed_security_groups) : 0
description = "Allow inbound traffic from existing Security Groups"
from_port = 0
to_port = 65535
protocol = "-1"
source_security_group_id = var.allowed_security_groups[count.index]
security_group_id = join("", aws_security_group.default.*.id)
type = "ingress"
}

resource "aws_security_group_rule" "ingress_cidr_blocks" {
count = var.enabled && length(var.allowed_cidr_blocks) > 0 ? 1 : 0
description = "Allow inbound traffic from CIDR blocks"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = var.allowed_cidr_blocks
security_group_id = join("", aws_security_group.default.*.id)
type = "ingress"
}

resource "aws_cloudwatch_log_group" "default" {
count = var.enabled && length(var.enabled_cluster_log_types) > 0 ? 1 : 0
name = "/aws/eks/${module.label.id}/cluster"
51 changes: 51 additions & 0 deletions sg.tf
@@ -0,0 +1,51 @@
resource "aws_security_group" "default" {
count = var.enabled ? 1 : 0
name = module.label.id
description = "Security Group for EKS cluster"
vpc_id = var.vpc_id
tags = module.label.tags
}

resource "aws_security_group_rule" "egress" {
count = var.enabled ? 1 : 0
description = "Allow all egress traffic"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = join("", aws_security_group.default.*.id)
type = "egress"
}

resource "aws_security_group_rule" "ingress_workers" {
count = var.enabled ? length(var.workers_security_group_ids) : 0
description = "Allow the cluster to receive communication from the worker nodes"
from_port = 0
to_port = 65535
protocol = "-1"
source_security_group_id = var.workers_security_group_ids[count.index]
security_group_id = join("", aws_security_group.default.*.id)
type = "ingress"
}

resource "aws_security_group_rule" "ingress_security_groups" {
count = var.enabled ? length(var.allowed_security_groups) : 0
description = "Allow inbound traffic from existing Security Groups"
from_port = 0
to_port = 65535
protocol = "-1"
source_security_group_id = var.allowed_security_groups[count.index]
security_group_id = join("", aws_security_group.default.*.id)
type = "ingress"
}

resource "aws_security_group_rule" "ingress_cidr_blocks" {
count = var.enabled && length(var.allowed_cidr_blocks) > 0 ? 1 : 0
description = "Allow inbound traffic from CIDR blocks"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = var.allowed_cidr_blocks
security_group_id = join("", aws_security_group.default.*.id)
type = "ingress"
}
6 changes: 3 additions & 3 deletions test/src/Makefile
@@ -1,5 +1,5 @@
export TF_DATA_DIR ?= $(CURDIR)/.terraform
export TF_CLI_ARGS_init ?= -get-plugins=true
export TERRAFORM_VERSION ?= $(shell curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version' | cut -d. -f1-2)

.DEFAULT_GOAL : all

@@ -21,10 +21,10 @@ test: init
## Run tests in docker container
docker/test:
docker run --name terratest --rm -it -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_SESSION_TOKEN -e GITHUB_TOKEN \
-e PATH="/usr/local/terraform/0.12/bin:/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
-e PATH="/usr/local/terraform/$(TERRAFORM_VERSION)/bin:/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
-v $(CURDIR)/../../:/module/ cloudposse/test-harness:latest -C /module/test/src test

.PHONY : clean
## Clean up files
clean:
rm -rf $(TF_DATA_DIR) ../../examples/complete/*.tfstate*
rm -rf ../../examples/complete/*.tfstate*
2 changes: 0 additions & 2 deletions test/src/go.sum
@@ -361,8 +361,6 @@ k8s.io/apimachinery v0.18.5/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCk
k8s.io/client-go v0.16.8/go.mod h1:WmPuN0yJTKHXoklExKxzo3jSXmr3EnN+65uaTb5VuNs=
k8s.io/client-go v0.17.0 h1:8QOGvUGdqDMFrm9sD6IUFl256BcffynGoe80sxgTEDg=
k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=
k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
k8s.io/code-generator v0.16.8/go.mod h1:wFdrXdVi/UC+xIfLi+4l9elsTT/uEF61IfcN2wOLULQ=
k8s.io/component-base v0.16.8/go.mod h1:Q8UWOWShpP3MZZny4n/15gOncfaaVtc9SbCdkM5MhUE=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=