From 6bfa649cb6ac1ea9963b349ba342c75a495249de Mon Sep 17 00:00:00 2001
From: Andriy Knysh
Date: Sun, 23 Aug 2020 00:37:36 -0400
Subject: [PATCH] Add `cluster_elb_service_role` IAM policy to allow creation of ELB service-linked role (#72)
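
The AmazonEKSClusterPolicy managed policy does not include the permissions the
cluster needs to create the ELB service-linked role, so on a fresh AWS account
(where no load balancer has ever been provisioned) `nginx-ingress` cannot get a
load balancer provisioned. This change attaches an additional inline policy,
`cluster_elb_service_role`, to the cluster IAM role and moves the IAM and
security group resources into dedicated `iam.tf` and `sg.tf` files.

For context only (this snippet is not part of the change, and the resource name
`elb` is illustrative): on accounts where the role is still missing, the ELB
service-linked role can also be pre-created out of band, e.g. with a minimal
sketch like:

    resource "aws_iam_service_linked_role" "elb" {
      # Pre-create the service-linked role that Kubernetes would otherwise
      # try to create when the first load balancer is provisioned.
      aws_service_name = "elasticloadbalancing.amazonaws.com"
    }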
---
 .gitignore                                  |  2 +
 examples/complete/fixtures.us-east-2.tfvars |  2 +-
 examples/complete/main.tf                   |  6 +-
 examples/complete/variables.tf              |  2 +-
 examples/complete/versions.tf               |  2 +-
 iam.tf                                      | 59 ++++++++++++++
 main.tf                                     | 85 ---------------------
 sg.tf                                       | 51 +++++++++++++
 test/src/Makefile                           |  6 +-
 test/src/go.sum                             |  2 -
 10 files changed, 121 insertions(+), 96 deletions(-)
 create mode 100644 iam.tf
 create mode 100644 sg.tf

diff --git a/.gitignore b/.gitignore
index 13f63cb4..39604ce5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,5 +11,7 @@
 **/.build-harness
 **/build-harness
 
+**/pkg
+
 # Rendered yaml config
 **/configmap-auth.yaml
diff --git a/examples/complete/fixtures.us-east-2.tfvars b/examples/complete/fixtures.us-east-2.tfvars
index 5853698f..ec72312e 100644
--- a/examples/complete/fixtures.us-east-2.tfvars
+++ b/examples/complete/fixtures.us-east-2.tfvars
@@ -8,7 +8,7 @@ stage = "test"
 
 name = "eks"
 
-kubernetes_version = "1.15"
+kubernetes_version = "1.17"
 
 oidc_provider_enabled = true
 
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 9f989077..28897f37 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -27,7 +27,7 @@ locals {
 }
 
 module "vpc" {
-  source     = "git::https://github.com/cloudposse/terraform-aws-vpc.git?ref=tags/0.8.1"
+  source     = "git::https://github.com/cloudposse/terraform-aws-vpc.git?ref=tags/0.16.1"
   namespace  = var.namespace
   stage      = var.stage
   name       = var.name
@@ -37,7 +37,7 @@ module "vpc" {
 }
 
 module "subnets" {
-  source             = "git::https://github.com/cloudposse/terraform-aws-dynamic-subnets.git?ref=tags/0.19.0"
+  source             = "git::https://github.com/cloudposse/terraform-aws-dynamic-subnets.git?ref=tags/0.27.0"
   availability_zones = var.availability_zones
   namespace          = var.namespace
   stage              = var.stage
@@ -81,7 +81,7 @@ data "null_data_source" "wait_for_cluster_and_kubernetes_configmap" {
 }
 
 module "eks_node_group" {
-  source    = "git::https://github.com/cloudposse/terraform-aws-eks-node-group.git?ref=tags/0.4.0"
+  source    = "git::https://github.com/cloudposse/terraform-aws-eks-node-group.git?ref=tags/0.7.1"
   namespace = var.namespace
   stage     = var.stage
   name      = var.name
diff --git a/examples/complete/variables.tf b/examples/complete/variables.tf
index fae76e50..abaf2453 100644
--- a/examples/complete/variables.tf
+++ b/examples/complete/variables.tf
@@ -43,7 +43,7 @@ variable "tags" {
 
 variable "kubernetes_version" {
   type        = string
-  default     = "1.15"
+  default     = "1.17"
   description = "Desired Kubernetes master version. If you do not specify a value, the latest available version is used"
 }
 
diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf
index 62bb08b5..ce2b5be8 100644
--- a/examples/complete/versions.tf
+++ b/examples/complete/versions.tf
@@ -1,5 +1,5 @@
 terraform {
-  required_version = "~> 0.12.0"
+  required_version = ">= 0.12.0"
 
   required_providers {
     aws = "~> 2.0"
diff --git a/iam.tf b/iam.tf
new file mode 100644
index 00000000..f0785f8b
--- /dev/null
+++ b/iam.tf
@@ -0,0 +1,59 @@
+data "aws_iam_policy_document" "assume_role" {
+  count = var.enabled ? 1 : 0
+
+  statement {
+    effect  = "Allow"
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["eks.amazonaws.com"]
+    }
+  }
+}
+
+resource "aws_iam_role" "default" {
+  count              = var.enabled ? 1 : 0
+  name               = module.label.id
+  assume_role_policy = join("", data.aws_iam_policy_document.assume_role.*.json)
+  tags               = module.label.tags
+}
+
+resource "aws_iam_role_policy_attachment" "amazon_eks_cluster_policy" {
+  count      = var.enabled ? 1 : 0
+  policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", join("", data.aws_partition.current.*.partition))
+  role       = join("", aws_iam_role.default.*.name)
+}
+
+resource "aws_iam_role_policy_attachment" "amazon_eks_service_policy" {
+  count      = var.enabled ? 1 : 0
+  policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", join("", data.aws_partition.current.*.partition))
+  role       = join("", aws_iam_role.default.*.name)
+}
+
+# The AmazonEKSClusterPolicy managed policy doesn't contain all of the permissions needed to create the
+# ELB service-linked role that Kubernetes requires when provisioning a load balancer.
+# Because of that, on a new AWS account (where load balancers have not been provisioned yet), `nginx-ingress` fails to provision a load balancer.
+
+data "aws_iam_policy_document" "cluster_elb_service_role" {
+  count = var.enabled ? 1 : 0
+
+  statement {
+    effect = "Allow"
+    actions = [
+      "ec2:DescribeAccountAttributes",
+      "ec2:DescribeAddresses",
+      "ec2:DescribeInternetGateways",
+      "elasticloadbalancing:SetIpAddressType",
+      "elasticloadbalancing:SetSubnets"
+    ]
+    resources = ["*"]
+  }
+}
+
+resource "aws_iam_role_policy" "cluster_elb_service_role" {
+  count  = var.enabled ? 1 : 0
+  name   = module.label.id
+  role   = join("", aws_iam_role.default.*.name)
+  policy = join("", data.aws_iam_policy_document.cluster_elb_service_role.*.json)
+}
diff --git a/main.tf b/main.tf
index fd0310ae..d9bc3d70 100644
--- a/main.tf
+++ b/main.tf
@@ -21,91 +21,6 @@ data "aws_partition" "current" {
   count = var.enabled ? 1 : 0
 }
 
-data "aws_iam_policy_document" "assume_role" {
-  count = var.enabled ? 1 : 0
-
-  statement {
-    effect  = "Allow"
-    actions = ["sts:AssumeRole"]
-
-    principals {
-      type        = "Service"
-      identifiers = ["eks.amazonaws.com"]
-    }
-  }
-}
-
-resource "aws_iam_role" "default" {
-  count              = var.enabled ? 1 : 0
-  name               = module.label.id
-  assume_role_policy = join("", data.aws_iam_policy_document.assume_role.*.json)
-  tags               = module.label.tags
-}
-
-resource "aws_iam_role_policy_attachment" "amazon_eks_cluster_policy" {
-  count      = var.enabled ? 1 : 0
-  policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", join("", data.aws_partition.current.*.partition))
-  role       = join("", aws_iam_role.default.*.name)
-}
-
-resource "aws_iam_role_policy_attachment" "amazon_eks_service_policy" {
-  count      = var.enabled ? 1 : 0
-  policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", join("", data.aws_partition.current.*.partition))
-  role       = join("", aws_iam_role.default.*.name)
-}
-
-resource "aws_security_group" "default" {
-  count       = var.enabled ? 1 : 0
-  name        = module.label.id
-  description = "Security Group for EKS cluster"
-  vpc_id      = var.vpc_id
-  tags        = module.label.tags
-}
-
-resource "aws_security_group_rule" "egress" {
-  count             = var.enabled ? 1 : 0
-  description       = "Allow all egress traffic"
-  from_port         = 0
-  to_port           = 0
-  protocol          = "-1"
-  cidr_blocks       = ["0.0.0.0/0"]
-  security_group_id = join("", aws_security_group.default.*.id)
-  type              = "egress"
-}
-
-resource "aws_security_group_rule" "ingress_workers" {
-  count                    = var.enabled ? length(var.workers_security_group_ids) : 0
-  description              = "Allow the cluster to receive communication from the worker nodes"
-  from_port                = 0
-  to_port                  = 65535
-  protocol                 = "-1"
-  source_security_group_id = var.workers_security_group_ids[count.index]
-  security_group_id        = join("", aws_security_group.default.*.id)
-  type                     = "ingress"
-}
-
-resource "aws_security_group_rule" "ingress_security_groups" {
-  count                    = var.enabled ? length(var.allowed_security_groups) : 0
-  description              = "Allow inbound traffic from existing Security Groups"
-  from_port                = 0
-  to_port                  = 65535
-  protocol                 = "-1"
-  source_security_group_id = var.allowed_security_groups[count.index]
-  security_group_id        = join("", aws_security_group.default.*.id)
-  type                     = "ingress"
-}
-
-resource "aws_security_group_rule" "ingress_cidr_blocks" {
-  count             = var.enabled && length(var.allowed_cidr_blocks) > 0 ? 1 : 0
-  description       = "Allow inbound traffic from CIDR blocks"
-  from_port         = 0
-  to_port           = 65535
-  protocol          = "-1"
-  cidr_blocks       = var.allowed_cidr_blocks
-  security_group_id = join("", aws_security_group.default.*.id)
-  type              = "ingress"
-}
-
 resource "aws_cloudwatch_log_group" "default" {
   count = var.enabled && length(var.enabled_cluster_log_types) > 0 ? 1 : 0
   name  = "/aws/eks/${module.label.id}/cluster"
diff --git a/sg.tf b/sg.tf
new file mode 100644
index 00000000..6c57d35b
--- /dev/null
+++ b/sg.tf
@@ -0,0 +1,51 @@
+resource "aws_security_group" "default" {
+  count       = var.enabled ? 1 : 0
+  name        = module.label.id
+  description = "Security Group for EKS cluster"
+  vpc_id      = var.vpc_id
+  tags        = module.label.tags
+}
+
+resource "aws_security_group_rule" "egress" {
+  count             = var.enabled ? 1 : 0
+  description       = "Allow all egress traffic"
+  from_port         = 0
+  to_port           = 0
+  protocol          = "-1"
+  cidr_blocks       = ["0.0.0.0/0"]
+  security_group_id = join("", aws_security_group.default.*.id)
+  type              = "egress"
+}
+
+resource "aws_security_group_rule" "ingress_workers" {
+  count                    = var.enabled ? length(var.workers_security_group_ids) : 0
+  description              = "Allow the cluster to receive communication from the worker nodes"
+  from_port                = 0
+  to_port                  = 65535
+  protocol                 = "-1"
+  source_security_group_id = var.workers_security_group_ids[count.index]
+  security_group_id        = join("", aws_security_group.default.*.id)
+  type                     = "ingress"
+}
+
+resource "aws_security_group_rule" "ingress_security_groups" {
+  count                    = var.enabled ? length(var.allowed_security_groups) : 0
+  description              = "Allow inbound traffic from existing Security Groups"
+  from_port                = 0
+  to_port                  = 65535
+  protocol                 = "-1"
+  source_security_group_id = var.allowed_security_groups[count.index]
+  security_group_id        = join("", aws_security_group.default.*.id)
+  type                     = "ingress"
+}
+
+resource "aws_security_group_rule" "ingress_cidr_blocks" {
+  count             = var.enabled && length(var.allowed_cidr_blocks) > 0 ? 1 : 0
+  description       = "Allow inbound traffic from CIDR blocks"
+  from_port         = 0
+  to_port           = 65535
+  protocol          = "-1"
+  cidr_blocks       = var.allowed_cidr_blocks
+  security_group_id = join("", aws_security_group.default.*.id)
+  type              = "ingress"
+}
diff --git a/test/src/Makefile b/test/src/Makefile
index aa2aa726..2707cd23 100644
--- a/test/src/Makefile
+++ b/test/src/Makefile
@@ -1,5 +1,5 @@
-export TF_DATA_DIR ?= $(CURDIR)/.terraform
 export TF_CLI_ARGS_init ?= -get-plugins=true
+export TERRAFORM_VERSION ?= $(shell curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version' | cut -d. -f1-2)
 
 .DEFAULT_GOAL : all
 
@@ -21,10 +21,10 @@ test: init
 ## Run tests in docker container
 docker/test:
 	docker run --name terratest --rm -it -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_SESSION_TOKEN -e GITHUB_TOKEN \
-	-e PATH="/usr/local/terraform/0.12/bin:/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
+	-e PATH="/usr/local/terraform/$(TERRAFORM_VERSION)/bin:/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
 	-v $(CURDIR)/../../:/module/ cloudposse/test-harness:latest -C /module/test/src test
 
 .PHONY : clean
 ## Clean up files
 clean:
-	rm -rf $(TF_DATA_DIR) ../../examples/complete/*.tfstate*
+	rm -rf ../../examples/complete/*.tfstate*
diff --git a/test/src/go.sum b/test/src/go.sum
index b256968c..4d8d86da 100644
--- a/test/src/go.sum
+++ b/test/src/go.sum
@@ -361,8 +361,6 @@ k8s.io/apimachinery v0.18.5/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCk
 k8s.io/client-go v0.16.8/go.mod h1:WmPuN0yJTKHXoklExKxzo3jSXmr3EnN+65uaTb5VuNs=
 k8s.io/client-go v0.17.0 h1:8QOGvUGdqDMFrm9sD6IUFl256BcffynGoe80sxgTEDg=
 k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
-k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=
-k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
 k8s.io/code-generator v0.16.8/go.mod h1:wFdrXdVi/UC+xIfLi+4l9elsTT/uEF61IfcN2wOLULQ=
 k8s.io/component-base v0.16.8/go.mod h1:Q8UWOWShpP3MZZny4n/15gOncfaaVtc9SbCdkM5MhUE=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=