From d7b5e74e291f5895607ee7d49c934e246eac54ec Mon Sep 17 00:00:00 2001 From: jpolchlo Date: Tue, 14 Feb 2023 10:00:58 -0500 Subject: [PATCH 1/3] Discontinue the use of the infrastructure module; it was not being used elsewhere, and so the benefits of being a separate module were questionable; in the end, however, I didn't really gain much from defactoring it, as it didn't make it possible to again use the kubernetes provider in the setup of this stage; though I did figure out how to get around this, that solution is independent of the defactor (cold start flag); this would be much easier to swallow if this change did not require a complete tear-down and rebuild of the cluster to put into use --- .../aws-terraform/0-hardware/cluster.tf | 132 ++++++++++++++++-- deployment/aws-terraform/0-hardware/config.tf | 21 +++ deployment/aws-terraform/0-hardware/ebs.tf | 57 ++++++++ deployment/aws-terraform/0-hardware/irsa.tf | 16 +++ deployment/aws-terraform/0-hardware/kms.tf | 14 ++ deployment/aws-terraform/0-hardware/locals.tf | 11 ++ deployment/aws-terraform/0-hardware/output.tf | 2 +- .../aws-terraform/0-hardware/variables.tf | 24 ++++ deployment/aws-terraform/0-hardware/vpc.tf | 29 ++++ .../aws-terraform/1-services/providers.tf | 5 + deployment/aws-terraform/1-services/rbac.tf | 16 +++ deployment/aws-terraform/1-services/rds.tf | 1 + .../aws-terraform/1-services/variables.tf | 5 + 13 files changed, 322 insertions(+), 11 deletions(-) create mode 100644 deployment/aws-terraform/0-hardware/ebs.tf create mode 100644 deployment/aws-terraform/0-hardware/irsa.tf create mode 100644 deployment/aws-terraform/0-hardware/kms.tf create mode 100644 deployment/aws-terraform/0-hardware/locals.tf create mode 100644 deployment/aws-terraform/0-hardware/vpc.tf create mode 100644 deployment/aws-terraform/1-services/rbac.tf diff --git a/deployment/aws-terraform/0-hardware/cluster.tf b/deployment/aws-terraform/0-hardware/cluster.tf index 0a920ae..8012198 100644 --- 
a/deployment/aws-terraform/0-hardware/cluster.tf +++ b/deployment/aws-terraform/0-hardware/cluster.tf @@ -1,11 +1,123 @@ -module "k8s" { - source="../../../modules/aws/infrastructure" - - app_name=var.project_prefix - environment=var.environment - aws_region=var.aws_region - cluster_version=var.cluster_version - num_base_instances=var.num_base_instances - base_instance_type=var.base_instance_type - user_map=var.user_map +# module "k8s" { +# source="../../../modules/aws/infrastructure" + +# app_name=var.project_prefix +# environment=var.environment +# aws_region=var.aws_region +# cluster_version=var.cluster_version +# num_base_instances=var.num_base_instances +# base_instance_type=var.base_instance_type +# user_map=var.user_map +# role_map=var.role_map +# } + +module "eks" { + source = "terraform-aws-modules/eks/aws" + version = "18.31.2" + + cluster_name = local.cluster_name + cluster_version = var.cluster_version + cluster_endpoint_private_access = true + cluster_endpoint_public_access = true + + cluster_addons = { + coredns = { + resolve_conflicts = "OVERWRITE" + } + kube-proxy = {} + vpc-cni = { + resolve_conflicts = "OVERWRITE" + service_account_role_arn = module.vpc_cni_irsa.iam_role_arn + } + aws-ebs-csi-driver = {} + } + + # cluster_encryption_config = [{ + # provider_key_arn = aws_kms_key.eks.arn + # resources = ["secrets"] + # }] + + cluster_tags = { + # This should not affect the name of the cluster primary security group + # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006 + # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008 + Name = var.project_prefix + GithubRepo = var.repo_name + GithubOrg = "azavea" + } + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + + # This feature doesn't always work when creating a new cluster from scratch. + # Allow the cold start flag to dictate if this is our first time applying. 
+ # Cluster users won't be properly set up until the second go around (cold_start=false). + manage_aws_auth_configmap = !var.cold_start + aws_auth_roles = var.role_map + aws_auth_users = var.user_map + + # Extend cluster security group rules + cluster_security_group_additional_rules = { + egress_nodes_ephemeral_ports_tcp = { + description = "To node 1025-65535" + protocol = "tcp" + from_port = 1025 + to_port = 65535 + type = "egress" + source_node_security_group = true + } + } + + # Extend node-to-node security group rules + node_security_group_additional_rules = { + ingress_self_all = { + description = "Node to node all ports/protocols" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "ingress" + self = true + } + egress_all = { + description = "Node all egress" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "egress" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = ["::/0"] + } + } + + eks_managed_node_group_defaults = { + ami_type = "AL2_x86_64" + instance_types = [var.base_instance_type] + + iam_role_attach_cni_policy = true + } + + eks_managed_node_groups = { + base = { + create_launch_template = false + launch_template_name = "" + instance_types = [var.base_instance_type] + capacity_type = var.base_instance_capacity_type + min_size = 1 + max_size = var.num_base_instances + desired_size = var.num_base_instances + labels = { + node-type = "core" + "hub.jupyter.org/node-purpose" = "core" + } + } + } + + tags = local.tags } + +# resource "null_resource" "kubectl" { +# depends_on = [module.eks.kubeconfig] +# provisioner "local-exec" { +# command = "aws eks --region ${var.aws_region} update-kubeconfig --name ${module.eks.cluster_id}" +# } +# } diff --git a/deployment/aws-terraform/0-hardware/config.tf b/deployment/aws-terraform/0-hardware/config.tf index d295e11..08c6017 100644 --- a/deployment/aws-terraform/0-hardware/config.tf +++ b/deployment/aws-terraform/0-hardware/config.tf @@ -1,8 +1,29 @@ provider "aws" {} +provider "kubernetes" { + host 
= module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } +} + terraform { + required_version = ">= 1.0.0" + backend "s3" { region = local.region encrypt = "true" } + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 4.18.0" + } + } } diff --git a/deployment/aws-terraform/0-hardware/ebs.tf b/deployment/aws-terraform/0-hardware/ebs.tf new file mode 100644 index 0000000..e0fca77 --- /dev/null +++ b/deployment/aws-terraform/0-hardware/ebs.tf @@ -0,0 +1,57 @@ +# data "aws_caller_identity" "current" {} + +# # This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes +# data "aws_iam_policy_document" "ebs" { +# # Copy of default KMS policy that lets you manage it +# statement { +# sid = "Enable IAM User Permissions" +# actions = ["kms:*"] +# resources = ["*"] + +# principals { +# type = "AWS" +# identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] +# } +# } + +# # Required for EKS +# statement { +# sid = "Allow service-linked role use of the CMK" +# actions = [ +# "kms:Encrypt", +# "kms:Decrypt", +# "kms:ReEncrypt*", +# "kms:GenerateDataKey*", +# "kms:DescribeKey" +# ] +# resources = ["*"] + +# principals { +# type = "AWS" +# identifiers = [ +# "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes +# module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs +# ] +# } +# } + +# statement { +# sid = "Allow attachment of persistent resources" +# 
actions = ["kms:CreateGrant"] +# resources = ["*"] + +# principals { +# type = "AWS" +# identifiers = [ +# "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes +# module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs +# ] +# } + +# condition { +# test = "Bool" +# variable = "kms:GrantIsForAWSResource" +# values = ["true"] +# } +# } +# } diff --git a/deployment/aws-terraform/0-hardware/irsa.tf b/deployment/aws-terraform/0-hardware/irsa.tf new file mode 100644 index 0000000..98f2d8a --- /dev/null +++ b/deployment/aws-terraform/0-hardware/irsa.tf @@ -0,0 +1,16 @@ +module "vpc_cni_irsa" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + + role_name_prefix = "VPC-CNI-IRSA" + attach_vpc_cni_policy = true + vpc_cni_enable_ipv4 = true + + oidc_providers = { + main = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:aws-node"] + } + } + + tags = local.tags +} diff --git a/deployment/aws-terraform/0-hardware/kms.tf b/deployment/aws-terraform/0-hardware/kms.tf new file mode 100644 index 0000000..f6490b2 --- /dev/null +++ b/deployment/aws-terraform/0-hardware/kms.tf @@ -0,0 +1,14 @@ +# # This key would be needed for cluster encryption +# # resource "aws_kms_key" "eks" { +# # description = "EKS Secret Encryption Key" +# # deletion_window_in_days = 7 +# # enable_key_rotation = true + +# # tags = local.tags +# # } + +# resource "aws_kms_key" "ebs" { +# description = "Customer managed key to encrypt EKS managed node group volumes" +# deletion_window_in_days = 7 +# policy = data.aws_iam_policy_document.ebs.json +# } diff --git a/deployment/aws-terraform/0-hardware/locals.tf b/deployment/aws-terraform/0-hardware/locals.tf new file mode 100644 index 0000000..4ccc239 --- /dev/null +++ 
b/deployment/aws-terraform/0-hardware/locals.tf @@ -0,0 +1,11 @@ +locals { + region = var.aws_region + cluster_name = "${var.project_prefix}-${var.environment}" + + tags = { + Name = var.project_prefix + Environment = var.environment + GithubRepo = var.repo_name + GithubOrg = "azavea" + } +} diff --git a/deployment/aws-terraform/0-hardware/output.tf b/deployment/aws-terraform/0-hardware/output.tf index be1e899..4209b15 100644 --- a/deployment/aws-terraform/0-hardware/output.tf +++ b/deployment/aws-terraform/0-hardware/output.tf @@ -1,5 +1,5 @@ output "cluster_arn" { - value = module.k8s.cluster.cluster_arn + value = module.eks.cluster_arn } output "cluster_name" { diff --git a/deployment/aws-terraform/0-hardware/variables.tf b/deployment/aws-terraform/0-hardware/variables.tf index d323622..196c2c7 100644 --- a/deployment/aws-terraform/0-hardware/variables.tf +++ b/deployment/aws-terraform/0-hardware/variables.tf @@ -13,12 +13,24 @@ variable "project_prefix" { description="The project name prefix used to identify cluster resources. This will be set by wrapper scripts; avoid setting in the .tfvars file!" 
} +variable "repo_name" { + type = string + description = "Name of the Github repo hosting the deployment (for tagging)" + default = "kubernetes" +} + variable "cluster_version" { type = string description = "The Kubernetes version to deploy" default = null } +variable "cold_start" { + type = bool + description = "A flag to indicate that this is the first time we are applying this base infrastructure; not all features are applied correctly for a brand new cluster; run once with this variable set to true; subsequent runs should set this to false" + default = false +} + variable "num_base_instances" { type = number description = "Number of instances to be provided in the base group" @@ -31,8 +43,20 @@ variable "base_instance_type" { default = "t3.medium" } +variable "base_instance_capacity_type" { + type = string + description = "The capacity type of the always-on core instance (SPOT, ON_DEMAND)" + default = "ON_DEMAND" +} + variable "user_map" { type = list(object({username: string, userarn: string, groups: list(string)})) description = "A list of {\"username\": string, \"userarn\": string, \"groups\": list(string)} objects describing the users who should have RBAC access to the cluster; note: system:masters should be reserved for those who need the highest level of admin access (including modifying RBAC)" default = [] } + +variable "role_map" { + type = list(object({rolearn: string, username: string, groups: list(string)})) + description = "A list of {\"rolearn\": string, \"username\": string, \"groups\": list(string)} objects describing the mapping of IAM roles to cluster users who should have RBAC access to the cluster; note: system:masters should be used for admin access" + default = [] +} diff --git a/deployment/aws-terraform/0-hardware/vpc.tf b/deployment/aws-terraform/0-hardware/vpc.tf new file mode 100644 index 0000000..e71d462 --- /dev/null +++ b/deployment/aws-terraform/0-hardware/vpc.tf @@ -0,0 +1,29 @@ +data "aws_availability_zones" "available" {} + 
+module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "3.2.0" + + name = "${var.project_prefix}-vpc" + cidr = "10.0.0.0/16" + azs = data.aws_availability_zones.available.names + private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + tags = { + "kubernetes.io/cluster/${local.cluster_name}" = "shared" + } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.cluster_name}" = "shared" + "kubernetes.io/role/elb" = "1" + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.cluster_name}" = "shared" + "kubernetes.io/role/internal-elb" = "1" + } +} diff --git a/deployment/aws-terraform/1-services/providers.tf b/deployment/aws-terraform/1-services/providers.tf index 4703147..bb1667a 100644 --- a/deployment/aws-terraform/1-services/providers.tf +++ b/deployment/aws-terraform/1-services/providers.tf @@ -7,6 +7,11 @@ terraform { } required_providers { + null = { + source = "hashicorp/null" + version = "3.1.0" + } + kubernetes = { source = "hashicorp/kubernetes" version = "~> 2.10.0" diff --git a/deployment/aws-terraform/1-services/rbac.tf b/deployment/aws-terraform/1-services/rbac.tf new file mode 100644 index 0000000..018baf2 --- /dev/null +++ b/deployment/aws-terraform/1-services/rbac.tf @@ -0,0 +1,16 @@ +resource "kubectl_manifest" "viewers_crb" { + yaml_body = <<-YAML +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: viewers +subjects: +- kind: Group + name: viewer # Name is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: view + apiGroup: rbac.authorization.k8s.io +YAML +} diff --git a/deployment/aws-terraform/1-services/rds.tf b/deployment/aws-terraform/1-services/rds.tf index 9ed7cdf..58a24ad 100644 --- a/deployment/aws-terraform/1-services/rds.tf +++ b/deployment/aws-terraform/1-services/rds.tf @@ 
-78,6 +78,7 @@ module "database" { backup_window = var.rds_backup_window maintenance_window = var.rds_maintenance_window auto_minor_version_upgrade = var.rds_auto_minor_version_upgrade + snapshot_identifier = var.rds_source_snapshot_identifier final_snapshot_identifier = var.rds_final_snapshot_identifier skip_final_snapshot = var.rds_skip_final_snapshot copy_tags_to_snapshot = var.rds_copy_tags_to_snapshot diff --git a/deployment/aws-terraform/1-services/variables.tf b/deployment/aws-terraform/1-services/variables.tf index e7dae1a..08b65fc 100644 --- a/deployment/aws-terraform/1-services/variables.tf +++ b/deployment/aws-terraform/1-services/variables.tf @@ -90,6 +90,11 @@ variable "rds_database_password" { default = null } +variable "rds_source_snapshot_identifier" { + type = string + default = null +} + variable "rds_final_snapshot_identifier" { default = "rds-snapshot" type = string From 75ec1789564b4086e9e208312f7801cb30d192ef Mon Sep 17 00:00:00 2001 From: jpolchlo Date: Tue, 14 Feb 2023 10:45:09 -0500 Subject: [PATCH 2/3] Remove unused infrastructure module --- modules/aws/infrastructure/config.tf | 20 ----- modules/aws/infrastructure/ebs.tf | 57 -------------- modules/aws/infrastructure/eks.tf | 106 --------------------------- modules/aws/infrastructure/inputs.tf | 48 ------------ modules/aws/infrastructure/irsa.tf | 16 ---- modules/aws/infrastructure/kms.tf | 14 ---- modules/aws/infrastructure/locals.tf | 11 --- modules/aws/infrastructure/output.tf | 11 --- modules/aws/infrastructure/rbac.tf | 20 ----- modules/aws/infrastructure/vpc.tf | 29 -------- 10 files changed, 332 deletions(-) delete mode 100644 modules/aws/infrastructure/config.tf delete mode 100644 modules/aws/infrastructure/ebs.tf delete mode 100644 modules/aws/infrastructure/eks.tf delete mode 100644 modules/aws/infrastructure/inputs.tf delete mode 100644 modules/aws/infrastructure/irsa.tf delete mode 100644 modules/aws/infrastructure/kms.tf delete mode 100644 modules/aws/infrastructure/locals.tf 
delete mode 100644 modules/aws/infrastructure/output.tf delete mode 100644 modules/aws/infrastructure/rbac.tf delete mode 100644 modules/aws/infrastructure/vpc.tf diff --git a/modules/aws/infrastructure/config.tf b/modules/aws/infrastructure/config.tf deleted file mode 100644 index 8ca2c52..0000000 --- a/modules/aws/infrastructure/config.tf +++ /dev/null @@ -1,20 +0,0 @@ -terraform { - required_version = ">= 1.0.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 4.18.0" - } - - null = { - source = "hashicorp/null" - version = "3.1.0" - } - - kubectl = { - source = "gavinbunney/kubectl" - version = "~> 1.14" - } - } -} diff --git a/modules/aws/infrastructure/ebs.tf b/modules/aws/infrastructure/ebs.tf deleted file mode 100644 index 5933ccd..0000000 --- a/modules/aws/infrastructure/ebs.tf +++ /dev/null @@ -1,57 +0,0 @@ -data "aws_caller_identity" "current" {} - -# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes -data "aws_iam_policy_document" "ebs" { - # Copy of default KMS policy that lets you manage it - statement { - sid = "Enable IAM User Permissions" - actions = ["kms:*"] - resources = ["*"] - - principals { - type = "AWS" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] - } - } - - # Required for EKS - statement { - sid = "Allow service-linked role use of the CMK" - actions = [ - "kms:Encrypt", - "kms:Decrypt", - "kms:ReEncrypt*", - "kms:GenerateDataKey*", - "kms:DescribeKey" - ] - resources = ["*"] - - principals { - type = "AWS" - identifiers = [ - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs - ] - } - } - - statement { - sid = "Allow 
attachment of persistent resources" - actions = ["kms:CreateGrant"] - resources = ["*"] - - principals { - type = "AWS" - identifiers = [ - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs - ] - } - - condition { - test = "Bool" - variable = "kms:GrantIsForAWSResource" - values = ["true"] - } - } -} diff --git a/modules/aws/infrastructure/eks.tf b/modules/aws/infrastructure/eks.tf deleted file mode 100644 index 8a0938e..0000000 --- a/modules/aws/infrastructure/eks.tf +++ /dev/null @@ -1,106 +0,0 @@ -module "eks" { - source = "terraform-aws-modules/eks/aws" - version = "18.31.2" - - cluster_name = local.cluster_name - cluster_version = var.cluster_version - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true - - cluster_addons = { - coredns = { - resolve_conflicts = "OVERWRITE" - } - kube-proxy = {} - vpc-cni = { - resolve_conflicts = "OVERWRITE" - service_account_role_arn = module.vpc_cni_irsa.iam_role_arn - } - aws-ebs-csi-driver = {} - } - - # cluster_encryption_config = [{ - # provider_key_arn = aws_kms_key.eks.arn - # resources = ["secrets"] - # }] - - cluster_tags = { - # This should not affect the name of the cluster primary security group - # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006 - # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008 - Name = var.app_name - GithubRepo = var.repo_name - GithubOrg = "azavea" - } - - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets - - #manage_aws_auth_configmap = true - #aws_auth_users = var.user_map - - # Extend cluster security group rules - cluster_security_group_additional_rules = { - egress_nodes_ephemeral_ports_tcp = { - description = "To node 
1025-65535" - protocol = "tcp" - from_port = 1025 - to_port = 65535 - type = "egress" - source_node_security_group = true - } - } - - # Extend node-to-node security group rules - node_security_group_additional_rules = { - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - self = true - } - egress_all = { - description = "Node all egress" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } - } - - eks_managed_node_group_defaults = { - ami_type = "AL2_x86_64" - instance_types = [var.base_instance_type] - - iam_role_attach_cni_policy = true - } - - eks_managed_node_groups = { - base = { - create_launch_template = false - launch_template_name = "" - instance_types = [var.base_instance_type] - capacity_type = var.base_instance_capacity_type - min_size = 1 - max_size = var.num_base_instances - desired_size = var.num_base_instances - labels = { - node-type = "core" - "hub.jupyter.org/node-purpose" = "core" - } - } - } - - tags = local.tags -} - -resource "null_resource" "kubectl" { - depends_on = [module.eks.kubeconfig] - provisioner "local-exec" { - command = "aws eks --region ${var.aws_region} update-kubeconfig --name ${module.eks.cluster_id}" - } -} diff --git a/modules/aws/infrastructure/inputs.tf b/modules/aws/infrastructure/inputs.tf deleted file mode 100644 index 8d41017..0000000 --- a/modules/aws/infrastructure/inputs.tf +++ /dev/null @@ -1,48 +0,0 @@ -variable "aws_region" { - type=string - description="The AWS region to deploy into" -} - -variable "app_name" { - default = "k8s-application" -} - -variable "environment" { - type = string - description = "Name of target environment (e.g., production, staging, QA, etc.)" -} - -variable "repo_name" { - type = string - description = "Name of the Github repo hosting the deployment (for tagging)" - default = "kubernetes" -} - -variable "cluster_version" { 
- type = string - default = "1.23" -} - -variable "num_base_instances" { - type = number - description = "Number of instances to be provided in the base group" - default = 1 -} - -variable "base_instance_type" { - type = string - description = "The instance type to use for the always-on core instance running system pods" - default = "t3.medium" -} - -variable "base_instance_capacity_type" { - type = string - description = "The capacity type of the always-on core instance (SPOT, ON_DEMAND)" - default = "ON_DEMAND" -} - -variable "user_map" { - type = list(object({username: string, userarn: string, groups: list(string)})) - description = "A list of {\"username\": string, \"userarn\": string, \"groups\": list(string)} objects describing the users who should have RBAC access to the cluster; note: system:masters should be used for admin access" - default = [] -} diff --git a/modules/aws/infrastructure/irsa.tf b/modules/aws/infrastructure/irsa.tf deleted file mode 100644 index 98f2d8a..0000000 --- a/modules/aws/infrastructure/irsa.tf +++ /dev/null @@ -1,16 +0,0 @@ -module "vpc_cni_irsa" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - - role_name_prefix = "VPC-CNI-IRSA" - attach_vpc_cni_policy = true - vpc_cni_enable_ipv4 = true - - oidc_providers = { - main = { - provider_arn = module.eks.oidc_provider_arn - namespace_service_accounts = ["kube-system:aws-node"] - } - } - - tags = local.tags -} diff --git a/modules/aws/infrastructure/kms.tf b/modules/aws/infrastructure/kms.tf deleted file mode 100644 index 0076520..0000000 --- a/modules/aws/infrastructure/kms.tf +++ /dev/null @@ -1,14 +0,0 @@ -# This key would be needed for cluster encryption -# resource "aws_kms_key" "eks" { -# description = "EKS Secret Encryption Key" -# deletion_window_in_days = 7 -# enable_key_rotation = true - -# tags = local.tags -# } - -resource "aws_kms_key" "ebs" { - description = "Customer managed key to encrypt EKS managed node group volumes" - 
deletion_window_in_days = 7 - policy = data.aws_iam_policy_document.ebs.json -} diff --git a/modules/aws/infrastructure/locals.tf b/modules/aws/infrastructure/locals.tf deleted file mode 100644 index 4af632a..0000000 --- a/modules/aws/infrastructure/locals.tf +++ /dev/null @@ -1,11 +0,0 @@ -locals { - region = var.aws_region - cluster_name = "${var.app_name}-${var.environment}" - - tags = { - Name = var.app_name - Environment = var.environment - GithubRepo = var.repo_name - GithubOrg = "azavea" - } -} diff --git a/modules/aws/infrastructure/output.tf b/modules/aws/infrastructure/output.tf deleted file mode 100644 index 0a65fed..0000000 --- a/modules/aws/infrastructure/output.tf +++ /dev/null @@ -1,11 +0,0 @@ -output "cluster_name" { - value = local.cluster_name -} - -output "cluster" { - value = module.eks -} - -output "vpc" { - value = module.vpc -} diff --git a/modules/aws/infrastructure/rbac.tf b/modules/aws/infrastructure/rbac.tf deleted file mode 100644 index 3baec3e..0000000 --- a/modules/aws/infrastructure/rbac.tf +++ /dev/null @@ -1,20 +0,0 @@ -resource "kubectl_manifest" "viewers_crb" { - yaml_body = <<-YAML -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: viewers -subjects: -- kind: Group - name: viewer # Name is case sensitive - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: view - apiGroup: rbac.authorization.k8s.io -YAML - - depends_on = [ - null_resource.kubectl - ] -} diff --git a/modules/aws/infrastructure/vpc.tf b/modules/aws/infrastructure/vpc.tf deleted file mode 100644 index ae7ef0f..0000000 --- a/modules/aws/infrastructure/vpc.tf +++ /dev/null @@ -1,29 +0,0 @@ -data "aws_availability_zones" "available" {} - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "3.2.0" - - name = "${var.app_name}-vpc" - cidr = "10.0.0.0/16" - azs = data.aws_availability_zones.available.names - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = 
["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - } - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" - } -} From 1605106e2cc4718f6d3aa0cb5a1825855f6377a9 Mon Sep 17 00:00:00 2001 From: jpolchlo Date: Tue, 14 Feb 2023 10:56:16 -0500 Subject: [PATCH 3/3] [WIP] Configure Argo to use an externally-generated certificate --- .../aws-terraform/application/argo/argo.tf | 5 +++ .../aws-terraform/application/argo/rbac.tf | 11 +++++++ .../aws-terraform/application/argo/route53.tf | 31 +++++++++++++++++++ .../argo/yaml/argo-workflows-values.yaml | 5 +-- 4 files changed, 50 insertions(+), 2 deletions(-) create mode 100644 deployment/aws-terraform/application/argo/rbac.tf diff --git a/deployment/aws-terraform/application/argo/argo.tf b/deployment/aws-terraform/application/argo/argo.tf index 8712f6b..0e180a3 100644 --- a/deployment/aws-terraform/application/argo/argo.tf +++ b/deployment/aws-terraform/application/argo/argo.tf @@ -37,6 +37,11 @@ resource "helm_release" "argo_workflows" { name = "server.sso.issuer" value = "https://${var.cognito_user_pool_endpoint}" } + + set { + name = "server.serviceAnnotations.service\\.beta\\.kubernetes\\.io/aws-load-balancer-ssl-cert" + value = aws_acm_certificate.argo.arn + } } resource "kubernetes_config_map" "default_repository" { diff --git a/deployment/aws-terraform/application/argo/rbac.tf b/deployment/aws-terraform/application/argo/rbac.tf new file mode 100644 index 0000000..a63ceb4 --- /dev/null +++ b/deployment/aws-terraform/application/argo/rbac.tf @@ -0,0 +1,11 @@ +# This file can be customized to include rules for mapping users to service accounts +resource 
"kubernetes_service_account_v1" "default_rbac" { + metadata { + name = "argo-read-write-user" + namespace = "argo" + annotations = { + "workflows.argoproj.io/rbac-rule" = "true" + "workflows.argoproj.io/rbac-rule-precedence" = "0" + } + } +} diff --git a/deployment/aws-terraform/application/argo/route53.tf b/deployment/aws-terraform/application/argo/route53.tf index 24df786..1a8e214 100644 --- a/deployment/aws-terraform/application/argo/route53.tf +++ b/deployment/aws-terraform/application/argo/route53.tf @@ -28,3 +28,34 @@ resource "aws_route53_record" "argo_server" { evaluate_target_health = true } } + +resource "aws_acm_certificate" "argo" { + domain_name = local.argo_subdomain + validation_method = "DNS" + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_route53_record" "base" { + for_each = { + for dvo in aws_acm_certificate.argo.domain_validation_options : dvo.domain_name => { + name = dvo.resource_record_name + record = dvo.resource_record_value + type = dvo.resource_record_type + } + } + + allow_overwrite = true + name = each.value.name + records = [each.value.record] + ttl = 60 + type = each.value.type + zone_id = data.aws_route53_zone.external.zone_id +} + +resource "aws_acm_certificate_validation" "example" { + certificate_arn = aws_acm_certificate.argo.arn + validation_record_fqdns = [for record in aws_route53_record.base : record.fqdn] +} diff --git a/deployment/aws-terraform/application/argo/yaml/argo-workflows-values.yaml b/deployment/aws-terraform/application/argo/yaml/argo-workflows-values.yaml index f0ab02e..ba0747f 100644 --- a/deployment/aws-terraform/application/argo/yaml/argo-workflows-values.yaml +++ b/deployment/aws-terraform/application/argo/yaml/argo-workflows-values.yaml @@ -5,12 +5,13 @@ server: serviceType: LoadBalancer servicePort: 443 servicePortName: https + serviceAnnotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http nodeSelector: node-type: core extraArgs: - --auth-mode=sso - - 
--insecure-skip-verify - secure: true + secure: false sso: clientId: name: argo-server-oauth