diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2809752..9f27ff2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## Unreleased
 
+## 16.0.0
+* Upgrade to EKS module 20.8.5
+* Introduce EKS access entries, replacing the aws-auth ConfigMap
+
 ## 15.0.0
 * Upgrade to EKS 1.28
 
diff --git a/README.md b/README.md
index c948407..27c4649 100644
--- a/README.md
+++ b/README.md
@@ -38,15 +38,14 @@ Note that this example may create resources which cost money. Run `terraform des
 |------|---------|
 | [aws](#provider\_aws) | >= 5.0 |
 | [kubectl](#provider\_kubectl) | ~> 1.14.0 |
-| [kubernetes](#provider\_kubernetes) | >= 2.0 |
 | [null](#provider\_null) | >= 3.0 |
 
 ## Modules
 
 | Name | Source | Version |
 |------|--------|---------|
-| [eks](#module\_eks) | terraform-aws-modules/eks/aws | 19.0.4 |
-| [eks\_managed\_node\_groups](#module\_eks\_managed\_node\_groups) | terraform-aws-modules/eks/aws//modules/eks-managed-node-group | 19.21.0 |
+| [eks](#module\_eks) | terraform-aws-modules/eks/aws | 20.8.5 |
+| [eks\_managed\_node\_groups](#module\_eks\_managed\_node\_groups) | terraform-aws-modules/eks/aws//modules/eks-managed-node-group | 20.8.5 |
 | [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | 5.33 |
 
 ## Resources
 
@@ -56,6 +55,10 @@ Note that this example may create resources which cost money. Run `terraform des
 | [aws_autoscaling_attachment.eks_managed_node_groups_alb_attachment](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_attachment) | resource |
 | [aws_autoscaling_attachment.eks_managed_node_groups_proxy_attachment](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_attachment) | resource |
 | [aws_autoscaling_attachment.eks_managed_node_groups_shared_attachment](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_attachment) | resource |
+| [aws_eks_access_entry.cluster_admin](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_entry) | resource |
+| [aws_eks_access_entry.delete_ebs_volume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_entry) | resource |
+| [aws_eks_access_policy_association.cluster_admin](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_policy_association) | resource |
+| [aws_eks_access_policy_association.delete_ebs_volume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_policy_association) | resource |
 | [aws_iam_policy.cloudwatch_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
 | [aws_iam_policy.node_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
 | [aws_iam_policy.ssm_managed_instance](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
@@ -117,9 +120,6 @@ Note that this example may create resources which cost money. Run `terraform des
 | [aws_wafv2_web_acl_association.cms_waf_assoc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl_association) | resource |
 | [aws_wafv2_web_acl_association.cms_waf_priv_assoc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl_association) | resource |
 | [kubectl_manifest.batcave_namespace](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource |
-| [kubernetes_cluster_role.persistent_volume_management](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/cluster_role) | resource |
-| [kubernetes_cluster_role_binding.delete_ebs_volumes_lambda](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/cluster_role_binding) | resource |
-| [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
 | [null_resource.kubernetes_requirements](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
 | [aws_acm_certificate.acm_certificate](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/acm_certificate) | data source |
 | [aws_ami.eks_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
@@ -138,6 +138,7 @@ Note that this example may create resources which cost money. Run `terraform des
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
 | [acm\_cert\_base\_domain](#input\_acm\_cert\_base\_domain) | Base domain of the certificate used for the ALB Proxy | `string` | `""` | no |
+| [admin\_principal\_arns](#input\_admin\_principal\_arns) | List of principal\_arns that require admin access to the cluster | `list(string)` | `[]` | no |
 | [alb\_deletion\_protection](#input\_alb\_deletion\_protection) | Enable/Disable ALB deletion protection for both ALBs | `bool` | `false` | no |
 | [alb\_drop\_invalid\_header\_fields](#input\_alb\_drop\_invalid\_header\_fields) | Indicates whether HTTP headers with header fields that are not valid are removed by the load balancer (true) or routed to targets (false). The default is false. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens. Only valid for Load Balancers of type application | `bool` | `true` | no |
 | [alb\_idle\_timeout](#input\_alb\_idle\_timeout) | Default idle request timeout for the ALB | `string` | `"60"` | no |
@@ -163,22 +164,22 @@ Note that this example may create resources which cost money. Run `terraform des
 | [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` |
[
"api",
"audit",
"authenticator",
"controllerManager",
"scheduler"
]
| no |
 | [cluster\_name](#input\_cluster\_name) | n/a | `string` | n/a | yes |
 | [cluster\_security\_group\_additional\_rules](#input\_cluster\_security\_group\_additional\_rules) | Map of security group rules to attach to the cluster security group, as you cannot change cluster security groups without replacing the instance | `map(any)` | `{}` | no |
+| [cluster\_service\_cidr](#input\_cluster\_service\_cidr) | n/a | `string` | `"172.20.0.0/16"` | no |
 | [cluster\_version](#input\_cluster\_version) | n/a | `string` | `"1.28"` | no |
-| [configmap\_custom\_roles](#input\_configmap\_custom\_roles) | A custom list of IAM role names to include in the aws-auth configmap | `list(string)` | `[]` | no |
 | [create\_alb\_proxy](#input\_create\_alb\_proxy) | Create an Application Load Balancer proxy to live in front of the K8s ALB and act as a proxy from the public Internet | `bool` | `false` | no |
 | [create\_alb\_shared](#input\_create\_alb\_shared) | Creates an ALB in the shared subnet | `bool` | `false` | no |
 | [create\_cosign\_iam\_role](#input\_create\_cosign\_iam\_role) | Flag to create Cosign IAM role | `bool` | `false` | no |
 | [custom\_node\_policy\_arns](#input\_custom\_node\_policy\_arns) | Custom node policy arns | `set(string)` | `[]` | no |
 | [custom\_node\_pools](#input\_custom\_node\_pools) | n/a | `any` | `{}` | no |
+| [delete\_ebs\_volume\_role\_arn](#input\_delete\_ebs\_volume\_role\_arn) | Principal ARN for the delete-ebs-volumes Lambda role | `string` | `""` | no |
+| [enable\_cluster\_creator\_admin\_permissions](#input\_enable\_cluster\_creator\_admin\_permissions) | Grants the user who created the cluster admin permissions | `bool` | `true` | no |
 | [enable\_eks\_managed\_nodes](#input\_enable\_eks\_managed\_nodes) | Enables eks managed nodes | `bool` | `false` | no |
 | [enable\_hoplimit](#input\_enable\_hoplimit) | Enables an IMDSv2 hop limit of 1 on all nodes. Defaults to false | `bool` | `false` | no |
 | [enable\_self\_managed\_nodes](#input\_enable\_self\_managed\_nodes) | Enables self managed nodes | `bool` | `true` | no |
 | [enable\_ssm\_patching](#input\_enable\_ssm\_patching) | Enables Systems Manager to patch nodes | `bool` | `false` | no |
 | [environment](#input\_environment) | n/a | `string` | `"dev"` | no |
-| [federated\_access\_role](#input\_federated\_access\_role) | Federated access role | `string` | `"ct-ado-batcave-application-admin"` | no |
 | [force\_update\_version](#input\_force\_update\_version) | Force update version | `bool` | `true` | no |
 | [general\_node\_pool](#input\_general\_node\_pool) | General node pool, required for hosting core services | `any` |
{
"desired_size": 3,
"instance_type": "c5.2xlarge",
"labels": {
"general": "true"
},
"max_size": 5,
"min_size": 2,
"taints": {}
}
| no |
-| [github\_actions\_role](#input\_github\_actions\_role) | Github actions role | `string` | `"batcave-github-actions-role"` | no |
 | [grant\_delete\_ebs\_volumes\_lambda\_access](#input\_grant\_delete\_ebs\_volumes\_lambda\_access) | When set to true, a cluster role and permissions will be created to grant the delete-ebs-volumes Lambda access to the PersistentVolumes API. | `bool` | `false` | no |
 | [host\_subnets](#input\_host\_subnets) | Override the ec2 instance subnets. By default, they are launched in private\_subnets, just like the EKS control plane. | `list(any)` | `[]` | no |
 | [iam\_role\_path](#input\_iam\_role\_path) | n/a | `string` | `"/delegatedadmin/developer/"` | no |
@@ -204,7 +205,6 @@ Note that this example may create resources which cost money. Run `terraform des
 | Name | Description |
 |------|-------------|
-| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | n/a |
 | [batcave\_alb\_proxy\_dns](#output\_batcave\_alb\_proxy\_dns) | DNS value of ALB created for proxying requests |
 | [batcave\_alb\_shared\_dns](#output\_batcave\_alb\_shared\_dns) | DNS value of ALB created for proxying requests through an ALB in the shared subnet |
 | [batcave\_lb\_dns](#output\_batcave\_lb\_dns) | DNS value of NLB created for routing traffic to apps |
diff --git a/eks-access-entries.tf b/eks-access-entries.tf
new file mode 100644
index 0000000..f2a9206
--- /dev/null
+++ b/eks-access-entries.tf
@@ -0,0 +1,68 @@
+
+#################################################################################
+# Access Entry for Cluster access
+#################################################################################
+## The access entry and policy association below target roles that need cluster admin access;
+## the pattern can be repeated for roles that require a different cluster access policy.
+resource "aws_eks_access_entry" "cluster_admin" {
+  for_each = toset(var.admin_principal_arns)
+
+  cluster_name      = local.name
+  kubernetes_groups = []
+  principal_arn     = each.value
+  type              = "STANDARD"
+  user_name         = null
+
+  depends_on = [
+    module.eks_managed_node_groups,
+  ]
+}
+
+resource "aws_eks_access_policy_association" "cluster_admin" {
+  for_each = toset(var.admin_principal_arns)
+
+  access_scope {
+    namespaces = []
+    type       = "cluster"
+  }
+
+  cluster_name = local.name
+
+  policy_arn    = "arn:${data.aws_partition.current.partition}:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
+  principal_arn = each.value
+
+  depends_on = [
+    aws_eks_access_entry.cluster_admin,
+  ]
+}
+
+## Access entry for the delete-ebs-volumes Lambda role, with a namespace-scoped admin policy
+resource "aws_eks_access_entry" "delete_ebs_volume" {
+
+  cluster_name      = local.name
+  kubernetes_groups = []
+  principal_arn     = var.delete_ebs_volume_role_arn
+  type              = "STANDARD"
+  user_name         = null
+
+  depends_on = [
+    module.eks_managed_node_groups,
+    kubectl_manifest.batcave_namespace
+  ]
+}
+resource "aws_eks_access_policy_association" "delete_ebs_volume" {
+
+  access_scope {
+    namespaces = ["batcave"]
+    type       = "namespace"
+  }
+
+  cluster_name = local.name
+
+  policy_arn    = "arn:${data.aws_partition.current.partition}:eks::aws:cluster-access-policy/AmazonEKSAdminPolicy"
+  principal_arn = var.delete_ebs_volume_role_arn
+
+  depends_on = [
+    aws_eks_access_entry.delete_ebs_volume,
+  ]
+}
diff --git a/kubernetes.tf b/kubernetes.tf
index 5bca10b..1b71768 100644
--- a/kubernetes.tf
+++ b/kubernetes.tf
@@ -4,127 +4,6 @@ provider "kubernetes" {
   token                  = data.aws_eks_cluster_auth.cluster.token
 }
 
-locals {
-  static_master_roles = ["aolytix-role", var.github_actions_role, var.federated_access_role]
-  merged_master_roles = concat(local.static_master_roles, var.configmap_custom_roles)
-  custom_configmap_master_roles = (length(local.merged_master_roles) > 0 ? ([
-    for custom_iam_role_name in local.merged_master_roles : {
-      rolearn  = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${custom_iam_role_name}"
-      username = custom_iam_role_name,
-      groups   = ["system:masters"]
-    }
-    ]) :
-  [])
-  eks_managed_node_role = ([
-    {
-      rolearn  = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/eks-node-${var.cluster_name}-role"
-      username = "system:node:{{EC2PrivateDNSName}}"
-      groups = tolist([
-        "system:bootstrappers",
-        "system:nodes"
-      ])
-    },
-    {
-      rolearn  = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role${var.iam_role_path}eks-node-${var.cluster_name}-role"
-      username = "system:node:{{EC2PrivateDNSName}}"
-      groups = tolist([
-        "system:bootstrappers",
-        "system:nodes"
-      ])
-    }
-  ])
-}
-
-locals {
-  configmap_roles = [for k, v in module.eks.self_managed_node_groups : {
-    rolearn  = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${v.iam_role_name}"
-    username = "system:node:{{EC2PrivateDNSName}}"
-    groups = tolist([
-      "system:bootstrappers",
-      "system:nodes"
-    ])
-  }]
-}
-
-resource "kubernetes_cluster_role" "persistent_volume_management" {
-  count = var.grant_delete_ebs_volumes_lambda_access ? 1 : 0
-
-  metadata {
-    name = "batcave:persistent-volume-management"
-  }
-
-  rule {
-    api_groups = [""]
-    resources  = ["persistentvolumes"]
-    verbs      = ["create", "delete", "get", "list", "update", "watch"]
-  }
-  depends_on = [null_resource.kubernetes_requirements]
-}
-
-locals {
-  delete_ebs_volumes_lambda_subject_name = "batcave:persistent-volume-managers"
-}
-resource "kubernetes_cluster_role_binding" "delete_ebs_volumes_lambda" {
-  count = var.grant_delete_ebs_volumes_lambda_access ? 1 : 0
-
-  metadata {
-    name = "batcave:persistent-volume-managers"
-  }
-  role_ref {
-    api_group = "rbac.authorization.k8s.io"
-    kind      = "ClusterRole"
-    name      = kubernetes_cluster_role.persistent_volume_management[0].metadata[0].name
-  }
-  subject {
-    api_group = "rbac.authorization.k8s.io"
-    kind      = "Group"
-    name      = local.delete_ebs_volumes_lambda_subject_name
-  }
-  depends_on = [null_resource.kubernetes_requirements]
-}
-
-locals {
-  delete_ebs_volumes_lambda_role_mapping = (var.grant_delete_ebs_volumes_lambda_access ?
-    ([{
-      rolearn  = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/delete_ebs_volumes_lambda_role",
-      username = "batcave:delete-ebs-volumes-lambda",
-      groups   = [local.delete_ebs_volumes_lambda_subject_name]
-    }]) :
-  [])
-}
-
-resource "kubernetes_config_map" "aws_auth" {
-  metadata {
-    name      = "aws-auth"
-    namespace = "kube-system"
-    labels = merge(
-      {
-        "app.kubernetes.io/managed-by" = "Terraform"
-      }
-    )
-  }
-  data = {
-    mapRoles = yamlencode(
-      distinct(concat(
-        tolist(local.configmap_roles),
-        tolist(local.delete_ebs_volumes_lambda_role_mapping),
-        local.custom_configmap_master_roles,
-        local.eks_managed_node_role,
-      ))
-    )
-  }
-  depends_on = [
-    null_resource.kubernetes_requirements,
-    kubernetes_cluster_role_binding.delete_ebs_volumes_lambda,
-  ]
-  # EKS managed nodes will update this configmap on their own, so we need to ignore changes to it
-  # This will avoid terraform overwriting the configmap with the old values
-  # lifecycle {
-  #   ignore_changes = [data]
-  # }
-
-}
-
 provider "kubectl" {
   host                   = module.eks.cluster_endpoint
   cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
diff --git a/main.tf b/main.tf
index baae257..3923215 100644
--- a/main.tf
+++ b/main.tf
@@ -238,9 +238,8 @@ locals {
 
 module "eks" {
   ## https://github.com/terraform-aws-modules/terraform-aws-eks
-  source = "terraform-aws-modules/eks/aws"
-  version = "19.0.4"
-
+  source  = "terraform-aws-modules/eks/aws"
+  version = "20.8.5"
 
   cluster_name    = local.name
   cluster_version = local.cluster_version
 
@@ -249,9 +248,9 @@ module "eks" {
   cluster_encryption_policy_path = var.iam_role_path
   # create_iam_role = false
   # iam_role_arn = aws_iam_role.eks_node.arn
-
-  vpc_id     = var.vpc_id
-  subnet_ids = var.private_subnets
+  enable_cluster_creator_admin_permissions = var.enable_cluster_creator_admin_permissions
+  vpc_id                                   = var.vpc_id
+  subnet_ids                               = var.private_subnets
 
   cluster_endpoint_private_access = true
   cluster_endpoint_public_access  = false
 
@@ -279,7 +278,11 @@ module "eks" {
   }
 
   ## CLUSTER Addons
-  cluster_addons = {}
+  cluster_addons = {
+    eks-pod-identity-agent = {
+      most_recent = true
+    }
+  }
 
   # Worker groups (using Launch Configurations)
   self_managed_node_groups = var.enable_self_managed_nodes ? local.custom_node_pools : {}
 
@@ -290,7 +293,7 @@
 
 module "eks_managed_node_groups" {
   source  = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group"
-  version = "19.21.0"
+  version = "20.8.5"
 
   for_each = var.enable_eks_managed_nodes ? local.eks_node_pools : {}
 
@@ -318,6 +321,7 @@ module "eks_managed_node_groups" {
   force_update_version              = var.force_update_version
   cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id
   vpc_security_group_ids            = [module.eks.node_security_group_id]
+  cluster_service_cidr              = var.cluster_service_cidr
 }
 
diff --git a/outputs.tf b/outputs.tf
index 0725903..b2d938a 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -153,9 +153,9 @@ output "eks_managed_node_group" {
 # Additional
 ################################################################################
 
-output "aws_auth_configmap_yaml" {
-  value = module.eks.aws_auth_configmap_yaml
-}
+# output "aws_auth_configmap_yaml" {
+#   value = module.eks.aws_auth_configmap_yaml
+# }
 
 output "cosign_iam_role_arn" {
   value = try(aws_iam_role.cosign[0].arn, "")
diff --git a/variables.tf b/variables.tf
index 7426b95..b17e875 100644
--- a/variables.tf
+++ b/variables.tf
@@ -8,6 +8,10 @@ variable "cluster_version" {
   default = "1.28"
   type    = string
 }
+variable "cluster_service_cidr" {
+  default = "172.20.0.0/16"
+  type    = string
+}
 
 variable "ami_date" {
   default = ""
@@ -293,31 +297,11 @@ variable "enable_hoplimit" {
   description = "Enables an IMDSv2 hop limit of 1 on all nodes. Defaults to false"
 }
 
-variable "configmap_custom_roles" {
-  default     = []
-  description = "A custom list of IAM role names to include in the aws-auth configmap"
-  type        = list(string)
-}
-
 variable "vpc_cidr_blocks" {
   description = "List of VPC CIDR blocks"
   type        = list(string)
 }
 
-variable "github_actions_role" {
-  type        = string
-  default     = "batcave-github-actions-role"
-  description = "Github actions role"
-}
-
-### Federated role will be added to the ConfigMap so that the users can have access to the Kubernetes objects of the cluster.
-### By default the users will not have access when the cluster is created by GitHub runner.
-variable "federated_access_role" {
-  type        = string
-  default     = "ct-ado-batcave-application-admin"
-  description = "Federated access role"
-}
-
 variable "enable_self_managed_nodes" {
   type    = bool
@@ -366,3 +350,27 @@ variable "ssm_tag_patch_window" {
   default     = "ITOPS-Wave1-Non-Mktplc-DevTestImpl-MW"
   description = "SSM Patching window for instances. For more information: https://cloud.cms.gov/patching-prerequisites"
 }
+
+variable "enable_cluster_creator_admin_permissions" {
+  type        = bool
+  default     = true
+  description = "Grants the user who created the cluster admin permissions"
+}
+
+# ################################################################################
+# # Access Entry
+# ################################################################################
+
+## The variable below holds the list of principal ARNs that require cluster admin access.
+variable "admin_principal_arns" {
+  description = "List of principal_arns that require admin access to the cluster"
+  default     = []
+  type        = list(string)
+}
+
+
+variable "delete_ebs_volume_role_arn" {
+  description = "Principal ARN for the delete-ebs-volumes Lambda role"
+  default     = ""
+  type        = string
+}
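
For module consumers, a minimal usage sketch of the new 16.0.0 access-entry inputs might look like the following. This is an assumption-laden illustration, not part of the PR: the module source path, account ID, cluster name, and network IDs are placeholders, while the role names are taken from the defaults and locals removed with the aws-auth ConfigMap.

```hcl
# Hypothetical caller of this module after upgrading to 16.0.0.
# Roles that were previously injected into the aws-auth ConfigMap via
# github_actions_role / federated_access_role / configmap_custom_roles
# are now passed as full principal ARNs for EKS access entries.
module "batcave_eks" {
  source = "../batcave-tf-eks" # placeholder path to this module

  cluster_name    = "example-cluster"
  vpc_id          = "vpc-0123456789abcdef0"
  private_subnets = ["subnet-aaaa1111", "subnet-bbbb2222"]
  vpc_cidr_blocks = ["10.0.0.0/16"]

  # Each ARN gets a STANDARD access entry plus a cluster-scoped
  # AmazonEKSClusterAdminPolicy association (see eks-access-entries.tf).
  admin_principal_arns = [
    "arn:aws:iam::111111111111:role/batcave-github-actions-role",
    "arn:aws:iam::111111111111:role/ct-ado-batcave-application-admin",
  ]

  # The delete-ebs-volumes Lambda role now receives a namespace-scoped
  # AmazonEKSAdminPolicy association instead of the old ClusterRole/Binding.
  delete_ebs_volume_role_arn = "arn:aws:iam::111111111111:role/delete_ebs_volumes_lambda_role"

  # Defaults to true; the identity that creates the cluster keeps admin access.
  enable_cluster_creator_admin_permissions = true
}
```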