diff --git a/README.md b/README.md index 344c21c4..b5991af2 100644 --- a/README.md +++ b/README.md @@ -92,6 +92,16 @@ The module provisions the following resources: __NOTE:__ The module works with [Terraform Cloud](https://www.terraform.io/docs/cloud/index.html). +__NOTE:__ In `auth.tf`, we added `ignore_changes = [data["mapRoles"]]` to the `kubernetes_config_map` for the following reason: +- We provision the EKS cluster and then the Kubernetes Auth ConfigMap to map additional roles/users/accounts to Kubernetes groups +- Then we wait for the cluster to become available and for the ConfigMap to get provisioned (see `data "null_data_source" "wait_for_cluster_and_kubernetes_configmap"` in `examples/complete/main.tf`) +- Then we provision a managed Node Group +- Then EKS updates the Auth ConfigMap and adds worker roles to it (for the worker nodes to join the cluster) +- Since the ConfigMap is modified outside of Terraform state, Terraform wants to update it (remove the roles that EKS added) on each `plan/apply` + +If you want to modify the Node Group (e.g. add more Node Groups to the cluster) or need to map other IAM roles to Kubernetes groups, +set the variable `kubernetes_config_map_ignore_role_changes` to `false` and re-provision the module. Then set `kubernetes_config_map_ignore_role_changes` back to `true`. + ## Usage @@ -308,6 +318,7 @@ Available targets: | endpoint_private_access | Indicates whether or not the Amazon EKS private API server endpoint is enabled. Default to AWS EKS resource and it is false | bool | `false` | no | | endpoint_public_access | Indicates whether or not the Amazon EKS public API server endpoint is enabled. Default to AWS EKS resource and it is true | bool | `true` | no | | environment | Environment, e.g. 
'prod', 'staging', 'dev', 'pre-prod', 'UAT' | string | `` | no | +| kubernetes_config_map_ignore_role_changes | Set to `true` to ignore IAM role changes in the Kubernetes Auth ConfigMap | bool | `true` | no | | kubernetes_version | Desired Kubernetes master version. If you do not specify a value, the latest available version is used | string | `1.15` | no | | local_exec_interpreter | shell to use for local_exec | list(string) | `` | no | | map_additional_aws_accounts | Additional AWS account numbers to add to `config-map-aws-auth` ConfigMap | list(string) | `` | no | @@ -323,8 +334,8 @@ Available targets: | tags | Additional tags (e.g. `map('BusinessUnit','XYZ')` | map(string) | `` | no | | vpc_id | VPC ID for the EKS cluster | string | - | yes | | wait_for_cluster_command | `local-exec` command to execute to determine if the EKS cluster is healthy. Cluster endpoint are available as environment variable `ENDPOINT` | string | `curl --silent --fail --retry 60 --retry-delay 5 --retry-connrefused --insecure --output /dev/null $ENDPOINT/healthz` | no | -| workers_role_arns | List of Role ARNs of the worker nodes | list(string) | - | yes | -| workers_security_group_ids | Security Group IDs of the worker nodes | list(string) | - | yes | +| workers_role_arns | List of Role ARNs of the worker nodes | list(string) | `` | no | +| workers_security_group_ids | Security Group IDs of the worker nodes | list(string) | `` | no | ## Outputs @@ -337,7 +348,9 @@ Available targets: | eks_cluster_identity_oidc_issuer | The OIDC Identity issuer for the cluster | | eks_cluster_identity_oidc_issuer_arn | The OIDC Identity issuer ARN for the cluster that can be used to associate IAM roles with a service account | | eks_cluster_managed_security_group_id | Security Group ID that was created by EKS for the cluster. 
EKS creates a Security Group and applies it to ENI that is attached to EKS Control Plane master nodes and to any managed workloads | +| eks_cluster_role_arn | ARN of the EKS cluster IAM role | | eks_cluster_version | The Kubernetes server version of the cluster | +| kubernetes_config_map_id | ID of `aws-auth` Kubernetes ConfigMap | | security_group_arn | ARN of the EKS cluster Security Group | | security_group_id | ID of the EKS cluster Security Group | | security_group_name | Name of the EKS cluster Security Group | diff --git a/README.yaml b/README.yaml index 96d78e21..3c2efaf7 100644 --- a/README.yaml +++ b/README.yaml @@ -77,6 +77,16 @@ introduction: |- __NOTE:__ The module works with [Terraform Cloud](https://www.terraform.io/docs/cloud/index.html). + __NOTE:__ In `auth.tf`, we added `ignore_changes = [data["mapRoles"]]` to the `kubernetes_config_map` for the following reason: + - We provision the EKS cluster and then the Kubernetes Auth ConfigMap to map additional roles/users/accounts to Kubernetes groups + - Then we wait for the cluster to become available and for the ConfigMap to get provisioned (see `data "null_data_source" "wait_for_cluster_and_kubernetes_configmap"` in `examples/complete/main.tf`) + - Then we provision a managed Node Group + - Then EKS updates the Auth ConfigMap and adds worker roles to it (for the worker nodes to join the cluster) + - Since the ConfigMap is modified outside of Terraform state, Terraform wants to update it (remove the roles that EKS added) on each `plan/apply` + + If you want to modify the Node Group (e.g. add more Node Groups to the cluster) or need to map other IAM roles to Kubernetes groups, + set the variable `kubernetes_config_map_ignore_role_changes` to `false` and re-provision the module. Then set `kubernetes_config_map_ignore_role_changes` back to `true`. 
+ # How to use this project usage: |- diff --git a/auth.tf b/auth.tf index 9f44e00c..8e6aac4f 100644 --- a/auth.tf +++ b/auth.tf @@ -32,7 +32,8 @@ locals { certificate_authority_data_map = local.certificate_authority_data_list_internal[0] certificate_authority_data = local.certificate_authority_data_map["data"] - # Add worker nodes role ARNs (could be from many worker groups) to the ConfigMap + # Add worker nodes role ARNs (could be from many un-managed worker groups) to the ConfigMap + # Note that we don't need to do this for managed Node Groups since EKS adds their roles to the ConfigMap automatically map_worker_roles = [ for role_arn in var.workers_role_arns : { rolearn : role_arn @@ -80,8 +81,28 @@ provider "kubernetes" { load_config_file = false } +resource "kubernetes_config_map" "aws_auth_ignore_changes" { + count = var.enabled && var.apply_config_map_aws_auth && var.kubernetes_config_map_ignore_role_changes ? 1 : 0 + depends_on = [null_resource.wait_for_cluster[0]] + + metadata { + name = "aws-auth" + namespace = "kube-system" + } + + data = { + mapRoles = yamlencode(distinct(concat(local.map_worker_roles, var.map_additional_iam_roles))) + mapUsers = yamlencode(var.map_additional_iam_users) + mapAccounts = yamlencode(var.map_additional_aws_accounts) + } + + lifecycle { + ignore_changes = [data["mapRoles"]] + } +} + resource "kubernetes_config_map" "aws_auth" { - count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0 + count = var.enabled && var.apply_config_map_aws_auth && var.kubernetes_config_map_ignore_role_changes == false ? 1 : 0 depends_on = [null_resource.wait_for_cluster[0]] metadata { diff --git a/docs/terraform.md b/docs/terraform.md index 72f346a1..08ebd1cd 100644 --- a/docs/terraform.md +++ b/docs/terraform.md @@ -13,6 +13,7 @@ | endpoint_private_access | Indicates whether or not the Amazon EKS private API server endpoint is enabled. 
Default to AWS EKS resource and it is false | bool | `false` | no | | endpoint_public_access | Indicates whether or not the Amazon EKS public API server endpoint is enabled. Default to AWS EKS resource and it is true | bool | `true` | no | | environment | Environment, e.g. 'prod', 'staging', 'dev', 'pre-prod', 'UAT' | string | `` | no | +| kubernetes_config_map_ignore_role_changes | Set to `true` to ignore IAM role changes in the Kubernetes Auth ConfigMap | bool | `true` | no | | kubernetes_version | Desired Kubernetes master version. If you do not specify a value, the latest available version is used | string | `1.15` | no | | local_exec_interpreter | shell to use for local_exec | list(string) | `` | no | | map_additional_aws_accounts | Additional AWS account numbers to add to `config-map-aws-auth` ConfigMap | list(string) | `` | no | @@ -28,8 +29,8 @@ | tags | Additional tags (e.g. `map('BusinessUnit','XYZ')` | map(string) | `` | no | | vpc_id | VPC ID for the EKS cluster | string | - | yes | | wait_for_cluster_command | `local-exec` command to execute to determine if the EKS cluster is healthy. 
Cluster endpoint are available as environment variable `ENDPOINT` | string | `curl --silent --fail --retry 60 --retry-delay 5 --retry-connrefused --insecure --output /dev/null $ENDPOINT/healthz` | no | -| workers_role_arns | List of Role ARNs of the worker nodes | list(string) | - | yes | -| workers_security_group_ids | Security Group IDs of the worker nodes | list(string) | - | yes | +| workers_role_arns | List of Role ARNs of the worker nodes | list(string) | `` | no | +| workers_security_group_ids | Security Group IDs of the worker nodes | list(string) | `` | no | ## Outputs @@ -42,7 +43,9 @@ | eks_cluster_identity_oidc_issuer | The OIDC Identity issuer for the cluster | | eks_cluster_identity_oidc_issuer_arn | The OIDC Identity issuer ARN for the cluster that can be used to associate IAM roles with a service account | | eks_cluster_managed_security_group_id | Security Group ID that was created by EKS for the cluster. EKS creates a Security Group and applies it to ENI that is attached to EKS Control Plane master nodes and to any managed workloads | +| eks_cluster_role_arn | ARN of the EKS cluster IAM role | | eks_cluster_version | The Kubernetes server version of the cluster | +| kubernetes_config_map_id | ID of `aws-auth` Kubernetes ConfigMap | | security_group_arn | ARN of the EKS cluster Security Group | | security_group_id | ID of the EKS cluster Security Group | | security_group_name | Name of the EKS cluster Security Group | diff --git a/examples/complete/fixtures.us-east-2.tfvars b/examples/complete/fixtures.us-east-2.tfvars index 19cd3480..5853698f 100644 --- a/examples/complete/fixtures.us-east-2.tfvars +++ b/examples/complete/fixtures.us-east-2.tfvars @@ -8,28 +8,22 @@ stage = "test" name = "eks" -instance_type = "t2.small" - -health_check_type = "EC2" - -wait_for_capacity_timeout = "10m" - -max_size = 3 +kubernetes_version = "1.15" -min_size = 2 +oidc_provider_enabled = true -autoscaling_policies_enabled = true +enabled_cluster_log_types = ["audit"] 
-cpu_utilization_high_threshold_percent = 80 +cluster_log_retention_period = 7 -cpu_utilization_low_threshold_percent = 20 +instance_types = ["t3.small"] -associate_public_ip_address = true +desired_size = 2 -kubernetes_version = "1.15" +max_size = 3 -oidc_provider_enabled = true +min_size = 2 -enabled_cluster_log_types = ["audit"] +disk_size = 20 -cluster_log_retention_period = 7 +kubernetes_labels = {} diff --git a/examples/complete/main.tf b/examples/complete/main.tf index ae0d48e6..76656e06 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -51,33 +51,6 @@ module "subnets" { tags = local.tags } -module "eks_workers" { - source = "git::https://github.com/cloudposse/terraform-aws-eks-workers.git?ref=tags/0.12.0" - namespace = var.namespace - stage = var.stage - name = var.name - attributes = var.attributes - tags = var.tags - instance_type = var.instance_type - eks_worker_ami_name_filter = local.eks_worker_ami_name_filter - vpc_id = module.vpc.vpc_id - subnet_ids = module.subnets.public_subnet_ids - associate_public_ip_address = var.associate_public_ip_address - health_check_type = var.health_check_type - min_size = var.min_size - max_size = var.max_size - wait_for_capacity_timeout = var.wait_for_capacity_timeout - cluster_name = module.label.id - cluster_endpoint = module.eks_cluster.eks_cluster_endpoint - cluster_certificate_authority_data = module.eks_cluster.eks_cluster_certificate_authority_data - cluster_security_group_id = module.eks_cluster.security_group_id - - # Auto-scaling policies and CloudWatch metric alarms - autoscaling_policies_enabled = var.autoscaling_policies_enabled - cpu_utilization_high_threshold_percent = var.cpu_utilization_high_threshold_percent - cpu_utilization_low_threshold_percent = var.cpu_utilization_low_threshold_percent -} - module "eks_cluster" { source = "../../" namespace = var.namespace @@ -93,7 +66,33 @@ module "eks_cluster" { oidc_provider_enabled = var.oidc_provider_enabled enabled_cluster_log_types 
= var.enabled_cluster_log_types cluster_log_retention_period = var.cluster_log_retention_period +} + +# Ensure ordering of resource creation to eliminate the race conditions when applying the Kubernetes Auth ConfigMap. +# Do not create Node Group before the EKS cluster is created and the `aws-auth` Kubernetes ConfigMap is applied. +# Otherwise, EKS will create the ConfigMap first and add the managed node role ARNs to it, +# and the kubernetes provider will throw an error that the ConfigMap already exists (because it can't update the map, only create it). +# If we create the ConfigMap first (to add additional roles/users/accounts), EKS will just update it by adding the managed node role ARNs. +data "null_data_source" "wait_for_cluster_and_kubernetes_configmap" { + inputs = { + cluster_name = module.eks_cluster.eks_cluster_id + kubernetes_config_map_id = module.eks_cluster.kubernetes_config_map_id + } +} - workers_role_arns = [module.eks_workers.workers_role_arn] - workers_security_group_ids = [module.eks_workers.security_group_id] +module "eks_node_group" { + source = "git::https://github.com/cloudposse/terraform-aws-eks-node-group.git?ref=tags/0.4.0" + namespace = var.namespace + stage = var.stage + name = var.name + attributes = var.attributes + tags = var.tags + subnet_ids = module.subnets.public_subnet_ids + cluster_name = data.null_data_source.wait_for_cluster_and_kubernetes_configmap.outputs["cluster_name"] + instance_types = var.instance_types + desired_size = var.desired_size + min_size = var.min_size + max_size = var.max_size + kubernetes_labels = var.kubernetes_labels + disk_size = var.disk_size } diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf index f9036cd7..1d59915e 100644 --- a/examples/complete/outputs.tf +++ b/examples/complete/outputs.tf @@ -53,87 +53,37 @@ output "eks_cluster_identity_oidc_issuer" { value = module.eks_cluster.eks_cluster_identity_oidc_issuer } -output "workers_launch_template_id" { - description = "ID of 
the launch template" - value = module.eks_workers.launch_template_id -} - -output "workers_launch_template_arn" { - description = "ARN of the launch template" - value = module.eks_workers.launch_template_arn -} - -output "workers_autoscaling_group_id" { - description = "The AutoScaling Group ID" - value = module.eks_workers.autoscaling_group_id -} - -output "workers_autoscaling_group_name" { - description = "The AutoScaling Group name" - value = module.eks_workers.autoscaling_group_name -} - -output "workers_autoscaling_group_arn" { - description = "ARN of the AutoScaling Group" - value = module.eks_workers.autoscaling_group_arn -} - -output "workers_autoscaling_group_min_size" { - description = "The minimum size of the AutoScaling Group" - value = module.eks_workers.autoscaling_group_min_size -} - -output "workers_autoscaling_group_max_size" { - description = "The maximum size of the AutoScaling Group" - value = module.eks_workers.autoscaling_group_max_size -} - -output "workers_autoscaling_group_desired_capacity" { - description = "The number of Amazon EC2 instances that should be running in the group" - value = module.eks_workers.autoscaling_group_desired_capacity -} - -output "workers_autoscaling_group_default_cooldown" { - description = "Time between a scaling activity and the succeeding scaling activity" - value = module.eks_workers.autoscaling_group_default_cooldown -} - -output "workers_autoscaling_group_health_check_grace_period" { - description = "Time after instance comes into service before checking health" - value = module.eks_workers.autoscaling_group_health_check_grace_period -} - -output "workers_autoscaling_group_health_check_type" { - description = "`EC2` or `ELB`. Controls how health checking is done" - value = module.eks_workers.autoscaling_group_health_check_type +output "eks_cluster_managed_security_group_id" { + description = "Security Group ID that was created by EKS for the cluster. 
EKS creates a Security Group and applies it to ENI that is attached to EKS Control Plane master nodes and to any managed workloads" + value = module.eks_cluster.eks_cluster_managed_security_group_id } -output "workers_security_group_id" { - description = "ID of the worker nodes Security Group" - value = module.eks_workers.security_group_id +output "eks_node_group_role_arn" { + description = "ARN of the worker nodes IAM role" + value = module.eks_node_group.eks_node_group_role_arn } -output "workers_security_group_arn" { - description = "ARN of the worker nodes Security Group" - value = module.eks_workers.security_group_arn +output "eks_node_group_role_name" { + description = "Name of the worker nodes IAM role" + value = module.eks_node_group.eks_node_group_role_name } -output "workers_security_group_name" { - description = "Name of the worker nodes Security Group" - value = module.eks_workers.security_group_name +output "eks_node_group_id" { + description = "EKS Cluster name and EKS Node Group name separated by a colon" + value = module.eks_node_group.eks_node_group_id } -output "workers_role_arn" { - description = "ARN of the worker nodes IAM role" - value = module.eks_workers.workers_role_arn +output "eks_node_group_arn" { + description = "Amazon Resource Name (ARN) of the EKS Node Group" + value = module.eks_node_group.eks_node_group_arn } -output "workers_role_name" { - description = "Name of the worker nodes IAM role" - value = module.eks_workers.workers_role_name +output "eks_node_group_resources" { + description = "List of objects containing information about underlying resources of the EKS Node Group" + value = module.eks_node_group.eks_node_group_resources } -output "eks_cluster_managed_security_group_id" { - description = "Security Group ID that was created by EKS for the cluster. 
EKS creates a Security Group and applies it to ENI that is attached to EKS Control Plane master nodes and to any managed workloads" - value = module.eks_cluster.eks_cluster_managed_security_group_id +output "eks_node_group_status" { + description = "Status of the EKS Node Group" + value = module.eks_node_group.eks_node_group_status } diff --git a/examples/complete/variables.tf b/examples/complete/variables.tf index 82108d19..fae76e50 100644 --- a/examples/complete/variables.tf +++ b/examples/complete/variables.tf @@ -41,57 +41,12 @@ variable "tags" { description = "Additional tags (e.g. `map('BusinessUnit`,`XYZ`)" } -variable "instance_type" { - type = string - description = "Instance type to launch" -} - variable "kubernetes_version" { type = string - default = "" + default = "1.15" description = "Desired Kubernetes master version. If you do not specify a value, the latest available version is used" } -variable "health_check_type" { - type = string - description = "Controls how health checking is done. Valid values are `EC2` or `ELB`" -} - -variable "associate_public_ip_address" { - type = bool - description = "Associate a public IP address with an instance in a VPC" -} - -variable "max_size" { - type = number - description = "The maximum size of the AutoScaling Group" -} - -variable "min_size" { - type = number - description = "The minimum size of the AutoScaling Group" -} - -variable "wait_for_capacity_timeout" { - type = string - description = "A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. 
Setting this to '0' causes Terraform to skip all Capacity Waiting behavior" -} - -variable "autoscaling_policies_enabled" { - type = bool - description = "Whether to create `aws_autoscaling_policy` and `aws_cloudwatch_metric_alarm` resources to control Auto Scaling" -} - -variable "cpu_utilization_high_threshold_percent" { - type = number - description = "Worker nodes AutoScaling Group CPU utilization high threshold percent" -} - -variable "cpu_utilization_low_threshold_percent" { - type = number - description = "Worker nodes AutoScaling Group CPU utilization low threshold percent" -} - variable "enabled_cluster_log_types" { type = list(string) default = [] @@ -145,3 +100,34 @@ variable "local_exec_interpreter" { default = ["/bin/sh", "-c"] description = "shell to use for local_exec" } + +variable "disk_size" { + type = number + description = "Disk size in GiB for worker nodes. Defaults to 20. Terraform will only perform drift detection if a configuration value is provided" +} + +variable "instance_types" { + type = list(string) + description = "Set of instance types associated with the EKS Node Group. Defaults to [\"t3.medium\"]. Terraform will only perform drift detection if a configuration value is provided" +} + +variable "kubernetes_labels" { + type = map(string) + description = "Key-value mapping of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. 
Other Kubernetes labels applied to the EKS Node Group will not be managed" + default = {} +} + +variable "desired_size" { + type = number + description = "Desired number of worker nodes" +} + +variable "max_size" { + type = number + description = "The maximum size of the AutoScaling Group" +} + +variable "min_size" { + type = number + description = "The minimum size of the AutoScaling Group" +} diff --git a/main.tf b/main.tf index c341b272..599c8e97 100644 --- a/main.tf +++ b/main.tf @@ -139,7 +139,8 @@ resource "aws_iam_openid_connect_provider" "default" { url = join("", aws_eks_cluster.default.*.identity.0.oidc.0.issuer) client_id_list = ["sts.amazonaws.com"] - # it's thumbprint won't change for many years :) + + # its thumbprint won't change for many years # https://github.com/terraform-providers/terraform-provider-aws/issues/10104 thumbprint_list = ["9e99a48a9960b14926bb7f3b02e22da2b0ab7280"] } diff --git a/outputs.tf b/outputs.tf index 0ef103d4..c6e50057 100644 --- a/outputs.tf +++ b/outputs.tf @@ -52,3 +52,13 @@ output "eks_cluster_managed_security_group_id" { description = "Security Group ID that was created by EKS for the cluster. EKS creates a Security Group and applies it to ENI that is attached to EKS Control Plane master nodes and to any managed workloads" value = join("", aws_eks_cluster.default.*.vpc_config.0.cluster_security_group_id) } + +output "eks_cluster_role_arn" { + description = "ARN of the EKS cluster IAM role" + value = join("", aws_iam_role.default.*.arn) +} + +output "kubernetes_config_map_id" { + description = "ID of `aws-auth` Kubernetes ConfigMap" + value = var.kubernetes_config_map_ignore_role_changes ? 
join("", kubernetes_config_map.aws_auth_ignore_changes.*.id) : join("", kubernetes_config_map.aws_auth.*.id) +} diff --git a/test/src/examples_complete_test.go b/test/src/examples_complete_test.go index c79ddd91..651e47f3 100644 --- a/test/src/examples_complete_test.go +++ b/test/src/examples_complete_test.go @@ -87,34 +87,29 @@ func TestExamplesComplete(t *testing.T) { assert.Equal(t, []string{"172.16.96.0/19", "172.16.128.0/19"}, publicSubnetCidrs) // Run `terraform output` to get the value of an output variable - workersAutoscalingGroupName := terraform.Output(t, terraformOptions, "workers_autoscaling_group_name") - // Verify we're getting back the outputs we expect - assert.Contains(t, workersAutoscalingGroupName, "eg-test-eks") - - // Run `terraform output` to get the value of an output variable - workersLaunchTemplateArn := terraform.Output(t, terraformOptions, "workers_launch_template_arn") + eksClusterId := terraform.Output(t, terraformOptions, "eks_cluster_id") // Verify we're getting back the outputs we expect - assert.Contains(t, workersLaunchTemplateArn, "arn:aws:ec2:us-east-2:126450723953:launch-template") + assert.Equal(t, "eg-test-eks-cluster", eksClusterId) // Run `terraform output` to get the value of an output variable - workersSecurityGroupName := terraform.Output(t, terraformOptions, "workers_security_group_name") + eksClusterSecurityGroupName := terraform.Output(t, terraformOptions, "eks_cluster_security_group_name") // Verify we're getting back the outputs we expect - assert.Equal(t, "eg-test-eks-workers", workersSecurityGroupName) + assert.Equal(t, "eg-test-eks-cluster", eksClusterSecurityGroupName) // Run `terraform output` to get the value of an output variable - workerRoleName := terraform.Output(t, terraformOptions, "workers_role_name") + eksNodeGroupId := terraform.Output(t, terraformOptions, "eks_node_group_id") // Verify we're getting back the outputs we expect - assert.Equal(t, "eg-test-eks-workers", workerRoleName) + assert.Equal(t, 
"eg-test-eks-cluster:eg-test-eks-workers", eksNodeGroupId) // Run `terraform output` to get the value of an output variable - eksClusterId := terraform.Output(t, terraformOptions, "eks_cluster_id") + eksNodeGroupRoleName := terraform.Output(t, terraformOptions, "eks_node_group_role_name") // Verify we're getting back the outputs we expect - assert.Equal(t, "eg-test-eks-cluster", eksClusterId) + assert.Equal(t, "eg-test-eks-workers", eksNodeGroupRoleName) // Run `terraform output` to get the value of an output variable - eksClusterSecurityGroupName := terraform.Output(t, terraformOptions, "eks_cluster_security_group_name") + eksNodeGroupStatus := terraform.Output(t, terraformOptions, "eks_node_group_status") // Verify we're getting back the outputs we expect - assert.Equal(t, "eg-test-eks-cluster", eksClusterSecurityGroupName) + assert.Equal(t, "ACTIVE", eksNodeGroupStatus) // Wait for the worker nodes to join the cluster // https://github.com/kubernetes/client-go diff --git a/variables.tf b/variables.tf index de29e717..c5e291df 100644 --- a/variables.tf +++ b/variables.tf @@ -76,11 +76,13 @@ variable "allowed_cidr_blocks" { variable "workers_role_arns" { type = list(string) description = "List of Role ARNs of the worker nodes" + default = [] } variable "workers_security_group_ids" { type = list(string) description = "Security Group IDs of the worker nodes" + default = [] } variable "kubernetes_version" { @@ -172,3 +174,9 @@ variable "wait_for_cluster_command" { default = "curl --silent --fail --retry 60 --retry-delay 5 --retry-connrefused --insecure --output /dev/null $ENDPOINT/healthz" description = "`local-exec` command to execute to determine if the EKS cluster is healthy. Cluster endpoint are available as environment variable `ENDPOINT`" } + +variable "kubernetes_config_map_ignore_role_changes" { + type = bool + default = true + description = "Set to `true` to ignore IAM role changes in the Kubernetes Auth ConfigMap" +}