Enhance Kubernetes provider configuration (#119)
Nuru authored Jul 17, 2021
1 parent c25940a commit 51ccc3e
Showing 18 changed files with 889 additions and 414 deletions.
4 changes: 2 additions & 2 deletions .github/CODEOWNERS
@@ -15,8 +15,8 @@

# Cloud Posse must review any changes to standard context definition,
# but some changes can be rubber-stamped.
**/*.tf @cloudposse/engineering @cloudposse/approvers
README.yaml @cloudposse/engineering @cloudposse/approvers
**/*.tf @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers
README.yaml @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers
README.md @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers
docs/*.md @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers

2 changes: 1 addition & 1 deletion .github/auto-release.yml
@@ -46,7 +46,7 @@ template: |
replacers:
# Remove irrelevant information from Renovate bot
- search: '/---\s+^#.*Renovate configuration(?:.|\n)*?This PR has been generated .*/gm'
- search: '/(?<=---\s+)+^#.*(Renovate configuration|Configuration)(?:.|\n)*?This PR has been generated .*/gm'
replace: ''
# Remove Renovate bot banner image
- search: '/\[!\[[^\]]*Renovate\][^\]]*\](\([^)]*\))?\s*\n+/gm'
4 changes: 3 additions & 1 deletion .github/workflows/auto-release.yml
@@ -3,7 +3,9 @@ name: auto-release
on:
push:
branches:
- main
- master
- production

jobs:
publish:
@@ -14,7 +16,7 @@ jobs:
id: get-merged-pull-request
with:
github_token: ${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }}
# Drafts your next Release notes as Pull Requests are merged into "master"
# Drafts your next Release notes as Pull Requests are merged into "main"
- uses: release-drafter/release-drafter@v5
if: "!contains(steps.get-merged-pull-request.outputs.labels, 'no-release')"
with:
2 changes: 2 additions & 0 deletions .github/workflows/validate-codeowners.yml
@@ -1,5 +1,7 @@
name: Validate Codeowners
on:
workflow_dispatch:

pull_request:

jobs:
271 changes: 134 additions & 137 deletions README.md

Large diffs are not rendered by default.

232 changes: 109 additions & 123 deletions README.yaml

Large diffs are not rendered by default.

51 changes: 40 additions & 11 deletions auth.tf
@@ -27,6 +27,17 @@


locals {
yaml_quote = var.aws_auth_yaml_strip_quotes ? "" : "\""

need_kubernetes_provider = local.enabled && var.apply_config_map_aws_auth

kubeconfig_path_enabled = local.need_kubernetes_provider && var.kubeconfig_path_enabled
kube_exec_auth_enabled = local.kubeconfig_path_enabled ? false : local.need_kubernetes_provider && var.kube_exec_auth_enabled
kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : local.need_kubernetes_provider && var.kube_data_auth_enabled

exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? ["--profile", var.kube_exec_auth_aws_profile] : []
exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? ["--role-arn", var.kube_exec_auth_role_arn] : []

certificate_authority_data_list = coalescelist(aws_eks_cluster.default.*.certificate_authority, [[{ data : "" }]])
certificate_authority_data_list_internal = local.certificate_authority_data_list[0]
certificate_authority_data_map = local.certificate_authority_data_list_internal[0]
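
# Note for reviewers (not part of this commit): the three new flags cascade -- enabling a
# kubeconfig path disables exec-based auth, and exec-based auth disables the token data source.
# A minimal sketch of module inputs that select kubeconfig-path authentication; the module
# source and file path below are illustrative.

module "eks_cluster" {
  source = "cloudposse/eks-cluster/aws" # illustrative source; pin a version in practice

  apply_config_map_aws_auth = true

  # Selecting a kubeconfig path forces kube_exec_auth_enabled off via the locals above.
  kubeconfig_path_enabled = true
  kubeconfig_path         = "/path/to/kubeconfig" # hypothetical path

  # Other required inputs (region, VPC, subnets, etc.) omitted for brevity.
}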
@@ -59,25 +70,43 @@ resource "null_resource" "wait_for_cluster" {
}
}

data "aws_eks_cluster" "eks" {
count = local.enabled && var.apply_config_map_aws_auth ? 1 : 0
name = join("", aws_eks_cluster.default.*.id)
}

# Get an authentication token to communicate with the EKS cluster.
# By default (before other roles are added to the Auth ConfigMap), you can authenticate to the EKS cluster only by assuming the role that created the cluster.
# `aws_eks_cluster_auth` uses IAM credentials from the AWS provider to generate a temporary token.
# If the AWS provider assumes an IAM role, `aws_eks_cluster_auth` will use the same IAM role to get the auth token.
# https://www.terraform.io/docs/providers/aws/d/eks_cluster_auth.html
#
# You can set `kube_exec_auth_enabled` to use a different IAM Role or AWS config profile to fetch the auth token
#
data "aws_eks_cluster_auth" "eks" {
count = local.enabled && var.apply_config_map_aws_auth ? 1 : 0
count = local.kube_data_auth_enabled ? 1 : 0
name = join("", aws_eks_cluster.default.*.id)
}
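
# Illustration (not part of this commit): as the comment above notes, exec-based auth can fetch
# the token under a different AWS profile or IAM role than the one the AWS provider uses.
# A hedged sketch of the relevant inputs; the profile name and role ARN are made up.

module "eks_cluster" {
  source = "cloudposse/eks-cluster/aws" # illustrative

  # Use `aws eks get-token` instead of the aws_eks_cluster_auth data source.
  kube_exec_auth_enabled = true

  # Optional: pass --profile and/or --role-arn to the exec command.
  kube_exec_auth_aws_profile_enabled = true
  kube_exec_auth_aws_profile         = "eks-admin"                                # hypothetical
  kube_exec_auth_role_arn_enabled    = true
  kube_exec_auth_role_arn            = "arn:aws:iam::111111111111:role/eks-admin" # hypothetical
}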


provider "kubernetes" {
token = join("", data.aws_eks_cluster_auth.eks.*.token)
host = join("", data.aws_eks_cluster.eks.*.endpoint)
cluster_ca_certificate = base64decode(join("", data.aws_eks_cluster.eks.*.certificate_authority.0.data))
# Without a dummy API server configured, the provider will throw an error and prevent a "plan" from succeeding
# in situations where Terraform does not provide it with the cluster endpoint before triggering an API call.
# Since those situations are limited to ones where we do not care about the failure, such as fetching the
# ConfigMap before the cluster has been created or in preparation for deleting it, and the worst that will
# happen is that the aws-auth ConfigMap will be unnecessarily updated, it is just better to ignore the error
# so we can proceed with the task of creating or destroying the cluster.
#
# If this solution bothers you, you can disable it by setting var.dummy_kubeapi_server = null
host = local.enabled ? coalesce(aws_eks_cluster.default[0].endpoint, var.dummy_kubeapi_server) : var.dummy_kubeapi_server
cluster_ca_certificate = local.enabled ? base64decode(local.certificate_authority_data) : null
token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null
config_path = local.kubeconfig_path_enabled ? var.kubeconfig_path : null

dynamic "exec" {
for_each = local.kube_exec_auth_enabled ? ["exec"] : []
content {
api_version = "client.authentication.k8s.io/v1alpha1"
command = "aws"
args = concat(local.exec_profile, ["eks", "get-token", "--cluster-name", aws_eks_cluster.default[0].id], local.exec_role)
}
}
}
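
# Illustration (not part of this commit): with both optional exec flags enabled, the dynamic
# block above renders to roughly the following, equivalent to running
# `aws --profile <profile> eks get-token --cluster-name <cluster> --role-arn <arn>`.
# All values here are placeholders.

exec {
  api_version = "client.authentication.k8s.io/v1alpha1"
  command     = "aws"
  args = [
    "--profile", "eks-admin",                                 # from local.exec_profile
    "eks", "get-token", "--cluster-name", "example-cluster",  # cluster name comes from aws_eks_cluster.default[0].id
    "--role-arn", "arn:aws:iam::111111111111:role/eks-admin"  # from local.exec_role
  ]
}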

resource "kubernetes_config_map" "aws_auth_ignore_changes" {
@@ -110,8 +139,8 @@ resource "kubernetes_config_map" "aws_auth" {
}

data = {
mapRoles = yamlencode(distinct(concat(local.map_worker_roles, var.map_additional_iam_roles)))
mapUsers = yamlencode(var.map_additional_iam_users)
mapAccounts = yamlencode(var.map_additional_aws_accounts)
mapRoles = replace(yamlencode(distinct(concat(var.map_additional_iam_roles, local.map_worker_roles))), "\"", local.yaml_quote)
mapUsers = replace(yamlencode(var.map_additional_iam_users), "\"", local.yaml_quote)
mapAccounts = replace(yamlencode(var.map_additional_aws_accounts), "\"", local.yaml_quote)
}
}
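
# Illustration (not part of this commit): the new yaml_quote local supports the
# aws_auth_yaml_strip_quotes option. Terraform's yamlencode() emits double-quoted strings,
# and when the option is enabled the quotes are replaced with nothing. A small sketch with
# a made-up role mapping:

locals {
  example_roles = [{
    rolearn  = "arn:aws:iam::111111111111:role/workers" # hypothetical
    username = "system:node:{{EC2PrivateDNSName}}"
    groups   = ["system:bootstrappers", "system:nodes"]
  }]

  # yamlencode() output is quoted, e.g.:   "rolearn": "arn:aws:iam::111111111111:role/workers"
  quoted = yamlencode(local.example_roles)

  # With aws_auth_yaml_strip_quotes = true, yaml_quote is "" and the quotes disappear:
  #   rolearn: arn:aws:iam::111111111111:role/workers
  stripped = replace(local.quoted, "\"", "")
}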