[examples] add terraform example with eks add-ons required for ebs/pvc #41

Merged 2 commits on Nov 30, 2023
62 changes: 62 additions & 0 deletions examples/from-private-vpc/main.tf
@@ -55,6 +55,68 @@ module "eks" {

}


################################################################################
# Install EKS add-ons with the necessary IAM resources
# (vpc-cni, coredns, kube-proxy)
################################################################################

module "vpc_cni_ipv4_irsa_role" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
version = "5.17.0"

role_name = "${var.cluster_name}-vpc-cni"
attach_vpc_cni_policy = true
vpc_cni_enable_ipv4 = true
vpc_cni_enable_ipv6 = true

oidc_providers = {
ex = {
provider_arn = module.eks.oidc_provider_arn
namespace_service_accounts = ["kube-system:aws-node"]
}
}

}

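# The data sources below look up the latest add-on versions compatible with the
# cluster's Kubernetes version.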
data "aws_eks_addon_version" "vpc-cni" {
addon_name = "vpc-cni"
kubernetes_version = var.cluster_version
}

data "aws_eks_addon_version" "kube-proxy" {
addon_name = "kube-proxy"
kubernetes_version = var.cluster_version
}

data "aws_eks_addon_version" "core-dns" {
addon_name = "coredns"
kubernetes_version = var.cluster_version
}

resource "aws_eks_addon" "vpc-cni" {
cluster_name = data.aws_eks_cluster.this.id
addon_name = "vpc-cni"
addon_version = data.aws_eks_addon_version.vpc-cni.version
resolve_conflicts = "OVERWRITE"

service_account_role_arn = module.vpc_cni_ipv4_irsa_role.iam_role_arn
}

resource "aws_eks_addon" "core-dns" {
cluster_name = module.eks.cluster_id
addon_name = "coredns"
addon_version = data.aws_eks_addon_version.core-dns.version
resolve_conflicts = "OVERWRITE"
}

resource "aws_eks_addon" "kube-proxy" {
cluster_name = module.eks.cluster_id
addon_name = "kube-proxy"
addon_version = data.aws_eks_addon_version.kube-proxy.version
resolve_conflicts = "OVERWRITE"
}

################################################################################
# Create aws-auth configmap
# (the eks module removed its support for aws-auth management in versions >= 18)
21 changes: 21 additions & 0 deletions examples/from-scratch-with-eks-addon/README.md
@@ -0,0 +1,21 @@
# Create an Ocean Spark cluster from scratch

This example shows how to create a VPC and an EKS cluster inside it, along with the EKS add-ons required for recent Kubernetes versions (1.25+).
The add-ons installed are EBS CSI, VPC CNI, kube-proxy, and CoreDNS.
The EKS cluster is then imported into Ocean and Ocean Spark.

## Details about the VPC

In this example, the VPC is a "private VPC". It contains:
* private subnets that use a NAT gateway for egress. This is where the nodes and pods will run.
* public subnets. This is where load balancers and other publicly exposed IPs will go.

Additionally, the VPC has the following tags to make it suitable for an EKS cluster (see the HCL sketch after this list):
* `kubernetes.io/cluster/<eks-cluster-name> = shared` on the VPC itself, where `<eks-cluster-name>` is the name of the EKS cluster that will use this VPC. This tag should not be necessary since Kubernetes 1.19, but we recommend adding it anyway.
* `kubernetes.io/cluster/<eks-cluster-name> = shared` on all subnets.
* `kubernetes.io/role/elb = 1` on all public subnets.
* `kubernetes.io/role/internal-elb = 1` on all private subnets.

## Using the Terraform script

All required inputs are described in `variables.tf`.
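
For example, a hypothetical `terraform.tfvars` could look like the following. The variable names are those referenced in `main.tf`; the values are placeholders to adapt to your environment, and this is not an exhaustive list (see `variables.tf` for all inputs and defaults):

```hcl
aws_region       = "eu-west-1"
aws_profile      = "default"
vpc_name         = "ocean-spark-vpc"
vpc_cidr         = "10.0.0.0/16"
cluster_name     = "ocean-spark-cluster"
cluster_version  = "1.27"
spotinst_token   = "<spot-api-token>"
spotinst_account = "<spot-account-id>"
```

Then run `terraform init` and `terraform apply` as usual.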
308 changes: 308 additions & 0 deletions examples/from-scratch-with-eks-addon/main.tf
@@ -0,0 +1,308 @@
provider "aws" {
region = var.aws_region
profile = var.aws_profile
}

################################################################################
# Create VPC
################################################################################

data "aws_availability_zones" "available" {}

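# Split the VPC CIDR into four equal subnets: cidrsubnet adds 2 bits to the prefix,
# so e.g. a /16 VPC yields four /18 subnets (two public, two private).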
locals {
public_1 = cidrsubnet(var.vpc_cidr, 2, 0)
public_2 = cidrsubnet(var.vpc_cidr, 2, 1)
private_1 = cidrsubnet(var.vpc_cidr, 2, 2)
private_2 = cidrsubnet(var.vpc_cidr, 2, 3)
}

module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 2.70"

create_vpc = true
name = var.vpc_name
cidr = var.vpc_cidr
azs = data.aws_availability_zones.available.names
private_subnets = [local.private_1, local.private_2]
public_subnets = [local.public_1, local.public_2]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
enable_dns_support = true
enable_s3_endpoint = true

tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared",
}

public_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
}

private_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}
}

################################################################################
# Create EKS cluster
################################################################################

module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "~> 18.0"

cluster_name = var.cluster_name
cluster_version = var.cluster_version

cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
create_cloudwatch_log_group = false

vpc_id = module.vpc.vpc_id
subnet_ids = concat(module.vpc.public_subnets, module.vpc.private_subnets)

self_managed_node_groups = {
# This node group is needed upon cluster creation so that the controller pods enabling
# Ocean and Ocean Spark functionalities can be scheduled.
bootstrap = {
instance_type = "c5.large"
max_size = 5
desired_size = 1
min_size = 0
subnet_ids = module.vpc.private_subnets
}
}

node_security_group_additional_rules = {
egress_all = {
description = "Egress from nodes to the Internet, all protocols and ports"
protocol = "-1"
from_port = 0
to_port = 0
type = "egress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
ingress_self_all_to_all = {
description = "Node to node all traffic"
protocol = "-1"
from_port = 0
to_port = 0
type = "ingress"
self = true
}
}

}

################################################################################
# Install EKS add-ons with the necessary IAM resources
# (aws-ebs-csi-driver, vpc-cni, coredns, kube-proxy)
################################################################################

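# The IRSA modules below create IAM roles that the add-on service accounts can assume
# through the cluster's OIDC provider (IAM Roles for Service Accounts).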
module "ebs_csi_irsa_role" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
version = "5.17.0"

role_name = "${var.cluster_name}-ebs-csi-controller"
attach_ebs_csi_policy = true

attach_vpc_cni_policy = true
vpc_cni_enable_ipv4 = true

oidc_providers = {
main = {
provider_arn = module.eks.oidc_provider_arn
namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"]
}
}
}

module "vpc_cni_ipv4_irsa_role" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
version = "5.17.0"

role_name = "${var.cluster_name}-vpc-cni"
attach_vpc_cni_policy = true
vpc_cni_enable_ipv4 = true
vpc_cni_enable_ipv6 = true

oidc_providers = {
ex = {
provider_arn = module.eks.oidc_provider_arn
namespace_service_accounts = ["kube-system:aws-node"]
}
}

}

data "aws_eks_addon_version" "ebs_csi" {
addon_name = "aws-ebs-csi-driver"
kubernetes_version = var.cluster_version
}

data "aws_eks_addon_version" "vpc-cni" {
addon_name = "vpc-cni"
kubernetes_version = var.cluster_version
}

data "aws_eks_addon_version" "kube-proxy" {
addon_name = "kube-proxy"
kubernetes_version = var.cluster_version
}

data "aws_eks_addon_version" "core-dns" {
addon_name = "coredns"
kubernetes_version = var.cluster_version
}

resource "aws_eks_addon" "ebs_csi" {
cluster_name = module.eks.cluster_id
addon_name = "aws-ebs-csi-driver"
addon_version = data.aws_eks_addon_version.ebs_csi.version
resolve_conflicts = "OVERWRITE"

service_account_role_arn = module.ebs_csi_irsa_role.iam_role_arn
}

resource "aws_eks_addon" "vpc-cni" {
cluster_name = module.eks.cluster_id
addon_name = "vpc-cni"
addon_version = data.aws_eks_addon_version.vpc-cni.version
resolve_conflicts = "OVERWRITE"

service_account_role_arn = module.vpc_cni_ipv4_irsa_role.iam_role_arn
}

resource "aws_eks_addon" "core-dns" {
cluster_name = module.eks.cluster_id
addon_name = "coredns"
addon_version = data.aws_eks_addon_version.core-dns.version
resolve_conflicts = "OVERWRITE"
}

resource "aws_eks_addon" "kube-proxy" {
cluster_name = module.eks.cluster_id
addon_name = "kube-proxy"
addon_version = data.aws_eks_addon_version.kube-proxy.version
resolve_conflicts = "OVERWRITE"
}

################################################################################
# Create aws-auth configmap
# (the eks module removed its support for aws-auth management in versions >= 18)
################################################################################

data "aws_eks_cluster_auth" "this" {
name = module.eks.cluster_id
}

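# Build a temporary kubeconfig in memory so the null_resource below can apply the
# aws-auth ConfigMap rendered by the EKS module using kubectl.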
locals {
kubeconfig = yamlencode({
apiVersion = "v1"
kind = "Config"
current-context = "terraform"
clusters = [{
name = module.eks.cluster_id
cluster = {
certificate-authority-data = module.eks.cluster_certificate_authority_data
server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "terraform"
context = {
cluster = module.eks.cluster_id
user = "terraform"
}
}]
users = [{
name = "terraform"
user = {
token = data.aws_eks_cluster_auth.this.token
}
}]
})
}

resource "null_resource" "patch" {
triggers = {
kubeconfig = base64encode(local.kubeconfig)
cmd_patch = "echo \"${module.eks.aws_auth_configmap_yaml}\" | kubectl apply --kubeconfig <(echo $KUBECONFIG | base64 --decode) -f -"
}

provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
environment = {
KUBECONFIG = self.triggers.kubeconfig
}
command = self.triggers.cmd_patch
}
}

################################################################################
# Import EKS cluster into Ocean
################################################################################

provider "spotinst" {
token = var.spotinst_token
account = var.spotinst_account
}

module "ocean-aws-k8s" {
source = "spotinst/ocean-aws-k8s/spotinst"
version = "0.2.3"

cluster_name = module.eks.cluster_id
region = var.aws_region
subnet_ids = module.vpc.private_subnets
worker_instance_profile_arn = module.eks.self_managed_node_groups["bootstrap"].iam_instance_profile_arn
security_groups = [module.eks.node_security_group_id]

max_scale_down_percentage = 100

shutdown_hours = {
time_windows = var.shutdown_time_windows,
is_enabled = var.enable_shutdown_hours
}
}

provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}

module "ocean-controller" {
source = "spotinst/ocean-controller/spotinst"
version = "0.43.0"

spotinst_token = var.spotinst_token
spotinst_account = var.spotinst_account

cluster_identifier = module.eks.cluster_id
}

################################################################################
# Import Ocean cluster into Ocean Spark
################################################################################
module "ocean-spark" {
source = "../.."

ocean_cluster_id = module.ocean-aws-k8s.ocean_id

depends_on = [
module.ocean-aws-k8s,
module.ocean-controller,
]

cluster_config = {
cluster_name = module.eks.cluster_id
certificate_authority_data = module.eks.cluster_certificate_authority_data
server_endpoint = module.eks.cluster_endpoint
token = data.aws_eks_cluster_auth.this.token
}
}