diff --git a/examples/from-private-vpc/main.tf b/examples/from-private-vpc/main.tf
index 4605fa7..c2d21be 100644
--- a/examples/from-private-vpc/main.tf
+++ b/examples/from-private-vpc/main.tf
@@ -55,6 +55,68 @@ module "eks" {
 }
+
+################################################################################
+# Install EKS add-ons with the necessary IAM resources
+# (vpc-cni, coredns, kube-proxy)
+################################################################################
+
+module "vpc_cni_ipv4_irsa_role" {
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "5.17.0"
+
+  role_name             = "${var.cluster_name}-vpc-cni"
+  attach_vpc_cni_policy = true
+  vpc_cni_enable_ipv4   = true
+  vpc_cni_enable_ipv6   = true
+
+  oidc_providers = {
+    ex = {
+      provider_arn               = module.eks.oidc_provider_arn
+      namespace_service_accounts = ["kube-system:aws-node"]
+    }
+  }
+
+}
+
+data "aws_eks_addon_version" "vpc-cni" {
+  addon_name         = "vpc-cni"
+  kubernetes_version = var.cluster_version
+}
+
+data "aws_eks_addon_version" "kube-proxy" {
+  addon_name         = "kube-proxy"
+  kubernetes_version = var.cluster_version
+}
+
+data "aws_eks_addon_version" "core-dns" {
+  addon_name         = "coredns"
+  kubernetes_version = var.cluster_version
+}
+
+resource "aws_eks_addon" "vpc-cni" {
+  cluster_name      = module.eks.cluster_id
+  addon_name        = "vpc-cni"
+  addon_version     = data.aws_eks_addon_version.vpc-cni.version
+  resolve_conflicts = "OVERWRITE"
+
+  service_account_role_arn = module.vpc_cni_ipv4_irsa_role.iam_role_arn
+}
+
+resource "aws_eks_addon" "core-dns" {
+  cluster_name      = module.eks.cluster_id
+  addon_name        = "coredns"
+  addon_version     = data.aws_eks_addon_version.core-dns.version
+  resolve_conflicts = "OVERWRITE"
+}
+
+resource "aws_eks_addon" "kube-proxy" {
+  cluster_name      = module.eks.cluster_id
+  addon_name        = "kube-proxy"
+  addon_version     = data.aws_eks_addon_version.kube-proxy.version
+  resolve_conflicts = "OVERWRITE"
+}
+
 ################################################################################
 # Create aws-auth configmap
 # (the eks module recently removed their support for aws-auth management (>=18))
diff --git a/examples/from-scratch-with-eks-addon/README.md b/examples/from-scratch-with-eks-addon/README.md
new file mode 100644
index 0000000..4a4a1f8
--- /dev/null
+++ b/examples/from-scratch-with-eks-addon/README.md
@@ -0,0 +1,21 @@
+# Create an Ocean Spark cluster from scratch with EKS add-ons
+
+This example shows how to create a VPC and an EKS cluster inside of it, along with the EKS add-ons required for recent Kubernetes versions (1.25+).
+The add-ons installed are the EBS CSI driver, VPC CNI, kube-proxy, and CoreDNS.
+The EKS cluster is then imported into Ocean and Ocean Spark.
+
+## Details about the VPC
+
+In this example, the VPC is a "private VPC". It contains:
+* private subnets using a NAT gateway for egress. That's where the nodes and pods will go.
+* public subnets. That's where the load balancers and other exposed public IPs will go.
+
+Additionally, the VPC has the following tags to make it suitable for an EKS cluster:
+* `kubernetes.io/cluster/<cluster-name> = shared` on the VPC itself, where `<cluster-name>` is the name of the EKS cluster that will use this VPC. This tag should not be necessary since Kubernetes 1.19, but we recommend adding it anyway.
+* `kubernetes.io/cluster/<cluster-name> = shared` on all subnets.
+* `kubernetes.io/role/elb = 1` on all public subnets.
+* `kubernetes.io/role/internal-elb = 1` on all private subnets.
+
+## Using the Terraform script
+
+All required inputs are described in `variables.tf`.
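+
+For illustration, a minimal `terraform.tfvars` could look like the following. Every value below is a placeholder to adapt to your environment; the authoritative list of inputs (and their defaults) is `variables.tf`.
+
+```hcl
+# Placeholder values only -- replace with your own before applying.
+spotinst_token   = "<spot-api-token>"
+spotinst_account = "<spot-account-id>"
+aws_region       = "eu-west-1"
+aws_profile      = "default"
+cluster_name     = "ocean-spark-example"
+cluster_version  = "1.25"
+vpc_name         = "ocean-spark-example-vpc"
+vpc_cidr         = "10.0.0.0/16"
+```
+
+With these inputs in place, the usual `terraform init` / `terraform plan` / `terraform apply` workflow creates the VPC, the EKS cluster, the add-ons, and the Ocean and Ocean Spark import.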
diff --git a/examples/from-scratch-with-eks-addon/main.tf b/examples/from-scratch-with-eks-addon/main.tf
new file mode 100644
index 0000000..359da67
--- /dev/null
+++ b/examples/from-scratch-with-eks-addon/main.tf
@@ -0,0 +1,308 @@
+provider "aws" {
+  region  = var.aws_region
+  profile = var.aws_profile
+}
+
+################################################################################
+# Create VPC
+################################################################################
+
+data "aws_availability_zones" "available" {}
+
+locals {
+  public_1  = cidrsubnet(var.vpc_cidr, 2, 0)
+  public_2  = cidrsubnet(var.vpc_cidr, 2, 1)
+  private_1 = cidrsubnet(var.vpc_cidr, 2, 2)
+  private_2 = cidrsubnet(var.vpc_cidr, 2, 3)
+}
+
+module "vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 2.70"
+
+  create_vpc           = true
+  name                 = var.vpc_name
+  cidr                 = var.vpc_cidr
+  azs                  = data.aws_availability_zones.available.names
+  private_subnets      = [local.private_1, local.private_2]
+  public_subnets       = [local.public_1, local.public_2]
+  enable_nat_gateway   = true
+  single_nat_gateway   = true
+  enable_dns_hostnames = true
+  enable_dns_support   = true
+  enable_s3_endpoint   = true
+
+  tags = {
+    "kubernetes.io/cluster/${var.cluster_name}" = "shared",
+  }
+
+  public_subnet_tags = {
+    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
+    "kubernetes.io/role/elb"                    = "1"
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
+    "kubernetes.io/role/internal-elb"           = "1"
+  }
+}
+
+################################################################################
+# Create EKS cluster
+################################################################################
+
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 18.0"
+
+  cluster_name    = var.cluster_name
+  cluster_version = var.cluster_version
+
+  cluster_endpoint_private_access = true
+  cluster_endpoint_public_access  = true
+  create_cloudwatch_log_group     = false
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = concat(module.vpc.public_subnets, module.vpc.private_subnets)
+
+  self_managed_node_groups = {
+    # This node group is needed upon cluster creation so that the controller pods enabling
+    # Ocean and Ocean Spark functionalities can be scheduled.
+    bootstrap = {
+      instance_type = "c5.large"
+      max_size      = 5
+      desired_size  = 1
+      min_size      = 0
+      subnet_ids    = module.vpc.private_subnets
+    }
+  }
+
+  node_security_group_additional_rules = {
+    egress_all = {
+      description      = "Egress from nodes to the Internet, all protocols and ports"
+      protocol         = "-1"
+      from_port        = 0
+      to_port          = 0
+      type             = "egress"
+      cidr_blocks      = ["0.0.0.0/0"]
+      ipv6_cidr_blocks = ["::/0"]
+    }
+    ingress_self_all_to_all = {
+      description = "Node to node all traffic"
+      protocol    = "-1"
+      from_port   = 0
+      to_port     = 0
+      type        = "ingress"
+      self        = true
+    }
+  }
+
+}
+
+################################################################################
+# Install EKS add-ons with the necessary IAM resources
+# (ebs-csi, vpc-cni, coredns, kube-proxy)
+################################################################################
+
+module "ebs_csi_irsa_role" {
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "5.17.0"
+
+  role_name             = "${var.cluster_name}-ebs-csi-controller"
+  attach_ebs_csi_policy = true
+
+  attach_vpc_cni_policy = true
+  vpc_cni_enable_ipv4   = true
+
+  oidc_providers = {
+    main = {
+      provider_arn               = module.eks.oidc_provider_arn
+      namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"]
+    }
+  }
+}
+
+module "vpc_cni_ipv4_irsa_role" {
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "5.17.0"
+
+  role_name             = "${var.cluster_name}-vpc-cni"
+  attach_vpc_cni_policy = true
+  vpc_cni_enable_ipv4   = true
+  vpc_cni_enable_ipv6   = true
+
+  oidc_providers = {
+    ex = {
+      provider_arn               = module.eks.oidc_provider_arn
+      namespace_service_accounts = ["kube-system:aws-node"]
+    }
+  }
+
+}
+
+data "aws_eks_addon_version" "ebs_csi" {
+  addon_name         = "aws-ebs-csi-driver"
+  kubernetes_version = var.cluster_version
+}
+
+data "aws_eks_addon_version" "vpc-cni" {
+  addon_name         = "vpc-cni"
+  kubernetes_version = var.cluster_version
+}
+
+data "aws_eks_addon_version" "kube-proxy" {
+  addon_name         = "kube-proxy"
+  kubernetes_version = var.cluster_version
+}
+
+data "aws_eks_addon_version" "core-dns" {
+  addon_name         = "coredns"
+  kubernetes_version = var.cluster_version
+}
+
+resource "aws_eks_addon" "ebs_csi" {
+  cluster_name      = module.eks.cluster_id
+  addon_name        = "aws-ebs-csi-driver"
+  addon_version     = data.aws_eks_addon_version.ebs_csi.version
+  resolve_conflicts = "OVERWRITE"
+
+  service_account_role_arn = module.ebs_csi_irsa_role.iam_role_arn
+}
+resource "aws_eks_addon" "vpc-cni" {
+  cluster_name      = module.eks.cluster_id
+  addon_name        = "vpc-cni"
+  addon_version     = data.aws_eks_addon_version.vpc-cni.version
+  resolve_conflicts = "OVERWRITE"
+
+  service_account_role_arn = module.vpc_cni_ipv4_irsa_role.iam_role_arn
+}
+
+resource "aws_eks_addon" "core-dns" {
+  cluster_name      = module.eks.cluster_id
+  addon_name        = "coredns"
+  addon_version     = data.aws_eks_addon_version.core-dns.version
+  resolve_conflicts = "OVERWRITE"
+}
+
+resource "aws_eks_addon" "kube-proxy" {
+  cluster_name      = module.eks.cluster_id
+  addon_name        = "kube-proxy"
+  addon_version     = data.aws_eks_addon_version.kube-proxy.version
+  resolve_conflicts = "OVERWRITE"
+}
+
+################################################################################
+# Create aws-auth configmap
+# (the eks module recently removed their support for aws-auth management (>=18))
+################################################################################
+
+data "aws_eks_cluster_auth" "this" {
+  name = module.eks.cluster_id
+}
+
+locals {
+  kubeconfig = yamlencode({
+    apiVersion      = "v1"
+    kind            = "Config"
+    current-context = "terraform"
+    clusters = [{
+      name = module.eks.cluster_id
+      cluster = {
+        certificate-authority-data = module.eks.cluster_certificate_authority_data
+        server                      = module.eks.cluster_endpoint
+      }
+    }]
+    contexts = [{
+      name = "terraform"
+      context = {
+        cluster = module.eks.cluster_id
+        user    = "terraform"
+      }
+    }]
+    users = [{
+      name = "terraform"
+      user = {
+        token = data.aws_eks_cluster_auth.this.token
+      }
+    }]
+  })
+}
+
+resource "null_resource" "patch" {
+  triggers = {
+    kubeconfig = base64encode(local.kubeconfig)
+    cmd_patch  = "echo \"${module.eks.aws_auth_configmap_yaml}\" | kubectl apply --kubeconfig <(echo $KUBECONFIG | base64 --decode) -f -"
+  }
+
+  provisioner "local-exec" {
+    interpreter = ["/bin/bash", "-c"]
+    environment = {
+      KUBECONFIG = self.triggers.kubeconfig
+    }
+    command = self.triggers.cmd_patch
+  }
+}
+
+################################################################################
+# Import EKS cluster into Ocean
+################################################################################
+
+provider "spotinst" {
+  token   = var.spotinst_token
+  account = var.spotinst_account
+}
+
+module "ocean-aws-k8s" {
+  source  = "spotinst/ocean-aws-k8s/spotinst"
+  version = "0.2.3"
+
+  cluster_name                = module.eks.cluster_id
+  region                      = var.aws_region
+  subnet_ids                  = module.vpc.private_subnets
+  worker_instance_profile_arn = module.eks.self_managed_node_groups["bootstrap"].iam_instance_profile_arn
+  security_groups             = [module.eks.node_security_group_id]
+
+  max_scale_down_percentage = 100
+
+  shutdown_hours = {
+    time_windows = var.shutdown_time_windows,
+    is_enabled   = var.enable_shutdown_hours
+  }
+}
+
+provider "kubernetes" {
+  host                   = module.eks.cluster_endpoint
+  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+  token                  = data.aws_eks_cluster_auth.this.token
+}
+
+module "ocean-controller" {
+  source  = "spotinst/ocean-controller/spotinst"
+  version = "0.43.0"
+
+  spotinst_token   = var.spotinst_token
+  spotinst_account = var.spotinst_account
+
+  cluster_identifier = module.eks.cluster_id
+}
+
+################################################################################
+# Import Ocean cluster into Ocean Spark
+################################################################################
+module "ocean-spark" {
+  source = "../.."
+
+  ocean_cluster_id = module.ocean-aws-k8s.ocean_id
+
+  depends_on = [
+    module.ocean-aws-k8s,
+    module.ocean-controller,
+  ]
+
+  cluster_config = {
+    cluster_name                = module.eks.cluster_id
+    certificate_authority_data = module.eks.cluster_certificate_authority_data
+    server_endpoint             = module.eks.cluster_endpoint
+    token                       = data.aws_eks_cluster_auth.this.token
+  }
+}
diff --git a/examples/from-scratch-with-eks-addon/variables.tf b/examples/from-scratch-with-eks-addon/variables.tf
new file mode 100644
index 0000000..4ea37f3
--- /dev/null
+++ b/examples/from-scratch-with-eks-addon/variables.tf
@@ -0,0 +1,48 @@
+variable "spotinst_token" {
+  type = string
+}
+
+variable "spotinst_account" {
+  type = string
+}
+
+variable "aws_region" {
+  type = string
+}
+
+variable "aws_profile" {
+  type = string
+}
+
+variable "cluster_name" {
+  type = string
+}
+
+variable "cluster_version" {
+  type    = string
+  default = "1.25"
+}
+
+variable "vpc_name" {
+  type = string
+}
+
+variable "vpc_cidr" {
+  type = string
+}
+
+variable "shutdown_time_windows" {
+  type = list(string)
+  default = [ # GMT
+    "Fri:23:30-Mon:07:30", # Weekends
+    "Mon:23:30-Tue:07:30", # Weekday evenings
+    "Tue:23:30-Wed:07:30",
+    "Wed:23:30-Thu:07:30",
+    "Thu:23:30-Fri:07:30",
+  ]
+}
+
+variable "enable_shutdown_hours" {
+  type    = bool
+  default = false
+}
\ No newline at end of file
diff --git a/examples/from-scratch-with-eks-addon/versions.tf b/examples/from-scratch-with-eks-addon/versions.tf
new file mode 100644
index 0000000..1d697f0
--- /dev/null
+++ b/examples/from-scratch-with-eks-addon/versions.tf
@@ -0,0 +1,16 @@
+terraform {
+  required_providers {
+    spotinst = {
+      source  = "spotinst/spotinst"
+      version = "~> 1.90"
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "~> 2.10"
+    }
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 3.75"
+    }
+  }
+}
diff --git a/examples/from-scratch-with-private-link/main.tf b/examples/from-scratch-with-private-link/main.tf
index f006fcb..45f203d 100644
--- a/examples/from-scratch-with-private-link/main.tf
+++ b/examples/from-scratch-with-private-link/main.tf
@@ -180,6 +180,68 @@ module "eks" {
 }
+
+################################################################################
+# Install EKS add-ons with the necessary IAM resources
+# (vpc-cni, coredns, kube-proxy)
+################################################################################
+
+module "vpc_cni_ipv4_irsa_role" {
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "5.17.0"
+
+  role_name             = "${var.cluster_name}-vpc-cni"
+  attach_vpc_cni_policy = true
+  vpc_cni_enable_ipv4   = true
+  vpc_cni_enable_ipv6   = true
+
+  oidc_providers = {
+    ex = {
+      provider_arn               = module.eks.oidc_provider_arn
+      namespace_service_accounts = ["kube-system:aws-node"]
+    }
+  }
+
+}
+
+data "aws_eks_addon_version" "vpc-cni" {
+  addon_name         = "vpc-cni"
+  kubernetes_version = var.cluster_version
+}
+
+data "aws_eks_addon_version" "kube-proxy" {
+  addon_name         = "kube-proxy"
+  kubernetes_version = var.cluster_version
+}
+
+data "aws_eks_addon_version" "core-dns" {
+  addon_name         = "coredns"
+  kubernetes_version = var.cluster_version
+}
+
+resource "aws_eks_addon" "vpc-cni" {
+  cluster_name      = module.eks.cluster_id
+  addon_name        = "vpc-cni"
+  addon_version     = data.aws_eks_addon_version.vpc-cni.version
+  resolve_conflicts = "OVERWRITE"
+
+  service_account_role_arn = module.vpc_cni_ipv4_irsa_role.iam_role_arn
+}
+
+resource "aws_eks_addon" "core-dns" {
+  cluster_name      = module.eks.cluster_id
+  addon_name        = "coredns"
"coredns" + addon_version = data.aws_eks_addon_version.core-dns.version + resolve_conflicts = "OVERWRITE" +} + +resource "aws_eks_addon" "kube-proxy" { + cluster_name = module.eks.cluster_id + addon_name = "kube-proxy" + addon_version = data.aws_eks_addon_version.kube-proxy.version + resolve_conflicts = "OVERWRITE" +} + ################################################################################ # Create aws-auth configmap # (the eks module recently removed their support for aws-auth management (>=18)) diff --git a/examples/from-scratch/main.tf b/examples/from-scratch/main.tf index cefc9b7..066a8f9 100644 --- a/examples/from-scratch/main.tf +++ b/examples/from-scratch/main.tf @@ -194,6 +194,67 @@ module "ocean-controller" { cluster_identifier = module.eks.cluster_id } +################################################################################ +# Install EKS ADD-ONs with necessary IAM resources +# (ebs-csi, vpc-cni, core-dns, proxy) +################################################################################ + +module "vpc_cni_ipv4_irsa_role" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "5.17.0" + + role_name = "${var.cluster_name}-vpc-cni" + attach_vpc_cni_policy = true + vpc_cni_enable_ipv4 = true + vpc_cni_enable_ipv6 = true + + oidc_providers = { + ex = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:aws-node"] + } + } + +} + +data "aws_eks_addon_version" "vpc-cni" { + addon_name = "vpc-cni" + kubernetes_version = var.cluster_version +} + +data "aws_eks_addon_version" "kube-proxy" { + addon_name = "kube-proxy" + kubernetes_version = var.cluster_version +} + +data "aws_eks_addon_version" "core-dns" { + addon_name = "coredns" + kubernetes_version = var.cluster_version +} + +resource "aws_eks_addon" "vpc-cni" { + cluster_name = data.aws_eks_cluster.this.id + addon_name = "vpc-cni" + addon_version = data.aws_eks_addon_version.vpc-cni.version + resolve_conflicts = "OVERWRITE" + + service_account_role_arn = module.vpc_cni_ipv4_irsa_role.iam_role_arn +} + +resource "aws_eks_addon" "core-dns" { + cluster_name = module.eks.cluster_id + addon_name = "coredns" + addon_version = data.aws_eks_addon_version.core-dns.version + resolve_conflicts = "OVERWRITE" +} + +resource "aws_eks_addon" "kube-proxy" { + cluster_name = module.eks.cluster_id + addon_name = "kube-proxy" + addon_version = data.aws_eks_addon_version.kube-proxy.version + resolve_conflicts = "OVERWRITE" +} + ################################################################################ # Import Ocean cluster into Ocean Spark ################################################################################