From f411068502f6d08798f6d60d27e96a355b87a00a Mon Sep 17 00:00:00 2001 From: Z4ck404 Date: Thu, 28 Nov 2024 15:16:31 +0100 Subject: [PATCH 1/4] [exmaples/from-scratch] refactor & update the eks module --- examples/from-scratch/main.tf | 76 ++++++++--------------------------- 1 file changed, 16 insertions(+), 60 deletions(-) diff --git a/examples/from-scratch/main.tf b/examples/from-scratch/main.tf index 33a4525..05f649c 100644 --- a/examples/from-scratch/main.tf +++ b/examples/from-scratch/main.tf @@ -67,7 +67,7 @@ module "vpc_endpoints" { module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 18.0" + version = "20.29.0" cluster_name = var.cluster_name cluster_version = var.cluster_version @@ -76,6 +76,9 @@ module "eks" { cluster_endpoint_public_access = true create_cloudwatch_log_group = false + enable_cluster_creator_admin_permissions = true + authentication_mode = "API_AND_CONFIG_MAP" + vpc_id = module.vpc.vpc_id subnet_ids = concat(module.vpc.public_subnets, module.vpc.private_subnets) @@ -113,58 +116,6 @@ module "eks" { } -################################################################################ -# Create aws-auth configmap -# (the eks module recently removed their support for aws-auth management (>=18)) -################################################################################ - -data "aws_eks_cluster_auth" "this" { - name = module.eks.cluster_id -} - -locals { - kubeconfig = yamlencode({ - apiVersion = "v1" - kind = "Config" - current-context = "terraform" - clusters = [{ - name = module.eks.cluster_id - cluster = { - certificate-authority-data = module.eks.cluster_certificate_authority_data - server = module.eks.cluster_endpoint - } - }] - contexts = [{ - name = "terraform" - context = { - cluster = module.eks.cluster_id - user = "terraform" - } - }] - users = [{ - name = "terraform" - user = { - token = data.aws_eks_cluster_auth.this.token - } - }] - }) -} - -resource "null_resource" "patch" { - triggers = { - kubeconfig = base64encode(local.kubeconfig) - cmd_patch = "echo \"${module.eks.aws_auth_configmap_yaml}\" | kubectl apply --kubeconfig <(echo $KUBECONFIG | base64 --decode) -f -" - } - - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - environment = { - KUBECONFIG = self.triggers.kubeconfig - } - command = self.triggers.cmd_patch - } -} - ################################################################################ # Import EKS cluster into Ocean ################################################################################ @@ -178,13 +129,14 @@ module "ocean-aws-k8s" { source = "spotinst/ocean-aws-k8s/spotinst" version = "1.5.0" - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name region = var.aws_region subnet_ids = module.vpc.private_subnets worker_instance_profile_arn = module.eks.self_managed_node_groups["bootstrap"].iam_instance_profile_arn security_groups = [module.eks.node_security_group_id] - max_scale_down_percentage = 100 + max_scale_down_percentage = 100 + is_aggressive_scale_down_enabled = false shutdown_hours = { time_windows = var.shutdown_time_windows, @@ -192,6 +144,10 @@ module "ocean-aws-k8s" { } } +data "aws_eks_cluster_auth" "this" { + name = module.eks.cluster_name +} + provider "helm" { kubernetes { host = module.eks.cluster_endpoint @@ -207,7 +163,7 @@ module "ocean-controller" { spotinst_token = var.spotinst_token spotinst_account = var.spotinst_account - cluster_identifier = module.eks.cluster_id + cluster_identifier = module.eks.cluster_name } 
################################################################################ @@ -249,7 +205,7 @@ data "aws_eks_addon_version" "core-dns" { } resource "aws_eks_addon" "vpc-cni" { - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name addon_name = "vpc-cni" addon_version = data.aws_eks_addon_version.vpc-cni.version resolve_conflicts_on_update = "OVERWRITE" @@ -258,14 +214,14 @@ resource "aws_eks_addon" "vpc-cni" { } resource "aws_eks_addon" "core-dns" { - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name addon_name = "coredns" addon_version = data.aws_eks_addon_version.core-dns.version resolve_conflicts_on_update = "OVERWRITE" } resource "aws_eks_addon" "kube-proxy" { - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name addon_name = "kube-proxy" addon_version = data.aws_eks_addon_version.kube-proxy.version resolve_conflicts_on_update = "OVERWRITE" @@ -285,7 +241,7 @@ module "ocean-spark" { ] cluster_config = { - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name certificate_authority_data = module.eks.cluster_certificate_authority_data server_endpoint = module.eks.cluster_endpoint token = data.aws_eks_cluster_auth.this.token From 758f4cf1aeddf03f5ac52bd5b116425269dd7054 Mon Sep 17 00:00:00 2001 From: Z4ck404 Date: Thu, 28 Nov 2024 15:26:04 +0100 Subject: [PATCH 2/4] [exmaples/from-private-vpc] refactor & update the eks module --- examples/from-private-vpc/main.tf | 73 ++++++------------------------- 1 file changed, 14 insertions(+), 59 deletions(-) diff --git a/examples/from-private-vpc/main.tf b/examples/from-private-vpc/main.tf index c80ebd7..14c85fa 100644 --- a/examples/from-private-vpc/main.tf +++ b/examples/from-private-vpc/main.tf @@ -9,7 +9,7 @@ provider "aws" { module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 18.0" + version = "20.29.0" cluster_name = var.cluster_name cluster_version = var.cluster_version @@ -18,6 +18,9 @@ module "eks" { cluster_endpoint_public_access = true create_cloudwatch_log_group = false + enable_cluster_creator_admin_permissions = true + authentication_mode = "API_AND_CONFIG_MAP" + vpc_id = var.vpc_id subnet_ids = concat(var.public_subnet_ids, var.private_subnet_ids) @@ -95,7 +98,7 @@ data "aws_eks_addon_version" "core-dns" { } resource "aws_eks_addon" "vpc-cni" { - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name addon_name = "vpc-cni" addon_version = data.aws_eks_addon_version.vpc-cni.version resolve_conflicts_on_update = "OVERWRITE" @@ -104,71 +107,19 @@ resource "aws_eks_addon" "vpc-cni" { } resource "aws_eks_addon" "core-dns" { - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name addon_name = "coredns" addon_version = data.aws_eks_addon_version.core-dns.version resolve_conflicts_on_update = "OVERWRITE" } resource "aws_eks_addon" "kube-proxy" { - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name addon_name = "kube-proxy" addon_version = data.aws_eks_addon_version.kube-proxy.version resolve_conflicts_on_update = "OVERWRITE" } -################################################################################ -# Create aws-auth configmap -# (the eks module recently removed their support for aws-auth management (>=18)) -################################################################################ - -data "aws_eks_cluster_auth" "this" { - name = module.eks.cluster_id -} - -locals { - kubeconfig = yamlencode({ - apiVersion = "v1" - kind = 
"Config" - current-context = "terraform" - clusters = [{ - name = module.eks.cluster_id - cluster = { - certificate-authority-data = module.eks.cluster_certificate_authority_data - server = module.eks.cluster_endpoint - } - }] - contexts = [{ - name = "terraform" - context = { - cluster = module.eks.cluster_id - user = "terraform" - } - }] - users = [{ - name = "terraform" - user = { - token = data.aws_eks_cluster_auth.this.token - } - }] - }) -} - -resource "null_resource" "patch" { - triggers = { - kubeconfig = base64encode(local.kubeconfig) - cmd_patch = "echo \"${module.eks.aws_auth_configmap_yaml}\" | kubectl apply --kubeconfig <(echo $KUBECONFIG | base64 --decode) -f -" - } - - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - environment = { - KUBECONFIG = self.triggers.kubeconfig - } - command = self.triggers.cmd_patch - } -} - ################################################################################ # Import EKS cluster into Ocean ################################################################################ @@ -182,7 +133,7 @@ module "ocean-aws-k8s" { source = "spotinst/ocean-aws-k8s/spotinst" version = "1.5.0" - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name region = var.aws_region subnet_ids = var.private_subnet_ids worker_instance_profile_arn = module.eks.self_managed_node_groups["bootstrap"].iam_instance_profile_arn @@ -196,6 +147,10 @@ module "ocean-aws-k8s" { } } +data "aws_eks_cluster_auth" "this" { + name = module.eks.cluster_name +} + provider "helm" { kubernetes { host = module.eks.cluster_endpoint @@ -211,7 +166,7 @@ module "ocean-controller" { spotinst_token = var.spotinst_token spotinst_account = var.spotinst_account - cluster_identifier = module.eks.cluster_id + cluster_identifier = module.eks.cluster_name } ################################################################################ @@ -224,7 +179,7 @@ module "ocean-spark" { ocean_cluster_id = module.ocean-aws-k8s.ocean_id cluster_config = { - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name certificate_authority_data = module.eks.cluster_certificate_authority_data server_endpoint = module.eks.cluster_endpoint token = data.aws_eks_cluster_auth.this.token From a2c9af985fabc228c134400bfa33fc391694b788 Mon Sep 17 00:00:00 2001 From: Z4ck404 Date: Thu, 28 Nov 2024 15:28:29 +0100 Subject: [PATCH 3/4] [exmaples/from-scratch-with-privatelink] refactor & update the eks module --- .../from-scratch-with-private-link/main.tf | 72 ++++--------------- 1 file changed, 13 insertions(+), 59 deletions(-) diff --git a/examples/from-scratch-with-private-link/main.tf b/examples/from-scratch-with-private-link/main.tf index 0dd1bd7..e9b5ef7 100644 --- a/examples/from-scratch-with-private-link/main.tf +++ b/examples/from-scratch-with-private-link/main.tf @@ -144,7 +144,7 @@ resource "aws_security_group_rule" "ingress_https" { module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 18.0" + version = "20.29.0" cluster_name = var.cluster_name cluster_version = var.cluster_version @@ -153,6 +153,9 @@ module "eks" { cluster_endpoint_public_access = true create_cloudwatch_log_group = false + enable_cluster_creator_admin_permissions = true + authentication_mode = "API_AND_CONFIG_MAP" + vpc_id = module.vpc.vpc_id subnet_ids = concat(module.vpc.public_subnets, module.vpc.private_subnets) @@ -247,7 +250,7 @@ data "aws_eks_addon_version" "core-dns" { } resource "aws_eks_addon" "vpc-cni" { - cluster_name = module.eks.cluster_id + cluster_name = 
module.eks.cluster_name addon_name = "vpc-cni" addon_version = data.aws_eks_addon_version.vpc-cni.version resolve_conflicts_on_update = "OVERWRITE" @@ -256,74 +259,25 @@ resource "aws_eks_addon" "vpc-cni" { } resource "aws_eks_addon" "core-dns" { - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name addon_name = "coredns" addon_version = data.aws_eks_addon_version.core-dns.version resolve_conflicts_on_update = "OVERWRITE" } resource "aws_eks_addon" "kube-proxy" { - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name addon_name = "kube-proxy" addon_version = data.aws_eks_addon_version.kube-proxy.version resolve_conflicts_on_update = "OVERWRITE" } - ################################################################################ -# Create aws-auth configmap -# (the eks module recently removed their support for aws-auth management (>=18)) +# Install the aws load balancer controller ################################################################################ data "aws_eks_cluster_auth" "this" { - name = module.eks.cluster_id -} - -locals { - kubeconfig = yamlencode({ - apiVersion = "v1" - kind = "Config" - current-context = "terraform" - clusters = [{ - name = module.eks.cluster_id - cluster = { - certificate-authority-data = module.eks.cluster_certificate_authority_data - server = module.eks.cluster_endpoint - } - }] - contexts = [{ - name = "terraform" - context = { - cluster = module.eks.cluster_id - user = "terraform" - } - }] - users = [{ - name = "terraform" - user = { - token = data.aws_eks_cluster_auth.this.token - } - }] - }) -} - -resource "null_resource" "patch" { - triggers = { - kubeconfig = base64encode(local.kubeconfig) - cmd_patch = "echo \"${module.eks.aws_auth_configmap_yaml}\" | kubectl apply --kubeconfig <(echo $KUBECONFIG | base64 --decode) -f -" - } - - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - environment = { - KUBECONFIG = self.triggers.kubeconfig - } - command = self.triggers.cmd_patch - } + name = module.eks.cluster_name } - -################################################################################ -# Install the aws load balancer controller -################################################################################ provider "helm" { kubernetes { host = module.eks.cluster_endpoint @@ -337,7 +291,7 @@ module "load_balancer_controller" { cluster_identity_oidc_issuer = module.eks.cluster_oidc_issuer_url cluster_identity_oidc_issuer_arn = module.eks.oidc_provider_arn - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name enabled = true } @@ -355,7 +309,7 @@ module "ocean-aws-k8s" { source = "spotinst/ocean-aws-k8s/spotinst" version = "1.5.0" - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name region = var.aws_region subnet_ids = module.vpc.private_subnets worker_instance_profile_arn = module.eks.self_managed_node_groups["bootstrap"].iam_instance_profile_arn @@ -376,7 +330,7 @@ module "ocean-controller" { spotinst_token = var.spotinst_token spotinst_account = var.spotinst_account - cluster_identifier = module.eks.cluster_id + cluster_identifier = module.eks.cluster_name } ################################################################################ @@ -396,7 +350,7 @@ module "ocean-spark" { ingress_private_link_endpoint_service_address = aws_vpc_endpoint_service.this.service_name cluster_config = { - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name certificate_authority_data = 
module.eks.cluster_certificate_authority_data server_endpoint = module.eks.cluster_endpoint token = data.aws_eks_cluster_auth.this.token From ca6e95be606bb84aa8f60d6cd74054ca70788149 Mon Sep 17 00:00:00 2001 From: Z4ck404 Date: Thu, 28 Nov 2024 15:54:51 +0100 Subject: [PATCH 4/4] Clean readme and clarify module usage --- README.md | 118 ++++++++++++--------------------------------- examples/README.md | 79 ++++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+), 86 deletions(-) create mode 100644 examples/README.md diff --git a/README.md b/README.md index b03f4b3..b89a2aa 100644 --- a/README.md +++ b/README.md @@ -11,16 +11,38 @@ This module imports an existing Ocean cluster into Ocean Spark. * EKS/GKE/AKS cluster integrated with Spot Ocean ## *Usage* + ```hcl +provider "aws" { + region = var.aws_region + profile = var.aws_profile +} + provider "spotinst" { token = var.spotinst_token account = var.spotinst_account } +data "aws_eks_cluster_auth" "this" { + name = "cluster-name" +} + +data "aws_eks_cluster" "this" { + name = "cluster-name" +} + module "ocean-spark" { source = "spotinst/ocean-spark/spotinst" + version = "~> 3.0.0" ocean_cluster_id = var.ocean_cluster_id + + cluster_config = { + cluster_name = "cluster-name" + certificate_authority_data = data.aws_eks_cluster.this.certificate_authority[0].data + server_endpoint = data.aws_eks_cluster.this.endpoint + token = data.aws_eks_cluster_auth.this.token + } } ``` @@ -32,95 +54,19 @@ module "ocean-spark" { ## *Examples* -It can be combined with other Terraform modules to support a number of installation methods for Ocean Spark: -1. Create an Ocean Spark cluster from scratch in your AWS account -2. Create an Ocean Spark Cluster from scratch in your AWS account with AWS Private Link support. -3. Create an Ocean Spark cluster from scratch in your GCP account -4. Create an Ocean Spark cluster from scratch in your Azure account -5. Import an existing EKS cluster into Ocean Spark -6. Import an existing GKE cluster into Ocean Spark -7. Import an existing AKS cluster into Ocean Spark -8. Import an existing Ocean cluster into Ocean Spark - - - -#### 1. Create an Ocean Spark cluster in AWS from scratch - -1. Use the [AWS `vpc` Terraform Module](https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws/latest) to create a VPC network. -2. use the [AWS `eks` Terraform Module](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest) to create an EKS cluster. -3. Use the [SPOTINST `ocean-aws-k8s` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-aws-k8s/spotinst/latest) to import the EKS cluster into Ocean -4. Use the [SPOTINST `kubernetes-controller` Terraform module](https://registry.terraform.io/modules/spotinst/kubernetes-controller/ocean/latest) to install the ocean controller deployment into kubernetes -5. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark. - -Folder [`examples/from-scratch/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/tree/main/examples/from-scratch) contains a full example. - -#### 2. Create an Ocean Spark Cluster from scratch with AWS Private Link support. - -1. Use the [AWS `vpc` Terraform Module](https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws/latest) to create a VPC network. -2. Use the [AWS `eks` Terraform module](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest) to create an EKS cluster. -3. 
Use the [SPOTINST `ocean-aws-k8s` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-aws-k8s/spotinst/latest) to import the EKS cluster into Ocean -4. Use the [SPOTINST `kubernetes-controller` Terraform module](https://registry.terraform.io/modules/spotinst/kubernetes-controller/ocean/latest) to install the ocean controller deployment into kubernetes -5. Create the Private link required resources (NLB, VPC endpoint service and LB TargetGroup). [AWS Docs About PrivateLink](https://docs.aws.amazon.com/vpc/latest/privatelink/getting-started.html). -6. Use the [ Terraform AWS EKS LB Controller Module](https://github.com/DNXLabs/terraform-aws-eks-lb-controller) to install the aws load balancer controller in the EKS cluster. -7. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark and set the [ ingress private link input ](https://registry.terraform.io/providers/spotinst/spotinst/latest/docs/resources/ocean_spark#nestedblock--ingress--private_link) - -Folder [`examples/from-scratch-with-private-link/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/tree/main/examples/from-scratch-with-private-link) contains a full example. - -#### 3. Create an Ocean Spark cluster in GCP from scratch - -1. use the [GCP `google_container_cluster` Terraform resource](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster) to create an GKE cluster. -2. Use the [SPOTINST `spotinst_ocean_gke_import` Terraform resource](https://registry.terraform.io/providers/spotinst/spotinst/latest/docs/resources/ocean_gke_import) to import the GKE cluster into Ocean -3. Use the [SPOTINST `kubernetes-controller` Terraform module](https://registry.terraform.io/modules/spotinst/kubernetes-controller/ocean/latest) to install the ocean controller deployment into kubernetes -4. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark. - -Folder [`examples/gcp-from-scratch/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/blob/main/examples/gcp-from-scratch/main.tf) contains a full example. - -#### 4. Create an Ocean Spark cluster in AKS from scratch - - -1. Use the [Azure `azurerm_virtual_network` Terraform resource](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/virtual_network) and [Azure `azurerm_subnet` Terraform resource](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/subnet) to create a VPC network -2. Use the [Azure `aks` Terraform Module](https://registry.terraform.io/modules/Azure/aks/azurerm/latest) to create an Azure cluster. -3. Use the [SPOTINST `ocean-aks-np-k8s` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-aks-np-k8s/spotinst/latest) to import the AKS cluster into Ocean -4. Use the [SPOTINST `ocean-controller` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-controller/spotinst/latest) to install the controller deployment into kubernetes -5. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark. - -Folder [`examples/azure-from-scratch/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/blob/main/examples/azure-from-scratch/main.tf) contains a full example. - -#### 5. Import an existing EKS cluster - -1. Use the [SPOTINST `ocean-aws-k8s` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-aws-k8s/spotinst/latest) to import the EKS cluster into Ocean -2. 
Use the [SPOTINST `kubernetes-controller` Terraform module](https://registry.terraform.io/modules/spotinst/kubernetes-controller/ocean/latest) to install the ocean controller deployment into kubernetes -3. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark. - -Folder [`examples/import-eks-cluster/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/tree/main/examples/import-eks-cluster) contains a full example. - -#### 6. Import an existing GKE cluster - -1. Use the [SPOTINST `spotinst_ocean_gke_import` Terraform resource](https://registry.terraform.io/providers/spotinst/spotinst/latest/docs/resources/ocean_gke_import) to import the GKE cluster into Ocean -2. Use the [SPOTINST `kubernetes-controller` Terraform module](https://registry.terraform.io/modules/spotinst/kubernetes-controller/ocean/latest) to install the ocean controller deployment into kubernetes -3. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark. - -Folder [`examples/gcp-import-gke-cluster/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/blob/main/examples/gcp-import-gke-cluster/) contains a full example. - -#### 7. Import an existing AKS cluster - -1. Use the [SPOTINST `ocean-aks-np-k8s` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-aks-np-k8s/spotinst/latest) to import the AKS cluster into Ocean -2. Use the [SPOTINST `ocean-controller` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-controller/spotinst/latest) to install the controller deployment into kubernetes -3. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark. - -Folder [`examples/azure-import-aks-cluster/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/blob/main/examples/azure-import-aks-cluster/) contains a full example. - - -#### 8. Import an existing Ocean cluster - -1. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark. - -Folder [`examples/import-ocean-cluster/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/tree/main/examples/import-ocean-cluster) contains a full example. - +This module can be combined with other Terraform modules to support a number of installation methods for Ocean Spark: +1. [Create an Ocean Spark cluster from scratch in your AWS account](/examples/from-scratch/) +2. [Create an Ocean Spark Cluster from scratch in your AWS account with AWS Private Link support](/examples/from-scratch-with-private-link/) +3. [Create an Ocean Spark cluster from scratch in your GCP account](/examples/gcp-from-scratch/) +4. [Create an Ocean Spark cluster from scratch in your Azure account](/examples/azure-from-scratch/) +5. [Import an existing EKS cluster into Ocean Spark](/examples/import-eks-cluster/) +6. [Import an existing GKE cluster into Ocean Spark](/examples/gcp-import-gke-cluster/) +7. [Import an existing AKS cluster into Ocean Spark](/examples/azure-import-aks-cluster/) +8. 
[Import an existing Ocean cluster into Ocean Spark](/examples/import-ocean-cluster/)

### :warning: Before running `terraform destroy` :warning:

-#### If your cluster was created with `v1` of the module or you set `deployer_namespace = spot-system`, follow those steps:
+#### If your cluster was created with `v1` of the module or you set `deployer_namespace = spot-system`, follow these steps:

1- Switch your kubectl context to the targeted cluster
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 0000000..e4ae0a1
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,79 @@
+## Examples
+
+#### 1. Create an Ocean Spark cluster in AWS from scratch
+
+1. Use the [AWS `vpc` Terraform Module](https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws/latest) to create a VPC network.
+2. Use the [AWS `eks` Terraform Module](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest) to create an EKS cluster.
+3. Use the [SPOTINST `ocean-aws-k8s` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-aws-k8s/spotinst/latest) to import the EKS cluster into Ocean.
+4. Use the [SPOTINST `kubernetes-controller` Terraform module](https://registry.terraform.io/modules/spotinst/kubernetes-controller/ocean/latest) to install the Ocean controller deployment into Kubernetes.
+5. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark.
+
+The folder [`from-scratch/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/tree/main/examples/from-scratch) contains a full example.
+
+
+#### 2. Create an Ocean Spark Cluster from scratch with AWS Private Link support
+
+1. Use the [AWS `vpc` Terraform Module](https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws/latest) to create a VPC network.
+2. Use the [AWS `eks` Terraform module](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest) to create an EKS cluster.
+3. Use the [SPOTINST `ocean-aws-k8s` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-aws-k8s/spotinst/latest) to import the EKS cluster into Ocean.
+4. Use the [SPOTINST `kubernetes-controller` Terraform module](https://registry.terraform.io/modules/spotinst/kubernetes-controller/ocean/latest) to install the Ocean controller deployment into Kubernetes.
+5. Create the required AWS PrivateLink resources (NLB, VPC endpoint service, and LB target group). See the [AWS docs about PrivateLink](https://docs.aws.amazon.com/vpc/latest/privatelink/getting-started.html).
+6. Use the [Terraform AWS EKS LB Controller module](https://github.com/DNXLabs/terraform-aws-eks-lb-controller) to install the AWS Load Balancer Controller in the EKS cluster.
+7. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark and set the [ingress private link input](https://registry.terraform.io/providers/spotinst/spotinst/latest/docs/resources/ocean_spark#nestedblock--ingress--private_link).
+
+The folder [`from-scratch-with-private-link/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/tree/main/examples/from-scratch-with-private-link) contains a full example.
+
+
+#### 3. Create an Ocean Spark cluster in GCP from scratch
+
+1. Use the [GCP `google_container_cluster` Terraform resource](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster) to create a GKE cluster.
+2. 
Use the [SPOTINST `spotinst_ocean_gke_import` Terraform resource](https://registry.terraform.io/providers/spotinst/spotinst/latest/docs/resources/ocean_gke_import) to import the GKE cluster into Ocean.
+3. Use the [SPOTINST `kubernetes-controller` Terraform module](https://registry.terraform.io/modules/spotinst/kubernetes-controller/ocean/latest) to install the Ocean controller deployment into Kubernetes.
+4. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark.
+
+The folder [`gcp-from-scratch/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/blob/main/examples/gcp-from-scratch/main.tf) contains a full example.
+
+
+#### 4. Create an Ocean Spark cluster in AKS from scratch
+
+1. Use the [Azure `azurerm_virtual_network` Terraform resource](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/virtual_network) and [Azure `azurerm_subnet` Terraform resource](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/subnet) to create a VPC network.
+2. Use the [Azure `aks` Terraform Module](https://registry.terraform.io/modules/Azure/aks/azurerm/latest) to create an AKS cluster.
+3. Use the [SPOTINST `ocean-aks-np-k8s` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-aks-np-k8s/spotinst/latest) to import the AKS cluster into Ocean.
+4. Use the [SPOTINST `ocean-controller` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-controller/spotinst/latest) to install the controller deployment into Kubernetes.
+5. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark.
+
+The folder [`azure-from-scratch/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/blob/main/examples/azure-from-scratch/main.tf) contains a full example.
+
+
+#### 5. Import an existing EKS cluster
+
+1. Use the [SPOTINST `ocean-aws-k8s` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-aws-k8s/spotinst/latest) to import the EKS cluster into Ocean.
+2. Use the [SPOTINST `kubernetes-controller` Terraform module](https://registry.terraform.io/modules/spotinst/kubernetes-controller/ocean/latest) to install the Ocean controller deployment into Kubernetes.
+3. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark.
+
+The folder [`import-eks-cluster/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/tree/main/examples/import-eks-cluster) contains a full example.
+
+
+#### 6. Import an existing GKE cluster
+
+1. Use the [SPOTINST `spotinst_ocean_gke_import` Terraform resource](https://registry.terraform.io/providers/spotinst/spotinst/latest/docs/resources/ocean_gke_import) to import the GKE cluster into Ocean.
+2. Use the [SPOTINST `kubernetes-controller` Terraform module](https://registry.terraform.io/modules/spotinst/kubernetes-controller/ocean/latest) to install the Ocean controller deployment into Kubernetes.
+3. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark.
+
+The folder [`gcp-import-gke-cluster/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/blob/main/examples/gcp-import-gke-cluster/) contains a full example.
+
+
+#### 7. Import an existing AKS cluster
+
+1. Use the [SPOTINST `ocean-aks-np-k8s` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-aks-np-k8s/spotinst/latest) to import the AKS cluster into Ocean.
+2. 
Use the [SPOTINST `ocean-controller` Terraform module](https://registry.terraform.io/modules/spotinst/ocean-controller/spotinst/latest) to install the controller deployment into Kubernetes.
+3. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark.
+
+The folder [`azure-import-aks-cluster/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/blob/main/examples/azure-import-aks-cluster/) contains a full example.
+
+
+#### 8. Import an existing Ocean cluster
+
+1. Use the [SPOTINST `ocean-spark` Terraform module](this module) to import the cluster into Ocean Spark.
+
+The folder [`import-ocean-cluster/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/tree/main/examples/import-ocean-cluster) contains a full example.
\ No newline at end of file
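For case 8, the whole integration is a single call to this module. The snippet below is a minimal sketch that mirrors the usage example in the main README for an EKS-backed Ocean cluster; `var.ocean_cluster_id`, the Spot credential variables, and the `"cluster-name"` placeholder are assumptions to replace with your own values, and the [`import-ocean-cluster/`](https://github.com/spotinst/terraform-spotinst-ocean-spark/tree/main/examples/import-ocean-cluster) folder remains the complete, working configuration.

```hcl
provider "aws" {
  region  = var.aws_region
  profile = var.aws_profile
}

provider "spotinst" {
  token   = var.spotinst_token
  account = var.spotinst_account
}

# Connection details for the existing EKS cluster that already runs Ocean.
data "aws_eks_cluster" "this" {
  name = "cluster-name" # placeholder
}

data "aws_eks_cluster_auth" "this" {
  name = "cluster-name" # placeholder
}

module "ocean-spark" {
  source  = "spotinst/ocean-spark/spotinst"
  version = "~> 3.0.0"

  # ID of the existing Ocean cluster to import into Ocean Spark.
  ocean_cluster_id = var.ocean_cluster_id

  cluster_config = {
    cluster_name                = "cluster-name" # placeholder
    certificate_authority_data  = data.aws_eks_cluster.this.certificate_authority[0].data
    server_endpoint             = data.aws_eks_cluster.this.endpoint
    token                       = data.aws_eks_cluster_auth.this.token
  }
}
```

Because the cluster is assumed to be integrated with Spot Ocean already (the module's stated prerequisite), no VPC, EKS, or Ocean controller resources are declared here.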