diff --git a/main.tf b/main.tf
index 4f18bd67..d208b71a 100644
--- a/main.tf
+++ b/main.tf
@@ -138,44 +138,14 @@ module "eks" {
       }
     }
   }
-  }
-  # https://docs.aws.amazon.com/eks/latest/userguide/access-policies.html#access-policy-permissions
-  # TODO: Additional roles that need to be created:
-  # AmazonEKSAdminViewPolicy?
-  # AmazonEKSEditPolicy
-  # AmazonEKSViewPolicy
+  }
+  # https://docs.aws.amazon.com/eks/latest/userguide/access-policies.html#access-policy-permissions
+  # TODO: Additional roles that need to be created:
+  # AmazonEKSAdminViewPolicy?
+  # AmazonEKSEditPolicy
+  # AmazonEKSViewPolicy
   }

   tags = var.tags
 }
-
-module "ocean-controller" {
-  source = "spotinst/ocean-controller/spotinst"
-
-  # Credentials.
-  spotinst_token   = data.aws_secretsmanager_secret_version.secret_credentials.secret_string
-  spotinst_account = var.spotinst_account
-
-  # Configuration.
-  cluster_identifier = var.cluster_name
-}
-
-module "ocean-aws-k8s" {
-  source  = "spotinst/ocean-aws-k8s/spotinst"
-  version = "1.2.0"
-
-  depends_on = [module.eks, module.vpc]
-
-  # Configuration
-  cluster_name                     = var.cluster_name
-  region                           = var.region
-  subnet_ids                       = module.vpc.private_subnets
-  worker_instance_profile_arn      = tolist(data.aws_iam_instance_profiles.profile.arns)[0]
-  security_groups                  = [module.eks.node_security_group_id]
-  is_aggressive_scale_down_enabled = true
-  max_scale_down_percentage        = 33
-  # Overwrite Name Tag and add additional
-  # tags = {
-  #   "kubernetes.io/cluster/tyu-spot-ocean" = "owned"
-  # }
-}
diff --git a/modules/internal-k8-infra/data.tf b/modules/internal-k8-infra/data.tf
index f069b694..17235fcb 100644
--- a/modules/internal-k8-infra/data.tf
+++ b/modules/internal-k8-infra/data.tf
@@ -13,3 +13,19 @@ data "aws_secretsmanager_secret" "spotinst_token" {
 data "aws_secretsmanager_secret_version" "secret_credentials" {
   secret_id = data.aws_secretsmanager_secret.spotinst_token.id
 }
+
+# TODO: Look this VPC up by tag or name rather than by hard-coded ID, since the
+# ID changes on every teardown/recreate (see the sketch after this diff).
+data "aws_vpc" "selected" {
+  id = "vpc-0f30cfca319ebc521" # the Spacelift-created VPC ("spacelift-created-vpc")
+}
+
+data "aws_eks_node_group" "profile" {
+  cluster_name    = var.cluster_name
+  node_group_name = "one"
+}
+
+# Derive the role name from the node group's role ARN (assumes no IAM path).
+data "aws_iam_instance_profiles" "profile" {
+  role_name = element(split("/", data.aws_eks_node_group.profile.node_role_arn), 1)
+}
diff --git a/modules/internal-k8-infra/main.tf b/modules/internal-k8-infra/main.tf
index 256bbd7d..0ee4ec08 100644
--- a/modules/internal-k8-infra/main.tf
+++ b/modules/internal-k8-infra/main.tf
@@ -1,3 +1,31 @@
+module "kubernetes-controller" {
+  source  = "spotinst/kubernetes-controller/ocean"
+  version = "0.0.2"
+
+  # Credentials
+  spotinst_token   = data.aws_secretsmanager_secret_version.secret_credentials.secret_string
+  spotinst_account = var.spotinst_account
+
+  # Configuration
+  cluster_identifier = var.cluster_name
+}
+
+module "ocean-aws-k8s" {
+  source  = "spotinst/ocean-aws-k8s/spotinst"
+  version = "1.2.0"
+
+  # Configuration
+  cluster_name                     = var.cluster_name
+  region                           = var.region
+  subnet_ids                       = data.aws_eks_node_group.profile.subnet_ids
+  worker_instance_profile_arn      = tolist(data.aws_iam_instance_profiles.profile.arns)[0]
+  security_groups                  = [data.aws_eks_cluster.cluster.vpc_config[0].cluster_security_group_id]
+  is_aggressive_scale_down_enabled = true
+  max_scale_down_percentage        = 33
+  tags                             = var.tags
+}
+
 resource "kubernetes_namespace" "airflow" {
   metadata {
     name = "airflow"
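
Regarding the VPC TODO in modules/internal-k8-infra/data.tf: the data source could select the VPC by tag instead of a hard-coded ID, so the lookup survives teardown/recreate. A minimal sketch, assuming the Spacelift-created VPC carries a Name tag of "spacelift-created-vpc" and that its private subnets are tagged "kubernetes.io/role/internal-elb" = "1" (both tag values are assumptions, not confirmed by this diff):

data "aws_vpc" "selected" {
  filter {
    name   = "tag:Name"
    values = ["spacelift-created-vpc"] # assumed Name tag
  }
}

# Private subnets in the selected VPC, usable as subnet_ids for ocean-aws-k8s.
data "aws_subnets" "private" {
  filter {
    name   = "vpc-id"
    values = [data.aws_vpc.selected.id]
  }
  tags = {
    "kubernetes.io/role/internal-elb" = "1" # assumed private-subnet tag
  }
}

With this in place, subnet_ids in the ocean-aws-k8s module could reference data.aws_subnets.private.ids instead of the node group's subnet list.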