[IBCDPE-935] private worker pool module #12

Merged Jul 25, 2024 (17 commits). Changes from all commits.
22 changes: 14 additions & 8 deletions common-resources/policies/main.tf
@@ -1,18 +1,24 @@
resource "spacelift_policy" "enforce-tags-on-resources" {
  name        = "Enforce Tags On Resources - cli"
  body        = file("${path.module}/enforce-tags-on-resources.rego")
  type        = "PLAN"
  labels      = ["compliance", "plan", "tagging", "terraform"]
  description = "This policy ensures that all Terraform-managed resources adhere to tagging conventions by requiring the presence of specific tags. It denies changes to resources that lack any of these required tags, emphasizing the importance of consistent tagging for resource identification, environment management, and ownership tracking. The policy aids in maintaining order, facilitating cost allocation, security, and governance across the infrastructure."
  space_id    = "root"
}


resource "spacelift_policy" "cloud-spend-estimation" {
name = "Cloud Spend Estimation - cli"
body = file("${path.module}/check-estimated-cloud-spend.rego")
type = "PLAN"
name = "Cloud Spend Estimation - cli"
body = file("${path.module}/check-estimated-cloud-spend.rego")
type = "PLAN"
space_id = "root"
}


resource "spacelift_policy" "drift-detection-warning" {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If we don't have the premium policy , does this work?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Defining policies in this file just makes them available to be used. It does not add it to any stacks.

If this policy were added to a stack it would always pass because of the condition in .rego file:
input.spacelift.run.drift_detection

drift_detection would always not exist or be false in all cases.

Since I had already written this I was just going to leave it in.

name = "drift-detection-warning"
body = file("${path.module}/warn-for-drift-reconciliation.rego")
type = "PLAN"
space_id = "root"
}
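
To make the distinction above concrete: a policy only takes effect once it is attached to a stack. A minimal sketch with the Spacelift provider's `spacelift_policy_attachment` resource, using a hypothetical stack ID that is not part of this PR:

```
resource "spacelift_policy_attachment" "drift-detection-warning" {
  policy_id = spacelift_policy.drift-detection-warning.id
  stack_id  = "my-example-stack" # hypothetical stack ID, for illustration only
}
```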
5 changes: 5 additions & 0 deletions common-resources/policies/outputs.tf
@@ -7,3 +7,8 @@ output "check_estimated_cloud_spend_id" {
  value       = spacelift_policy.cloud-spend-estimation.id
  description = "The ID for this spacelift_policy"
}

output "drift_detection_warning_id" {
  value       = spacelift_policy.drift-detection-warning.id
  description = "The ID for this spacelift_policy"
}
5 changes: 5 additions & 0 deletions common-resources/policies/warn-for-drift-reconciliation.rego
@@ -0,0 +1,5 @@
package spacelift

warn["Drift reconciliation requires manual approval"] {
  input.spacelift.run.drift_detection
}
1 change: 1 addition & 0 deletions dev/stacks/dpe-sandbox-k8s-deployments/main.tf
@@ -47,6 +47,7 @@ module "sage-aws-eks-autoscaler" {
# ip_protocol = "-1" # semantically equivalent to all ports
# }


resource "kubernetes_namespace" "testing" {
  metadata {
    name = "testing-namespace"
6 changes: 6 additions & 0 deletions dev/stacks/dpe-sandbox-k8s-deployments/provider.tf
@@ -8,3 +8,9 @@ provider "kubernetes" {
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.cluster.token
}

provider "helm" {
kubernetes {
config_path = var.kube_config_path
}
}
19 changes: 11 additions & 8 deletions main.tf
@@ -1,11 +1,14 @@
# After infra is imported it can be commented out or removed. Keeping it here for reference.

# import {
#   # The initial administrative stack is created manually in the Spacelift UI, and imported
#   # See https://docs.spacelift.io/vendors/terraform/terraform-provider.html#proposed-workflow
#   # "We suggest to first manually create a single administrative stack, and then use it
#   # to programmatically define other stacks as necessary."
#   to = spacelift_stack.root_administrative_stack
#   id = "root-spacelift-administrative-stack"
# }

resource "spacelift_stack" "root_administrative_stack" {
  github_enterprise {
    namespace = "Sage-Bionetworks-Workflows"
21 changes: 21 additions & 0 deletions modules/main.tf
@@ -60,3 +60,24 @@ resource "spacelift_version" "sage-aws-eks-autoscaler-version" {
  module_id      = spacelift_module.sage-aws-eks-autoscaler.id
  version_number = "0.2.2"
}

resource "spacelift_module" "spacelift-private-workerpool" {
github_enterprise {
namespace = "Sage-Bionetworks-Workflows"
id = "sage-bionetworks-workflows-gh"
}

name = "spacelift-private-workerpool"
terraform_provider = "aws"
administrative = false
branch = "ibcdpe-935-vpc-updates"
description = "Module for the spacelift private workerpool helm chart which deploys the K8s operator"
repository = "eks-stack"
project_root = "modules/spacelift-private-worker"
space_id = "root"
}

resource "spacelift_version" "spacelift-private-workerpool-version" {
  module_id      = spacelift_module.spacelift-private-workerpool.id
  version_number = "0.1.3"
}
62 changes: 62 additions & 0 deletions modules/spacelift-private-worker/README.md
@@ -0,0 +1,62 @@
# Purpose
This module creates a Helm release for Spacelift private workers. It follows
the instructions outlined at <https://docs.spacelift.io/concepts/worker-pools/kubernetes-workers>.

Spacelift private workers are required in order to use `Drift Detection`. Documentation
on this: <https://docs.spacelift.io/concepts/stack/drift-detection>

Also note: in order to use private workers you must be on the enterprise plan of
Spacelift, which charges for each private worker in use.


## Examples

Deploying the private workerpool is a two-step process (unless more time is spent
figuring out a one-step process):

1) Add the module and deploy it to your stack with `create-worker-pool = false`
2) Change the bool to `true` and deploy again

The reason for this is that the Helm chart that deploys this to the K8s cluster needs
to first install CRDs (Custom Resource Definitions) into the cluster. Once those are
created, we can create the resource definition for the worker pool that specifies
how many instances to run and what settings to run the worker pool under. Step 1 is
shown below; a sketch of step 2 follows it.

```
module "spacelift-private-workerpool" {
  source  = "spacelift.io/sagebionetworks/spacelift-private-workerpool/aws"
  version = "0.1.3"

  cluster_name = var.cluster_name

  # Deployment steps:
  # 1. Deploy with this as false in order to create the K8s CRD
  # 2. Create the required secrets
  # 3. Deploy with this as true in order to create the workerpool
  create-worker-pool = false
}
```
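
After the CRDs and the secret exist, the same module block is applied again with the
flag flipped. A minimal sketch of step 2 (everything here mirrors the block above):

```
module "spacelift-private-workerpool" {
  source  = "spacelift.io/sagebionetworks/spacelift-private-workerpool/aws"
  version = "0.1.3"

  cluster_name = var.cluster_name

  # The CRDs and secrets now exist, so the WorkerPool resource can be created
  create-worker-pool = true
}
```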

## What is left for production
If this is going to be used for a production use case, the secret management will need
to be revisited. The Helm chart assumes that a Kubernetes secret already exists. Here is how
to create it with the kubectl CLI:

```
SPACELIFT_WP_TOKEN=<enter-token>
SPACELIFT_WP_PRIVATE_KEY=<enter-base64-encoded-key>

kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: test-workerpool
type: Opaque
stringData:
  token: ${SPACELIFT_WP_TOKEN}
  privateKey: ${SPACELIFT_WP_PRIVATE_KEY}
EOF
```

We would likely want the secret to be stored in AWS Secrets Manager and accessed via:
<https://docs.aws.amazon.com/secretsmanager/latest/userguide/integrating_csi_driver.html>
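
A hedged sketch of what that integration might look like, assuming the Secrets Store
CSI driver and its AWS provider are installed in the cluster. The Secrets Manager
entry names below are hypothetical, and the synced Kubernetes secret only
materializes while a pod mounts the CSI volume:

```
resource "kubernetes_manifest" "workerpool-secrets" {
  manifest = {
    apiVersion = "secrets-store.csi.x-k8s.io/v1"
    kind       = "SecretProviderClass"
    metadata = {
      name      = "spacelift-workerpool-secrets"
      namespace = "spacelift-workerpool"
    }
    spec = {
      provider = "aws"
      parameters = {
        # Pull both secrets from AWS Secrets Manager (entry names are assumptions)
        objects = yamlencode([
          { objectName = "spacelift_worker_pool_token", objectType = "secretsmanager" },
          { objectName = "spacelift_worker_pool_private_key", objectType = "secretsmanager" },
        ])
      }
      # Mirror the mounted secrets into the K8s Secret the WorkerPool references
      secretObjects = [{
        secretName = "test-workerpool"
        type       = "Opaque"
        data = [
          { objectName = "spacelift_worker_pool_token", key = "token" },
          { objectName = "spacelift_worker_pool_private_key", key = "privateKey" },
        ]
      }]
    }
  }
}
```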

25 changes: 25 additions & 0 deletions modules/spacelift-private-worker/data.tf
@@ -0,0 +1,25 @@
data "aws_eks_cluster" "cluster" {
name = var.cluster_name
}

data "aws_eks_cluster_auth" "cluster" {
name = var.cluster_name
}

# This will probably be created manually in the AWS console to prevent the token from being stored in the repo
# TODO: Some more work is needed to integrate with https://docs.aws.amazon.com/secretsmanager/latest/userguide/integrating_csi_driver.html
# For an MVP a kubernetes secret can be created manually
# data "aws_secretsmanager_secret" "worker-pool-token" {
# name = "spacelift_worker_pool_token"
# }
# data "aws_secretsmanager_secret" "worker-pool-private-key" {
# name = "spacelift_worker_pool_private_key"
# }

# data "aws_secretsmanager_secret_version" "worker-pool-token-secret" {
# secret_id = data.aws_secretsmanager_secret.worker-pool-token.id
# }

# data "aws_secretsmanager_secret_version" "worker-pool-private-key-secret" {
# secret_id = data.aws_secretsmanager_secret.worker-pool-private-key.id
# }
65 changes: 65 additions & 0 deletions modules/spacelift-private-worker/main.tf
@@ -0,0 +1,65 @@
resource "kubernetes_namespace" "spacelift-workerpool" {
metadata {
name = "spacelift-workerpool"
}
}


resource "helm_release" "spacelift-workerpool" {
name = "spacelift-workerpool-controller"
repository = "https://downloads.spacelift.io/helm"
chart = "spacelift-workerpool-controller"
namespace = "spacelift-workerpool"
version = "0.24.0"
depends_on = [kubernetes_namespace.spacelift-workerpool]
}

resource "kubernetes_manifest" "test-workerpool" {
// This is being conditionally created because of the required order of operations
// The CRD must be created before the workerpool, so we need to wait for the helm release to be created
count = var.create-worker-pool ? 1 : 0

depends_on = [
helm_release.spacelift-workerpool
]

manifest = {
apiVersion = "workers.spacelift.io/v1beta1"
kind = "WorkerPool"
metadata = {
name = "test-workerpool"
namespace = "spacelift-workerpool"
}
spec = {
poolSize = 2
token = {
secretKeyRef = {
name = "test-workerpool"
key = "token"
}
}
privateKey = {
secretKeyRef = {
name = "test-workerpool"
key = "privateKey"
}
}
}
}
}

# How to create a K8s resource for the spacelift secrets:

# SPACELIFT_WP_TOKEN=<enter-token>
# SPACELIFT_WP_PRIVATE_KEY=<enter-base64-encoded-key>

# kubectl apply -f - <<EOF
# apiVersion: v1
# kind: Secret
# metadata:
#   name: test-workerpool
# type: Opaque
# stringData:
#   token: ${SPACELIFT_WP_TOKEN}
#   privateKey: ${SPACELIFT_WP_PRIVATE_KEY}
# EOF
16 changes: 16 additions & 0 deletions modules/spacelift-private-worker/provider.tf
@@ -0,0 +1,16 @@
provider "aws" {
region = var.region
}

provider "kubernetes" {
config_path = var.kube_config_path
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
token = data.aws_eks_cluster_auth.cluster.token
}

provider "helm" {
kubernetes {
config_path = var.kube_config_path
}
}
23 changes: 23 additions & 0 deletions modules/spacelift-private-worker/variables.tf
@@ -0,0 +1,23 @@
variable "cluster_name" {
description = "Name of K8 cluster"
type = string
}

variable "kube_config_path" {
description = "Kube config path"
type = string
default = "~/.kube/config"
}

variable "region" {
description = "AWS region"
type = string
default = "us-east-1"
}


variable "create-worker-pool" {
description = "Determines if a workerpool should be created"
type = bool
default = false
}