# Ansible

Ansible playbooks are used to configure provisioned server and client nodes to run a functional cluster. They use modular and customizable roles to set up various software.
## Common

This role installs common packages and performs standard post-provisioning, such as creating the configured user, setting the timezone and installing Nomad, Consul and Vault.
> **Note**: Security hardening and installation of Docker are performed separately in the `common.yml` playbook.
Variable | Description | Type | Default |
---|---|---|---|
common_user | User to be created | string | debian |
common_timezone | Timezone to set | string | Asia/Singapore |
common_keyring_dir | Keyring directory path for external apt repositories | string | /etc/apt/keyrings |
common_nfs_dir | NFS share directory path | string | /mnt/storage |
common_packages | List of common packages to be installed | list(string) | See defaults.yml for full list |
common_nomad_version | Nomad version to install | string | 1.6.1-1 |
common_consul_version | Consul version to install | string | 1.15.4-1 |
common_vault_version | Vault version to install | string | 1.14.0-1 |
common_consul_template_version | Consul template version to install | string | 0.32.0-1 |
common_reset_nomad | Clear Nomad data directory | boolean | true |
common_dotfiles | List of dotfiles to be added, and their destinations | list | [] |
- The following tags can be skipped to omit specific tasks:
  - `bw` to not install the Bitwarden CLI
  - `nfs` to not create any NFS share directories
  - `dotfiles` to not copy any remote dotfiles
- `common_reset_nomad` resets the `/opt/nomad/data` directories to a blank slate. To disable this behaviour, set `common_reset_nomad: false`.
- Nomad, Consul, Vault and consul-template are installed with `apt`.
- `common_dotfiles` is used to add dotfiles from a Github repository to the host. For example:

```yml
common_dotfiles:
  - url: https://raw.githubusercontent.com/foo/repo/master/.vimrc
    dest: /home/foo/.vimrc
```
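For example, to run the playbook without the Bitwarden, NFS and dotfiles tasks (a hypothetical invocation, assuming the `main.yml` playbook used elsewhere in these docs):

```bash
$ ansible-playbook main.yml --skip-tags bw,nfs,dotfiles
```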
## Consul Template

This role deploys a new consul-template instance.
Vault-agent is used to authenticate to Vault for consul-template, which only requires access to the `vault_agent_token_file`. This means consul-template requires access to Vault directories. It also requires access to any template destination directories (e.g. the Consul and Nomad TLS directories). As such, the role runs consul-template as root. I'm still considering alternatives that allow consul-template to be run as a non-privileged user.
> **Note**: Vault and Vault-agent do not have to be installed for the role to run successfully. However, they must be available for the consul-template service to start without error.
Variable | Description | Type | Default |
---|---|---|---|
consul_template_dir | Configuration directory | string | /opt/consul-template |
vault_address | Vault instance IP address | string | ${ansible_default_ipv4.address} |
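A minimal sketch of the kind of template stanza the consul-template instance manages — the paths and reloaded service here are illustrative assumptions, not the role's actual output:

```hcl
template {
  # Render a certificate template into a destination directory
  # (e.g. a Consul or Nomad TLS directory).
  source      = "/opt/consul-template/templates/consul-cert.tpl"
  destination = "/opt/consul/tls/cert.pem"
  # Reload the consuming service after renewal.
  command     = "systemctl reload consul"
}
```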
## Consul

This role deploys a new Consul instance. It can deploy Consul as a server or client, depending on the host's group name.
For encryption, the role creates consul-template templates for:

- A gossip encryption key, generated with `consul keygen` if it does not already exist
+already existVariable | Description | Type | Default |
---|---|---|---|
consul_config_dir | Configuration directory | string | /etc/consul.d |
consul_data_dir | Data directory | string | /opt/consul |
consul_tls_dir | TLS files directory | string | ${consul_data_dir}/tls |
consul_template_config_dir | consul-template configuration directory | string | /etc/consul-template |
consul_upstream_dns_address | List of upstream DNS servers for dnsmasq | list(string) | ["1.1.1.1"] |
consul_server | Start Consul in server mode | bool | true |
consul_bootstrap_expect | (server only) The expected number of servers in a cluster | number | 1 |
consul_client | Start Consul in client mode | bool | false |
consul_server_ip | (client only) Server's IP address | string | - |
consul_vault_addr | Vault server API address to use | string | https://localhost:8200 |
consul_common_name | Consul node certificate common_name | string | See below |
consul_alt_names | Consul's TLS certificate alt names | string | consul.service.consul |
consul_ip_sans | Consul's TLS certificate IP SANs | string | 127.0.0.1 |
setup_consul_watches | Set up Consul watches for healthchecks | bool | false |
consul_gotify_url | Gotify URL for sending webhook | string | "" |
consul_gotify_token | Gotify token for sending webhook | string | "" |
- `consul_server` and `consul_client` are mutually exclusive and cannot both be `true`.
- `consul_bootstrap_expect` must be the same value on all Consul servers. If the key is not present on a server, that server instance will not attempt to bootstrap the cluster.
- `consul_server_ip` must be provided when `consul_client` is `true`.
- The default `consul_common_name` is `server.dc1.consul` or `client.dc1.consul`, depending on whether Consul is started in server or client mode.
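For example, a client node joining an existing server might set the following group variables (a sketch; the server IP is illustrative and the variable names come from the table above):

```yml
# inventory/group_vars/client.yml (illustrative)
consul_server: false
consul_client: true
consul_server_ip: 10.10.10.110
```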
role.
The role issues a new certificate from Vault and writes it to the host's +filesystem at a chosen path. The role logins with an existing Ansible +auth certificate with limited permissions from its configured policies.
+The role also optionally adds a consul-template template stanza to automatically +renew the certificate key pair.
+Variable | Description | Type | Default |
---|---|---|---|
issue_cert_role | Certificate role | string | client |
issue_cert_common_name | Certificate common name | string | "" |
issue_cert_ttl | Certificate TTL | string | 24h |
issue_cert_vault_addr | Vault instance address | string | https://localhost:8200 |
issue_cert_owner | Certificate key pair owner | string | "" |
issue_cert_group | Certificate key pair group | string | "" |
issue_cert_path | Certificate path | string | cert.crt |
issue_cert_key_path | Private key path | string | key.pem |
issue_cert_ca_path | CA path | string | ca.crt |
issue_cert_auth_role | Auth role to write certificate to | string | "" |
issue_cert_auth_policies | Policies to add to auth role | string | "" |
issue_cert_add_template | Add consul-template template | boolean | true |
issue_cert_consul_template_config | consul-template config file path | string | /etc/consul-template/consul-template.hcl |
issue_cert_consul_template_marker | consul-template template marker | string | # {mark} TLS |
issue_cert_service | Service to restart after consul-template renews cert | string | "" |
issue_cert_auth_*
variables are only used when issue_cert_role = "auth"
This role deploys a new Nomad instance. It can deploy Nomad as a server or client, +depending on the host's group name.
+For encryption, the role creates consul-template templates for:
+nomad operator gossip keyring generate
if it does not already existVariable | Description | Type | Default |
---|---|---|---|
nomad_config_dir | Configuration directory | string | /etc/nomad.d |
nomad_data_dir | Data directory | string | /opt/nomad |
nomad_tls_dir | TLS files directory | string | ${nomad_data_dir}/tls |
consul_template_config_dir | consul-template configuration file | string | /etc/consul-template |
nomad_register_consul | Register Nomad as a Consul service | bool | true |
nomad_vault_integration | Sets up Vault integration in server node | bool | true |
nomad_server | Start Nomad in server mode | bool | true |
nomad_bootstrap_expect | (server only) The expected number of servers in a cluster | number | 1 |
nomad_client | Start Nomad in client mode | bool | false |
nomad_server_ip | (client only) Server's IP address | string | - |
nomad_vault_addr | Vault server API address to use | string | https://localhost:8200 |
nomad_common_name | Nomad node certificate common_name | string | server.global.nomad |
nomad_alt_names | Nomad's TLS certificate alt names | string | nomad.service.consul |
nomad_ip_sans | Nomad's TLS certificate IP SANs | string | 127.0.0.1 |
cni_plugin_version | CNI plugins version | string | 1.3.0 |
nomad_server
and nomad_agent
are mutually exclusive and cannot be both
+true
.nomad_bootstrap_expect
must be the same value in all Nomad servers. If the
+key is not present in the server, that server instance will not attempt to
+bootstrap the cluster.nomad_server_ip
+when nomad_agent
is true
.nomad_common_name
is server.global.nomad
or
+client.global.nomad
depending on whether nomad is started in server or client
+mode.++Work in Progress: This role is unfinished and untested.
+
This role unseals an initialized but sealed Vault server. The unseal key shares +can be provided as:
+Variable | Description | Type | Default |
---|---|---|---|
unseal_vault_port | Configured Vault port | int | 8200 |
unseal_vault_addr | Vault HTTP address | string | http://localhost:8200 |
unseal_store | Accepts file, bitwarden | string | |
unseal_keys_files | Array of files with unseal keys | list | |
unseal_keys | Array of key shares | list | |
unseal_bw_password | Bitwarden password | string | |
unseal_bw_keys_names | List of Bitwarden secrets storing key shares | list |
This role deploys a new Vault instance and performs the required initialization. +If ran on a client node, it provisions a Vault agent instance instead.
+Vault is configured and started. If the instance is uninitialized, the role +performs first-time initialization and stores the root token and unseal key. +Only a single unseal key is supported at the moment. The secrets can be stored +in the filesystem or on Bitwarden.
+++Note: If storing in Bitwarden, the Bitwarden CLI must be installed, +configured and the
+bw_password
variable must be provided.
It then proceeds to login with the root token and setup the PKI secrets engine
+and various authentication roles with the Terraform provider. A full list of
+Terraform resources can be found at homelab/terraform/vault
.
++Warning: Any existing Vault resources in the same workspace are +destroyed permanently. Take care that the appropriate workspaces are used +when running the role on multiple Vault server instances.
+
If this role is ran on a client node or vault_setup_agent
is true
(on a
+server node), it will also provision a Vault-Agent instance. It requires an
+existing unsealed Vault server and should be run only after the Vault server has
+been setup.
Vault-agent's method of authentication to Vault is TLS certificate +authentication. Ansible will generate these certificates and write them to the +agent's auth role.
+++Note: This means Ansible requires access to Vault which it receives through +authentication using its own TLS certificates, created by Terraform during the +provisioning of the Vault server. These certificates were also written to +
+homelab/certs/
Variable | Description | Type | Default |
---|---|---|---|
vault_config_dir | Configuration directory | string | /etc/vault.d |
vault_data_dir | Restricted data directory | string | /opt/vault/data |
vault_log_dir | Restricted logs directory | string | /opt/vault/logs |
vault_tls_dir | TLS files directory | string | /opt/vault/tls |
vault_ca_cert_dir | Vault's CA certificate directory | string | /usr/share/ca-certificates/vault |
vault_server | Setup Vault server | bool | true |
vault_log_file | Audit log file | string | ${vault_log_dir}/vault.log |
vault_store_local | Copy Vault init secrets to local file | bool | true |
vault_secrets_file | File path for Vault init secrets | string | vault.txt |
vault_store_bw | Store root token in Bitwarden | bool | false |
vault_terraform_workspace | Terraform workspace | string | default |
vault_admin_password | Password for admin user | string | password |
vault_register_consul | Register Vault as a Consul service | bool | true |
vault_setup_agent | Setup Vault agent | bool | true |
vault_server_fqdn | Existing Vault server's FQDN | string | ${ansible_default_ipv4.address} |

- `vault_server` and `vault_setup_agent` are not mutually exclusive. A host can have both instances running at the same time. However, there must already be an existing server instance if `vault_server` is `false`.
- `vault_server_fqdn` is used to communicate with an existing Vault server that is listening on port 8200 when setting up Vault agent.
+Storing the secrets on the local filesystem is only recommended as a temporary +measure (to verify the secrets), or for testing and development. The file should +be deleted afterwards or moved to a safer location.
+++Warning: The Bitwarden storage functionality is not very robust and not +recommended at the moment. Use it with caution.
+
Storing the secrets in Bitwarden requires the following prerequisites:
+bw_password
variable must be defined and passed to Ansible safelyThe bw_get.sh
and bw_store.sh
helper scripts are used to create or update
+the secrets. Take care that the scripts will overwrite any existing secrets (of
+the same name).
Some notes when adding a new application jobspec to Nomad in
+terraform/nomad/apps
.
To place the application behind the Traefik reverse proxy, its jobspec should
+include the service.tags
:
tags = [
+ "traefik.enable=true",
+ "traefik.http.routers.app-proxy.entrypoints=https",
+ "traefik.http.routers.app-proxy.tls=true",
+ "traefik.http.routers.app-proxy.rule=Host(`app.example.tld`)",
+]
+
+This section is relevant if the application requires KV secrets from Vault. It +uses the Vault Terraform module.
+Firstly, add the relevant KV secrets to Vault.
+Next, create and add a Vault policy for read-only access to the relevant KV secrets:
+# terraform/vault/policies/nomad_app.hcl
+path "kvv2/data/prod/nomad/app" {
+ capabilities = ["read"]
+}
+
+# terraform/vault/policies.tf
+resource "vault_policy" "nomad_app" {
+ name = "nomad_app"
+ policy = file("policies/nomad_app.hcl")
+}
+
+vault
and template
blocks in the Nomad jobspec:vault {
+ policies = ["nomad_app"]
+}
+
+template {
+ data = <<EOF
+{{ with secret "kvv2/data/prod/nomad/app" }}
+AUTH="{{ .Data.data.username }}":"{{ .Data.data.password }}"
+{{ end }}
+EOF
+ destination = "secrets/auth.env"
+ env = true
+}
+
+This will access the Vault secrets and include them as the AUTH
environment
+variable in the job.
This section is relevant if the application requires access to the Postgres +database. It uses the Postgres Terraform module.
+postgres_roles
variable in
+terraform/postgres/
:postgres_roles = [
+ {
+ name = "app"
+ rotation_period = 86400
+ }
+]
+
+This will create a Postgres role and database in the running Postgres +instance, a static role in Vault for rotation of the role's credentials, and +a Vault policy to read the role's credentials.
+template
and vault
block to access the database credentials:vault {
+ policies = ["app"]
+}
+
+template {
+ data = <<EOF
+{{ with secret "postgres/static-creds/app" }}
+DATABASE_URL = "postgres://foo:{{ .Data.password }}@localhost:5432/foo?sslmode=disable"
+{{ end }}
+EOF
+ destination = "secrets/.env"
+ env = true
+}
+
+Diun allows monitoring a Docker image for new
+updates. To opt in to watching a task's Docker image, include the diun.enable
+label:
config {
+ labels = {
+ "diun.enable" = "true"
+ }
+}
+
+By default, this will only watch the current tag of the image. If the tag is
+latest
, Diun will send a notification when that tag's checksum changes.
To allow Diun to watch other tags, include additional labels:
+config {
+ labels = {
+ "diun.enable" = "true"
+ "diun.watch_repo" = "true"
+ "diun.max_tags" = 3
+ }
+}
+
+This will let Diun watch all tags in the Docker repo. It is highly recommended +to set a maximum number of tags that Diun should watch, otherwise Diun will +watch ALL tags, including older ones.
+See Diun for more information on configuring Diun.
+ +Diun is used to monitor Docker images for new +updates.
+watch:
+ workers: 10
+ schedule: "0 0 * * 5"
+ jitter: 30s
+ firstCheckNotif: false
+
+providers:
+ docker:
+ watchByDefault: false
+
+notif:
+ telegram:
+ # Telegram bot token
+ token: aabbccdd:11223344
+ # Telegram chat ID
+ chatIDs:
+ - 123456789
+ templateBody: |
+ Docker tag {{ .Entry.Image }} which you subscribed to through {{ .Entry.Provider }} provider has been released.
+
+To opt in to watching a Docker image, include the diun.enable
+Docker label:
config {
+ labels = {
+ "diun.enable" = "true"
+ }
+}
+
+By default, this will only watch the current tag of the image. If the tag is
+latest
, Diun will send a notification when that tag's checksum changes.
To allow Diun to watch other tags, include additional labels:
+config {
+ labels = {
+ "diun.enable" = "true"
+ "diun.watch_repo" = "true"
+ "diun.max_tags" = 3
+ }
+}
+
+This will let Diun watch all tags in the Docker repo. It is highly recommended +to set a maximum number of tags that Diun should watch, otherwise Diun will +watch ALL tags, including older ones.
+# manipulate images in database
+$ docker exec diun diun image list
+$ docker exec diun diun image inspect --image=[image]
+$ docker exec diun diun image remove --image=[image]
+
+# send test notification
+$ docker exec diun diun notif test
+
+books
bind mount to an existing
+calibre database with the books
+metadata.GOTIFY_DEFAULTUSER_NAME
and GOTIFY_DEFAULTUSER_PASS
with custom
+credentials.LD_SUPERUSER_NAME
and LD_SUPERUSER_PASSWORD
with custom
+credentials.AUTH_FILE
environment variable with custom credentials
+in the form username:password
.Create a password file with htpasswd
:
$ docker run \
+ --entrypoint htpasswd \
+ httpd:2 -Bbn foo password > htpasswd
+
+Login to the registry by providing the username and password given in Basic +Auth:
+$ docker login foo.example.com
+
+Our goal is to provision a Nomad, Consul and Vault cluster with one server node +and one client node. The basic provisioning flow is as follows:
+The following assumptions are made in this guide:
+Please make the necessary changes if there are any deviations from the above.
+The Proxmox builder plugin is used to create a new VM template. It supports two +different builders:
+proxmox-clone
- From an existing VM template (recommended)proxmox-iso
- From an ISO file (incomplete)We will be using the first builder. If you have an existing template to +provision, you may skip to the next section. +Otherwise, assuming that we are lacking an existing, clean VM template, we will +import a cloud image and turn it into a new template.
+++Note: It is important that the existing template must +have:
++
+- An attached cloud-init drive for the builder to add the SSH communicator +configuration
+- cloud-init installed
+- qemu-guest-agent installed
+
bin/import-cloud-image
script to import a new cloud image:$ import-cloud-image [URL]
+
+packer/base-clone
++Tip: Use the
+bin/generate-vars
script to quickly generate variable files +inpacker
andterraform
subdirectories.
auto.pkrvars.hcl
:proxmox_url = "https://<PVE_IP>:8006/api2/json"
+proxmox_username = "<user>@pam"
+proxmox_password = "<password>"
+
+clone_vm = "<cloud-image-name>"
+vm_name = "<new-template-name>"
+vm_id = 5000
+
+ssh_username = "debian"
+ssh_public_key_path = "/path/to/public/key"
+ssh_private_key_path = "/path/to/private/key"
+
+$ packer validate -var-file="auto.pkrvars.hcl" .
+$ packer build -var-file="auto.pkrvars.hcl" .
+
+Packer will create a new base image and use the Ansible post-provisioner to +install and configure software (eg. Docker, Nomad, Consul and Vault). For more +details, see Packer.
+We are using the +bpg/proxmox +provider to provision virtual machines from our Packer templates.
+terraform/cluster
terraform.tfvars
:proxmox_ip = "https://<PVE_IP>:8006/api2/json"
+proxmox_api_token = "<API_TOKEN>"
+
+template_id = 5000
+ip_gateway = "10.10.10.1"
+
+servers = [
+ {
+ name = "server"
+ id = 110
+ cores = 2
+ sockets = 2
+ memory = 4096
+ disk_size = 10
+ ip_address = "10.10.10.110/24"
+ }
+]
+
+clients = [
+ {
+ name = "client"
+ id = 111
+ cores = 2
+ sockets = 2
+ memory = 10240
+ disk_size = 15
+ ip_address = "10.10.10.111/24"
+ }
+]
+
+ssh_user = "debian"
+ssh_private_key_file = "/path/to/ssh/private/key"
+ssh_public_key_file = "/path/to/ssh/public/key"
+
+
+
+$ terraform init
+$ terraform plan
+$ terraform apply
+
+The above configuration will provision two VM nodes in Proxmox:
+Server node: VMID 110 at 10.10.10.110
+Client node: VMID 111 at 10.10.10.111
+
+An Ansible inventory file tf_ansible_inventory
should be generated in the same
+directory with the given VM IPs in the server
and client
groups.
For more details, refer to the Terraform configuration for Proxmox.
## Ansible

At this stage, there should be one server node and one client node running on Proxmox, both reachable by SSH. These nodes should have Nomad, Consul and Vault installed. We will proceed to use Ansible (and Terraform) to configure Vault, Consul and Nomad (in that order) into a working cluster.
Navigate to the `ansible` directory and check that the generated inventory is valid:

```bash
$ ansible-inventory --graph
```

Populate the relevant `group_vars` files in `inventory/group_vars/{prod,server,client}.yml` and verify them:

```bash
$ ansible-inventory --graph --vars
```

> **Note**: The `nfs_share_mounts` variable in `inventory/group_vars/client.yml` should be modified or removed if not required.

Run the playbook:

```bash
$ ansible-playbook main.yml
```
+terraform/vault
When re-running the playbook on the same server, Vault will not be
+re-initialized. However, if the playbook is run on a separate server (eg. for
+testing on a dev cluster), the Vault role will permanently delete any
+existing state in the terraform/vault
subdirectory if a different
+vault_terraform_workspace
is not provided. This WILL result in permanent data
+loss and care should be taken when running the role (and playbook) on multiple
+clusters or servers.
Smoke tests are performed with goss as part
+of the main.yml
playbook to ensure all required software are installed and
+running.
++Note: The included goss files are static with hardcoded information. As +such, they will fail if some of the Ansible default variables are changed (eg. +username, NFS mountpoints). See +issues for details on a workaround.
+
After verifying that the cluster is up and running, we can begin to run
+applications on it with Nomad jobs. This project provides a number of Nomad
+jobspec files in terraform/nomad/apps
to be run with Terraform with the
+following features:
See Adding a New Application for details on onboarding a +new application to Nomad.
+ +Cloud images are pre-installed disk images that have been customized to run on
+cloud platforms. They are shipped with cloud-init
that simplifies the
+installation and provisioning of virtual machines.
Unlike ISOs and LXC container images, Proxmox's API lacks support for uploading +cloud images directly from a given URL (see +here and +here). +Instead, they must be manually downloaded and converted into a VM +template to be available to Proxmox.
+++Warning: When cloning the cloud image template with Terraform, +
+qemu-guest-agent
must be installed andagent=1
must be set. Otherwise, +Terraform will timeout. As such, it is recommended to create a further +bootstrapped template with Packer and Ansible.
$ wget https://cloud.debian.org/images/cloud/bullseye/20230124-1270/debian11-generic-amd64-20230124-1270.qcow2
+
+$ qm create 9000 \
+ --name "debian-11-amd64" \
+ --net0 "virtio,bridge=vmbr0" \
+ --serial0 socket \
+ --vga serial0 \
+ --scsihw virtio-scsi-pci \
+ --scsi0 "local:0,import-from=/path/to/image" \
+ --bootdisk scsi0 \
+ --boot "order=scsi0" \
+ --ide1 "local:cloudinit" \
+ --ostype l26 \
+ --cores 1 \
+ --sockets 1 \
+ --memory 512 \
+ --agent 1
+
+$ qm resize 9000 scsi0 5G
+
+$ qm template 9000
+
+A full script of the steps above can be found at +bin/import-cloud-image.
+$ import-cloud-image --help
+
+Usage: import-cloud-image [--debug|--force] [URL] [FILENAME]
+
+