diff --git a/common/.ansible-lint b/common/.ansible-lint new file mode 100644 index 00000000..0522976e --- /dev/null +++ b/common/.ansible-lint @@ -0,0 +1,20 @@ +# Vim filetype=yaml +--- +offline: false +skip_list: + - name[template] # Allow Jinja templating inside task and play names + - template-instead-of-copy # Templated files should use template instead of copy + - yaml[line-length] # too long lines + - yaml[indentation] # Forcing lists to be always indented by 2 chars is silly IMO + - var-naming[no-role-prefix] # This would be too much churn for very little gain + - no-changed-when + +# ansible-lint gh workflow cannot find ansible.cfg hence fails to import vault_utils role +exclude_paths: + - ./ansible/playbooks/vault/vault.yaml + - ./ansible/playbooks/iib-ci/iib-ci.yaml + - ./ansible/playbooks/k8s_secrets/k8s_secrets.yml + - ./ansible/playbooks/process_secrets/process_secrets.yml + - ./ansible/playbooks/write-token-kubeconfig/write-token-kubeconfig.yml + - ./ansible/playbooks/process_secrets/display_secrets_info.yml + - ./ansible/roles/vault_utils/tests/test.yml diff --git a/common/.github/dependabot.yml b/common/.github/dependabot.yml new file mode 100644 index 00000000..a175e666 --- /dev/null +++ b/common/.github/dependabot.yml @@ -0,0 +1,8 @@ +--- +version: 2 +updates: + # Check for updates to GitHub Actions every week + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" diff --git a/common/.github/linters/.gitleaks.toml b/common/.github/linters/.gitleaks.toml new file mode 100644 index 00000000..9ad74347 --- /dev/null +++ b/common/.github/linters/.gitleaks.toml @@ -0,0 +1,4 @@ +[whitelist] +# As of v4, gitleaks only matches against filename, not path in the +# files directive. Leaving content for backwards compatibility.
+files = [ ] diff --git a/common/.github/linters/.markdown-lint.yml b/common/.github/linters/.markdown-lint.yml new file mode 100644 index 00000000..a0bc47d1 --- /dev/null +++ b/common/.github/linters/.markdown-lint.yml @@ -0,0 +1,6 @@ +{ + "default": true, + "MD003": false, + "MD013": false, + "MD033": false +} \ No newline at end of file diff --git a/common/.github/workflows/superlinter.yml b/common/.github/workflows/superlinter.yml new file mode 100644 index 00000000..03b6fff9 --- /dev/null +++ b/common/.github/workflows/superlinter.yml @@ -0,0 +1,44 @@ +--- +name: Super linter + +on: [push, pull_request] + +jobs: + build: + # Name the Job + name: Super linter + # Set the agent to run on + runs-on: ubuntu-latest + + steps: + - name: Checkout Code + uses: actions/checkout@v4 + with: + # Full git history is needed to get a proper list of changed files within `super-linter` + fetch-depth: 0 + + ################################ + # Run Linter against code base # + ################################ + - name: Lint Code Base + uses: super-linter/super-linter/slim@v7 + env: + VALIDATE_ALL_CODEBASE: true + DEFAULT_BRANCH: main + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # These are the validation we disable atm + VALIDATE_ANSIBLE: false + VALIDATE_BASH: false + VALIDATE_CHECKOV: false + VALIDATE_JSCPD: false + VALIDATE_JSON_PRETTIER: false + VALIDATE_MARKDOWN_PRETTIER: false + VALIDATE_KUBERNETES_KUBECONFORM: false + VALIDATE_PYTHON_PYLINT: false + VALIDATE_SHELL_SHFMT: false + VALIDATE_YAML: false + VALIDATE_YAML_PRETTIER: false + # VALIDATE_DOCKERFILE_HADOLINT: false + # VALIDATE_MARKDOWN: false + # VALIDATE_NATURAL_LANGUAGE: false + # VALIDATE_TEKTON: false diff --git a/common/.gitignore b/common/.gitignore new file mode 100644 index 00000000..454efc9e --- /dev/null +++ b/common/.gitignore @@ -0,0 +1,13 @@ +__pycache__/ +*.py[cod] +*~ +*.swp +*.swo +values-secret.yaml +.*.expected.yaml +.vscode +pattern-vault.init +pattern-vault.init.bak +super-linter.log 
+golang-external-secrets/Chart.lock +hashicorp-vault/Chart.lock diff --git a/common/.gitleaks.toml b/common/.gitleaks.toml new file mode 120000 index 00000000..c05303b9 --- /dev/null +++ b/common/.gitleaks.toml @@ -0,0 +1 @@ +.github/linters/.gitleaks.toml \ No newline at end of file diff --git a/common/Changes.md b/common/Changes.md new file mode 100644 index 00000000..c12f1755 --- /dev/null +++ b/common/Changes.md @@ -0,0 +1,153 @@ +# Changes + +## Sep 24, 2024 + +* Ansible has been moved out of the common code tree, you must use a clustergroup chart that is >= 0.9.1 + +## Sep 6, 2024 + +* Most charts have been removed from the tree. To get the charts you now have to point to them + +## Sep 25, 2023 + +* Upgraded ESO to v0.9.5 + +## Aug 17, 2023 + +* Introduced support for multisource applications via .chart + .chartVersion + +## Jul 8, 2023 + +* Introduced a default of 20 for sync failures retries in argo applications (global override via global.options.applicationRetryLimit + and per-app override via .syncPolicy) + +## May 22, 2023 + +* Upgraded ESO to 0.8.2 +* *Important* we now use the newly blessed sso config for argo. This means that gitops < 1.8 are *unsupported* + +## May 18, 2023 + +* Introduce a EXTRA_HELM_OPTS env variable that will be passed to the helm invocations + +## April 21, 2023 + +* Added labels and annotation support to namespaces.yaml template + +## Apr 11, 2023 + +* Apply the ACM ocp-gitops-policy everywhere but the hub + +## Apr 7, 2023 + +* Moved to gitops-1.8 channel by default (stable is unmaintained and will be dropped starting with ocp-4.13) + +## March 20, 2023 + +* Upgraded ESO to 0.8.1 + +## February 9, 2023 + +* Add support for /values-.yaml and for /values--.yaml + +## January 29, 2023 + +* Stop extracting the HUB's CA via an imperative job running on the imported cluster. + Just use ACM to push the HUB's CA out to the managed clusters. 
+ +## January 23, 2023 + +* Add initial support for running ESO on ACM-imported clusters + +## January 18, 2023 + +* Add validate-schema target + +## January 13, 2023 + +* Simplify the secrets paths when using argo hosted sites + +## January 10, 2023 + +* vaultPrefixes is now optional in the v2 secret spec and defaults to ["hub"] + +## December 9, 2022 + +* Dropped insecureUnsealVaultInsideCluster (and file_unseal) entirely. Now + vault is always unsealed via a cronjob in the cluster. It is recommended to + store the imperative/vaultkeys secret offline securely and then delete it. + +## December 8, 2022 + +* Removed the legacy installation targets: + `deploy upgrade legacy-deploy legacy-upgrade` + Patterns must now use the operator-based installation + +## November 29, 2022 + +* Upgraded vault-helm to 0.23.0 +* Enable vault-ssl by default + +## November 22, 2022 + +* Implemented a new format for the values-secret.yaml. Example can be found in examples/ folder +* Now the order of values-secret file lookup is the following: + 1. ~/values-secret-.yaml + 2. ~/values-secret.yaml + 3. /values-secret.yaml.template +* Add support for ansible vault encrypted values-secret files. You can now encrypt your values-secret file + at rest with `ansible-vault encrypt ~/values-secret.yaml`. When running `make load-secrets` if an encrypted + file is encountered the user will be prompted automatically for the password to decrypt it. + +## November 6, 2022 + +* Add support for /values--.yaml (e.g. /values-AWS-group-one.yaml) + +## October 28, 2022 + +* Updated vault helm chart to v0.22.1 and vault containers to 1.12.0 + +## October 25, 2022 + +* Updated External Secrets Operator to v0.6.0 +* Moved to -UBI based ESO containers + +## October 13, 2022 + +* Added global.clusterVersion as a new helm variable which represents the OCP + Major.Minor cluster version. By default now a user can add a + values--.yaml file to have specific cluster version + overrides (e.g. values-4.10-hub.yaml). 
Will need Validated Patterns Operator >= 0.0.6 + when deploying with the operator. Note: When using the ArgoCD Hub and spoke model, + you cannot have spokes with a different version of OCP than the hub. + +## October 4, 2022 + +* Extended the values-secret.yaml file to support multiple vault paths and re-wrote + the push_secrets feature as python module plugin. This requires the following line + in a pattern's ansible.cfg's '[defaults]' stanza: + + `library=~/.ansible/plugins/modules:./ansible/plugins/modules:./common/ansible/plugins/modules:/usr/share/ansible/plugins/modules` + +## October 3, 2022 + +* Restore the ability to install a non-default site: `make TARGET_SITE=mysite install` +* Revised tests (new output and filenames, requires adding new result files to Git) +* ACM 2.6 required for ACM-based managed sites +* Introduced global.clusterDomain template variable (without the `apps.` prefix) +* Removed the ability to send specific charts to another cluster, use hosted argo sites instead +* Added the ability to have the hub host `values-{site}.yaml` for spoke clusters. + + The following example would deploy the namespaces, subscriptions, and + applications defined in `values-group-one.yaml` to the `perth` cluster + directly from ArgoCD on the hub. + + ```yaml + managedClusterGroups: + - name: group-one + hostedArgoSites: + - name: perth + domain: perth1.beekhof.net + bearerKeyPath: secret/data/hub/cluster_perth + caKeyPath: secret/data/hub/cluster_perth_ca + ``` diff --git a/common/LICENSE b/common/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/common/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/common/Makefile b/common/Makefile new file mode 100644 index 00000000..20e543fc --- /dev/null +++ b/common/Makefile @@ -0,0 +1,246 @@ +NAME ?= $(shell basename "`pwd`") + +ifneq ($(origin TARGET_SITE), undefined) + TARGET_SITE_OPT=--set main.clusterGroupName=$(TARGET_SITE) +endif + +# This variable can be set in order to pass additional helm arguments from the +# the command line. I.e. 
we can set things without having to tweak values files +EXTRA_HELM_OPTS ?= + +# INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:394248 +# or +# INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:394248,registry-proxy.engineering.redhat.com/rh-osbs/iib:394249 +INDEX_IMAGES ?= + +TARGET_ORIGIN ?= origin +# This is to ensure that whether we start with a git@ or https:// URL, we end up with an https:// URL +# This is because we expect to use tokens for repo authentication as opposed to SSH keys +TARGET_REPO=$(shell git ls-remote --get-url --symref $(TARGET_ORIGIN) | sed -e 's/.*URL:[[:space:]]*//' -e 's%^git@%%' -e 's%^https://%%' -e 's%:%/%' -e 's%^%https://%') +# git branch --show-current is also available as of git 2.22, but we will use this for compatibility +TARGET_BRANCH=$(shell git rev-parse --abbrev-ref HEAD) + +UUID_FILE ?= ~/.config/validated-patterns/pattern-uuid +UUID_HELM_OPTS ?= + +# --set values always take precedence over the contents of -f +ifneq ("$(wildcard $(UUID_FILE))","") + UUID := $(shell cat $(UUID_FILE)) + UUID_HELM_OPTS := --set main.analyticsUUID=$(UUID) +endif + +# Set the secret name *and* its namespace when deploying from private repositories +# The format of said secret is documented here: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#repositories +TOKEN_SECRET ?= +TOKEN_NAMESPACE ?= + +ifeq ($(TOKEN_SECRET),) + HELM_OPTS=-f values-global.yaml --set main.git.repoURL="$(TARGET_REPO)" --set main.git.revision=$(TARGET_BRANCH) $(TARGET_SITE_OPT) $(UUID_HELM_OPTS) $(EXTRA_HELM_OPTS) +else + # When we are working with a private repository we do not escape the git URL as it might be using an ssh secret which does not use https:// + TARGET_CLEAN_REPO=$(shell git ls-remote --get-url --symref $(TARGET_ORIGIN)) + HELM_OPTS=-f values-global.yaml --set main.tokenSecret=$(TOKEN_SECRET) --set main.tokenSecretNamespace=$(TOKEN_NAMESPACE) --set main.git.repoURL="$(TARGET_CLEAN_REPO)" --set 
main.git.revision=$(TARGET_BRANCH) $(TARGET_SITE_OPT) $(UUID_HELM_OPTS) $(EXTRA_HELM_OPTS) +endif + +# Helm does the right thing and fetches all the tags and detects the newest one +PATTERN_INSTALL_CHART ?= oci://quay.io/hybridcloudpatterns/pattern-install + +##@ Pattern Common Tasks + +.PHONY: help +help: ## This help message + @echo "Pattern: $(NAME)" + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^(\s|[a-zA-Z_0-9-])+:.*?##/ { printf " \033[36m%-35s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +# Makefiles in the individual patterns should call these targets explicitly +# e.g. from industrial-edge: make -f common/Makefile show +.PHONY: show +show: ## show the starting template without installing it + helm template $(PATTERN_INSTALL_CHART) --name-template $(NAME) $(HELM_OPTS) + +preview-all: ## (EXPERIMENTAL) Previews all applications on hub and managed clusters + @echo "NOTE: This is just a tentative approximation of rendering all hub and managed clusters templates" + @common/scripts/preview-all.sh $(TARGET_REPO) $(TARGET_BRANCH) + +preview-%: + $(eval CLUSTERGROUP ?= $(shell yq ".main.clusterGroupName" values-global.yaml)) + @common/scripts/preview.sh $(CLUSTERGROUP) $* $(TARGET_REPO) $(TARGET_BRANCH) + +.PHONY: operator-deploy +operator-deploy operator-upgrade: validate-prereq validate-origin validate-cluster ## runs helm install + @common/scripts/deploy-pattern.sh $(NAME) $(PATTERN_INSTALL_CHART) $(HELM_OPTS) + +.PHONY: uninstall +uninstall: ## runs helm uninstall + $(eval CSV := $(shell oc get subscriptions -n openshift-operators openshift-gitops-operator -ojsonpath={.status.currentCSV})) + helm uninstall $(NAME) + @oc delete csv -n openshift-operators $(CSV) + +.PHONY: load-secrets +load-secrets: ## loads the secrets into the backend determined by values-global setting + common/scripts/process-secrets.sh $(NAME) + +.PHONY: legacy-load-secrets +legacy-load-secrets: ## loads 
the secrets into vault (only) + common/scripts/vault-utils.sh push_secrets $(NAME) + +.PHONY: secrets-backend-vault +secrets-backend-vault: ## Edits values files to use default Vault+ESO secrets config + common/scripts/set-secret-backend.sh vault + common/scripts/manage-secret-app.sh vault present + common/scripts/manage-secret-app.sh golang-external-secrets present + common/scripts/manage-secret-namespace.sh validated-patterns-secrets absent + @git diff --exit-code || echo "Secrets backend set to vault, please review changes, commit, and push to activate in the pattern" + +.PHONY: secrets-backend-kubernetes +secrets-backend-kubernetes: ## Edits values file to use Kubernetes+ESO secrets config + common/scripts/set-secret-backend.sh kubernetes + common/scripts/manage-secret-namespace.sh validated-patterns-secrets present + common/scripts/manage-secret-app.sh vault absent + common/scripts/manage-secret-app.sh golang-external-secrets present + @git diff --exit-code || echo "Secrets backend set to kubernetes, please review changes, commit, and push to activate in the pattern" + +.PHONY: secrets-backend-none +secrets-backend-none: ## Edits values files to remove secrets manager + ESO + common/scripts/set-secret-backend.sh none + common/scripts/manage-secret-app.sh vault absent + common/scripts/manage-secret-app.sh golang-external-secrets absent + common/scripts/manage-secret-namespace.sh validated-patterns-secrets absent + @git diff --exit-code || echo "Secrets backend set to none, please review changes, commit, and push to activate in the pattern" + +.PHONY: load-iib +load-iib: ## CI target to install Index Image Bundles + @set -e; if [ x$(INDEX_IMAGES) != x ]; then \ + ansible-playbook rhvp.cluster_utils.iib_ci; \ + else \ + echo "No INDEX_IMAGES defined. 
Bailing out"; \ + exit 1; \ + fi + +.PHONY: token-kubeconfig +token-kubeconfig: ## Create a local ~/.kube/config with password (not usually needed) + common/scripts/write-token-kubeconfig.sh + +##@ Validation Tasks + +# We only check the remote ssh git branch's existance if we're not running inside a container +# as getting ssh auth working inside a container seems a bit brittle +# If the main repoUpstreamURL field is set, then we need to check against +# that and not target_repo +.PHONY: validate-origin +validate-origin: ## verify the git origin is available + @echo "Checking repository:" + $(eval UPSTREAMURL := $(shell yq -r '.main.git.repoUpstreamURL // (.main.git.repoUpstreamURL = "")' values-global.yaml)) + @if [ -z "$(UPSTREAMURL)" ]; then\ + echo -n " $(TARGET_REPO) - branch '$(TARGET_BRANCH)': ";\ + git ls-remote --exit-code --heads $(TARGET_REPO) $(TARGET_BRANCH) >/dev/null &&\ + echo "OK" || (echo "NOT FOUND"; exit 1);\ + else\ + echo "Upstream URL set to: $(UPSTREAMURL)";\ + echo -n " $(UPSTREAMURL) - branch '$(TARGET_BRANCH)': ";\ + git ls-remote --exit-code --heads $(UPSTREAMURL) $(TARGET_BRANCH) >/dev/null &&\ + echo "OK" || (echo "NOT FOUND"; exit 1);\ + fi + +.PHONY: validate-cluster +validate-cluster: ## Do some cluster validations before installing + @echo "Checking cluster:" + @echo -n " cluster-info: " + @oc cluster-info >/dev/null && echo "OK" || (echo "Error"; exit 1) + @echo -n " storageclass: " + @if [ `oc get storageclass -o go-template='{{printf "%d\n" (len .items)}}'` -eq 0 ]; then\ + echo "WARNING: No storageclass found";\ + else\ + echo "OK";\ + fi + + +.PHONY: validate-schema +validate-schema: ## validates values files against schema in common/clustergroup + $(eval VAL_PARAMS := $(shell for i in ./values-*.yaml; do echo -n "$${i} "; done)) + @echo -n "Validating clustergroup schema of: " + @set -e; for i in $(VAL_PARAMS); do echo -n " $$i"; helm template common/clustergroup $(HELM_OPTS) -f "$${i}" >/dev/null; done + @echo + +.PHONY: 
validate-prereq +validate-prereq: ## verify pre-requisites + @if [ ! -f /run/.containerenv ]; then\ + echo "Checking prerequisites:";\ + for t in $(EXECUTABLES); do if ! which $$t > /dev/null 2>&1; then echo "No $$t in PATH"; exit 1; fi; done;\ + echo " Check for '$(EXECUTABLES)': OK";\ + echo -n " Check for python-kubernetes: ";\ + if ! ansible -m ansible.builtin.command -a "{{ ansible_python_interpreter }} -c 'import kubernetes'" localhost > /dev/null 2>&1; then echo "Not found"; exit 1; fi;\ + echo "OK";\ + echo -n " Check for kubernetes.core collection: ";\ + if ! ansible-galaxy collection list | grep kubernetes.core > /dev/null 2>&1; then echo "Not found"; exit 1; fi;\ + echo "OK";\ + else\ + if [ -f values-global.yaml ]; then\ + OUT=`yq -r '.main.multiSourceConfig.enabled // (.main.multiSourceConfig.enabled = "false")' values-global.yaml`;\ + if [ "$${OUT,,}" = "false" ]; then\ + echo "You must set \".main.multiSourceConfig.enabled: true\" in your 'values-global.yaml' file";\ + echo "because your common subfolder is the slimmed down version with no helm charts in it";\ + exit 1;\ + fi;\ + fi;\ + fi + +.PHONY: argo-healthcheck +argo-healthcheck: ## Checks if all argo applications are synced + @echo "Checking argo applications" + $(eval APPS := $(shell oc get applications -A -o jsonpath='{range .items[*]}{@.metadata.namespace}{","}{@.metadata.name}{"\n"}{end}')) + @NOTOK=0; \ + for i in $(APPS); do\ + n=`echo "$${i}" | cut -f1 -d,`;\ + a=`echo "$${i}" | cut -f2 -d,`;\ + STATUS=`oc get -n "$${n}" application/"$${a}" -o jsonpath='{.status.sync.status}'`;\ + if [[ $$STATUS != "Synced" ]]; then\ + NOTOK=$$(( $${NOTOK} + 1));\ + fi;\ + HEALTH=`oc get -n "$${n}" application/"$${a}" -o jsonpath='{.status.health.status}'`;\ + if [[ $$HEALTH != "Healthy" ]]; then\ + NOTOK=$$(( $${NOTOK} + 1));\ + fi;\ + echo "$${n} $${a} -> Sync: $${STATUS} - Health: $${HEALTH}";\ + done;\ + if [ $${NOTOK} -gt 0 ]; then\ + echo "Some applications are not synced or are unhealthy";\ + 
exit 1;\ + fi + + +##@ Test and Linters Tasks + +.PHONY: qe-tests +qe-tests: ## Runs the tests that QE runs + @set -e; if [ -f ./tests/interop/run_tests.sh ]; then \ + pushd ./tests/interop; ./run_tests.sh; popd; \ + else \ + echo "No ./tests/interop/run_tests.sh found skipping"; \ + fi + +.PHONY: super-linter +super-linter: ## Runs super linter locally + rm -rf .mypy_cache + podman run -e RUN_LOCAL=true -e USE_FIND_ALGORITHM=true \ + -e VALIDATE_ANSIBLE=false \ + -e VALIDATE_BASH=false \ + -e VALIDATE_CHECKOV=false \ + -e VALIDATE_DOCKERFILE_HADOLINT=false \ + -e VALIDATE_JSCPD=false \ + -e VALIDATE_JSON_PRETTIER=false \ + -e VALIDATE_MARKDOWN_PRETTIER=false \ + -e VALIDATE_KUBERNETES_KUBECONFORM=false \ + -e VALIDATE_PYTHON_PYLINT=false \ + -e VALIDATE_SHELL_SHFMT=false \ + -e VALIDATE_TEKTON=false \ + -e VALIDATE_YAML=false \ + -e VALIDATE_YAML_PRETTIER=false \ + $(DISABLE_LINTERS) \ + -v $(PWD):/tmp/lint:rw,z \ + -w /tmp/lint \ + ghcr.io/super-linter/super-linter:slim-v7 + +.PHONY: deploy upgrade legacy-deploy legacy-upgrade +deploy upgrade legacy-deploy legacy-upgrade: + @echo "UNSUPPORTED TARGET: please switch to 'operator-deploy'"; exit 1 diff --git a/common/README.md b/common/README.md new file mode 100644 index 00000000..41223529 --- /dev/null +++ b/common/README.md @@ -0,0 +1,51 @@ +# Validated Patterns common/ repository + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) + +## Note + +This is the `main` branch of common and it assumes that the pattern is fully +multisource (meaning that any used charts from VP is actually referenced from +either a helm chart repository or quay repository). I.e. there are no helm +charts contained in this branch of common and there is no ansible code neither. + +The helm charts now live in separate repositories under the VP +[organization](https://github.com/validatedpatterns) on GitHub. 
The repositories are: + +- clustergroup-chart +- pattern-install-chart +- hashicorp-vault-chart +- golang-external-secrets-chart +- acm-chart +- letsencrypt-chart + +The ansible bits live in this [repository](https://github.com/validatedpatterns/rhvp.cluster_utils) + +In order to be able to use this "slimmed-down" main branch of common you *must* +use a 0.9.* clustergroup-chart. Add the following to your `values-global.yaml`: + +```yaml +main: + multiSourceConfig: + enabled: true + clusterGroupChartVersion: 0.9.* +``` + +## Start Here + +This repository is never used as standalone. It is usually imported in each pattern as a subtree. +In order to import the common/ the very first time you can use +`https://github.com/validatedpatterns/multicloud-gitops/blob/main/common/scripts/make_common_subtree.sh` + +In order to update your common subtree inside your pattern repository you can either use +`https://github.com/validatedpatterns/utilities/blob/main/scripts/update-common-everywhere.sh` or +do it manually by doing the following: + +```sh +git remote add -f upstream-common https://github.com/validatedpatterns/common.git +git merge -s subtree -Xtheirs -Xsubtree=common upstream-common/main +``` + +## Secrets + +There are two different secret formats parsed by the ansible bits. 
Both are documented [here](https://github.com/validatedpatterns/common/tree/main/ansible/roles/vault_utils/README.md) diff --git a/common/requirements.yml b/common/requirements.yml new file mode 100644 index 00000000..cb11ca24 --- /dev/null +++ b/common/requirements.yml @@ -0,0 +1,4 @@ +--- +# Define Ansible collection requirements here +collections: + - name: git+https://github.com/validatedpatterns/rhvp.cluster_utils.git,v1 diff --git a/common/scripts/deploy-pattern.sh b/common/scripts/deploy-pattern.sh new file mode 100755 index 00000000..56837366 --- /dev/null +++ b/common/scripts/deploy-pattern.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -o pipefail + +RUNS=5 +# Retry five times because the CRD might not be fully installed yet +echo -n "Installing pattern: " +for i in $(seq 1 ${RUNS}); do \ + exec 3>&1 4>&2 + OUT=$( { helm template --include-crds --name-template $* 2>&4 | oc apply -f- 2>&4 1>&3; } 4>&1 3>&1) + ret=$? + exec 3>&- 4>&- + if [ ${ret} -eq 0 ]; then + break; + else + echo -n "." + sleep 10 + fi +done + +# All the runs failed +if [ ${i} -eq ${RUNS} ]; then + echo "Installation failed [${i}/${RUNS}]. Error:" + echo "${OUT}" + exit 1 +fi +echo "Done" diff --git a/common/scripts/determine-main-clustergroup.sh b/common/scripts/determine-main-clustergroup.sh new file mode 100755 index 00000000..6271dbad --- /dev/null +++ b/common/scripts/determine-main-clustergroup.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +PATTERN_DIR="$1" + +if [ -z "$PATTERN_DIR" ]; then + PATTERN_DIR="." 
+fi + +CGNAME=$(yq '.main.clusterGroupName' "$PATTERN_DIR/values-global.yaml") + +if [ -z "$CGNAME" ] || [ "$CGNAME" == "null" ]; then + echo "Error - cannot determine clusterGroupName" + exit 1 +fi + +echo "$CGNAME" diff --git a/common/scripts/determine-pattern-name.sh b/common/scripts/determine-pattern-name.sh new file mode 100755 index 00000000..fb503fe6 --- /dev/null +++ b/common/scripts/determine-pattern-name.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +PATTERN_DIR="$1" + +if [ -z "$PATTERN_DIR" ]; then + PATTERN_DIR="." +fi + +PATNAME=$(yq '.global.pattern' "$PATTERN_DIR/values-global.yaml" 2>/dev/null) + +if [ -z "$PATNAME" ] || [ "$PATNAME" == "null" ]; then + PATNAME="$(basename "$PWD")" +fi + +echo "$PATNAME" diff --git a/common/scripts/determine-secretstore-backend.sh b/common/scripts/determine-secretstore-backend.sh new file mode 100755 index 00000000..ef784790 --- /dev/null +++ b/common/scripts/determine-secretstore-backend.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +PATTERN_DIR="$1" + +if [ -z "$PATTERN_DIR" ]; then + PATTERN_DIR="." 
+fi + +BACKEND=$(yq '.global.secretStore.backend' "$PATTERN_DIR/values-global.yaml" 2>/dev/null) + +if [ -z "$BACKEND" -o "$BACKEND" == "null" ]; then + BACKEND="vault" +fi + +echo "$BACKEND" diff --git a/common/scripts/display-secrets-info.sh b/common/scripts/display-secrets-info.sh new file mode 100755 index 00000000..d9915855 --- /dev/null +++ b/common/scripts/display-secrets-info.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +set -eu + +get_abs_filename() { + # $1 : relative filename + echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" +} + +SCRIPT=$(get_abs_filename "$0") +SCRIPTPATH=$(dirname "${SCRIPT}") +COMMONPATH=$(dirname "${SCRIPTPATH}") +PATTERNPATH=$(dirname "${COMMONPATH}") + +if [ "$#" -ge 1 ]; then + export VALUES_SECRET=$(get_abs_filename "${1}") +fi + +if [[ "$#" == 2 ]]; then + SECRETS_BACKING_STORE="$2" +else + SECRETS_BACKING_STORE="$($SCRIPTPATH/determine-secretstore-backend.sh)" +fi + +PATTERN_NAME=$(basename "`pwd`") + +ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" -e secrets_backing_store="${SECRETS_BACKING_STORE}" -e override_no_log=false "rhvp.cluster_utils.display_secrets_info" diff --git a/common/scripts/load-k8s-secrets.sh b/common/scripts/load-k8s-secrets.sh new file mode 100755 index 00000000..9219f92f --- /dev/null +++ b/common/scripts/load-k8s-secrets.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -eu + +get_abs_filename() { + # $1 : relative filename + echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" +} + +SCRIPT=$(get_abs_filename "$0") +SCRIPTPATH=$(dirname "${SCRIPT}") +COMMONPATH=$(dirname "${SCRIPTPATH}") +PATTERNPATH=$(dirname "${COMMONPATH}") + +PATTERN_NAME=${1:-$(basename "`pwd`")} + +ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" "rhvp.cluster_utils.k8s_secrets" diff --git a/common/scripts/make_common_subtree.sh b/common/scripts/make_common_subtree.sh new file mode 100755 index 00000000..a5e406d8 --- /dev/null +++ 
b/common/scripts/make_common_subtree.sh @@ -0,0 +1,76 @@ +#!/bin/sh + +if [ "$1" = "-h" ]; then + echo "This script will convert common into a subtree and add a remote to help manage it." + echo "The script takes three positional arguments, as follows:" + echo + echo "$0 " + echo + echo "Run without arguments, the script would run as if these arguments had been passed:" + echo "$0 https://github.com/hybrid-cloud-patterns/common.git main common-subtree" + echo + echo "Please ensure the git subtree command is available. On RHEL/Fedora, the git subtree command" + echo "is in a separate package called git-subtree" + exit 1 +fi + +if [ -f '/etc/redhat-release' ]; then + rpm -qa | grep git-subtree 2>&1 + if [ ! $? = 0 ]; then + echo "you need to install git-subtree" + echo "would you like to install it now?" + select ANS in yes no + do + case $ANS in + yes) + sudo dnf install git-subtree -y + break + ;; + no) + exit + break + ;; + *) + echo "You must enter yes or no" + ;; + esac + done + fi +fi + +if [ "$1" ]; then + subtree_repo=$1 +else + subtree_repo=https://github.com/hybrid-cloud-patterns/common.git +fi + +if [ "$2" ]; then + subtree_branch=$2 +else + subtree_branch=main +fi + +if [ "$3" ]; then + subtree_remote=$3 +else + subtree_remote=common-subtree +fi + +git diff --quiet || (echo "This script must be run on a clean working tree" && exit 1) + +echo "Changing directory to project root" +cd `git rev-parse --show-toplevel` + +echo "Removing existing common and replacing it with subtree from $subtree_repo $subtree_remote" +rm -rf common + +echo "Committing removal of common" +(git add -A :/ && git commit -m "Removed previous version of common to convert to subtree from $subtree_repo $subtree_branch") || exit 1 + +echo "Adding (possibly replacing) subtree remote $subtree_remote" +git remote rm "$subtree_remote" +git remote add -f "$subtree_remote" "$subtree_repo" || exit 1 +git subtree add --prefix=common "$subtree_remote" "$subtree_branch" || exit 1 + +echo 
"Complete. You may now push these results if you are satisfied" +exit 0 diff --git a/common/scripts/manage-secret-app.sh b/common/scripts/manage-secret-app.sh new file mode 100755 index 00000000..18a986e5 --- /dev/null +++ b/common/scripts/manage-secret-app.sh @@ -0,0 +1,53 @@ +#!/bin/sh + +APP=$1 +STATE=$2 + +MAIN_CLUSTERGROUP_FILE="./values-$(common/scripts/determine-main-clustergroup.sh).yaml" +MAIN_CLUSTERGROUP_PROJECT="$(common/scripts/determine-main-clustergroup.sh)" + +case "$APP" in + "vault") + APP_NAME="vault" + NAMESPACE="vault" + PROJECT="$MAIN_CLUSTERGROUP_PROJECT" + CHART_NAME="hashicorp-vault" + CHART_VERSION=0.1.* + + ;; + "golang-external-secrets") + APP_NAME="golang-external-secrets" + NAMESPACE="golang-external-secrets" + PROJECT="$MAIN_CLUSTERGROUP_PROJECT" + CHART_NAME="golang-external-secrets" + CHART_VERSION=0.1.* + + ;; + *) + echo "Error - cannot manage $APP can only manage vault and golang-external-secrets" + exit 1 + ;; +esac + +case "$STATE" in + "present") + common/scripts/manage-secret-namespace.sh "$NAMESPACE" "$STATE" + + RES=$(yq ".clusterGroup.applications[] | select(.path == \"$CHART_LOCATION\")" "$MAIN_CLUSTERGROUP_FILE" 2>/dev/null) + if [ -z "$RES" ]; then + echo "Application with chart location $CHART_LOCATION not found, adding" + yq -i ".clusterGroup.applications.$APP_NAME = { \"name\": \"$APP_NAME\", \"namespace\": \"$NAMESPACE\", \"project\": \"$PROJECT\", \"chart\": \"$CHART_NAME\", \"chartVersion\": \"$CHART_VERSION\"}" "$MAIN_CLUSTERGROUP_FILE" + fi + ;; + "absent") + common/scripts/manage-secret-namespace.sh "$NAMESPACE" "$STATE" + echo "Removing application wth chart location $CHART_LOCATION" + yq -i "del(.clusterGroup.applications[] | select(.chart == \"$CHART_NAME\"))" "$MAIN_CLUSTERGROUP_FILE" + ;; + *) + echo "$STATE not supported" + exit 1 + ;; +esac + +exit 0 diff --git a/common/scripts/manage-secret-namespace.sh b/common/scripts/manage-secret-namespace.sh new file mode 100755 index 00000000..bcb06742 --- 
/dev/null +++ b/common/scripts/manage-secret-namespace.sh @@ -0,0 +1,28 @@ +#!/bin/sh + +NAMESPACE=$1 +STATE=$2 + +MAIN_CLUSTERGROUP_FILE="./values-$(common/scripts/determine-main-clustergroup.sh).yaml" +MAIN_CLUSTERGROUP_PROJECT="$(common/scripts/determine-main-clustergroup.sh)" + +case "$STATE" in + "present") + + RES=$(yq ".clusterGroup.namespaces[] | select(. == \"$NAMESPACE\")" "$MAIN_CLUSTERGROUP_FILE" 2>/dev/null) + if [ -z "$RES" ]; then + echo "Namespace $NAMESPACE not found, adding" + yq -i ".clusterGroup.namespaces += [ \"$NAMESPACE\" ]" "$MAIN_CLUSTERGROUP_FILE" + fi + ;; + "absent") + echo "Removing namespace $NAMESPACE" + yq -i "del(.clusterGroup.namespaces[] | select(. == \"$NAMESPACE\"))" "$MAIN_CLUSTERGROUP_FILE" + ;; + *) + echo "$STATE not supported" + exit 1 + ;; +esac + +exit 0 diff --git a/common/scripts/pattern-util.sh b/common/scripts/pattern-util.sh new file mode 100755 index 00000000..1fcaee76 --- /dev/null +++ b/common/scripts/pattern-util.sh @@ -0,0 +1,108 @@ +#!/bin/bash + +function is_available { + command -v $1 >/dev/null 2>&1 || { echo >&2 "$1 is required but it's not installed. Aborting."; exit 1; } +} + +function version { + echo "$@" | awk -F. 
'{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }' +} + +if [ -z "$PATTERN_UTILITY_CONTAINER" ]; then + PATTERN_UTILITY_CONTAINER="quay.io/hybridcloudpatterns/utility-container" +fi +# If PATTERN_DISCONNECTED_HOME is set it will be used to populate both PATTERN_UTILITY_CONTAINER +# and PATTERN_INSTALL_CHART automatically +if [ -n "${PATTERN_DISCONNECTED_HOME}" ]; then + PATTERN_UTILITY_CONTAINER="${PATTERN_DISCONNECTED_HOME}/utility-container" + PATTERN_INSTALL_CHART="oci://${PATTERN_DISCONNECTED_HOME}/pattern-install" + echo "PATTERN_DISCONNECTED_HOME is set to ${PATTERN_DISCONNECTED_HOME}" + echo "Setting the following variables:" + echo " PATTERN_UTILITY_CONTAINER: ${PATTERN_UTILITY_CONTAINER}" + echo " PATTERN_INSTALL_CHART: ${PATTERN_INSTALL_CHART}" +fi + +readonly commands=(podman) +for cmd in ${commands[@]}; do is_available "$cmd"; done + +UNSUPPORTED_PODMAN_VERSIONS="1.6 1.5" +PODMAN_VERSION_STR=$(podman --version) +for i in ${UNSUPPORTED_PODMAN_VERSIONS}; do + # We add a space + if echo "${PODMAN_VERSION_STR}" | grep -q -E "\b${i}"; then + echo "Unsupported podman version. We recommend > 4.3.0" + podman --version + exit 1 + fi +done + +# podman --version outputs: +# podman version 4.8.2 +PODMAN_VERSION=$(echo "${PODMAN_VERSION_STR}" | awk '{ print $NF }') + +# podman < 4.3.0 do not support keep-id:uid=... +if [ $(version "${PODMAN_VERSION}") -lt $(version "4.3.0") ]; then + PODMAN_ARGS="-v ${HOME}:/root" +else + # We do not rely on bash's $UID and $GID because on MacOSX $GID is not set + MYNAME=$(id -n -u) + MYUID=$(id -u) + MYGID=$(id -g) + PODMAN_ARGS="--passwd-entry ${MYNAME}:x:${MYUID}:${MYGID}::/pattern-home:/bin/bash --user ${MYUID}:${MYGID} --userns keep-id:uid=${MYUID},gid=${MYGID}" + +fi + +if [ -n "$KUBECONFIG" ]; then + if [[ ! "${KUBECONFIG}" =~ ^$HOME* ]]; then + echo "${KUBECONFIG} is pointing outside of the HOME folder, this will make it unavailable from the container." 
+ echo "Please move it somewhere inside your $HOME folder, as that is what gets bind-mounted inside the container" + exit 1 + fi +fi + +# Detect if we use podman machine. If we do not then we bind mount local host ssl folders +# if we are using podman machine then we do not bind mount anything (for now!) +REMOTE_PODMAN=$(podman system connection list -q | wc -l) +if [ $REMOTE_PODMAN -eq 0 ]; then # If we are not using podman machine we check the hosts folders + # Use /etc/pki by default and try a couple of fallbacks if it does not exist + if [ -d /etc/pki ]; then + PKI_HOST_MOUNT_ARGS="-v /etc/pki:/etc/pki:ro" + elif [ -d /etc/ssl ]; then + PKI_HOST_MOUNT_ARGS="-v /etc/ssl:/etc/ssl:ro" + else + PKI_HOST_MOUNT_ARGS="-v /usr/share/ca-certificates:/usr/share/ca-certificates:ro" + fi +else + PKI_HOST_MOUNT_ARGS="" +fi + +# Copy Kubeconfig from current environment. The utilities will pick up ~/.kube/config if set so it's not mandatory +# $HOME is mounted as itself for any files that are referenced with absolute paths +# $HOME is mounted to /root because the UID in the container is 0 and that's where SSH looks for credentials + +podman run -it --rm --pull=newer \ + --security-opt label=disable \ + -e EXTRA_HELM_OPTS \ + -e EXTRA_PLAYBOOK_OPTS \ + -e TARGET_ORIGIN \ + -e NAME \ + -e TOKEN_SECRET \ + -e TOKEN_NAMESPACE \ + -e VALUES_SECRET \ + -e KUBECONFIG \ + -e PATTERN_INSTALL_CHART \ + -e PATTERN_DISCONNECTED_HOME \ + -e K8S_AUTH_HOST \ + -e K8S_AUTH_VERIFY_SSL \ + -e K8S_AUTH_SSL_CA_CERT \ + -e K8S_AUTH_USERNAME \ + -e K8S_AUTH_PASSWORD \ + -e K8S_AUTH_TOKEN \ + ${PKI_HOST_MOUNT_ARGS} \ + -v "${HOME}":"${HOME}" \ + -v "${HOME}":/pattern-home \ + ${PODMAN_ARGS} \ + ${EXTRA_ARGS} \ + -w "$(pwd)" \ + "$PATTERN_UTILITY_CONTAINER" \ + $@ diff --git a/common/scripts/preview-all.sh b/common/scripts/preview-all.sh new file mode 100755 index 00000000..4bf59322 --- /dev/null +++ b/common/scripts/preview-all.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +REPO=$1; shift; +TARGET_BRANCH=$1; 
shift + +HUB=$( yq ".main.clusterGroupName" values-global.yaml ) +MANAGED_CLUSTERS=$( yq ".clusterGroup.managedClusterGroups.[].name" values-$HUB.yaml ) +ALL_CLUSTERS=( $HUB $MANAGED_CLUSTERS ) + +CLUSTER_INFO_OUT=$(oc cluster-info 2>&1) +CLUSTER_INFO_RET=$? +if [ $CLUSTER_INFO_RET -ne 0 ]; then + echo "Could not access the cluster:" + echo "${CLUSTER_INFO_OUT}" + exit 1 +fi + +for cluster in ${ALL_CLUSTERS[@]}; do + # We always add clustergroup as it is the entry point and it gets special cased in preview.sh. + APPS="clustergroup $( yq ".clusterGroup.applications.[].name" values-$cluster.yaml )" + for app in $APPS; do + printf "# Parsing application $app from cluster $cluster\n" + common/scripts/preview.sh $cluster $app $REPO $TARGET_BRANCH + done +done diff --git a/common/scripts/preview.sh b/common/scripts/preview.sh new file mode 100755 index 00000000..6da45785 --- /dev/null +++ b/common/scripts/preview.sh @@ -0,0 +1,118 @@ +#!/bin/bash + +# DISCLAIMER +# +# - Parsing of applications needs to be more clever. +# - There is currently not a mechanism to actually preview against multiple clusters +# (i.e. a hub and a remote). All previews will be done against the current. +# - Make output can be included in the YAML. 
+ +SITE=$1; shift +APPNAME=$1; shift +GIT_REPO=$1; shift +GIT_BRANCH=$1; shift + +if [ "${APPNAME}" != "clustergroup" ]; then + # This covers the following case: + # foobar: + # name: foo + # namespace: foo + # project: foo + # path: charts/all/foo + # So we retrieve the actual index ("foobar") given the name attribute of the application + APP=$(yq ".clusterGroup.applications | with_entries(select(.value.name == \"$APPNAME\")) | keys | .[0]" values-$SITE.yaml) + isLocalHelmChart=$(yq ".clusterGroup.applications.$APP.path" values-$SITE.yaml) + if [ $isLocalHelmChart != "null" ]; then + chart=$(yq ".clusterGroup.applications.$APP.path" values-$SITE.yaml) + else + helmrepo=$(yq ".clusterGroup.applications.$APP.repoURL" values-$SITE.yaml) + helmrepo="${helmrepo:+oci://quay.io/hybridcloudpatterns}" + chartversion=$(yq ".clusterGroup.applications.$APP.chartVersion" values-$SITE.yaml) + chartname=$(yq ".clusterGroup.applications.$APP.chart" values-$SITE.yaml) + chart="${helmrepo}/${chartname} --version ${chartversion}" + fi + namespace=$(yq ".clusterGroup.applications.$APP.namespace" values-$SITE.yaml) +else + APP=$APPNAME + clusterGroupChartVersion=$(yq ".main.multiSourceConfig.clusterGroupChartVersion" values-global.yaml) + helmrepo="oci://quay.io/hybridcloudpatterns" + chart="${helmrepo}/clustergroup --version ${clusterGroupChartVersion}" + namespace="openshift-operators" +fi +pattern=$(yq ".global.pattern" values-global.yaml) + +# You can override the default lookups by using OCP_{PLATFORM,VERSION,DOMAIN} +# Note that when using the utility container you need to pass in the above variables +# by export EXTRA_ARGS="-e OCP_PLATFORM -e OCP_VERSION -e OCP_DOMAIN" before +# invoking pattern-util.sh +platform=${OCP_PLATFORM:-$(oc get Infrastructure.config.openshift.io/cluster -o jsonpath='{.spec.platformSpec.type}')} +ocpversion=${OCP_VERSION:-$(oc get clusterversion/version -o jsonpath='{.status.desired.version}' | awk -F. 
'{print $1"."$2}')} +domain=${OCP_DOMAIN:-$(oc get Ingress.config.openshift.io/cluster -o jsonpath='{.spec.domain}' | sed 's/^apps.//')} + +function replaceGlobals() { + output=$( echo $1 | sed -e 's/ //g' -e 's/\$//g' -e s@^-@@g -e s@\'@@g ) + + output=$(echo $output | sed "s@{{.Values.global.clusterPlatform}}@${platform}@g") + output=$(echo $output | sed "s@{{.Values.global.clusterVersion}}@${ocpversion}@g") + output=$(echo $output | sed "s@{{.Values.global.clusterDomain}}@${domain}@g") + + echo $output +} + +function getOverrides() { + overrides='' + overrides=$( yq ".clusterGroup.applications.$APP.overrides[]" "values-$SITE.yaml" ) + overrides=$( echo "$overrides" | tr -d '\n' ) + overrides=$( echo "$overrides" | sed -e 's/name:/ --set/g; s/value: /=/g' ) + if [ -n "$overrides" ]; then + echo "$overrides" + fi +} + + +CLUSTER_OPTS="" +CLUSTER_OPTS="$CLUSTER_OPTS --set global.pattern=$pattern" +CLUSTER_OPTS="$CLUSTER_OPTS --set global.repoURL=$GIT_REPO" +CLUSTER_OPTS="$CLUSTER_OPTS --set main.git.repoURL=$GIT_REPO" +CLUSTER_OPTS="$CLUSTER_OPTS --set main.git.revision=$GIT_BRANCH" +CLUSTER_OPTS="$CLUSTER_OPTS --set global.namespace=$namespace" +CLUSTER_OPTS="$CLUSTER_OPTS --set global.hubClusterDomain=apps.$domain" +CLUSTER_OPTS="$CLUSTER_OPTS --set global.localClusterDomain=apps.$domain" +CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterDomain=$domain" +CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterVersion=$ocpversion" +CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterPlatform=$platform" + + +sharedValueFiles=$(yq ".clusterGroup.sharedValueFiles" values-$SITE.yaml) +appValueFiles=$(yq ".clusterGroup.applications.$APP.extraValueFiles" values-$SITE.yaml) +isKustomize=$(yq ".clusterGroup.applications.$APP.kustomize" values-$SITE.yaml) +OVERRIDES=$( getOverrides ) + +VALUE_FILES="-f values-global.yaml -f values-$SITE.yaml" +IFS=$'\n' +for line in $sharedValueFiles; do + if [ $line != "null" ] && [ -f $line ]; then + file=$(replaceGlobals $line) + 
VALUE_FILES="$VALUE_FILES -f $PWD$file" + fi +done + +for line in $appValueFiles; do + if [ $line != "null" ] && [ -f $line ]; then + file=$(replaceGlobals $line) + VALUE_FILES="$VALUE_FILES -f $PWD$file" + fi +done + +if [ $isKustomize == "true" ]; then + kustomizePath=$(yq ".clusterGroup.applications.$APP.path" values-$SITE.yaml) + repoURL=$(yq ".clusterGroup.applications.$APP.repoURL" values-$SITE.yaml) + if [[ $repoURL == http* ]] || [[ $repoURL == git@ ]]; then + kustomizePath="${repoURL}/${kustomizePath}" + fi + cmd="oc kustomize ${kustomizePath}" + eval "$cmd" +else + cmd="helm template $chart --name-template ${APP} -n ${namespace} ${VALUE_FILES} ${OVERRIDES} ${CLUSTER_OPTS}" + eval "$cmd" +fi diff --git a/common/scripts/process-secrets.sh b/common/scripts/process-secrets.sh new file mode 100755 index 00000000..47eff7fa --- /dev/null +++ b/common/scripts/process-secrets.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +set -eu + +get_abs_filename() { + # $1 : relative filename + echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" +} + +SCRIPT=$(get_abs_filename "$0") +SCRIPTPATH=$(dirname "${SCRIPT}") +COMMONPATH=$(dirname "${SCRIPTPATH}") +PATTERNPATH=$(dirname "${COMMONPATH}") + +PATTERN_NAME=${1:-$(basename "`pwd`")} +SECRETS_BACKING_STORE="$($SCRIPTPATH/determine-secretstore-backend.sh)" + +ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" -e secrets_backing_store="${SECRETS_BACKING_STORE}" "rhvp.cluster_utils.process_secrets" diff --git a/common/scripts/set-secret-backend.sh b/common/scripts/set-secret-backend.sh new file mode 100755 index 00000000..e07b15bf --- /dev/null +++ b/common/scripts/set-secret-backend.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +BACKEND=$1 + +yq -i ".global.secretStore.backend = \"$BACKEND\"" values-global.yaml diff --git a/common/scripts/vault-utils.sh b/common/scripts/vault-utils.sh new file mode 100755 index 00000000..b014e5a4 --- /dev/null +++ b/common/scripts/vault-utils.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env 
bash +set -eu + +get_abs_filename() { + # $1 : relative filename + echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" +} + +SCRIPT=$(get_abs_filename "$0") +SCRIPTPATH=$(dirname "${SCRIPT}") +COMMONPATH=$(dirname "${SCRIPTPATH}") +PATTERNPATH=$(dirname "${COMMONPATH}") + +# Parse arguments +if [ $# -lt 1 ]; then + echo "Specify at least the command ($#): $*" + exit 1 +fi + +TASK="${1}" +PATTERN_NAME=${2:-$(basename "`pwd`")} + +if [ -z ${TASK} ]; then + echo "Task is unset" + exit 1 +fi + +ansible-playbook -t "${TASK}" -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" "rhvp.cluster_utils.vault" diff --git a/common/scripts/write-token-kubeconfig.sh b/common/scripts/write-token-kubeconfig.sh new file mode 100755 index 00000000..7544fac2 --- /dev/null +++ b/common/scripts/write-token-kubeconfig.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -eu + +OUTPUTFILE=${1:-"~/.kube/config"} + +get_abs_filename() { + # $1 : relative filename + echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" +} + +SCRIPT=$(get_abs_filename "$0") +SCRIPTPATH=$(dirname "${SCRIPT}") +COMMONPATH=$(dirname "${SCRIPTPATH}") +PATTERNPATH=$(dirname "${COMMONPATH}") + +ansible-playbook -e pattern_dir="${PATTERNPATH}" -e kubeconfig_file="${OUTPUTFILE}" "rhvp.cluster_utils.write-token-kubeconfig"