From 8d80488322a8433977017fe0ce854f451515bae8 Mon Sep 17 00:00:00 2001
From: Gustavo Lira e Silva
Date: Fri, 6 Dec 2024 14:13:13 -0300
Subject: [PATCH] refactor: modularize OpenShift CI script for better
 maintainability - new (#2032)

* modularize OpenShift CI script for better maintainability

* Add log copying for auth providers in utils.sh script

This commit introduces copying of logs from the `auth-providers-logs`
directory to the artifact directory. It ensures that logs for authentication
providers are now included in the pipeline artifacts, aiding in
troubleshooting and analysis. Additionally, the indentation of the
screenshots-copying block is corrected for consistency.
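For reference, this is the guarded copy added to run_tests in utils.sh:

    if [ -d "/tmp/backstage-showcase/e2e-tests/auth-providers-logs" ]; then
      cp -a /tmp/backstage-showcase/e2e-tests/auth-providers-logs/* "${ARTIFACT_DIR}/${project}/"
    fi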
---
 .ibm/pipelines/cluster/aks/deployment.sh  |   4 +-
 .ibm/pipelines/cluster/gke/deployment.sh  |   4 +-
 .ibm/pipelines/jobs/aks.sh                |  30 +
 .ibm/pipelines/jobs/gke.sh                |  24 +
 .ibm/pipelines/jobs/main.sh               |  42 ++
 .ibm/pipelines/jobs/ocp-v4-15.sh          |  21 +
 .ibm/pipelines/jobs/ocp-v4-16.sh          |  21 +
 .ibm/pipelines/jobs/operator.sh           |  12 +
 .ibm/pipelines/jobs/periodic.sh           |  40 ++
 .ibm/pipelines/openshift-ci-tests.sh      | 545 ++----------------
 .ibm/pipelines/utils.sh                   | 424 +++++++++++++-
 e2e-tests/playwright.config.ts            |   1 +
 .../e2e/plugins/topology/topology.spec.ts |   3 +-
 13 files changed, 669 insertions(+), 502 deletions(-)
 create mode 100644 .ibm/pipelines/jobs/aks.sh
 create mode 100644 .ibm/pipelines/jobs/gke.sh
 create mode 100644 .ibm/pipelines/jobs/main.sh
 create mode 100644 .ibm/pipelines/jobs/ocp-v4-15.sh
 create mode 100644 .ibm/pipelines/jobs/ocp-v4-16.sh
 create mode 100644 .ibm/pipelines/jobs/operator.sh
 create mode 100644 .ibm/pipelines/jobs/periodic.sh

diff --git a/.ibm/pipelines/cluster/aks/deployment.sh b/.ibm/pipelines/cluster/aks/deployment.sh
index 459a04c341..245ebe1106 100644
--- a/.ibm/pipelines/cluster/aks/deployment.sh
+++ b/.ibm/pipelines/cluster/aks/deployment.sh
@@ -1,5 +1,4 @@
 initiate_aks_deployment() {
-  install_helm
   add_helm_repos
   delete_namespace "${NAME_SPACE_RBAC_K8S}"
   configure_namespace "${NAME_SPACE_K8S}"
@@ -20,7 +19,6 @@ initiate_aks_deployment() {
 }
 
 initiate_rbac_aks_deployment() {
-  install_helm
   add_helm_repos
   delete_namespace "${NAME_SPACE_K8S}"
   configure_namespace "${NAME_SPACE_RBAC_K8S}"
@@ -38,4 +36,4 @@ initiate_rbac_aks_deployment() {
     --set global.host="${K8S_CLUSTER_ROUTER_BASE}" \
     --set upstream.backstage.image.repository="${QUAY_REPO}" \
     --set upstream.backstage.image.tag="${TAG_NAME}"
-}
\ No newline at end of file
+}
diff --git a/.ibm/pipelines/cluster/gke/deployment.sh b/.ibm/pipelines/cluster/gke/deployment.sh
index 813fefd67c..99c910fd81 100644
--- a/.ibm/pipelines/cluster/gke/deployment.sh
+++ b/.ibm/pipelines/cluster/gke/deployment.sh
@@ -1,6 +1,5 @@
 initiate_gke_deployment() {
   gcloud_ssl_cert_create $GKE_CERT_NAME $GKE_INSTANCE_DOMAIN_NAME $GOOGLE_CLOUD_PROJECT
-  install_helm
   add_helm_repos
   delete_namespace "${NAME_SPACE_RBAC_K8S}"
   configure_namespace "${NAME_SPACE_K8S}"
@@ -24,7 +23,6 @@ initiate_gke_deployment() {
 
 initiate_rbac_gke_deployment() {
   gcloud_ssl_cert_create $GKE_CERT_NAME $GKE_INSTANCE_DOMAIN_NAME $GOOGLE_CLOUD_PROJECT
-  install_helm
   add_helm_repos
   delete_namespace "${NAME_SPACE_K8S}"
   configure_namespace "${NAME_SPACE_RBAC_K8S}"
@@ -43,4 +41,4 @@ initiate_rbac_gke_deployment() {
     --set upstream.backstage.image.repository="${QUAY_REPO}" \
     --set upstream.backstage.image.tag="${TAG_NAME}" \
     --set upstream.ingress.annotations."ingress\.gcp\.kubernetes\.io/pre-shared-cert"="${GKE_CERT_NAME}"
-}
\ No newline at end of file
+}
diff --git a/.ibm/pipelines/jobs/aks.sh b/.ibm/pipelines/jobs/aks.sh
new file mode 100644
index 0000000000..ddf4d33c56
--- /dev/null
+++ b/.ibm/pipelines/jobs/aks.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+handle_aks() {
+  echo "Starting AKS deployment"
+  for file in ${DIR}/cluster/aks/*.sh; do source $file; done
+
+  export K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_URL)
+  export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_TOKEN)
+  export K8S_CLUSTER_ROUTER_BASE=$AKS_INSTANCE_DOMAIN_NAME
+  export NAME_SPACE_K8S="showcase-k8s-ci-nightly"
+  export NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly"
+
+  url="https://${K8S_CLUSTER_ROUTER_BASE}"
+
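+  # Start the nightly AKS cluster and enable the app-routing add-on before deploying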
+  az_login
+  az_aks_start "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
+  az_aks_approuting_enable "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
+  az_aks_get_credentials "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
+
+  set_github_app_3_credentials
+
+  initiate_aks_deployment
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" "${url}"
+  delete_namespace "${NAME_SPACE_K8S}"
+  initiate_rbac_aks_deployment
+  check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC_K8S}" "${url}"
+  delete_namespace "${NAME_SPACE_RBAC_K8S}"
+}
+
+
diff --git a/.ibm/pipelines/jobs/gke.sh b/.ibm/pipelines/jobs/gke.sh
new file mode 100644
index 0000000000..71e8de697c
--- /dev/null
+++ b/.ibm/pipelines/jobs/gke.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+handle_gke() {
+  echo "Starting GKE deployment"
+  for file in ${DIR}/cluster/gke/*.sh; do source $file; done
+
+  export K8S_CLUSTER_ROUTER_BASE=$GKE_INSTANCE_DOMAIN_NAME
+  export NAME_SPACE_K8S="showcase-k8s-ci-nightly"
+  export NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly"
+  url="https://${K8S_CLUSTER_ROUTER_BASE}"
+
+  gcloud_auth "${GKE_SERVICE_ACCOUNT_NAME}" "/tmp/secrets/GKE_SERVICE_ACCOUNT_KEY"
+  gcloud_gke_get_credentials "${GKE_CLUSTER_NAME}" "${GKE_CLUSTER_REGION}" "${GOOGLE_CLOUD_PROJECT}"
+
+  set_github_app_3_credentials
+
+  initiate_gke_deployment
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" "${url}"
+  delete_namespace "${NAME_SPACE_K8S}"
+  initiate_rbac_gke_deployment
+  check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC_K8S}" "${url}"
+  delete_namespace "${NAME_SPACE_RBAC_K8S}"
+
+}
diff --git a/.ibm/pipelines/jobs/main.sh b/.ibm/pipelines/jobs/main.sh
new file mode 100644
index 0000000000..027252e91d
--- /dev/null
+++ b/.ibm/pipelines/jobs/main.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+set -x
+
+set_namespace() {
+  # Enable parallel PR testing for the main branch by utilizing a pool of namespaces
+  local namespaces_pool=("pr-1" "pr-2" "pr-3")
+  local namespace_found=false
+  # Iterate through the namespace pool to find an available set
+  for ns in "${namespaces_pool[@]}"; do
+    if ! oc get namespace "showcase-$ns" >/dev/null 2>&1; then
+      echo "Namespace showcase-$ns does not exist. Using NS: showcase-$ns, showcase-rbac-$ns, postgress-external-db-$ns"
+      export NAME_SPACE="showcase-$ns"
+      export NAME_SPACE_RBAC="showcase-rbac-$ns"
+      export NAME_SPACE_POSTGRES_DB="postgress-external-db-$ns"
+      namespace_found=true
+      break
+    fi
+  done
+  if ! $namespace_found; then
+    echo "Error: all namespaces in the pool (${namespaces_pool[*]}) are already in use"
+    exit 1
+  fi
+}
+
+handle_main() {
+  set_github_app_4_credentials
+  set_namespace
+  echo "Configuring namespace: ${NAME_SPACE}"
+  oc_login
+
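+  # Encode the API server URL and cluster name; apply_yaml_files patches them into secrets-rhdh-secrets.yaml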
NAME_SPACE="showcase-ci-nightly" + export NAME_SPACE_RBAC="showcase-rbac-nightly" + export NAME_SPACE_POSTGRES_DB="postgress-external-db-nightly" + export NAME_SPACE_K8S="showcase-k8s-ci-nightly" + export NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly" + + oc_login + + API_SERVER_URL=$(oc whoami --show-server) + ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64) + ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64) + + export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') + + configure_namespace "${NAME_SPACE}" + deploy_test_backstage_provider "${NAME_SPACE}" + local url="https://${RELEASE_NAME}-backstage-${NAME_SPACE}.${K8S_CLUSTER_ROUTER_BASE}" + install_pipelines_operator + sleep 20 # wait for Pipeline Operator/Tekton pipelines to be ready + oc apply -f "$dir/resources/pipeline-run/hello-world-pipeline.yaml" + oc apply -f "$dir/resources/pipeline-run/hello-world-pipeline-run.yaml" + initiate_deployments + check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" "${url}" + check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}" "${url}" + + # Only test TLS config with RDS and Change configuration at runtime in nightly jobs + initiate_rds_deployment "${RELEASE_NAME}" "${NAME_SPACE_RDS}" + check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RDS}" "${url}" + + # Deploy `showcase-runtime` to run tests that require configuration changes at runtime + configure_namespace "${NAME_SPACE_RUNTIME}" + uninstall_helmchart "${NAME_SPACE_RUNTIME}" "${RELEASE_NAME}" + oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE_RUNTIME}" + apply_yaml_files "${DIR}" "${NAME_SPACE_RUNTIME}" + helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_RUNTIME}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}" + check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RUNTIME}" "${url}" +} diff --git a/.ibm/pipelines/openshift-ci-tests.sh b/.ibm/pipelines/openshift-ci-tests.sh index a49fb6b4ac..ca3dfcd8e3 100755 --- a/.ibm/pipelines/openshift-ci-tests.sh +++ b/.ibm/pipelines/openshift-ci-tests.sh @@ -4,7 +4,7 @@ set -xe export PS4='[$(date "+%Y-%m-%d %H:%M:%S")] ' # logs timestamp for every cmd. 
+  oc apply -f "$DIR/resources/pipeline-run/hello-world-pipeline.yaml"
+  oc apply -f "$DIR/resources/pipeline-run/hello-world-pipeline-run.yaml"
+  initiate_deployments
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" "${url}"
+  check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}" "${url}"
+
+  # Only test TLS config with RDS and runtime configuration changes in nightly jobs
+  initiate_rds_deployment "${RELEASE_NAME}" "${NAME_SPACE_RDS}"
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RDS}" "${url}"
+
+  # Deploy `showcase-runtime` to run tests that require configuration changes at runtime
+  configure_namespace "${NAME_SPACE_RUNTIME}"
+  uninstall_helmchart "${NAME_SPACE_RUNTIME}" "${RELEASE_NAME}"
+  oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE_RUNTIME}"
+  apply_yaml_files "${DIR}" "${NAME_SPACE_RUNTIME}"
+  helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_RUNTIME}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RUNTIME}" "${url}"
+}
diff --git a/.ibm/pipelines/openshift-ci-tests.sh b/.ibm/pipelines/openshift-ci-tests.sh
index a49fb6b4ac..ca3dfcd8e3 100755
--- a/.ibm/pipelines/openshift-ci-tests.sh
+++ b/.ibm/pipelines/openshift-ci-tests.sh
@@ -4,7 +4,7 @@ set -xe
 export PS4='[$(date "+%Y-%m-%d %H:%M:%S")] ' # logs timestamp for every cmd.
 
 LOGFILE="test-log"
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+export DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 secret_name="rhdh-k8s-plugin-secret"
 OVERALL_RESULT=0
@@ -23,498 +23,67 @@ cleanup() {
 trap cleanup EXIT INT ERR
 
-source "${DIR}/utils.sh"
-if [[ "$JOB_NAME" == *aks* ]]; then
-  for file in ${DIR}/cluster/aks/*.sh; do source $file; done
-elif [[ "$JOB_NAME" == *gke* ]]; then
-  for file in ${DIR}/cluster/gke/*.sh; do source $file; done
-fi
-
-set_cluster_info() {
-  export K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_URL)
-  export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_TOKEN)
-
-  if [[ "$JOB_NAME" == *ocp-v4-16 ]]; then
-    K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_OS_1_CLUSTER_URL)
-    K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_OS_1_CLUSTER_TOKEN)
-  elif [[ "$JOB_NAME" == *ocp-v4-15 ]]; then
-    K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_OS_2_CLUSTER_URL)
-    K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_OS_2_CLUSTER_TOKEN)
-  elif [[ "$JOB_NAME" == *aks* ]]; then
-    K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_URL)
-    K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_TOKEN)
-  fi
-}
-
-set_namespace() {
-  if [[ "$JOB_NAME" == *periodic-* ]]; then
-    NAME_SPACE="showcase-ci-nightly"
-    NAME_SPACE_RBAC="showcase-rbac-nightly"
-    NAME_SPACE_POSTGRES_DB="postgress-external-db-nightly"
-    NAME_SPACE_K8S="showcase-k8s-ci-nightly"
-    NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly"
-  elif [[ "$JOB_NAME" == *pull-*-main-e2e-tests* ]]; then
-    # Enable parallel PR testing for main branch by utilizing a pool of namespaces
-    local namespaces_pool=("pr-1" "pr-2" "pr-3")
-    local namespace_found=false
-    # Iterate through namespace pool to find an available set
-    for ns in "${namespaces_pool[@]}"; do
-      if ! oc get namespace "showcase-$ns" >/dev/null 2>&1; then
-        echo "Namespace "showcase-$ns" does not exist, Using NS: showcase-$ns, showcase-rbac-$ns, postgress-external-db-$ns"
-        NAME_SPACE="showcase-$ns"
-        NAME_SPACE_RBAC="showcase-rbac-$ns"
-        NAME_SPACE_POSTGRES_DB="postgress-external-db-$ns"
-        namespace_found=true
-        break
-      fi
-    done
-    if ! $namespace_found; then
-      echo "Error: All namespaces $namespaces_pool already in Use"
-      exit 1
-    fi
-  fi
-}
-
-add_helm_repos() {
-  helm version
-
-  local repos=(
-    "bitnami=https://charts.bitnami.com/bitnami"
-    "backstage=https://backstage.github.io/charts"
-    "${HELM_REPO_NAME}=${HELM_REPO_URL}"
-  )
-
-  for repo in "${repos[@]}"; do
-    local key="${repo%%=*}"
-    local value="${repo##*=}"
-
-    if ! helm repo list | grep -q "^$key"; then
-      helm repo add "$key" "$value"
-    else
-      echo "Repository $key already exists - updating repository instead."
-    fi
-  done
-
-  helm repo update
-}
-
-install_oc() {
-  if command -v oc >/dev/null 2>&1; then
-    echo "oc is already installed."
-  else
-    curl -LO https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz
-    tar -xf oc.tar.gz
-    mv oc /usr/local/bin/
-    rm oc.tar.gz
-    echo "oc installed successfully."
-  fi
-}
-
-install_helm() {
-  if command -v helm >/dev/null 2>&1; then
-    echo "Helm is already installed."
-  else
-    echo "Installing Helm 3 client"
-    mkdir ~/tmpbin && cd ~/tmpbin
-    curl -sL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash -f
-    export PATH=$(pwd):$PATH
-    echo "Helm client installed successfully."
-  fi
-}
-
-uninstall_helmchart() {
-  local project=$1
-  local release=$2
-  if helm list -n "${project}" | grep -q "${release}"; then
-    echo "Chart already exists. Removing it before install."
-    helm uninstall "${release}" -n "${project}"
-  fi
-}
-
-configure_namespace() {
-  local project=$1
-  delete_namespace $project
-  oc create namespace "${project}"
-  oc config set-context --current --namespace="${project}"
-}
-
-delete_namespace() {
-  local project=$1
-  if oc get namespace "$project" >/dev/null 2>&1; then
-    echo "Namespace ${project} exists. Attempting to delete..."
-
-    # Remove blocking finalizers
-    remove_finalizers_from_resources "$project"
-
-    # Attempt to delete the namespace
-    oc delete namespace "$project" --grace-period=0 --force || true
-
-    # Check if namespace is still stuck in 'Terminating' and force removal if necessary
-    if oc get namespace "$project" -o jsonpath='{.status.phase}' | grep -q 'Terminating'; then
-      echo "Namespace ${project} is stuck in Terminating. Forcing deletion..."
-      force_delete_namespace "$project"
-    fi
-  fi
-}
-
-configure_external_postgres_db() {
-  local project=$1
-  oc apply -f "${DIR}/resources/postgres-db/postgres.yaml" --namespace="${NAME_SPACE_POSTGRES_DB}"
-  sleep 5
-
-  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.ca\.crt}' | base64 --decode > postgres-ca
-  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.tls\.crt}' | base64 --decode > postgres-tls-crt
-  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.tls\.key}' | base64 --decode > postgres-tsl-key
-
-  oc create secret generic postgress-external-db-cluster-cert \
-    --from-file=ca.crt=postgres-ca \
-    --from-file=tls.crt=postgres-tls-crt \
-    --from-file=tls.key=postgres-tsl-key \
-    --dry-run=client -o yaml | oc apply -f - --namespace="${project}"
-
-  POSTGRES_PASSWORD=$(oc get secret/postgress-external-db-pguser-janus-idp -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath={.data.password})
-  sed -i "s|POSTGRES_PASSWORD:.*|POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
-  POSTGRES_HOST=$(echo -n "postgress-external-db-primary.$NAME_SPACE_POSTGRES_DB.svc.cluster.local" | base64 | tr -d '\n')
-  sed -i "s|POSTGRES_HOST:.*|POSTGRES_HOST: ${POSTGRES_HOST}|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
-  oc apply -f "${DIR}/resources/postgres-db/postgres-cred.yaml" --namespace="${project}"
-}
-
-apply_yaml_files() {
-  local dir=$1
-  local project=$2
-  echo "Applying YAML files to namespace ${project}"
-
-  oc config set-context --current --namespace="${project}"
-
-  local files=(
-    "$dir/resources/service_account/service-account-rhdh.yaml"
-    "$dir/resources/cluster_role_binding/cluster-role-binding-k8s.yaml"
-    "$dir/resources/cluster_role/cluster-role-k8s.yaml"
-    "$dir/resources/cluster_role/cluster-role-ocm.yaml"
-    "$dir/auth/secrets-rhdh-secrets.yaml"
-  )
-
-  for file in "${files[@]}"; do
-    sed -i "s/namespace:.*/namespace: ${project}/g" "$file"
-  done
-
-  if [[ "$JOB_NAME" == *aks* || "$JOB_NAME" == *gke* || "$JOB_NAME" == *operator* ]]; then
-    GITHUB_APP_APP_ID=$GITHUB_APP_3_APP_ID
-    GITHUB_APP_CLIENT_ID=$GITHUB_APP_3_CLIENT_ID
-    GITHUB_APP_PRIVATE_KEY=$GITHUB_APP_3_PRIVATE_KEY
-    GITHUB_APP_CLIENT_SECRET=$GITHUB_APP_3_CLIENT_SECRET
-  elif [[ "$JOB_NAME" == *pull-*-main-e2e-tests* ]]; then
-    # GITHUB_APP_4 for all pr's on main branch.
-    GITHUB_APP_APP_ID=$(cat /tmp/secrets/GITHUB_APP_4_APP_ID)
-    GITHUB_APP_CLIENT_ID=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_ID)
-    GITHUB_APP_PRIVATE_KEY=$(cat /tmp/secrets/GITHUB_APP_4_PRIVATE_KEY)
-    GITHUB_APP_CLIENT_SECRET=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_SECRET)
-  fi
-
-  DH_TARGET_URL=$(echo -n "test-backstage-customization-provider-${project}.${K8S_CLUSTER_ROUTER_BASE}" | base64 -w 0)
-
-  for key in GITHUB_APP_APP_ID GITHUB_APP_CLIENT_ID GITHUB_APP_PRIVATE_KEY GITHUB_APP_CLIENT_SECRET GITHUB_APP_JANUS_TEST_APP_ID GITHUB_APP_JANUS_TEST_CLIENT_ID GITHUB_APP_JANUS_TEST_CLIENT_SECRET GITHUB_APP_JANUS_TEST_PRIVATE_KEY GITHUB_APP_WEBHOOK_URL GITHUB_APP_WEBHOOK_SECRET KEYCLOAK_CLIENT_SECRET ACR_SECRET GOOGLE_CLIENT_ID GOOGLE_CLIENT_SECRET K8S_CLUSTER_TOKEN_ENCODED OCM_CLUSTER_URL GITLAB_TOKEN DH_TARGET_URL; do
-    sed -i "s|${key}:.*|${key}: ${!key}|g" "$dir/auth/secrets-rhdh-secrets.yaml"
-  done
-
-  oc apply -f "$dir/resources/service_account/service-account-rhdh.yaml" --namespace="${project}"
-  oc apply -f "$dir/auth/service-account-rhdh-secret.yaml" --namespace="${project}"
-  oc apply -f "$dir/auth/secrets-rhdh-secrets.yaml" --namespace="${project}"
-  if [[ "$JOB_NAME" != *aks* && "$JOB_NAME" != *gke* ]]; then
-    oc new-app https://github.com/janus-qe/test-backstage-customization-provider --namespace="${project}"
-    oc expose svc/test-backstage-customization-provider --namespace="${project}"
-  fi
-  oc apply -f "$dir/resources/cluster_role/cluster-role-k8s.yaml" --namespace="${project}"
-  oc apply -f "$dir/resources/cluster_role_binding/cluster-role-binding-k8s.yaml" --namespace="${project}"
-  oc apply -f "$dir/resources/cluster_role/cluster-role-ocm.yaml" --namespace="${project}"
-  oc apply -f "$dir/resources/cluster_role_binding/cluster-role-binding-ocm.yaml" --namespace="${project}"
-
-  if [[ "$JOB_NAME" != *aks* ]]; then # Skip for AKS, because of strange `sed: -e expression #1, char 136: unterminated `s' command`
-    sed -i "s/K8S_CLUSTER_API_SERVER_URL:.*/K8S_CLUSTER_API_SERVER_URL: ${ENCODED_API_SERVER_URL}/g" "$dir/auth/secrets-rhdh-secrets.yaml"
-  fi
-  sed -i "s/K8S_CLUSTER_NAME:.*/K8S_CLUSTER_NAME: ${ENCODED_CLUSTER_NAME}/g" "$dir/auth/secrets-rhdh-secrets.yaml"
-
-  set +x
-  token=$(oc get secret "${secret_name}" -n "${project}" -o=jsonpath='{.data.token}')
-  sed -i "s/OCM_CLUSTER_TOKEN: .*/OCM_CLUSTER_TOKEN: ${token}/" "$dir/auth/secrets-rhdh-secrets.yaml"
-  set -x
-
-  if [[ "${project}" == *rbac* ]]; then
-    oc create configmap app-config-rhdh --from-file="app-config-rhdh.yaml"="$dir/resources/config_map/app-config-rhdh-rbac.yaml" --namespace="${project}" --dry-run=client -o yaml | oc apply -f -
-  elif [[ "$JOB_NAME" == *aks* || "$JOB_NAME" == *gke* ]]; then
-    yq 'del(.backend.cache)' "$dir/resources/config_map/app-config-rhdh.yaml" \
-      | kubectl create configmap app-config-rhdh --from-file="app-config-rhdh.yaml"="/dev/stdin" --namespace="${project}" --dry-run=client -o yaml \
-      | kubectl apply -f -
-  else
-    oc create configmap app-config-rhdh --from-file="app-config-rhdh.yaml"="$dir/resources/config_map/app-config-rhdh.yaml" --namespace="${project}" --dry-run=client -o yaml | oc apply -f -
-  fi
-  oc create configmap rbac-policy --from-file="rbac-policy.csv"="$dir/resources/config_map/rbac-policy.csv" --namespace="${project}" --dry-run=client -o yaml | oc apply -f -
-  oc apply -f "$dir/auth/secrets-rhdh-secrets.yaml" --namespace="${project}"
-
-  #sleep 20 # wait for Pipeline Operator/Tekton pipelines to be ready
-  # Renable when namespace termination issue is solved
-  # oc apply -f "$dir/resources/pipeline-run/hello-world-pipeline.yaml"
-  # oc apply -f "$dir/resources/pipeline-run/hello-world-pipeline-run.yaml"
-}
-
-run_tests() {
-  local release_name=$1
-  local project=$2
-  project=${project%-pr-*} # Remove -pr- suffix if any set for main branchs pr's.
-  cd "${DIR}/../../e2e-tests"
-  yarn install
-  yarn playwright install chromium
-
-  Xvfb :99 &
-  export DISPLAY=:99
-
-  (
-    set -e
-    echo "Using PR container image: ${TAG_NAME}"
-    yarn "$project"
-  ) 2>&1 | tee "/tmp/${LOGFILE}"
-
-  local RESULT=${PIPESTATUS[0]}
+export K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_URL)
+export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_TOKEN)
-
-  pkill Xvfb
-
-  mkdir -p "${ARTIFACT_DIR}/${project}/test-results"
-  mkdir -p "${ARTIFACT_DIR}/${project}/attachments/screenshots"
-  cp -a /tmp/backstage-showcase/e2e-tests/test-results/* "${ARTIFACT_DIR}/${project}/test-results"
-  cp -a /tmp/backstage-showcase/e2e-tests/${JUNIT_RESULTS} "${ARTIFACT_DIR}/${project}/${JUNIT_RESULTS}"
-
-  if [ -d "/tmp/backstage-showcase/e2e-tests/screenshots" ]; then
-    cp -a /tmp/backstage-showcase/e2e-tests/screenshots/* "${ARTIFACT_DIR}/${project}/attachments/screenshots/"
-  fi
-
-  if [ -d "/tmp/backstage-showcase/e2e-tests/auth-providers-logs" ]; then
-    cp -a /tmp/backstage-showcase/e2e-tests/auth-providers-logs/* "${ARTIFACT_DIR}/${project}/"
-  fi
-
-  ansi2html <"/tmp/${LOGFILE}" >"/tmp/${LOGFILE}.html"
-  cp -a "/tmp/${LOGFILE}.html" "${ARTIFACT_DIR}/${project}"
-  cp -a /tmp/backstage-showcase/e2e-tests/playwright-report/* "${ARTIFACT_DIR}/${project}"
-
-  droute_send "${release_name}" "${project}"
-
-  echo "${project} RESULT: ${RESULT}"
-  if [ "${RESULT}" -ne 0 ]; then
-    OVERALL_RESULT=1
-  fi
-}
-
-check_backstage_running() {
-  local release_name=$1
-  local namespace=$2
-  if [[ "$JOB_NAME" == *aks* || "$JOB_NAME" == *gke* ]]; then
-    local url="https://${K8S_CLUSTER_ROUTER_BASE}"
-  else
-    local url="https://${release_name}-backstage-${namespace}.${K8S_CLUSTER_ROUTER_BASE}"
-  fi
-
-  local max_attempts=30
-  local wait_seconds=30
-
-  echo "Checking if Backstage is up and running at ${url}"
-
-  for ((i = 1; i <= max_attempts; i++)); do
-    local http_status
-    http_status=$(curl --insecure -I -s -o /dev/null -w "%{http_code}" "${url}")
-
-    if [ "${http_status}" -eq 200 ]; then
-      echo "Backstage is up and running!"
-      export BASE_URL="${url}"
-      echo "######## BASE URL ########"
-      echo "${BASE_URL}"
-      return 0
-    else
-      echo "Attempt ${i} of ${max_attempts}: Backstage not yet available (HTTP Status: ${http_status})"
-      sleep "${wait_seconds}"
-    fi
-  done
-
-  echo "Failed to reach Backstage at ${BASE_URL} after ${max_attempts} attempts." | tee -a "/tmp/${LOGFILE}"
-  cp -a "/tmp/${LOGFILE}" "${ARTIFACT_DIR}/${namespace}/"
-  return 1
-}
-
-install_tekton_pipelines() {
-  local dir=$1
-
-  if oc get pods -n "tekton-pipelines" | grep -q "tekton-pipelines"; then
-    echo "Tekton Pipelines are already installed."
-  else
-    echo "Tekton Pipelines is not installed. Installing..."
-    oc apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
-  fi
-}
-
-install_pipelines_operator() {
-  local dir=$1
-  DISPLAY_NAME="Red Hat OpenShift Pipelines"
-
-  if oc get csv -n "openshift-operators" | grep -q "${DISPLAY_NAME}"; then
-    echo "Red Hat OpenShift Pipelines operator is already installed."
-  else
-    echo "Red Hat OpenShift Pipelines operator is not installed. Installing..."
-    oc apply -f "${dir}/resources/pipeline-run/pipelines-operator.yaml"
-  fi
-}
-
-initiate_deployments() {
-
-  #install_pipelines_operator
-  install_crunchy_postgres_operator
-  install_helm
-  add_helm_repos
-
-  configure_namespace "${NAME_SPACE}"
-  uninstall_helmchart "${NAME_SPACE}" "${RELEASE_NAME}"
-
-  # Deploy redis cache db.
-  oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE}"
-
-  cd "${DIR}"
-  apply_yaml_files "${DIR}" "${NAME_SPACE}"
-  echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE}"
-  helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
-
-  configure_namespace "${NAME_SPACE_POSTGRES_DB}"
-  configure_namespace "${NAME_SPACE_RBAC}"
-  configure_external_postgres_db "${NAME_SPACE_RBAC}"
-
-  uninstall_helmchart "${NAME_SPACE_RBAC}" "${RELEASE_NAME_RBAC}"
-  apply_yaml_files "${DIR}" "${NAME_SPACE_RBAC}"
-  echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${RELEASE_NAME_RBAC}"
-  helm upgrade -i "${RELEASE_NAME_RBAC}" -n "${NAME_SPACE_RBAC}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_RBAC_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
-}
-
-initiate_rds_deployment() {
-  local release_name=$1
-  local namespace=$2
-  configure_namespace "${namespace}"
-  uninstall_helmchart "${namespace}" "${release_name}"
-  sed -i "s|POSTGRES_USER:.*|POSTGRES_USER: $RDS_USER|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
-  sed -i "s|POSTGRES_PASSWORD:.*|POSTGRES_PASSWORD: $(echo -n $RDS_PASSWORD | base64 -w 0)|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
-  sed -i "s|POSTGRES_HOST:.*|POSTGRES_HOST: $(echo -n $RDS_1_HOST | base64 -w 0)|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
-  oc apply -f "$DIR/resources/postgres-db/postgres-crt-rds.yaml" -n "${namespace}"
-  oc apply -f "$DIR/resources/postgres-db/postgres-cred.yaml" -n "${namespace}"
-  oc apply -f "$DIR/resources/postgres-db/dynamic-plugins-root-PVC.yaml" -n "${namespace}"
-  helm upgrade -i "${release_name}" -n "${namespace}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "$DIR/resources/postgres-db/values-showcase-postgres.yaml" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
-}
-
-check_and_test() {
-  local release_name=$1
-  local namespace=$2
-  if check_backstage_running "${release_name}" "${namespace}"; then
-    echo "Display pods for verification..."
-    oc get pods -n "${namespace}"
-    run_tests "${release_name}" "${namespace}"
-  else
-    echo "Backstage is not running. Exiting..."
-    OVERALL_RESULT=1
-  fi
-  save_all_pod_logs $namespace
-}
-
-# Function to remove finalizers from specific resources in a namespace that are blocking deletion.
-remove_finalizers_from_resources() {
-  local project=$1
-  echo "Removing finalizers from resources in namespace ${project} that are blocking deletion."
-
-  # Remove finalizers from stuck PipelineRuns and TaskRuns
-  for resource_type in "pipelineruns.tekton.dev" "taskruns.tekton.dev"; do
-    for resource in $(oc get "$resource_type" -n "$project" -o name); do
-      oc patch "$resource" -n "$project" --type='merge' -p '{"metadata":{"finalizers":[]}}' || true
-      echo "Removed finalizers from $resource in $project."
-    done
-  done
-
-  # Check and remove specific finalizers stuck on 'chains.tekton.dev' resources
-  for chain_resource in $(oc get pipelineruns.tekton.dev,taskruns.tekton.dev -n "$project" -o name); do
-    oc patch "$chain_resource" -n "$project" --type='json' -p='[{"op": "remove", "path": "/metadata/finalizers"}]' || true
-    echo "Removed Tekton finalizers from $chain_resource in $project."
-  done
-}
-
-# Function to forcibly delete a namespace stuck in 'Terminating' status
-force_delete_namespace() {
-  local project=$1
-  echo "Forcefully deleting namespace ${project}."
-  oc get namespace "$project" -o json | jq '.spec = {"finalizers":[]}' | oc replace --raw "/api/v1/namespaces/$project/finalize" -f -
-}
+source "${DIR}/env_variables.sh"
+echo "Loaded env_variables.sh"
+source "${DIR}/utils.sh"
+echo "Loaded utils.sh"
+source "${DIR}/jobs/aks.sh"
+echo "Loaded aks.sh"
+source "${DIR}/jobs/gke.sh"
+echo "Loaded gke.sh"
+source "${DIR}/jobs/main.sh"
+echo "Loaded main.sh"
+source "${DIR}/jobs/ocp-v4-15.sh"
+echo "Loaded ocp-v4-15.sh"
+source "${DIR}/jobs/ocp-v4-16.sh"
+echo "Loaded ocp-v4-16.sh"
+source "${DIR}/jobs/operator.sh"
+echo "Loaded operator.sh"
+source "${DIR}/jobs/periodic.sh"
+echo "Loaded periodic.sh"
 
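+# main() dispatches to the job handler matching the CI job name.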
"$JOB_NAME" == *auth-providers* ]]; then - run_tests "${AUTH_PROVIDERS_RELEASE}" "${AUTH_PROVIDERS_NAMESPACE}" - else - initiate_deployments - check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" - check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}" - # Only test TLS config with RDS and Change configuration at runtime in nightly jobs - if [[ "$JOB_NAME" == *periodic* ]]; then - initiate_rds_deployment "${RELEASE_NAME}" "${NAME_SPACE_RDS}" - check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RDS}" - - # Deploy `showcase-runtime` to run tests that require configuration changes at runtime - configure_namespace "${NAME_SPACE_RUNTIME}" - uninstall_helmchart "${NAME_SPACE_RUNTIME}" "${RELEASE_NAME}" - oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE_RUNTIME}" - apply_yaml_files "${DIR}" "${NAME_SPACE_RUNTIME}" - helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_RUNTIME}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}" - check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RUNTIME}" - fi - fi + echo "JOB_NAME : $JOB_NAME" + + case "$JOB_NAME" in + *aks*) + echo "Calling handle_aks" + handle_aks + ;; + *gke*) + echo "Calling handle_gke" + handle_gke + ;; + *periodic*) + echo "Calling handle_periodic" + handle_nightly + ;; + *pull-*-main-e2e-tests*) + echo "Calling handle_main" + handle_main + ;; + *ocp-v4-16*) + echo "Calling handle_ocp_v4_16" + handle_ocp_v4_16 + ;; + *ocp-v4-15*) + echo "Calling handle_ocp_v4_15" + handle_ocp_v4_15 + ;; + *operator*) + echo "Calling Operator" + handle_operator + ;; + esac + +echo "K8S_CLUSTER_ROUTER_BASE : $K8S_CLUSTER_ROUTER_BASE" +echo "Main script completed with result: ${OVERALL_RESULT}" +exit "${OVERALL_RESULT}" - exit "${OVERALL_RESULT}" } main diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index 17a235bc55..c8fe52b958 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -1,5 +1,7 @@ #!/bin/sh +set -x + retrieve_pod_logs() { local pod_name=$1; local container=$2; local namespace=$3 echo " Retrieving logs for container: $container" @@ -24,7 +26,7 @@ save_all_pod_logs(){ for init_container in $init_containers; do retrieve_pod_logs $pod_name $init_container $namespace done - + containers=$(kubectl get pod $pod_name -n $namespace -o jsonpath='{.spec.containers[*].name}') for container in $containers; do retrieve_pod_logs $pod_name $container $namespace @@ -48,7 +50,7 @@ droute_send() { local project=$2 local droute_project="droute" METEDATA_OUTPUT="data_router_metadata_output.json" - + oc login --token="${RHDH_PR_OS_CLUSTER_TOKEN}" --server="${RHDH_PR_OS_CLUSTER_URL}" oc whoami --show-server local droute_pod_name=$(oc get pods -n droute --no-headers -o custom-columns=":metadata.name" | grep ubi9-cert-rsync) @@ -272,16 +274,424 @@ install_crunchy_postgres_operator(){ install_subscription crunchy-postgres-operator openshift-operators crunchy-postgres-operator v5 certified-operators } -# Installs the Red Hat OpenShift Pipelines operator if not already installed +add_helm_repos() { + helm version + + local repos=( + "bitnami=https://charts.bitnami.com/bitnami" + "backstage=https://backstage.github.io/charts" + "${HELM_REPO_NAME}=${HELM_REPO_URL}" + ) + + for repo in "${repos[@]}"; do + local key="${repo%%=*}" + local value="${repo##*=}" + + if ! 
+add_helm_repos() {
+  helm version
+
+  local repos=(
+    "bitnami=https://charts.bitnami.com/bitnami"
+    "backstage=https://backstage.github.io/charts"
+    "${HELM_REPO_NAME}=${HELM_REPO_URL}"
+  )
+
+  for repo in "${repos[@]}"; do
+    local key="${repo%%=*}"
+    local value="${repo##*=}"
+
+    if ! helm repo list | grep -q "^$key"; then
+      helm repo add "$key" "$value"
+    else
+      echo "Repository $key already exists - updating repository instead."
+    fi
+  done
+
+  helm repo update
+}
+
+uninstall_helmchart() {
+  local project=$1
+  local release=$2
+  if helm list -n "${project}" | grep -q "${release}"; then
+    echo "Chart already exists. Removing it before install."
+    helm uninstall "${release}" -n "${project}"
+  fi
+}
+
+configure_namespace() {
+  local project=$1
+  echo "Deleting and recreating namespace: $project"
+  delete_namespace $project
+
+  if ! oc create namespace "${project}"; then
+    echo "Error: Failed to create namespace ${project}" >&2
+    exit 1
+  fi
+  if ! oc config set-context --current --namespace="${project}"; then
+    echo "Error: Failed to set context for namespace ${project}" >&2
+    exit 1
+  fi
+
+  echo "Namespace ${project} is ready."
+}
+
+delete_namespace() {
+  local project=$1
+  if oc get namespace "$project" >/dev/null 2>&1; then
+    echo "Namespace ${project} exists. Attempting to delete..."
+
+    # Remove blocking finalizers
+    remove_finalizers_from_resources "$project"
+
+    # Attempt to delete the namespace
+    oc delete namespace "$project" --grace-period=0 --force || true
+
+    # Check if namespace is still stuck in 'Terminating' and force removal if necessary
+    if oc get namespace "$project" -o jsonpath='{.status.phase}' | grep -q 'Terminating'; then
+      echo "Namespace ${project} is stuck in Terminating. Forcing deletion..."
+      force_delete_namespace "$project"
+    fi
+  fi
+}
+
+configure_external_postgres_db() {
+  local project=$1
+  oc apply -f "${DIR}/resources/postgres-db/postgres.yaml" --namespace="${NAME_SPACE_POSTGRES_DB}"
+  sleep 5
+
+  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.ca\.crt}' | base64 --decode > postgres-ca
+  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.tls\.crt}' | base64 --decode > postgres-tls-crt
+  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.tls\.key}' | base64 --decode > postgres-tls-key
+
+  oc create secret generic postgress-external-db-cluster-cert \
+    --from-file=ca.crt=postgres-ca \
+    --from-file=tls.crt=postgres-tls-crt \
+    --from-file=tls.key=postgres-tls-key \
+    --dry-run=client -o yaml | oc apply -f - --namespace="${project}"
+
+  POSTGRES_PASSWORD=$(oc get secret/postgress-external-db-pguser-janus-idp -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath={.data.password})
+  sed -i "s|POSTGRES_PASSWORD:.*|POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
+  POSTGRES_HOST=$(echo -n "postgress-external-db-primary.$NAME_SPACE_POSTGRES_DB.svc.cluster.local" | base64 | tr -d '\n')
+  sed -i "s|POSTGRES_HOST:.*|POSTGRES_HOST: ${POSTGRES_HOST}|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
+  oc apply -f "${DIR}/resources/postgres-db/postgres-cred.yaml" --namespace="${project}"
+}
+
+set_github_app_3_credentials() {
+  GITHUB_APP_APP_ID=$GITHUB_APP_3_APP_ID
+  GITHUB_APP_CLIENT_ID=$GITHUB_APP_3_CLIENT_ID
+  GITHUB_APP_PRIVATE_KEY=$GITHUB_APP_3_PRIVATE_KEY
+  GITHUB_APP_CLIENT_SECRET=$GITHUB_APP_3_CLIENT_SECRET
+
+  export GITHUB_APP_APP_ID
+  export GITHUB_APP_CLIENT_ID
+  export GITHUB_APP_PRIVATE_KEY
+  export GITHUB_APP_CLIENT_SECRET
+  echo "GitHub App 3 credentials set for current job."
+}
+
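+# GitHub App 4 is used for all PRs against the main branch (see handle_main).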
+set_github_app_4_credentials() {
+  GITHUB_APP_APP_ID=$(cat /tmp/secrets/GITHUB_APP_4_APP_ID)
+  GITHUB_APP_CLIENT_ID=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_ID)
+  GITHUB_APP_PRIVATE_KEY=$(cat /tmp/secrets/GITHUB_APP_4_PRIVATE_KEY)
+  GITHUB_APP_CLIENT_SECRET=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_SECRET)
+
+  export GITHUB_APP_APP_ID
+  export GITHUB_APP_CLIENT_ID
+  export GITHUB_APP_PRIVATE_KEY
+  export GITHUB_APP_CLIENT_SECRET
+  echo "GitHub App 4 credentials set for current job."
+}
+
+apply_yaml_files() {
+  local dir=$1
+  local project=$2
+  echo "Applying YAML files to namespace ${project}"
+
+  oc config set-context --current --namespace="${project}"
+
+  local files=(
+    "$dir/resources/service_account/service-account-rhdh.yaml"
+    "$dir/resources/cluster_role_binding/cluster-role-binding-k8s.yaml"
+    "$dir/resources/cluster_role/cluster-role-k8s.yaml"
+    "$dir/resources/cluster_role/cluster-role-ocm.yaml"
+    "$dir/auth/secrets-rhdh-secrets.yaml"
+  )
+
+  for file in "${files[@]}"; do
+    sed -i "s/namespace:.*/namespace: ${project}/g" "$file"
+  done
+
+  DH_TARGET_URL=$(echo -n "test-backstage-customization-provider-${project}.${K8S_CLUSTER_ROUTER_BASE}" | base64 -w 0)
+
+  for key in GITHUB_APP_APP_ID GITHUB_APP_CLIENT_ID GITHUB_APP_PRIVATE_KEY GITHUB_APP_CLIENT_SECRET GITHUB_APP_JANUS_TEST_APP_ID GITHUB_APP_JANUS_TEST_CLIENT_ID GITHUB_APP_JANUS_TEST_CLIENT_SECRET GITHUB_APP_JANUS_TEST_PRIVATE_KEY GITHUB_APP_WEBHOOK_URL GITHUB_APP_WEBHOOK_SECRET KEYCLOAK_CLIENT_SECRET ACR_SECRET GOOGLE_CLIENT_ID GOOGLE_CLIENT_SECRET K8S_CLUSTER_TOKEN_ENCODED OCM_CLUSTER_URL GITLAB_TOKEN DH_TARGET_URL; do
+    sed -i "s|${key}:.*|${key}: ${!key}|g" "$dir/auth/secrets-rhdh-secrets.yaml"
+  done
+
+  oc apply -f "$dir/resources/service_account/service-account-rhdh.yaml" --namespace="${project}"
+  oc apply -f "$dir/auth/service-account-rhdh-secret.yaml" --namespace="${project}"
+  oc apply -f "$dir/auth/secrets-rhdh-secrets.yaml" --namespace="${project}"
+
+  oc apply -f "$dir/resources/cluster_role/cluster-role-k8s.yaml" --namespace="${project}"
+  oc apply -f "$dir/resources/cluster_role_binding/cluster-role-binding-k8s.yaml" --namespace="${project}"
+  oc apply -f "$dir/resources/cluster_role/cluster-role-ocm.yaml" --namespace="${project}"
+  oc apply -f "$dir/resources/cluster_role_binding/cluster-role-binding-ocm.yaml" --namespace="${project}"
+
+  escaped_url=$(printf '%s\n' "${ENCODED_API_SERVER_URL}" | sed 's/[\/&]/\\&/g')
+  sed -i "s/K8S_CLUSTER_API_SERVER_URL:.*/K8S_CLUSTER_API_SERVER_URL: ${escaped_url}/g" "$dir/auth/secrets-rhdh-secrets.yaml" \
+    && echo "Updated K8S_CLUSTER_API_SERVER_URL in secrets file." \
+    || echo "Failed to update K8S_CLUSTER_API_SERVER_URL." >&2
+
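+  # Note: with set -x enabled, the token below is visible in trace output (the old script masked it with set +x).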
>&2 + + sed -i "s/K8S_CLUSTER_NAME:.*/K8S_CLUSTER_NAME: ${ENCODED_CLUSTER_NAME}/g" "$dir/auth/secrets-rhdh-secrets.yaml" + + token=$(oc get secret "${secret_name}" -n "${project}" -o=jsonpath='{.data.token}') + sed -i "s/OCM_CLUSTER_TOKEN: .*/OCM_CLUSTER_TOKEN: ${token}/" "$dir/auth/secrets-rhdh-secrets.yaml" + + # Select the configuration file based on the namespace or job + config_file=$(select_config_map_file) + # Apply the ConfigMap with the correct file + if [[ "${project}" == *showcase-k8s* ]]; then + create_app_config_map_k8s "$config_file" "$project" + else + create_app_config_map "$config_file" "$project" + fi + oc create configmap rbac-policy \ + --from-file="rbac-policy.csv"="$dir/resources/config_map/rbac-policy.csv" \ + --namespace="$project" \ + --dry-run=client -o yaml | oc apply -f - + + oc apply -f "$dir/auth/secrets-rhdh-secrets.yaml" --namespace="${project}" + +} + +deploy_test_backstage_provider() { + local project=$1 + echo "Deploying test-backstage-customization-provider in namespace ${project}" + + # Check if the buildconfig already exists + if ! oc get buildconfig test-backstage-customization-provider -n "${project}" >/dev/null 2>&1; then + echo "Creating new app for test-backstage-customization-provider" + oc new-app https://github.com/janus-qe/test-backstage-customization-provider --namespace="${project}" + else + echo "BuildConfig for test-backstage-customization-provider already exists in ${project}. Skipping new-app creation." + fi + + # Ensure the service exists + if ! oc get service test-backstage-customization-provider -n "${project}" >/dev/null 2>&1; then + echo "Exposing service for test-backstage-customization-provider" + oc expose svc/test-backstage-customization-provider --namespace="${project}" + else + echo "Service test-backstage-customization-provider is already exposed in ${project}." + fi +} + +create_app_config_map() { + local config_file=$1 + local project=$2 + + oc create configmap app-config-rhdh \ + --from-file="app-config-rhdh.yaml"="$config_file" \ + --namespace="$project" \ + --dry-run=client -o yaml | oc apply -f - +} + +select_config_map_file() { + if [[ "${project}" == *rbac* ]]; then + echo "$dir/resources/config_map/app-config-rhdh-rbac.yaml" + else + echo "$dir/resources/config_map/app-config-rhdh.yaml" + fi +} + +create_app_config_map_k8s() { + local config_file=$1 + local project=$2 + + echo "Creating app-config ConfigMap for AKS/GKE in namespace ${project}" + + yq 'del(.backend.cache)' "$config_file" \ + | oc create configmap app-config-rhdh \ + --from-file="app-config-rhdh.yaml"="/dev/stdin" \ + --namespace="${project}" \ + --dry-run=client -o yaml \ + | oc apply -f - +} + +run_tests() { + local release_name=$1 + local project=$2 + project=${project%-pr-*} # Remove -pr- suffix if any set for main branchs pr's. 
+  cd "${DIR}/../../e2e-tests"
+  yarn install
+  yarn playwright install chromium
+
+  Xvfb :99 &
+  export DISPLAY=:99
+
+  (
+    set -e
+    echo "Using PR container image: ${TAG_NAME}"
+    yarn "$project"
+  ) 2>&1 | tee "/tmp/${LOGFILE}"
+
+  local RESULT=${PIPESTATUS[0]}
+
+  pkill Xvfb
+
+  mkdir -p "${ARTIFACT_DIR}/${project}/test-results"
+  mkdir -p "${ARTIFACT_DIR}/${project}/attachments/screenshots"
+  cp -a /tmp/backstage-showcase/e2e-tests/test-results/* "${ARTIFACT_DIR}/${project}/test-results"
+  cp -a /tmp/backstage-showcase/e2e-tests/${JUNIT_RESULTS} "${ARTIFACT_DIR}/${project}/${JUNIT_RESULTS}"
+
+  if [ -d "/tmp/backstage-showcase/e2e-tests/screenshots" ]; then
+    cp -a /tmp/backstage-showcase/e2e-tests/screenshots/* "${ARTIFACT_DIR}/${project}/attachments/screenshots/"
+  fi
+
+  if [ -d "/tmp/backstage-showcase/e2e-tests/auth-providers-logs" ]; then
+    cp -a /tmp/backstage-showcase/e2e-tests/auth-providers-logs/* "${ARTIFACT_DIR}/${project}/"
+  fi
+
+  ansi2html <"/tmp/${LOGFILE}" >"/tmp/${LOGFILE}.html"
+  cp -a "/tmp/${LOGFILE}.html" "${ARTIFACT_DIR}/${project}"
+  cp -a /tmp/backstage-showcase/e2e-tests/playwright-report/* "${ARTIFACT_DIR}/${project}"
+
+  droute_send "${release_name}" "${project}"
+
+  echo "${project} RESULT: ${RESULT}"
+  if [ "${RESULT}" -ne 0 ]; then
+    OVERALL_RESULT=1
+  fi
+}
+
+check_backstage_running() {
+  local release_name=$1
+  local namespace=$2
+  local url=$3
+
+  local max_attempts=30
+  local wait_seconds=30
+
+  echo "Checking if Backstage is up and running at ${url}"
+
+  for ((i = 1; i <= max_attempts; i++)); do
+    local http_status
+    http_status=$(curl --insecure -I -s -o /dev/null -w "%{http_code}" "${url}")
+
+    if [ "${http_status}" -eq 200 ]; then
+      echo "Backstage is up and running!"
+      export BASE_URL="${url}"
+      echo "######## BASE URL ########"
+      echo "${BASE_URL}"
+      return 0
+    else
+      echo "Attempt ${i} of ${max_attempts}: Backstage not yet available (HTTP Status: ${http_status})"
+      sleep "${wait_seconds}"
+    fi
+  done
+
+  echo "Failed to reach Backstage at ${url} after ${max_attempts} attempts." | tee -a "/tmp/${LOGFILE}"
+  cp -a "/tmp/${LOGFILE}" "${ARTIFACT_DIR}/${namespace}/"
+  return 1
+}
+
+install_tekton_pipelines() {
+  local dir=$1
+
+  if oc get pods -n "tekton-pipelines" | grep -q "tekton-pipelines"; then
+    echo "Tekton Pipelines are already installed."
+  else
+    echo "Tekton Pipelines are not installed. Installing..."
+    oc apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
+  fi
+}
+
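+# Installs the Red Hat OpenShift Pipelines operator if not already installed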
 install_pipelines_operator() {
+  local dir=$1
   DISPLAY_NAME="Red Hat OpenShift Pipelines"
-  # Check if operator is already installed
+
   if oc get csv -n "openshift-operators" | grep -q "${DISPLAY_NAME}"; then
     echo "Red Hat OpenShift Pipelines operator is already installed."
   else
     echo "Red Hat OpenShift Pipelines operator is not installed. Installing..."
-    # Install the operator and wait for deployment
-    install_subscription openshift-pipelines-operator openshift-operators openshift-pipelines-operator-rh latest redhat-operators
-    wait_for_deployment "openshift-operators" "pipelines"
+    oc apply -f "${dir}/resources/pipeline-run/pipelines-operator.yaml"
   fi
 }
+
+initiate_deployments() {
+
+  install_crunchy_postgres_operator
+  add_helm_repos
+
+  uninstall_helmchart "${NAME_SPACE}" "${RELEASE_NAME}"
+
+  # Deploy redis cache db.
+  oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE}"
+
+  cd "${DIR}"
+  apply_yaml_files "${DIR}" "${NAME_SPACE}"
+  echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE}"
+  helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
+
+  configure_namespace "${NAME_SPACE_POSTGRES_DB}"
+  configure_namespace "${NAME_SPACE_RBAC}"
+  configure_external_postgres_db "${NAME_SPACE_RBAC}"
+
+  uninstall_helmchart "${NAME_SPACE_RBAC}" "${RELEASE_NAME_RBAC}"
+  apply_yaml_files "${DIR}" "${NAME_SPACE_RBAC}"
+  echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE_RBAC}"
+  helm upgrade -i "${RELEASE_NAME_RBAC}" -n "${NAME_SPACE_RBAC}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_RBAC_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
+}
+
+initiate_rds_deployment() {
+  local release_name=$1
+  local namespace=$2
+  configure_namespace "${namespace}"
+  uninstall_helmchart "${namespace}" "${release_name}"
+  sed -i "s|POSTGRES_USER:.*|POSTGRES_USER: $RDS_USER|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
+  sed -i "s|POSTGRES_PASSWORD:.*|POSTGRES_PASSWORD: $(echo -n $RDS_PASSWORD | base64 -w 0)|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
+  sed -i "s|POSTGRES_HOST:.*|POSTGRES_HOST: $(echo -n $RDS_1_HOST | base64 -w 0)|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
+  oc apply -f "$DIR/resources/postgres-db/postgres-crt-rds.yaml" -n "${namespace}"
+  oc apply -f "$DIR/resources/postgres-db/postgres-cred.yaml" -n "${namespace}"
+  oc apply -f "$DIR/resources/postgres-db/dynamic-plugins-root-PVC.yaml" -n "${namespace}"
+  helm upgrade -i "${release_name}" -n "${namespace}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "$DIR/resources/postgres-db/values-showcase-postgres.yaml" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
+}
+
+check_and_test() {
+  local release_name=$1
+  local namespace=$2
+  local url=$3
+  if check_backstage_running "${release_name}" "${namespace}" "${url}"; then
+    echo "Display pods for verification..."
+    oc get pods -n "${namespace}"
+    run_tests "${release_name}" "${namespace}"
+  else
+    echo "Backstage is not running. Exiting..."
+    OVERALL_RESULT=1
+  fi
+  save_all_pod_logs $namespace
+}
+
+# Function to remove finalizers from specific resources in a namespace that are blocking deletion.
+remove_finalizers_from_resources() {
+  local project=$1
+  echo "Removing finalizers from resources in namespace ${project} that are blocking deletion."
+
+  # Remove finalizers from stuck PipelineRuns and TaskRuns
+  for resource_type in "pipelineruns.tekton.dev" "taskruns.tekton.dev"; do
+    for resource in $(oc get "$resource_type" -n "$project" -o name); do
+      oc patch "$resource" -n "$project" --type='merge' -p '{"metadata":{"finalizers":[]}}' || true
+      echo "Removed finalizers from $resource in $project."
+    done
+  done
+
+  # Check and remove specific finalizers stuck on 'chains.tekton.dev' resources
+  for chain_resource in $(oc get pipelineruns.tekton.dev,taskruns.tekton.dev -n "$project" -o name); do
+    oc patch "$chain_resource" -n "$project" --type='json' -p='[{"op": "remove", "path": "/metadata/finalizers"}]' || true
+    echo "Removed Tekton finalizers from $chain_resource in $project."
+  done
+}
+
+# Function to forcibly delete a namespace stuck in 'Terminating' status
+force_delete_namespace() {
+  local project=$1
+  echo "Forcefully deleting namespace ${project}."
+  oc get namespace "$project" -o json | jq '.spec = {"finalizers":[]}' | oc replace --raw "/api/v1/namespaces/$project/finalize" -f -
+}
+
+oc_login() {
+  # Fall back to the PR test cluster only when a job handler has not already set these values.
+  export K8S_CLUSTER_URL=${K8S_CLUSTER_URL:-$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_URL)}
+  export K8S_CLUSTER_TOKEN=${K8S_CLUSTER_TOKEN:-$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_TOKEN)}
+
+  oc login --token="${K8S_CLUSTER_TOKEN}" --server="${K8S_CLUSTER_URL}"
+  echo "OCP version: $(oc version)"
+  export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//')
+}
+
+
diff --git a/e2e-tests/playwright.config.ts b/e2e-tests/playwright.config.ts
index 6c880fa068..d60763886f 100644
--- a/e2e-tests/playwright.config.ts
+++ b/e2e-tests/playwright.config.ts
@@ -53,6 +53,7 @@ export default defineConfig({
       "**/playwright/e2e/plugins/bulk-import.spec.ts",
       "**/playwright/e2e/verify-tls-config-health-check.spec.ts",
       "**/playwright/e2e/configuration-test/config-map.spec.ts",
+      "**/playwright/e2e/plugins/tekton/tekton.spec.ts",
     ],
   },
   {
diff --git a/e2e-tests/playwright/e2e/plugins/topology/topology.spec.ts b/e2e-tests/playwright/e2e/plugins/topology/topology.spec.ts
index 845ef4b877..b29ffd22ff 100644
--- a/e2e-tests/playwright/e2e/plugins/topology/topology.spec.ts
+++ b/e2e-tests/playwright/e2e/plugins/topology/topology.spec.ts
@@ -3,7 +3,8 @@ import { Common } from "../../../utils/common";
 import { UIhelper } from "../../../utils/ui-helper";
 import { Catalog } from "../../../support/pages/catalog";
 
-test.describe("Test Topology Plugin", () => {
+// Test disabled due to comments in JIRA ticket RHIDP-3437
+test.describe.skip("Test Topology Plugin", () => {
   let common: Common;
   let uiHelper: UIhelper;
   let catalog: Catalog;