diff --git a/.ibm/pipelines/auth/secrets-rhdh-secrets.yaml b/.ibm/pipelines/auth/secrets-rhdh-secrets.yaml index 51d9e76640..e434164ebe 100644 --- a/.ibm/pipelines/auth/secrets-rhdh-secrets.yaml +++ b/.ibm/pipelines/auth/secrets-rhdh-secrets.yaml @@ -32,4 +32,10 @@ data: DH_TARGET_URL: dGVzdC1iYWNrc3RhZ2UtY3VzdG9taXphdGlvbi1wcm92aWRlci1zaG93Y2FzZS1jaS5yaGRoLXByLW9zLWE5ODA1NjUwODMwYjIyYzNhZWUyNDNlNTFkNzk1NjVkLTAwMDAudXMtZWFzdC5jb250YWluZXJzLmFwcGRvbWFpbi5jbG91ZA== GOOGLE_CLIENT_ID: dGVtcA== GOOGLE_CLIENT_SECRET: dGVtcA== + RHDH_BASE_URL: dGVtcA== + KEYCLOAK_AUTH_BASE_URL: dGVtcA== + KEYCLOAK_AUTH_CLIENTID: dGVtcA== + KEYCLOAK_AUTH_CLIENT_SECRET: dGVtcA== + KEYCLOAK_AUTH_LOGIN_REALM: dGVtcA== + KEYCLOAK_AUTH_REALM: dGVtcA== type: Opaque diff --git a/.ibm/pipelines/cluster/operators/acm/multiclusterhub.yaml b/.ibm/pipelines/cluster/operators/acm/multiclusterhub.yaml new file mode 100644 index 0000000000..fb1c6f0cf1 --- /dev/null +++ b/.ibm/pipelines/cluster/operators/acm/multiclusterhub.yaml @@ -0,0 +1,6 @@ +apiVersion: operator.open-cluster-management.io/v1 +kind: MultiClusterHub +metadata: + name: multiclusterhub + namespace: open-cluster-management +spec: {} diff --git a/.ibm/pipelines/cluster/operators/acm/operator-group.yaml b/.ibm/pipelines/cluster/operators/acm/operator-group.yaml new file mode 100644 index 0000000000..751af1094d --- /dev/null +++ b/.ibm/pipelines/cluster/operators/acm/operator-group.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: open-cluster-management +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: open-cluster-management + namespace: open-cluster-management +spec: + targetNamespaces: + - open-cluster-management diff --git a/.ibm/pipelines/cluster/operators/acm/subscription-acm.yaml b/.ibm/pipelines/cluster/operators/acm/subscription-acm.yaml new file mode 100644 index 0000000000..d2322a6b52 --- /dev/null +++ b/.ibm/pipelines/cluster/operators/acm/subscription-acm.yaml @@ -0,0 +1,11 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: advanced-cluster-management + namespace: open-cluster-management +spec: + channel: release-2.12 + installPlanApproval: Automatic + name: advanced-cluster-management + source: redhat-operators + sourceNamespace: openshift-marketplace diff --git a/.ibm/pipelines/env_variables.sh b/.ibm/pipelines/env_variables.sh index 924321e036..6815aa1a16 100755 --- a/.ibm/pipelines/env_variables.sh +++ b/.ibm/pipelines/env_variables.sh @@ -57,6 +57,7 @@ GITLAB_TOKEN=$(cat /tmp/secrets/GITLAB_TOKEN) RHDH_PR_OS_CLUSTER_URL=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_URL) RHDH_PR_OS_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_TOKEN) +ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64) K8S_CLUSTER_API_SERVER_URL=$(printf "%s" "$K8S_CLUSTER_URL" | base64 | tr -d '\n') K8S_SERVICE_ACCOUNT_TOKEN=$K8S_CLUSTER_TOKEN_ENCODED OCM_CLUSTER_URL=$(printf "%s" "$K8S_CLUSTER_URL" | base64 | tr -d '\n') @@ -136,4 +137,10 @@ AUTH_PROVIDERS_NAMESPACE="showcase-auth-providers" STATIC_API_TOKEN="somecicdtoken" AUTH_PROVIDERS_CHART="rhdh-chart/backstage" +KEYCLOAK_AUTH_BASE_URL=$(cat /tmp/secrets/KEYCLOAK_AUTH_BASE_URL) +KEYCLOAK_AUTH_CLIENTID=$(cat /tmp/secrets/KEYCLOAK_AUTH_CLIENTID) +KEYCLOAK_AUTH_CLIENT_SECRET=$(cat /tmp/secrets/KEYCLOAK_AUTH_CLIENT_SECRET) +KEYCLOAK_AUTH_LOGIN_REALM=$(cat /tmp/secrets/KEYCLOAK_AUTH_LOGIN_REALM) +KEYCLOAK_AUTH_REALM=$(cat /tmp/secrets/KEYCLOAK_AUTH_REALM) + set +a # Stop automatically exporting variables diff --git a/.ibm/pipelines/jobs/main.sh 
b/.ibm/pipelines/jobs/main.sh index 027252e91d..0c21c52f73 100644 --- a/.ibm/pipelines/jobs/main.sh +++ b/.ibm/pipelines/jobs/main.sh @@ -2,39 +2,14 @@ set -x -set_namespace() { - # Enable parallel PR testing for main branch by utilizing a pool of namespaces - local namespaces_pool=("pr-1" "pr-2" "pr-3") - local namespace_found=false - # Iterate through namespace pool to find an available set - for ns in "${namespaces_pool[@]}"; do - if ! oc get namespace "showcase-$ns" >/dev/null 2>&1; then - echo "Namespace "showcase-$ns" does not exist, Using NS: showcase-$ns, showcase-rbac-$ns, postgress-external-db-$ns" - export NAME_SPACE="showcase-$ns" - export NAME_SPACE_RBAC="showcase-rbac-$ns" - export NAME_SPACE_POSTGRES_DB="postgress-external-db-$ns" - namespace_found=true - break - fi - done - if ! $namespace_found; then - echo "Error: All namespaces $namespaces_pool already in Use" - exit 1 - fi -} - handle_main() { echo "Configuring namespace: ${NAME_SPACE}" - set_github_app_4_credentials - set_namespace oc_login - - API_SERVER_URL=$(oc whoami --show-server) - ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64) - ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64) + echo "OCP version: $(oc version)" export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') local url="https://${RELEASE_NAME}-backstage-${NAME_SPACE}.${K8S_CLUSTER_ROUTER_BASE}" + cluster_setup initiate_deployments deploy_test_backstage_provider "${NAME_SPACE}" check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" "${url}" diff --git a/.ibm/pipelines/jobs/ocp-v4-15.sh b/.ibm/pipelines/jobs/ocp-v4-15.sh deleted file mode 100644 index 7bc8d941c1..0000000000 --- a/.ibm/pipelines/jobs/ocp-v4-15.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -handle_ocp_4_15() { - K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_OS_2_CLUSTER_URL) - K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_OS_2_CLUSTER_TOKEN) - - oc_login - - API_SERVER_URL=$(oc whoami --show-server) - ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64) - ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64) - - export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') - apply_yaml_files "${DIR}" "${NAME_SPACE}" - deploy_test_backstage_provider "${NAME_SPACE}" - local url="https://${release_name}-backstage-${namespace}.${K8S_CLUSTER_ROUTER_BASE}" - - initiate_deployments - check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" "${url}" - check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}" "${url}" -} diff --git a/.ibm/pipelines/jobs/ocp-v4-16.sh b/.ibm/pipelines/jobs/ocp-v4-16.sh deleted file mode 100644 index c8c8f8ed4f..0000000000 --- a/.ibm/pipelines/jobs/ocp-v4-16.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -handle_ocp_4_16() { - K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_OS_1_CLUSTER_URL) - K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_OS_1_CLUSTER_TOKEN) - - oc_login - - API_SERVER_URL=$(oc whoami --show-server) - ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64) - ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64) - - export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') - apply_yaml_files "${DIR}" "${NAME_SPACE}" - deploy_test_backstage_provider "${NAME_SPACE}" - local url="https://${release_name}-backstage-${namespace}.${K8S_CLUSTER_ROUTER_BASE}" - - initiate_deployments - check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" "${url}" - check_and_test "${RELEASE_NAME_RBAC}" 
"${NAME_SPACE_RBAC}" "${url}" -} diff --git a/.ibm/pipelines/jobs/operator.sh b/.ibm/pipelines/jobs/operator.sh index 58b70c0442..ed0095a32d 100644 --- a/.ibm/pipelines/jobs/operator.sh +++ b/.ibm/pipelines/jobs/operator.sh @@ -3,10 +3,6 @@ handle_operator() { oc_login - API_SERVER_URL=$(oc whoami --show-server) - ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64) - ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64) - apply_yaml_files "${DIR}" "${NAME_SPACE}" deploy_test_backstage_provider "${NAME_SPACE}" } diff --git a/.ibm/pipelines/jobs/periodic.sh b/.ibm/pipelines/jobs/periodic.sh index 921eb7fb20..30ddc9dcfc 100644 --- a/.ibm/pipelines/jobs/periodic.sh +++ b/.ibm/pipelines/jobs/periodic.sh @@ -9,19 +9,12 @@ handle_nightly() { oc_login - API_SERVER_URL=$(oc whoami --show-server) - ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64) - ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64) - export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') configure_namespace "${NAME_SPACE}" deploy_test_backstage_provider "${NAME_SPACE}" local url="https://${RELEASE_NAME}-backstage-${NAME_SPACE}.${K8S_CLUSTER_ROUTER_BASE}" - install_pipelines_operator - sleep 20 # wait for Pipeline Operator/Tekton pipelines to be ready - oc apply -f "$dir/resources/pipeline-run/hello-world-pipeline.yaml" - oc apply -f "$dir/resources/pipeline-run/hello-world-pipeline-run.yaml" + cluster_setup initiate_deployments check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" "${url}" check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}" "${url}" @@ -34,7 +27,7 @@ handle_nightly() { configure_namespace "${NAME_SPACE_RUNTIME}" uninstall_helmchart "${NAME_SPACE_RUNTIME}" "${RELEASE_NAME}" oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE_RUNTIME}" - apply_yaml_files "${DIR}" "${NAME_SPACE_RUNTIME}" + apply_yaml_files "${DIR}" "${NAME_SPACE_RUNTIME}" "${RELEASE_NAME}" helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_RUNTIME}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}" check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RUNTIME}" "${url}" } diff --git a/.ibm/pipelines/ocp-cluster-claim-login.sh b/.ibm/pipelines/ocp-cluster-claim-login.sh new file mode 100755 index 0000000000..e42ae1af03 --- /dev/null +++ b/.ibm/pipelines/ocp-cluster-claim-login.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +# Prompt the user for the prow log url +read -p "Enter the prow log url: " input_url + +id=$(echo "$input_url" | awk -F'/' '{print $NF}') +job=$(echo "$input_url" | awk -F'/' '{print $(NF-1)}') + +build_log_url="https://prow.ci.openshift.org/log?container=test&id=${id}&job=${job}" +namespace=$(curl -s $build_log_url | grep "The claimed cluster" | sed -E 's/.*The claimed cluster ([^.]+)\ is ready after.*/\1/') + +# Output the constructed URL +echo "Prow build log URL: $build_log_url" +echo "hosted-mgmt Namespace: $namespace" + +if [[ -z "$namespace" ]]; then + echo "Cluster claim not found. Please provide a valid prow url that uses cluster claim." + exit 1 +elif [[ ! "$namespace" =~ ^rhdh-4-17-us-east-2 ]]; then + echo "Namespace must start with 'rhdh-4-17-us-east-2'." + exit 1 +fi + +# Log in to the cluster +oc login --web https://api.hosted-mgmt.ci.devcluster.openshift.com:6443 + +if ! 
oc get namespace "$namespace" >/dev/null 2>&1; then
+  echo "Namespace ${namespace} has expired or been deleted, exiting..."
+  exit 1
+fi
+
+# Try to retrieve secrets from the namespace
+namespace_secrets=$(oc get secrets -n "$namespace" 2>&1)
+if echo "$namespace_secrets" | grep -q "Forbidden"; then
+  echo "Error: You do not have access to the namespace '$namespace'."
+  echo "Check if you are a member of the 'rhdh-pool-admins' group at: https://rover.redhat.com/groups/search?q=rhdh-pool-admins"
+  echo "Please reach out to the rhdh-qe team for assistance."
+  exit 1
+fi
+
+cluster_secret=$(oc get secrets -n "$namespace" | grep admin-password | awk '{print $1}')
+# Retrieve the kubeadmin password from the specified namespace
+password=$(oc get secret $cluster_secret -n "$namespace" -o jsonpath='{.data.password}' | base64 -d)
+
+# Log out from the current session
+oc logout
+
+# Log in to the namespace-specific cluster
+oc login https://api."$namespace".rhdh-qe.devcluster.openshift.com:6443 --username kubeadmin --password "$password" --insecure-skip-tls-verify=true
+oc project showcase
+
+# Prompt the user to open the web console
+read -p "Do you want to open the OpenShift web console? (y/n): " open_console
+
+if [[ "$open_console" == "y" || "$open_console" == "Y" ]]; then
+
+  console_url="https://console-openshift-console.apps.${namespace}.rhdh-qe.devcluster.openshift.com/dashboards"
+
+  echo "Opening web console at $console_url..."
+  echo "Use the username and password below to log in to the web console:"
+  echo "Username: kubeadmin"
+  echo "Password: $password"
+  sleep 3
+
+  # Attempt to open the web console in the default browser
+  if command -v xdg-open &> /dev/null; then
+    xdg-open "$console_url" # For Linux systems
+  elif command -v open &> /dev/null; then
+    open "$console_url" # For macOS
+  else
+    echo "Unable to detect a browser. Please open the following URL manually:"
+    echo "$console_url"
+  fi
+else
+  echo "Web console not opened."
+fi
\ No newline at end of file
diff --git a/.ibm/pipelines/openshift-ci-tests.sh b/.ibm/pipelines/openshift-ci-tests.sh
index ca3dfcd8e3..9759706a42 100755
--- a/.ibm/pipelines/openshift-ci-tests.sh
+++ b/.ibm/pipelines/openshift-ci-tests.sh
@@ -12,38 +12,27 @@ cleanup() {
   echo "Cleaning up before exiting"
   if [[ "$JOB_NAME" == *aks* ]]; then
     az_aks_stop "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
-  elif [[ "$JOB_NAME" == *pull-*-main-e2e-tests* ]]; then
-    # Cleanup namespaces after main branch PR e2e tests execution.
- delete_namespace "${NAME_SPACE}" - delete_namespace "${NAME_SPACE_POSTGRES_DB}" - delete_namespace "${NAME_SPACE_RBAC}" fi rm -rf ~/tmpbin } trap cleanup EXIT INT ERR -export K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_URL) -export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_TOKEN) +SCRIPTS=( + "env_variables.sh" + "utils.sh" + "jobs/aks.sh" + "jobs/gke.sh" + "jobs/main.sh" + "jobs/operator.sh" + "jobs/periodic.sh" +) -source "${DIR}/env_variables.sh" -echo "Loaded env_variables.sh" -source "${DIR}/utils.sh" -echo "Loaded utils.sh" -source "${DIR}/jobs/aks.sh" -echo "Loaded aks.sh" -source "${DIR}/jobs/gke.sh" -echo "Loaded gke.sh" -source "${DIR}/jobs/main.sh" -echo "Loaded main.sh" -source "${DIR}/jobs/ocp-v4-15.sh" -echo "Loaded ocp-v4-15.sh" -source "${DIR}/jobs/ocp-v4-16.sh" -echo "Loaded ocp-v4-16.sh" -source "${DIR}/jobs/operator.sh" -echo "Loaded operator.sh" -source "${DIR}/jobs/periodic.sh" -echo "Loaded periodic.sh" +# Source each script dynamically +for SCRIPT in "${SCRIPTS[@]}"; do + source "${DIR}/${SCRIPT}" + echo "Loaded ${SCRIPT}" +done main() { echo "Log file: ${LOGFILE}" @@ -58,26 +47,18 @@ main() { echo "Calling handle_gke" handle_gke ;; + *operator*) + echo "Calling Operator" + handle_operator + ;; *periodic*) echo "Calling handle_periodic" handle_nightly ;; - *pull-*-main-e2e-tests*) + *pull*) echo "Calling handle_main" handle_main ;; - *ocp-v4-16*) - echo "Calling handle_ocp_v4_16" - handle_ocp_v4_16 - ;; - *ocp-v4-15*) - echo "Calling handle_ocp_v4_15" - handle_ocp_v4_15 - ;; - *operator*) - echo "Calling Operator" - handle_operator - ;; esac echo "K8S_CLUSTER_ROUTER_BASE : $K8S_CLUSTER_ROUTER_BASE" diff --git a/.ibm/pipelines/resources/config_map/app-config-rhdh-rbac.yaml b/.ibm/pipelines/resources/config_map/app-config-rhdh-rbac.yaml index 70e61421b9..b951b8b68b 100644 --- a/.ibm/pipelines/resources/config_map/app-config-rhdh-rbac.yaml +++ b/.ibm/pipelines/resources/config_map/app-config-rhdh-rbac.yaml @@ -25,18 +25,25 @@ integrations: - host: gitlab.com token: temp auth: - # see https://backstage.io/docs/auth/ to learn about auth providers environment: development + session: + secret: superSecretSecret providers: - # Plugin: GitHub - github: - development: - clientId: ${GITHUB_APP_CLIENT_ID} - clientSecret: ${GITHUB_APP_CLIENT_SECRET} + guest: + dangerouslyAllowOutsideDevelopment: true google: development: clientId: ${GOOGLE_CLIENT_ID} clientSecret: ${GOOGLE_CLIENT_SECRET} + oidc: + development: + metadataUrl: ${KEYCLOAK_AUTH_BASE_URL}/auth/realms/${KEYCLOAK_AUTH_REALM} + clientId: ${KEYCLOAK_AUTH_CLIENTID} + clientSecret: ${KEYCLOAK_AUTH_CLIENT_SECRET} + prompt: auto + callbackUrl: ${RHDH_BASE_URL}/api/auth/oidc/handler/frame + +signInPage: oidc proxy: skipInvalidProxies: true # endpoints: {} @@ -79,6 +86,16 @@ catalog: rules: - allow: [User, Group] providers: + keycloakOrg: + default: + baseUrl: ${KEYCLOAK_AUTH_BASE_URL}/auth + loginRealm: ${KEYCLOAK_AUTH_LOGIN_REALM} + realm: ${KEYCLOAK_AUTH_REALM} + clientId: ${KEYCLOAK_AUTH_CLIENTID} + clientSecret: ${KEYCLOAK_AUTH_CLIENT_SECRET} + schedule: + frequency: { minutes: 1 } + timeout: { minutes: 1 } githubOrg: id: production githubUrl: "${GITHUB_URL}" diff --git a/.ibm/pipelines/resources/config_map/app-config-rhdh.yaml b/.ibm/pipelines/resources/config_map/app-config-rhdh.yaml index ad49b0850c..c58cf65ce6 100644 --- a/.ibm/pipelines/resources/config_map/app-config-rhdh.yaml +++ b/.ibm/pipelines/resources/config_map/app-config-rhdh.yaml @@ -92,19 +92,25 @@ integrations: - host: gitlab.com 
token: ${GITLAB_TOKEN} auth: + # see https://backstage.io/docs/auth/ to learn about auth providers environment: development + session: + secret: superSecretSecret providers: guest: dangerouslyAllowOutsideDevelopment: true - # Plugin: GitHub - github: - development: - clientId: ${GITHUB_APP_CLIENT_ID} - clientSecret: ${GITHUB_APP_CLIENT_SECRET} google: development: clientId: ${GOOGLE_CLIENT_ID} clientSecret: ${GOOGLE_CLIENT_SECRET} + oidc: + development: + metadataUrl: ${KEYCLOAK_AUTH_BASE_URL}/auth/realms/${KEYCLOAK_AUTH_REALM} + clientId: ${KEYCLOAK_AUTH_CLIENTID} + clientSecret: ${KEYCLOAK_AUTH_CLIENT_SECRET} + prompt: auto + callbackUrl: ${RHDH_BASE_URL}/api/auth/oidc/handler/frame +signInPage: oidc techRadar: url: "http://${DH_TARGET_URL}/tech-radar" proxy: @@ -149,6 +155,16 @@ catalog: rules: - allow: [User, Group] providers: + keycloakOrg: + default: + baseUrl: ${KEYCLOAK_AUTH_BASE_URL}/auth + loginRealm: ${KEYCLOAK_AUTH_LOGIN_REALM} + realm: ${KEYCLOAK_AUTH_REALM} + clientId: ${KEYCLOAK_AUTH_CLIENTID} + clientSecret: ${KEYCLOAK_AUTH_CLIENT_SECRET} + schedule: + frequency: { minutes: 1 } + timeout: { minutes: 1 } github: providerId: organization: '${GITHUB_ORG}' diff --git a/.ibm/pipelines/resources/config_map/rbac-policy.csv b/.ibm/pipelines/resources/config_map/rbac-policy.csv index 6a5a94d832..9a395aa408 100644 --- a/.ibm/pipelines/resources/config_map/rbac-policy.csv +++ b/.ibm/pipelines/resources/config_map/rbac-policy.csv @@ -1,7 +1,7 @@ p, role:default/guests, catalog.entity.create, create, allow p, role:default/team_a, catalog-entity, read, allow g, user:xyz/user, role:xyz/team_a -g, group:janus-qe/rhdh-qe-2-team, role:default/test2-role +g, group:default/rhdh-qe-2-team, role:default/test2-role p, role:xyz/team_a, catalog-entity, read, allow p, role:xyz/team_a, catalog.entity.create, create, allow @@ -17,4 +17,4 @@ p, role:default/qe_rbac_admin, catalog.location.read, read, allow p, role:default/bulk_import, bulk.import, use, allow p, role:default/bulk_import, catalog.location.create, create, allow p, role:default/bulk_import, catalog.entity.create, create, allow -g, group:janus-qe/rhdh-qe-2-team, role:default/bulk_import \ No newline at end of file +g, group:default/rhdh-qe-2-team, role:default/bulk_import \ No newline at end of file diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index 65f0632800..12ee5d0f2e 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -1,7 +1,5 @@ #!/bin/sh -set -x - retrieve_pod_logs() { local pod_name=$1; local container=$2; local namespace=$3 echo " Retrieving logs for container: $container" @@ -329,7 +327,7 @@ delete_namespace() { echo "Namespace ${project} exists. Attempting to delete..." # Remove blocking finalizers - remove_finalizers_from_resources "$project" + # remove_finalizers_from_resources "$project" # Attempt to delete the namespace oc delete namespace "$project" --grace-period=0 --force || true @@ -377,22 +375,10 @@ set_github_app_3_credentials() { echo "GitHub App 3 credentials set for current job." } -set_github_app_4_credentials() { - GITHUB_APP_APP_ID=$(cat /tmp/secrets/GITHUB_APP_4_APP_ID) - GITHUB_APP_CLIENT_ID=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_ID) - GITHUB_APP_PRIVATE_KEY=$(cat /tmp/secrets/GITHUB_APP_4_PRIVATE_KEY) - GITHUB_APP_CLIENT_SECRET=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_SECRET) - - export GITHUB_APP_APP_ID - export GITHUB_APP_CLIENT_ID - export GITHUB_APP_PRIVATE_KEY - export GITHUB_APP_CLIENT_SECRET - echo "GitHub App 4 credentials set for current job." 
-} - apply_yaml_files() { local dir=$1 local project=$2 + local release_name=$3 echo "Applying YAML files to namespace ${project}" oc config set-context --current --namespace="${project}" @@ -410,8 +396,11 @@ apply_yaml_files() { done DH_TARGET_URL=$(echo -n "test-backstage-customization-provider-${project}.${K8S_CLUSTER_ROUTER_BASE}" | base64 -w 0) - - for key in GITHUB_APP_APP_ID GITHUB_APP_CLIENT_ID GITHUB_APP_PRIVATE_KEY GITHUB_APP_CLIENT_SECRET GITHUB_APP_JANUS_TEST_APP_ID GITHUB_APP_JANUS_TEST_CLIENT_ID GITHUB_APP_JANUS_TEST_CLIENT_SECRET GITHUB_APP_JANUS_TEST_PRIVATE_KEY GITHUB_APP_WEBHOOK_URL GITHUB_APP_WEBHOOK_SECRET KEYCLOAK_CLIENT_SECRET ACR_SECRET GOOGLE_CLIENT_ID GOOGLE_CLIENT_SECRET K8S_CLUSTER_TOKEN_ENCODED OCM_CLUSTER_URL GITLAB_TOKEN DH_TARGET_URL; do + local RHDH_BASE_URL=$(echo -n "https://${release_name}-backstage-${project}.${K8S_CLUSTER_ROUTER_BASE}" | base64 | tr -d '\n') + if [[ "$JOB_NAME" == *aks* || "$JOB_NAME" == *gke* ]]; then + RHDH_BASE_URL=$(echo -n "https://${K8S_CLUSTER_ROUTER_BASE}" | base64 | tr -d '\n') + fi + for key in GITHUB_APP_APP_ID GITHUB_APP_CLIENT_ID GITHUB_APP_PRIVATE_KEY GITHUB_APP_CLIENT_SECRET GITHUB_APP_JANUS_TEST_APP_ID GITHUB_APP_JANUS_TEST_CLIENT_ID GITHUB_APP_JANUS_TEST_CLIENT_SECRET GITHUB_APP_JANUS_TEST_PRIVATE_KEY GITHUB_APP_WEBHOOK_URL GITHUB_APP_WEBHOOK_SECRET KEYCLOAK_CLIENT_SECRET ACR_SECRET GOOGLE_CLIENT_ID GOOGLE_CLIENT_SECRET K8S_CLUSTER_TOKEN_ENCODED OCM_CLUSTER_URL GITLAB_TOKEN KEYCLOAK_AUTH_BASE_URL KEYCLOAK_AUTH_CLIENTID KEYCLOAK_AUTH_CLIENT_SECRET KEYCLOAK_AUTH_LOGIN_REALM KEYCLOAK_AUTH_REALM RHDH_BASE_URL; do sed -i "s|${key}:.*|${key}: ${!key}|g" "$dir/auth/secrets-rhdh-secrets.yaml" done @@ -424,15 +413,14 @@ apply_yaml_files() { oc apply -f "$dir/resources/cluster_role/cluster-role-ocm.yaml" --namespace="${project}" oc apply -f "$dir/resources/cluster_role_binding/cluster-role-binding-ocm.yaml" --namespace="${project}" - escaped_url=$(printf '%s\n' "${ENCODED_API_SERVER_URL}" | sed 's/[\/&]/\\&/g') - sed -i "s/K8S_CLUSTER_API_SERVER_URL:.*/K8S_CLUSTER_API_SERVER_URL: ${escaped_url}/g" "$dir/auth/secrets-rhdh-secrets.yaml" \ - && echo "Updated K8S_CLUSTER_API_SERVER_URL in secrets file." \ - || echo "Failed to update K8S_CLUSTER_API_SERVER_URL." >&2 + sed -i "s/K8S_CLUSTER_API_SERVER_URL:.*/K8S_CLUSTER_API_SERVER_URL: ${K8S_CLUSTER_API_SERVER_URL}/g" "$dir/auth/secrets-rhdh-secrets.yaml" sed -i "s/K8S_CLUSTER_NAME:.*/K8S_CLUSTER_NAME: ${ENCODED_CLUSTER_NAME}/g" "$dir/auth/secrets-rhdh-secrets.yaml" + set +x token=$(oc get secret "${secret_name}" -n "${project}" -o=jsonpath='{.data.token}') sed -i "s/OCM_CLUSTER_TOKEN: .*/OCM_CLUSTER_TOKEN: ${token}/" "$dir/auth/secrets-rhdh-secrets.yaml" + set -x # Select the configuration file based on the namespace or job config_file=$(select_config_map_file) @@ -449,6 +437,10 @@ apply_yaml_files() { oc apply -f "$dir/auth/secrets-rhdh-secrets.yaml" --namespace="${project}" + # Create Pipeline run for tekton test case. 
+  oc apply -f "$dir/resources/pipeline-run/hello-world-pipeline.yaml"
+  oc apply -f "$dir/resources/pipeline-run/hello-world-pipeline-run.yaml"
+
 }
 
 deploy_test_backstage_provider() {
@@ -572,6 +564,7 @@ check_backstage_running() {
       return 0
     else
       echo "Attempt ${i} of ${max_attempts}: Backstage not yet available (HTTP Status: ${http_status})"
+      oc get pods -n "${namespace}"
       sleep "${wait_seconds}"
     fi
   done
@@ -592,29 +585,58 @@ install_tekton_pipelines() {
   fi
 }
 
+# Installs the advanced-cluster-management operator
+install_acm_operator(){
+  oc apply -f "${DIR}/cluster/operators/acm/operator-group.yaml"
+  oc apply -f "${DIR}/cluster/operators/acm/subscription-acm.yaml"
+  wait_for_deployment "open-cluster-management" "multiclusterhub-operator"
+  oc apply -f "${DIR}/cluster/operators/acm/multiclusterhub.yaml"
+  # wait until multiclusterhub is Running.
+  timeout 600 bash -c 'while true; do
+    CURRENT_PHASE=$(oc get multiclusterhub multiclusterhub -n open-cluster-management -o jsonpath="{.status.phase}")
+    echo "MulticlusterHub Current Status: $CURRENT_PHASE"
+    [[ "$CURRENT_PHASE" == "Running" ]] && echo "MulticlusterHub is now in Running phase." && break
+    sleep 10
+  done' || echo "Timed out after 10 minutes"
+
+}
+
+# Installs the Red Hat OpenShift Pipelines operator if not already installed
 install_pipelines_operator() {
-  local dir=$1
   DISPLAY_NAME="Red Hat OpenShift Pipelines"
-
+  # Check if operator is already installed
   if oc get csv -n "openshift-operators" | grep -q "${DISPLAY_NAME}"; then
     echo "Red Hat OpenShift Pipelines operator is already installed."
   else
     echo "Red Hat OpenShift Pipelines operator is not installed. Installing..."
-    oc apply -f "${dir}/resources/pipeline-run/pipelines-operator.yaml"
+    # Install the operator and wait for deployment
+    install_subscription openshift-pipelines-operator openshift-operators openshift-pipelines-operator-rh latest redhat-operators
+    wait_for_deployment "openshift-operators" "pipelines"
+    timeout 300 bash -c '
+    while ! oc get svc tekton-pipelines-webhook -n openshift-pipelines &> /dev/null; do
+      echo "Waiting for tekton-pipelines-webhook service to be created..."
+      sleep 5
+    done
+    echo "Service tekton-pipelines-webhook is created."
+    ' || echo "Error: Timed out waiting for tekton-pipelines-webhook service creation."
   fi
 }
 
-initiate_deployments() {
-
+cluster_setup() {
+  install_pipelines_operator
+  install_acm_operator
   install_crunchy_postgres_operator
   add_helm_repos
+}
+
+initiate_deployments() {
   configure_namespace ${NAME_SPACE}
   # Deploy redis cache db.
   oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE}"
   cd "${DIR}"
-  apply_yaml_files "${DIR}" "${NAME_SPACE}"
+  apply_yaml_files "${DIR}" "${NAME_SPACE}" "${RELEASE_NAME}"
   echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE}"
   helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
@@ -622,8 +644,8 @@ initiate_deployments() {
 
   configure_namespace "${NAME_SPACE_RBAC}"
   configure_external_postgres_db "${NAME_SPACE_RBAC}"
-  uninstall_helmchart "${NAME_SPACE_RBAC}" "${RELEASE_NAME_RBAC}"
-  apply_yaml_files "${DIR}" "${NAME_SPACE_RBAC}"
+  # Initiate RBAC instance deployment.
+ apply_yaml_files "${DIR}" "${NAME_SPACE_RBAC}" "${RELEASE_NAME_RBAC}" echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${RELEASE_NAME_RBAC}" helm upgrade -i "${RELEASE_NAME_RBAC}" -n "${NAME_SPACE_RBAC}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_RBAC_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}" } @@ -685,12 +707,8 @@ force_delete_namespace() { } oc_login() { - export K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_URL) - export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_TOKEN) - oc login --token="${K8S_CLUSTER_TOKEN}" --server="${K8S_CLUSTER_URL}" echo "OCP version: $(oc version)" export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') } - diff --git a/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts b/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts index d5bfa9f0af..b8ce6cdf72 100644 --- a/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts +++ b/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts @@ -34,13 +34,9 @@ test.describe.serial("Link Scaffolded Templates to Catalog Items", () => { uiHelper = new UIhelper(page); catalogImport = new CatalogImport(page); - await common.loginAsGithubUser(); + await common.loginAsKeycloakUser(); }); - test.beforeEach( - async () => await new Common(page).checkAndClickOnGHloginPopup(), - ); - test("Register an Template", async () => { await uiHelper.openSidebar("Catalog"); await uiHelper.clickButton("Create"); diff --git a/e2e-tests/playwright/e2e/github-discovery.spec.ts b/e2e-tests/playwright/e2e/github-discovery.spec.ts index a97b553fe1..3811ddb290 100644 --- a/e2e-tests/playwright/e2e/github-discovery.spec.ts +++ b/e2e-tests/playwright/e2e/github-discovery.spec.ts @@ -22,7 +22,8 @@ const test = base.extend({ testOrganization: JANUS_QE_ORG, }); -test.describe("Github Discovery Catalog", () => { +//TODO: skipping due to RHIDP-4992 +test.describe.skip("Github Discovery Catalog", () => { test(`Discover Organization's Catalog`, async ({ catalogPage, githubApi, diff --git a/e2e-tests/playwright/e2e/github-happy-path.spec.ts b/e2e-tests/playwright/e2e/github-happy-path.spec.ts index 943ba98c3d..808d540b00 100644 --- a/e2e-tests/playwright/e2e/github-happy-path.spec.ts +++ b/e2e-tests/playwright/e2e/github-happy-path.spec.ts @@ -10,7 +10,9 @@ import { TEMPLATES } from "../support/testData/templates"; let page: Page; -test.describe.serial("GitHub Happy path", () => { +// TODO: replace skip with serial +test.describe.skip("GitHub Happy path", () => { + //TODO: skipping due to RHIDP-4992 let common: Common; let uiHelper: UIhelper; let catalogImport: CatalogImport; diff --git a/e2e-tests/playwright/e2e/github-integration-org-fetch.spec.ts b/e2e-tests/playwright/e2e/github-integration-org-fetch.spec.ts index 691fed46c6..0f71e1631b 100644 --- a/e2e-tests/playwright/e2e/github-integration-org-fetch.spec.ts +++ b/e2e-tests/playwright/e2e/github-integration-org-fetch.spec.ts @@ -11,7 +11,7 @@ test.describe.serial("GitHub integration with Org data fetching", () => { page = (await setupBrowser(browser, testInfo)).page; uiHelper = new UIhelper(page); common = new Common(page); - await common.loginAsGithubUser(); + await common.loginAsKeycloakUser(); }); test("Verify that fetching the groups of 
the first org works", async () => { diff --git a/e2e-tests/playwright/e2e/plugins/analytics/analytics-disabled-rbac.spec.ts b/e2e-tests/playwright/e2e/plugins/analytics/analytics-disabled-rbac.spec.ts index 14bb138ec6..f3e447badf 100644 --- a/e2e-tests/playwright/e2e/plugins/analytics/analytics-disabled-rbac.spec.ts +++ b/e2e-tests/playwright/e2e/plugins/analytics/analytics-disabled-rbac.spec.ts @@ -11,7 +11,7 @@ test.describe.skip('Check RBAC "analytics-provider-segment" plugin', () => { test.beforeEach(async ({ page }) => { uiHelper = new UIhelper(page); common = new Common(page); - await common.loginAsGithubUser(); + await common.loginAsKeycloakUser(); await uiHelper.openSidebarButton("Administration"); await uiHelper.openSidebar("Plugins"); await uiHelper.verifyHeading("Plugins"); diff --git a/e2e-tests/playwright/e2e/plugins/bulk-import.spec.ts b/e2e-tests/playwright/e2e/plugins/bulk-import.spec.ts index 695b328ba6..c5866a1970 100644 --- a/e2e-tests/playwright/e2e/plugins/bulk-import.spec.ts +++ b/e2e-tests/playwright/e2e/plugins/bulk-import.spec.ts @@ -41,13 +41,12 @@ test.describe.serial("Bulk Import plugin", () => { newRepoDetails.owner, newRepoDetails.repoName, ); - await common.loginAsGithubUser(process.env.GH_USER2_ID); + await common.loginAsKeycloakUser( + process.env.GH_USER2_ID, + process.env.GH_USER2_PASS, + ); }); - test.beforeEach( - async () => await new Common(page).checkAndClickOnGHloginPopup(), - ); - // Select two repos: one with an existing catalog.yaml file and another without it test("Add a Repository from the Repository Tab and Confirm its Preview", async () => { await uiHelper.openSidebar("Bulk import"); @@ -252,13 +251,12 @@ test.describe common = new Common(page); bulkimport = new BulkImport(page); catalogImport = new CatalogImport(page); - await common.loginAsGithubUser(process.env.GH_USER2_ID); + await common.loginAsKeycloakUser( + process.env.GH_USER2_ID, + process.env.GH_USER2_PASS, + ); }); - test.beforeEach( - async () => await new Common(page).checkAndClickOnGHloginPopup(), - ); - test("Verify existing repo from app-config is displayed in bulk import Added repositories", async () => { await uiHelper.openSidebar("Bulk import"); await common.waitForLoad(); @@ -302,10 +300,6 @@ test.describe await common.loginAsGuest(); }); - test.beforeEach( - async () => await new Common(page).checkAndClickOnGHloginPopup(), - ); - test("Bulk Import - Verify users without permission cannot access", async () => { await uiHelper.openSidebar("Bulk import"); await uiHelper.verifyText("Permission required"); diff --git a/e2e-tests/playwright/e2e/plugins/ocm.spec.ts b/e2e-tests/playwright/e2e/plugins/ocm.spec.ts index e648de64b4..abff5a2ec4 100644 --- a/e2e-tests/playwright/e2e/plugins/ocm.spec.ts +++ b/e2e-tests/playwright/e2e/plugins/ocm.spec.ts @@ -10,10 +10,10 @@ import { Clusters } from "../../support/pages/clusters"; const clusterDetails = { clusterName: "testCluster", status: "Ready", - platform: "IBM", + platform: /IBM|AWS|GCP/, cpuCores: /CPU cores\d+/, memorySize: /Memory size\d.*(Gi|Mi)/, - ocVersion: /^\d+\.\d+\.\d+$/, + ocVersion: /^\d+\.\d+\.\d+(Upgrade available)?$/, }; let page: Page; test.describe.serial("Test OCM plugin", () => { @@ -44,7 +44,7 @@ test.describe.serial("Test OCM plugin", () => { await uiHelper.verifyHeading("Your Managed Clusters"); await uiHelper.verifyRowInTableByUniqueText(clusterDetails.clusterName, [ - clusterDetails.status, + new RegExp(clusterDetails.status), clusterDetails.platform, ]); await 
uiHelper.verifyRowInTableByUniqueText(clusterDetails.clusterName, [ diff --git a/e2e-tests/playwright/e2e/plugins/rbac/rbac.spec.ts b/e2e-tests/playwright/e2e/plugins/rbac/rbac.spec.ts index 58a84cfbd5..f45202f8aa 100644 --- a/e2e-tests/playwright/e2e/plugins/rbac/rbac.spec.ts +++ b/e2e-tests/playwright/e2e/plugins/rbac/rbac.spec.ts @@ -83,60 +83,66 @@ test.describe.skip( }, ); -test.describe - .serial("Test RBAC plugin: Aliases used in conditional access policies", () => { - let common: Common; - let uiHelper: UIhelper; - let page: Page; - - test.beforeAll(async ({ browser }, testInfo) => { - page = (await setupBrowser(browser, testInfo)).page; +//TODO: skipping due to RHIDP-4993 +test.describe.skip( + "Test RBAC plugin: Aliases used in conditional access policies", + () => { + let common: Common; + let uiHelper: UIhelper; + let page: Page; - uiHelper = new UIhelper(page); - common = new Common(page); - await common.loginAsGithubUser(process.env.GH_USER2_ID); - }); + test.beforeAll(async ({ browser }, testInfo) => { + page = (await setupBrowser(browser, testInfo)).page; - test.beforeEach( - async () => await new Common(page).checkAndClickOnGHloginPopup(), - ); + uiHelper = new UIhelper(page); + common = new Common(page); + await common.loginAsGithubUser(process.env.GH_USER2_ID); + }); - test("Check if aliases used in conditions: the user is allowed to unregister only components they own, not those owned by the group.", async () => { - await uiHelper.openSidebar("Catalog"); - await uiHelper.selectMuiBox("Kind", "Component"); + test.beforeEach( + async () => await new Common(page).checkAndClickOnGHloginPopup(), + ); - await uiHelper.searchInputPlaceholder("test-rhdh-qe-2"); - await page - .getByRole("link", { name: "test-rhdh-qe-2", exact: true }) - .click(); + test("Check if aliases used in conditions: the user is allowed to unregister only components they own, not those owned by the group.", async () => { + await uiHelper.openSidebar("Catalog"); + await uiHelper.selectMuiBox("Kind", "Component"); - await expect(page.locator("header")).toContainText("user:rhdh-qe-2"); - await page.getByTestId("menu-button").click(); - const unregisterUserOwned = page.getByText("Unregister entity"); - await expect(unregisterUserOwned).toBeEnabled(); + await uiHelper.searchInputPlaceholder("test-rhdh-qe-2"); + await page + .getByRole("link", { name: "test-rhdh-qe-2", exact: true }) + .click(); - await page.getByText("Unregister entity").click(); - await expect(page.getByRole("heading")).toContainText( - "Are you sure you want to unregister this entity?", - ); - await page.getByRole("button", { name: "Cancel" }).click(); + await expect(page.locator("header")).toContainText("user:rhdh-qe-2"); + await page.getByTestId("menu-button").click(); + const unregisterUserOwned = page.getByText("Unregister entity"); + await expect(unregisterUserOwned).toBeEnabled(); - await uiHelper.openSidebar("Catalog"); - await page.getByRole("link", { name: "test-rhdh-qe-2-team-owned" }).click(); - await expect(page.locator("header")).toContainText( - "janus-qe/rhdh-qe-2-team", - ); - await page.getByTestId("menu-button").click(); - const unregisterGroupOwned = page.getByText("Unregister entity"); - await expect(unregisterGroupOwned).toBeDisabled(); - }); + await page.getByText("Unregister entity").click(); + await expect(page.getByRole("heading")).toContainText( + "Are you sure you want to unregister this entity?", + ); + await page.getByRole("button", { name: "Cancel" }).click(); + + await uiHelper.openSidebar("Catalog"); + await 
page
+        .getByRole("link", { name: "test-rhdh-qe-2-team-owned" })
+        .click();
+      await expect(page.locator("header")).toContainText(
+        "janus-qe/rhdh-qe-2-team",
+      );
+      await page.getByTestId("menu-button").click();
+      const unregisterGroupOwned = page.getByText("Unregister entity");
+      await expect(unregisterGroupOwned).toBeDisabled();
+    });
 
-  test.afterAll(async () => {
-    await page.close();
-  });
-});
+    test.afterAll(async () => {
+      await page.close();
+    });
+  },
+);
 
-test.describe.serial("Test RBAC plugin as an admin user", () => {
+//TODO: skipping due to RHIDP-4993
+test.describe.skip("Test RBAC plugin as an admin user", () => {
   let common: Common;
   let uiHelper: UIhelper;
   let page: Page;
diff --git a/e2e-tests/playwright/e2e/verify-tls-config-with-external-postgres-db.spec.ts b/e2e-tests/playwright/e2e/verify-tls-config-with-external-postgres-db.spec.ts
index 3ecb9061cc..6a1b7ba9ce 100644
--- a/e2e-tests/playwright/e2e/verify-tls-config-with-external-postgres-db.spec.ts
+++ b/e2e-tests/playwright/e2e/verify-tls-config-with-external-postgres-db.spec.ts
@@ -8,8 +8,8 @@ test.describe("Verify TLS configuration with external Postgres DB", () => {
   }) => {
     const uiHelper = new UIhelper(page);
     const common = new Common(page);
-    await common.loginAsGithubUser();
-    await uiHelper.openSidebar("Catalog");
+    await common.loginAsKeycloakUser();
+    await page.goto("/catalog");
     await uiHelper.selectMuiBox("Kind", "Component");
     await uiHelper.clickByDataTestId("user-picker-all");
     await uiHelper.verifyRowsInTable(["Backstage Showcase"]);
diff --git a/e2e-tests/playwright/utils/common.ts b/e2e-tests/playwright/utils/common.ts
index 181e10bb35..01c71cee82 100644
--- a/e2e-tests/playwright/utils/common.ts
+++ b/e2e-tests/playwright/utils/common.ts
@@ -84,6 +84,29 @@ export class Common {
     });
   }
 
+  async logintoKeycloak(userid: string, password: string) {
+    await new Promise<void>((resolve) => {
+      this.page.once("popup", async (popup) => {
+        await popup.waitForLoadState();
+        await popup.locator("#username").fill(userid);
+        await popup.locator("#password").fill(password);
+        await popup.locator("#kc-login").click();
+        resolve();
+      });
+    });
+  }
+
+  async loginAsKeycloakUser(
+    userid: string = process.env.GH_USER_ID,
+    password: string = process.env.GH_USER_PASS,
+  ) {
+    await this.page.goto("/");
+    await this.waitForLoad(240000);
+    await this.uiHelper.clickButton("Sign In");
+    await this.logintoKeycloak(userid, password);
+    await this.uiHelper.waitForSideBarVisible();
+  }
+
   async loginAsGithubUser(userid: string = process.env.GH_USER_ID) {
     const sessionFileName = `authState_${userid}.json`;