diff --git a/.github/workflows/build_awshelper.yaml b/.github/workflows/build_awshelper.yaml
new file mode 100644
index 0000000000..36b5745dbd
--- /dev/null
+++ b/.github/workflows/build_awshelper.yaml
@@ -0,0 +1,19 @@
+name: Build awshelper image
+
+# Always build this image because it contains all the cloud-automation files.
+# Some jobs depend on arbitrary files and we need to test them with updated awshelper images.
+on: push
+
+jobs:
+  awshelper:
+    name: awshelper
+    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+    with:
+      DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile"
+      OVERRIDE_REPO_NAME: "awshelper"
+    secrets:
+      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
diff --git a/.github/workflows/build_python3.10.yaml b/.github/workflows/build_python3.10.yaml
new file mode 100644
index 0000000000..80d2d76232
--- /dev/null
+++ b/.github/workflows/build_python3.10.yaml
@@ -0,0 +1,23 @@
+name: Build Python 3.10 image
+
+on:
+  push:
+    paths:
+      - .github/workflows/build_python3.10.yaml
+      - Docker/python-nginx/python3.10-buster/**
+
+jobs:
+  python_3-10:
+    name: Python 3.10
+    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+    with:
+      DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile"
+      DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.10-buster"
+      OVERRIDE_REPO_NAME: "python"
+      OVERRIDE_TAG_NAME: "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
+    secrets:
+      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
diff --git a/.github/workflows/build_python3.9.yaml b/.github/workflows/build_python3.9.yaml
new file mode 100644
index 0000000000..540e0d4eca
--- /dev/null
+++ b/.github/workflows/build_python3.9.yaml
@@ -0,0 +1,23 @@
+name: Build Python 3.9 image
+
+on:
+  push:
+    paths:
+      - .github/workflows/build_python3.9.yaml
+      - Docker/python-nginx/python3.9-buster/**
+
+jobs:
+  python_3-9:
+    name: Python 3.9
+    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+    with:
+      DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile"
+      DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.9-buster"
+      OVERRIDE_REPO_NAME: "python"
+      OVERRIDE_TAG_NAME: "python3.9-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
+    secrets:
+      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
diff --git a/.github/workflows/image_build_push.yaml b/.github/workflows/image_build_push.yaml
deleted file mode 100644
index d5bfea351d..0000000000
--- a/.github/workflows/image_build_push.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: Build Python Base Images
-
-on: push
-
-jobs:
-  python_3-9:
-    name: Python 3.9
-    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
-    with:
-      DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile"
-      DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.9-buster"
-      OVERRIDE_REPO_NAME: "python"
-      OVERRIDE_TAG_NAME: "python3.9-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
-    secrets:
-      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
-      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
-      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
-      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
-  python_3-10:
-    name: Python 3.10
-    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
-    with:
-      DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile"
-      DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.10-buster"
-      OVERRIDE_REPO_NAME: "python"
-      OVERRIDE_TAG_NAME: "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
-    secrets:
-      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
-      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
-      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
-      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
-  awshelper:
-    name: AwsHelper
-    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
-    with:
-      DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile"
-      OVERRIDE_REPO_NAME: "awshelper"
-    secrets:
-      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
-      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
-      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
-      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
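Reviewer note on the OVERRIDE_TAG_NAME values above: each one embeds a shell fragment that the reusable image_build_push workflow presumably expands at build time. A minimal sketch of what the expression evaluates to, assuming GITHUB_REF has the usual refs/heads/<branch> shape (the branch name below is made up for illustration):

```bash
# Hypothetical ref; GitHub Actions sets GITHUB_REF automatically on push.
GITHUB_REF="refs/heads/chore/py310-update"

# ${GITHUB_REF#refs/*/} strips the shortest leading "refs/<kind>/" prefix,
# and tr replaces any remaining slashes so the result is a valid image tag.
echo "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
# prints: python3.10-buster-chore_py310-update
```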
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2e3ce795b6..82034495d3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
 - repo: git@github.com:Yelp/detect-secrets
-  rev: v0.13.1
+  rev: v1.4.0
   hooks:
   - id: detect-secrets
     args: ['--baseline', '.secrets.baseline']
diff --git a/.secrets.baseline b/.secrets.baseline
index 0a8fe9cc99..0c4eba0a80 100644
--- a/.secrets.baseline
+++ b/.secrets.baseline
@@ -1,19 +1,18 @@
 {
-  "exclude": {
-    "files": "^.secrets.baseline$",
-    "lines": null
-  },
-  "generated_at": "2023-10-26T21:32:44Z",
+  "version": "1.4.0",
   "plugins_used": [
+    {
+      "name": "ArtifactoryDetector"
+    },
     {
       "name": "AWSKeyDetector"
     },
     {
-      "name": "ArtifactoryDetector"
+      "name": "AzureStorageKeyDetector"
     },
     {
-      "base64_limit": 4.5,
-      "name": "Base64HighEntropyString"
+      "name": "Base64HighEntropyString",
+      "limit": 4.5
     },
     {
       "name": "BasicAuthDetector"
@@ -22,8 +21,14 @@
       "name": "CloudantDetector"
     },
     {
-      "hex_limit": 3,
-      "name": "HexHighEntropyString"
+      "name": "DiscordBotTokenDetector"
+    },
+    {
+      "name": "GitHubTokenDetector"
+    },
+    {
+      "name": "HexHighEntropyString",
+      "limit": 3.0
     },
     {
       "name": "IbmCloudIamDetector"
@@ -35,21 +40,30 @@
       "name": "JwtTokenDetector"
     },
     {
-      "keyword_exclude": null,
-      "name": "KeywordDetector"
+      "name": "KeywordDetector",
+      "keyword_exclude": ""
     },
     {
       "name": "MailchimpDetector"
     },
+    {
+      "name": "NpmDetector"
+    },
     {
       "name": "PrivateKeyDetector"
     },
+    {
+      "name": "SendGridDetector"
+    },
     {
       "name": "SlackDetector"
     },
     {
       "name": "SoftlayerDetector"
     },
+    {
+      "name": "SquareOAuthDetector"
+    },
     {
       "name": "StripeDetector"
     },
@@ -57,1807 +71,3671 @@
       "name": "TwilioKeyDetector"
     }
   ],
+  "filters_used": [
+    {
+      "path": "detect_secrets.filters.allowlist.is_line_allowlisted"
+    },
+    {
+      "path": "detect_secrets.filters.common.is_baseline_file",
+      "filename": ".secrets.baseline"
+    },
+    {
+      "path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies",
+      "min_level": 2
+    },
+    {
+      "path": "detect_secrets.filters.heuristic.is_indirect_reference"
+    },
+    {
+      "path": "detect_secrets.filters.heuristic.is_likely_id_string"
+    },
+    {
+      "path": "detect_secrets.filters.heuristic.is_lock_file"
+    },
+    {
+      "path": "detect_secrets.filters.heuristic.is_not_alphanumeric_string"
+    },
+    {
+      "path": 
"detect_secrets.filters.heuristic.is_potential_uuid" + }, + { + "path": "detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign" + }, + { + "path": "detect_secrets.filters.heuristic.is_sequential_string" + }, + { + "path": "detect_secrets.filters.heuristic.is_swagger_file" + }, + { + "path": "detect_secrets.filters.heuristic.is_templated_secret" + } + ], "results": { "Chef/repo/data_bags/README.md": [ { - "hashed_secret": "8a9250639e092d90f164792e35073a9395bff366", - "is_secret": false, - "is_verified": false, - "line_number": 45, - "type": "Secret Keyword" - }, - { + "type": "Secret Keyword", + "filename": "Chef/repo/data_bags/README.md", "hashed_secret": "6367c48dd193d56ea7b0baad25b19455e529f5ee", - "is_secret": false, "is_verified": false, - "line_number": 51, - "type": "Secret Keyword" + "line_number": 38 } ], - "Docker/jenkins/Jenkins-CI-Worker/Dockerfile": [ + "Docker/sidecar/service.key": [ { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, + "type": "Private Key", + "filename": "Docker/sidecar/service.key", + "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", "is_verified": false, - "line_number": 121, - "type": "Secret Keyword" + "line_number": 1 } ], - "Docker/jenkins/Jenkins-Worker/Dockerfile": [ + "Jenkins/Stacks/Jenkins/jenkins.env.sample": [ { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, + "type": "Secret Keyword", + "filename": "Jenkins/Stacks/Jenkins/jenkins.env.sample", + "hashed_secret": "f41a52528dd2d592d2c05de5f388101c2948aa98", "is_verified": false, - "line_number": 143, - "type": "Secret Keyword" + "line_number": 5 } ], - "Docker/jenkins/Jenkins/Dockerfile": [ + "Jenkinsfile": [ { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, + "type": "Secret Keyword", + "filename": "Jenkinsfile", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", "is_verified": false, - "line_number": 107, - "type": "Secret Keyword" - } - ], - "Docker/jenkins/Jenkins2/Dockerfile": [ + "line_number": 144 + }, { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, + "type": "Secret Keyword", + "filename": "Jenkinsfile", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 108, - "type": "Secret Keyword" + "line_number": 147 } ], - "Docker/sidecar/service.key": [ + "ansible/roles/slurm/README.md": [ { - "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "ansible/roles/slurm/README.md", + "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42", "is_verified": false, - "line_number": 1, - "type": "Private Key" + "line_number": 86 } ], - "Jenkins/Stacks/Jenkins/jenkins.env.sample": [ + "apis_configs/fence_settings.py": [ { - "hashed_secret": "eecee33686ac5861c2a7edc8b46bd0e5432bfddd", - "is_secret": false, + "type": "Basic Auth Credentials", + "filename": "apis_configs/fence_settings.py", + "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", "is_verified": false, - "line_number": 5, - "type": "Secret Keyword" + "line_number": 80 } ], - "ansible/roles/awslogs/defaults/main.yaml": [ + "apis_configs/peregrine_settings.py": [ { - "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684", - "is_secret": false, + "type": "Basic Auth Credentials", + "filename": "apis_configs/peregrine_settings.py", + "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", "is_verified": false, - 
"line_number": 30, - "type": "Basic Auth Credentials" + "line_number": 46 } ], - "ansible/roles/slurm/README.md": [ - { - "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42", - "is_secret": false, - "is_verified": false, - "line_number": 86, - "type": "Base64 High Entropy String" - }, + "apis_configs/sheepdog_settings.py": [ { - "hashed_secret": "579649582303921502d9e6d3f8755f13fdd2b476", - "is_secret": false, + "type": "Basic Auth Credentials", + "filename": "apis_configs/sheepdog_settings.py", + "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", "is_verified": false, - "line_number": 86, - "type": "Secret Keyword" + "line_number": 46 } ], - "apis_configs/config_helper.py": [ + "aws-inspec/kubernetes/chef_inspec-cron.yaml": [ { - "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "aws-inspec/kubernetes/chef_inspec-cron.yaml", + "hashed_secret": "a3ba27250861948a554629a0e21168821ddfa9f1", "is_verified": false, - "line_number": 66, - "type": "Basic Auth Credentials" + "line_number": 35 } ], - "apis_configs/fence_credentials.json": [ + "doc/api.md": [ { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Hex High Entropy String", + "filename": "doc/api.md", + "hashed_secret": "625de83a7517422051911680cc803921ff99db90", "is_verified": false, - "line_number": 23, - "type": "Secret Keyword" + "line_number": 47 } ], - "apis_configs/fence_settings.py": [ + "doc/gen3OnK8s.md": [ { - "hashed_secret": "3ef0fb8a603abdc0b6caac44a23fdc6792f77ddf", - "is_secret": false, + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "55c100ba37d2df35ec1e5f5d6302f060387df6cc", "is_verified": false, - "line_number": 6, - "type": "Basic Auth Credentials" + "line_number": 113 }, { - "hashed_secret": "b60d121b438a380c343d5ec3c2037564b82ffef3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "262d8e9b8ac5f06e7612dfb608f7267f88679801", "is_verified": false, - "line_number": 58, - "type": "Secret Keyword" + "line_number": 120 }, { - "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "1c17e556736c4d23933f99d199e7c2c572895fd2", + "is_verified": false, + "line_number": 143 + }, + { + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "76a4acaf31b815aa2c41cc2a2176b11fa9edf00a", + "is_verified": false, + "line_number": 145 + }, + { + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "9d678cbce5a343920f754d5836f03346ee01cde5", "is_verified": false, - "line_number": 80, - "type": "Basic Auth Credentials" + "line_number": 154 } ], - "apis_configs/indexd_settings.py": [ + "files/scripts/psql-fips-fix.sh": [ { - "hashed_secret": "0a0d18c85e096611b5685b62bc60ec534d19bacc", - "is_secret": false, + "type": "Secret Keyword", + "filename": "files/scripts/psql-fips-fix.sh", + "hashed_secret": "2f1aa1e2a58704b452a5dd60ab1bd2b761bf296a", "is_verified": false, - "line_number": 59, - "type": "Basic Auth Credentials" + "line_number": 9 } ], - "apis_configs/peregrine_settings.py": [ + "gen3/bin/bucket-manifest.sh": [ { - "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/bin/bucket-manifest.sh", + "hashed_secret": "2be88ca4242c76e8253ac62474851065032d6833", "is_verified": false, - 
"line_number": 46, - "type": "Basic Auth Credentials" + "line_number": 58 } ], - "apis_configs/sheepdog_settings.py": [ + "gen3/bin/bucket-replicate.sh": [ { - "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/bin/bucket-replicate.sh", + "hashed_secret": "2be88ca4242c76e8253ac62474851065032d6833", "is_verified": false, - "line_number": 46, - "type": "Basic Auth Credentials" + "line_number": 39 } ], - "doc/Gen3-data-upload.md": [ + "gen3/bin/secrets.sh": [ { - "hashed_secret": "b8bd20d4a2701dc3aba0efbbf325f1359392d93e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/bin/secrets.sh", + "hashed_secret": "fb6220478aaba649aac37271a1d7c6317abc03a6", "is_verified": false, - "line_number": 26, - "type": "Secret Keyword" + "line_number": 135 } ], - "doc/api.md": [ + "gen3/lib/aws.sh": [ { - "hashed_secret": "625de83a7517422051911680cc803921ff99db90", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/aws.sh", + "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce", "is_verified": false, - "line_number": 47, - "type": "Hex High Entropy String" + "line_number": 640 } ], - "doc/gen3OnK8s.md": [ + "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [ { - "hashed_secret": "2db6d21d365f544f7ca3bcfb443ac96898a7a069", - "is_secret": false, + "type": "Basic Auth Credentials", + "filename": "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml", + "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", "is_verified": false, - "line_number": 113, - "type": "Secret Keyword" - }, + "line_number": 33 + } + ], + "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json": [ { - "hashed_secret": "ff9ee043d85595eb255c05dfe32ece02a53efbb2", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json", + "hashed_secret": "0447a636536df0264b2000403fbefd69f603ceb1", "is_verified": false, - "line_number": 143, - "type": "Secret Keyword" + "line_number": 54 }, { - "hashed_secret": "70374248fd7129088fef42b8f568443f6dce3a48", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", "is_verified": false, - "line_number": 170, - "type": "Secret Keyword" + "line_number": 60 }, { - "hashed_secret": "bcf22dfc6fb76b7366b1f1675baf2332a0e6a7ce", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 189, - "type": "Secret Keyword" + "line_number": 108 } ], - "doc/kube-setup-data-ingestion-job.md": [ + "gen3/lib/onprem.sh": [ { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/onprem.sh", + "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3", "is_verified": false, - "line_number": 30, - "type": "Secret Keyword" - } - ], - "doc/logs.md": [ + "line_number": 68 + }, { - "hashed_secret": "9addbf544119efa4a64223b649750a510f0d463f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/onprem.sh", + "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70", "is_verified": false, - "line_number": 6, - "type": "Secret Keyword" + 
"line_number": 84 } ], - "doc/slurm_cluster.md": [ + "gen3/lib/testData/default/expectedFenceResult.yaml": [ + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 68 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 71 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 74 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 84 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 93 + }, { - "hashed_secret": "2ace62c1befa19e3ea37dd52be9f6d508c5163e6", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", "is_verified": false, - "line_number": 184, - "type": "Secret Keyword" + "line_number": 96 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 99 } ], - "files/dashboard/usage-reports/package-lock.json": [ + "gen3/lib/testData/default/expectedSheepdogResult.yaml": [ + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", + "is_verified": false, + "line_number": 60 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", + "is_verified": false, + "line_number": 63 + }, { - "hashed_secret": "e095101882f706c4de95e0f75c5bcb9666e3f448", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", "is_verified": false, - "line_number": 10, - "type": "Base64 High Entropy String" + "line_number": 69 }, { - "hashed_secret": "5422e4f96964d5739998b25ac214520c1b113e5b", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 15, - "type": "Base64 High Entropy String" + "line_number": 72 } ], - "gen3/bin/api.sh": [ + "gen3/lib/testData/etlconvert/expected2.yaml": [ + { + "type": "Base64 High Entropy String", + "filename": 
"gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", + "is_verified": false, + "line_number": 10 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", + "is_verified": false, + "line_number": 13 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", + "is_verified": false, + "line_number": 16 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", + "is_verified": false, + "line_number": 18 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", + "is_verified": false, + "line_number": 33 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", "is_verified": false, - "line_number": 407, - "type": "Secret Keyword" + "line_number": 35 }, { - "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", "is_verified": false, - "line_number": 477, - "type": "Secret Keyword" + "line_number": 36 } ], - "gen3/bin/kube-dev-namespace.sh": [ + "gen3/lib/testData/etlconvert/users2.yaml": [ + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", + "is_verified": false, + "line_number": 543 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", + "is_verified": false, + "line_number": 553 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", + "is_verified": false, + "line_number": 558 + }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", + "is_verified": false, + "line_number": 568 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", + "is_verified": false, + "line_number": 643 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", + "is_verified": false, + "line_number": 653 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", "is_verified": false, - "line_number": 135, - "type": "Secret Keyword" + "line_number": 658 } ], - "gen3/bin/kube-setup-argo.sh": [ + "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml": [ + { + 
"type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 71 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 74 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 77 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 93 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 96 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 99 + }, { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 191, - "type": "Secret Keyword" + "line_number": 102 } ], - "gen3/bin/kube-setup-aurora-monitoring.sh": [ + "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml": [ + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", + "is_verified": false, + "line_number": 63 + }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 59, - "type": "Secret Keyword" + "line_number": 66 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", + "is_verified": false, + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 75 } ], - "gen3/bin/kube-setup-certs.sh": [ + "gen3/test/secretsTest.sh": [ { - "hashed_secret": "2e9ee120fd25e31048598693aca91d5473898a99", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/test/secretsTest.sh", + "hashed_secret": 
"c2c715092ef59cba22520f109f041efca84b8938", "is_verified": false, - "line_number": 50, - "type": "Secret Keyword" + "line_number": 25 } ], - "gen3/bin/kube-setup-dashboard.sh": [ + "gen3/test/terraformTest.sh": [ { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce", "is_verified": false, - "line_number": 40, - "type": "Secret Keyword" + "line_number": 156 }, { - "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "d869db7fe62fb07c25a0403ecaea55031744b5fb", "is_verified": false, - "line_number": 41, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-data-ingestion-job.sh": [ + "line_number": 163 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", + "is_verified": false, + "line_number": 172 + }, + { + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", + "is_verified": false, + "line_number": 172 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", + "is_verified": false, + "line_number": 175 + }, { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", + "is_verified": false, + "line_number": 175 + }, + { + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "212e1d3823c8c9af9e4c0c172164ee292b9a6768", + "is_verified": false, + "line_number": 311 + }, + { + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "cb80dbb67a1a5bdf4957eea1473789f1c65357c6", + "is_verified": false, + "line_number": 312 + }, + { + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "5f35c25f4bf588b5fad46e249fcd9221f5257ce4", "is_verified": false, - "line_number": 37, - "type": "Secret Keyword" + "line_number": 313 }, { - "hashed_secret": "8695a632956b1b0ea7b66993dcc98732da39148c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "5308421b43dde5775f1993bd25a8163070d65598", + "is_verified": false, + "line_number": 314 + } + ], + "kube/services/access-backend/access-backend-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/access-backend/access-backend-deploy.yaml", + "hashed_secret": "dbf88a0c3d905c669c0fd13bf8172bb34d4b1168", "is_verified": false, - "line_number": 102, - "type": "Secret Keyword" + "line_number": 60 } ], - "gen3/bin/kube-setup-dicom-server.sh": [ + "kube/services/acronymbot/acronymbot-deploy.yaml": [ { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/acronymbot/acronymbot-deploy.yaml", + "hashed_secret": "600833390a6b9891d0d8a5f6e3326abb237ac8ca", "is_verified": false, - "line_number": 43, - "type": "Secret Keyword" + "line_number": 49 } ], - "gen3/bin/kube-setup-dicom.sh": [ + "kube/services/arborist/arborist-deploy-2.yaml": [ { - "hashed_secret": 
"d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy-2.yaml", + "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", "is_verified": false, - "line_number": 78, - "type": "Secret Keyword" + "line_number": 59 } ], - "gen3/bin/kube-setup-jenkins.sh": [ + "kube/services/arborist/arborist-deploy.yaml": [ { - "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 18, - "type": "Secret Keyword" + "line_number": 64 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 22, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-metadata.sh": [ + "line_number": 67 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 35, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-revproxy.sh": [ + "line_number": 70 + }, { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 38, - "type": "Secret Keyword" + "line_number": 77 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 55, - "type": "Secret Keyword" + "line_number": 80 }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 57, - "type": "Secret Keyword" + "line_number": 83 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", + "is_verified": false, + "line_number": 86 } ], - "gen3/bin/kube-setup-secrets.sh": [ + "kube/services/argo/workflows/fence-usersync-wf.yaml": [ { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 79, - "type": "Secret Keyword" + "line_number": 108 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 82, - "type": "Secret Keyword" + "line_number": 111 }, { - "hashed_secret": "6f7531b95bbc99ac25a5cc82edb825f319c5dee8", - 
"is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 95, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-sftp.sh": [ + "line_number": 114 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 36, - "type": "Secret Keyword" + "line_number": 117 }, { - "hashed_secret": "83d11e3aec005a3b9a2077c6800683e202a95af4", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", "is_verified": false, - "line_number": 51, - "type": "Secret Keyword" + "line_number": 120 } ], - "gen3/bin/kube-setup-sheepdog.sh": [ + "kube/services/argocd/values.yaml": [ { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/argocd/values.yaml", + "hashed_secret": "bfc1b86ce643b65bd540989213254b01fd6ad418", "is_verified": false, - "line_number": 33, - "type": "Secret Keyword" + "line_number": 1489 } ], - "gen3/bin/kube-setup-sower-jobs.sh": [ + "kube/services/arranger/arranger-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/arranger/arranger-deploy.yaml", + "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761", + "is_verified": false, + "line_number": 61 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/arranger/arranger-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 64 + } + ], + "kube/services/audit-service/audit-service-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/audit-service/audit-service-deploy.yaml", + "hashed_secret": "42cde1c58c36d8bb5804a076e55ac6ec07ef99fc", + "is_verified": false, + "line_number": 64 + } + ], + "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml", + "hashed_secret": "7f834ccb442433fc12ec9532f75c3a4b6a748d4c", + "is_verified": false, + "line_number": 46 + } + ], + "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 56 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml", + "hashed_secret": "5949b79e0c7082dc78d543cde662871a4f8b8913", + "is_verified": false, + "line_number": 59 + } + ], + "kube/services/cogwheel/cogwheel-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/cogwheel/cogwheel-deploy.yaml", + "hashed_secret": "09b772df628fd10bca646b6a877eb661122210ab", + "is_verified": false, + "line_number": 35 + } + ], + "kube/services/cohort-middleware/cohort-middleware-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/cohort-middleware/cohort-middleware-deploy.yaml", + "hashed_secret": "bf22f6c4bd03572f1ef593efc3eb1a7e0b6dcab4", + "is_verified": false, + "line_number": 62 + } + ], + 
"kube/services/dashboard/dashboard-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/dashboard/dashboard-deploy.yaml", + "hashed_secret": "9e722d12ce045c8718ab803ed465b2fbe199f3d3", + "is_verified": false, + "line_number": 61 + } + ], + "kube/services/datadog/values.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/datadog/values.yaml", + "hashed_secret": "4a8ce7ae6a8a7f2624e232b61b18c2ac9789c44b", + "is_verified": false, + "line_number": 23 + } + ], + "kube/services/datasim/datasim-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 66 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 76 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 79 + } + ], + "kube/services/dicom-server/dicom-server-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/dicom-server/dicom-server-deploy.yaml", + "hashed_secret": "706168ac2565a93cceffe2202ac45d3d31c075fb", + "is_verified": false, + "line_number": 40 + } + ], + "kube/services/fence/fence-canary-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 68 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 71 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 74 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 84 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 93 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 96 + }, + { + 
"type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 99 + } + ], + "kube/services/fence/fence-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 71 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 74 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 77 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 93 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 96 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 99 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 102 + } + ], + "kube/services/fenceshib/fenceshib-canary-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 62 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 65 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 68 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 78 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 81 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 84 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", 
+ "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 93 + } + ], + "kube/services/fenceshib/fenceshib-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 69 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 75 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 85 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 88 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 91 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 94 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 97 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 100 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "6c4789c3be186fd5dcbf06723462ccdd2c86dc37", + "is_verified": false, + "line_number": 103 + } + ], + "kube/services/frontend-framework/frontend-framework-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2", + "is_verified": false, + "line_number": 60 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", + "is_verified": 
false, + "line_number": 66 + } + ], + "kube/services/frontend-framework/frontend-framework-root-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2", + "is_verified": false, + "line_number": 60 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", + "is_verified": false, + "line_number": 66 + } + ], + "kube/services/gdcapi/gdcapi-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/gdcapi/gdcapi-deploy.yaml", + "hashed_secret": "e8c2f0bacaffbf2f9897217c6770413879945296", + "is_verified": false, + "line_number": 38 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/gdcapi/gdcapi-deploy.yaml", + "hashed_secret": "517cded9f3e3ab79237fde330b97a93f5a943316", + "is_verified": false, + "line_number": 41 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/gdcapi/gdcapi-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 44 + } + ], + "kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml", + "hashed_secret": "38ded89f83435a558169dedb91a38f72d6cebf41", + "is_verified": false, + "line_number": 27 + } + ], + "kube/services/google-sa-validation/google-sa-validation-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 67 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 70 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 73 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + 
"hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 76 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 79 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 82 + } + ], + "kube/services/guppy/guppy-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/guppy/guppy-deploy.yaml", + "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761", + "is_verified": false, + "line_number": 65 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/guppy/guppy-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 68 + } + ], + "kube/services/indexd/indexd-canary-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", + "is_verified": false, + "line_number": 59 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", + "is_verified": false, + "line_number": 62 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc", + "is_verified": false, + "line_number": 68 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 71 + } + ], + "kube/services/indexd/indexd-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-deploy.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-deploy.yaml", + "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", + "is_verified": false, + "line_number": 66 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-deploy.yaml", + "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc", + "is_verified": false, + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 75 + } + ], + "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 143 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 146 + } + ], + "kube/services/jenkins-worker/jenkins-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + 
"line_number": 150 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 153 + } + ], + "kube/services/jenkins/jenkins-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins/jenkins-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 157 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins/jenkins-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 160 + } + ], + "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 143 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 146 + } + ], + "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 146 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 149 + } + ], + "kube/services/jenkins2/jenkins2-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2/jenkins2-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 153 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2/jenkins2-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 156 + } + ], + "kube/services/jobs/arborist-rm-expired-access-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/arborist-rm-expired-access-cronjob.yaml", + "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", + "is_verified": false, + "line_number": 37 + } + ], + "kube/services/jobs/arborist-rm-expired-access-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/arborist-rm-expired-access-job.yaml", + "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", + "is_verified": false, + "line_number": 37 + } + ], + "kube/services/jobs/arboristdb-create-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/arboristdb-create-job.yaml", + "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/aws-bucket-replicate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml", + "hashed_secret": "deb02468778f4041fb189654698ac948e436732d", + "is_verified": false, + "line_number": 33 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml", + "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", 
+ "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", + "is_verified": false, + "line_number": 42 + } + ], + "kube/services/jobs/bucket-manifest-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-manifest-job.yaml", + "hashed_secret": "6c36710fe8825b381388d7005f2c9b5c70175fba", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/bucket-replicate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-replicate-job.yaml", + "hashed_secret": "84954f7729144580d612cbb0517aeca8880e3483", + "is_verified": false, + "line_number": 46 + } + ], + "kube/services/jobs/bucket-replication-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-replication-job.yaml", + "hashed_secret": "84954f7729144580d612cbb0517aeca8880e3483", + "is_verified": false, + "line_number": 32 + } + ], + "kube/services/jobs/bucket-size-report-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-size-report-job.yaml", + "hashed_secret": "7cccf62cb63863d9d3baabed4f576eb0f7039735", + "is_verified": false, + "line_number": 34 + } + ], + "kube/services/jobs/cedar-ingestion-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/cedar-ingestion-job.yaml", + "hashed_secret": "e1c426d126dcc618dcd0686fc718d509ca6ee3b8", + "is_verified": false, + "line_number": 54 + } + ], + "kube/services/jobs/client-modify-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 41 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 44 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 50 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 60 + } + ], + "kube/services/jobs/cogwheel-register-client-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/cogwheel-register-client-job.yaml", + "hashed_secret": "09b772df628fd10bca646b6a877eb661122210ab", + "is_verified": false, + "line_number": 40 + } + ], + "kube/services/jobs/config-fence-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/config-fence-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 44 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/config-fence-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/config-fence-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 57 + } + ], + "kube/services/jobs/covid19-etl-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/covid19-etl-job.yaml", + "hashed_secret": "a7a2b42615b2b256a7c601c77c426e5d6cafb212", + "is_verified": false, + "line_number": 34 + } + ], + "kube/services/jobs/covid19-notebook-etl-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/covid19-notebook-etl-job.yaml", + "hashed_secret": "a7a2b42615b2b256a7c601c77c426e5d6cafb212", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/data-ingestion-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "81e4388059839f71aed21999aa51095c7e545094", + "is_verified": false, + "line_number": 34 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 48 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 51 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 60 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 66 + } + ], + "kube/services/jobs/etl-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/etl-cronjob.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", + "is_verified": false, + "line_number": 38 + } + ], + "kube/services/jobs/etl-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/etl-job.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", + "is_verified": false, + "line_number": 35 + } + ], + "kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 43 + } + ], + "kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + } + ], + "kube/services/jobs/fence-db-migrate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-db-migrate-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/fence-db-migrate-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 39 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-db-migrate-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 42 + } + ], + "kube/services/jobs/fence-delete-expired-clients-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-delete-expired-clients-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 38 + } + ], + "kube/services/jobs/fence-visa-update-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 42 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 45 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 48 + } + ], + "kube/services/jobs/fence-visa-update-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 39 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 42 + } + ], + "kube/services/jobs/fencedb-create-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fencedb-create-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/gdcdb-create-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gdcdb-create-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/gen3qa-check-bucket-access-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 177 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 180 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 186 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 190 + }, + { + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 193 + } + ], + "kube/services/jobs/gentestdata-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 67 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 70 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 76 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 80 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 83 + } + ], + "kube/services/jobs/google-bucket-manifest-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-bucket-manifest-job.yaml", + "hashed_secret": "5ca8fff7767e5dd6ebed80e2c8eab66d6f3bf5eb", + "is_verified": false, + "line_number": 31 + } + ], + "kube/services/jobs/google-bucket-replicate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-bucket-replicate-job.yaml", + "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6", + "is_verified": false, + "line_number": 35 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-bucket-replicate-job.yaml", + "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063", + "is_verified": false, + "line_number": 38 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-bucket-replicate-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", + "is_verified": false, + "line_number": 41 + } + ], + "kube/services/jobs/google-create-bucket-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 78 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 81 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 84 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 91 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 94 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 97 + } + ], + 
"kube/services/jobs/google-delete-expired-access-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 43 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 46 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 49 + } + ], + "kube/services/jobs/google-delete-expired-access-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 39 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 42 + } + ], + "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 48 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 51 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 61 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 64 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 67 + } + ], + "kube/services/jobs/google-delete-expired-service-account-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 40 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 43 + }, + { + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 49 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 53 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 56 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 59 + } + ], + "kube/services/jobs/google-init-proxy-groups-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 48 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 51 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 54 + }, { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 25, - "type": "Secret Keyword" + "line_number": 61 }, { - "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 26, - "type": "Secret Keyword" + "line_number": 64 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 120, - "type": "Secret Keyword" + "line_number": 67 }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", "is_verified": false, - "line_number": 122, - "type": "Secret Keyword" + "line_number": 70 } ], - "gen3/bin/kube-setup-ssjdispatcher.sh": [ + "kube/services/jobs/google-init-proxy-groups-job.yaml": [ { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 117, - "type": "Secret Keyword" + "line_number": 40 }, { - "hashed_secret": "7992309146efaa8da936e34b0bd33242cd0e9f93", - "is_secret": false, + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 184, - "type": "Secret Keyword" + "line_number": 43 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 197, - "type": "Secret Keyword" - } - ], - "gen3/lib/aws.sh": [ - { - "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, - "is_verified": false, - "line_number": 640, - "type": "Secret Keyword" + "line_number": 46 }, { - "hashed_secret": "5b4b6c62d3d99d202f095c38c664eded8f640ce8", - "is_secret": false, - "is_verified": false, - "line_number": 660, - "type": "Secret Keyword" - } - ], - "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [ - { - "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 33, - "type": "Basic Auth Credentials" + "line_number": 53 }, { - "hashed_secret": "5d07e1b80e448a213b392049888111e1779a52db", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 286, - "type": "Secret Keyword" - } - ], - "gen3/lib/bootstrap/templates/Gen3Secrets/creds.json": [ + "line_number": 56 + }, { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 26, - "type": "Secret Keyword" - } - ], - "gen3/lib/bootstrap/templates/Gen3Secrets/g3auto/dbfarm/servers.json": [ + "line_number": 59 + }, { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", "is_verified": false, - "line_number": 5, - "type": "Secret Keyword" + "line_number": 62 } ], - "gen3/lib/logs/utils.sh": [ + "kube/services/jobs/google-manage-account-access-cronjob.yaml": [ { - "hashed_secret": "76143b4ffc8aa2a53f9700ce229f904e69f1e8b5", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 3, - "type": "Secret Keyword" - } - ], - "gen3/lib/manifestDefaults/hatchery/hatchery.json": [ + "line_number": 48 + }, { - "hashed_secret": "0da0e0005ca04acb407af2681d0bede6d9406039", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 78, - "type": "Secret Keyword" - } - ], - "gen3/lib/onprem.sh": [ + "line_number": 51 + }, { - "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3", - "is_secret": false, + "type": "Secret Keyword", + 
"filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 68, - "type": "Secret Keyword" + "line_number": 54 }, { - "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 84, - "type": "Secret Keyword" - } - ], - "gen3/lib/secrets/rotate-postgres.sh": [ + "line_number": 61 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 162, - "type": "Secret Keyword" + "line_number": 64 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 250, - "type": "Secret Keyword" + "line_number": 67 } ], - "gen3/lib/testData/etlconvert/expected2.yaml": [ - { - "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", - "is_secret": false, - "is_verified": false, - "line_number": 10, - "type": "Base64 High Entropy String" - }, + "kube/services/jobs/google-manage-account-access-job.yaml": [ { - "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 13, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 16, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 18, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 33, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 35, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", - "is_secret": false, - "is_verified": false, - "line_number": 36, - 
"type": "Base64 High Entropy String" - } - ], - "gen3/test/secretsTest.sh": [ - { - "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 25, - "type": "Secret Keyword" + "line_number": 59 } ], - "gen3/test/terraformTest.sh": [ + "kube/services/jobs/google-manage-keys-cronjob.yaml": [ { - "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 156, - "type": "Secret Keyword" + "line_number": 48 }, { - "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 172, - "type": "Base64 High Entropy String" + "line_number": 51 }, { - "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 175, - "type": "Base64 High Entropy String" + "line_number": 54 }, { - "hashed_secret": "329b7cd8191942bedd337107934d365c43a86e6c", - "is_secret": false, - "is_verified": false, - "line_number": 175, - "type": "Secret Keyword" - } - ], - "kube/services/argocd/values.yaml": [ - { - "hashed_secret": "27c6929aef41ae2bcadac15ca6abcaff72cda9cd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 360, - "type": "Private Key" + "line_number": 61 }, { - "hashed_secret": "edbd5e119f94badb9f99a67ac6ff4c7a5204ad61", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 379, - "type": "Secret Keyword" + "line_number": 64 }, { - "hashed_secret": "91dfd9ddb4198affc5c194cd8ce6d338fde470e2", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 412, - "type": "Secret Keyword" + "line_number": 67 } ], - "kube/services/datadog/values.yaml": [ + "kube/services/jobs/google-manage-keys-job.yaml": [ { - "hashed_secret": "4a8ce7ae6a8a7f2624e232b61b18c2ac9789c44b", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 23, - "type": "Secret Keyword" - } - ], - "kube/services/fenceshib/fenceshib-configmap.yaml": [ + "line_number": 40 + }, { - "hashed_secret": "a985e14b9d6744a2d04f29347693b55c116e478c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, 
- "line_number": 375, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "adc747bc5eb82ef4b017f5c3759dcee5aa28c36f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 376, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": "59b1702ff0eaf92c9271cbd12f587de97df7e13b", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 377, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "b4a748bbfbbca8925d932a47ab3dcb970d34caf5", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 378, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "af646701a84f7dd9f0e87753f54def881326e78a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 379, - "type": "Base64 High Entropy String" - }, + "line_number": 59 + } + ], + "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml": [ { - "hashed_secret": "20c15ad9742124dc06e1612282c49bb443ebcbd9", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 380, - "type": "Base64 High Entropy String" + "line_number": 48 }, { - "hashed_secret": "9caded71b967a11b7a6cd0f20db91f06f3517d12", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 381, - "type": "Base64 High Entropy String" + "line_number": 51 }, { - "hashed_secret": "8f19501bc9241b71f7b6db929fb35ab12635dcd7", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 382, - "type": "Base64 High Entropy String" + "line_number": 54 }, { - "hashed_secret": "d6220f6a55df1ed11c4250f42ab07bb9da20541a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 383, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "dadd9b96636f9529f2547d05d754dc310ceba0c3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 384, - "type": "Base64 High Entropy String" + "line_number": 64 }, { - "hashed_secret": "3074bc66584550e20c3697a28f67a0762394943c", - "is_secret": false, + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 385, - "type": "Base64 High Entropy String" - }, + "line_number": 67 + } + ], + "kube/services/jobs/google-verify-bucket-access-group-job.yaml": [ { - "hashed_secret": "823131319b4c4b4688f44d3e832bfa9696f16b52", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 386, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "015b780cbfb76988caf52de8ac974a6781e53110", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 387, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "5c8fac33207d74d667680ade09447ea8f43b76d7", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 388, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": "c0c4bb09d8394e8f001e337bd27ccac355433d9e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 389, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "f95631bcbbbc56e18487dcb242cfb1b3e74b16a1", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 390, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "01a692ab6232e0882a313d148981bab58ab98f53", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 391, - "type": "Base64 High Entropy String" - }, + "line_number": 59 + } + ], + "kube/services/jobs/graph-create-job.yaml": [ { - "hashed_secret": "658060a680d415ce6690ad2c3b622ddb33ddd50a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/graph-create-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 392, - "type": "Base64 High Entropy String" - }, + "line_number": 33 + } + ], + "kube/services/jobs/indexd-authz-job.yaml": [ { - "hashed_secret": "80915b0bd9daa5e1f95cad573892980b1b5a2294", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-authz-job.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", "is_verified": false, - "line_number": 393, - "type": "Base64 High Entropy String" + "line_number": 32 }, { - "hashed_secret": "cc55977b293d8cdca8a2c19dfea6874e70057c41", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-authz-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": 
false, - "line_number": 394, - "type": "Base64 High Entropy String" + "line_number": 35 }, { - "hashed_secret": "e400ed02add75dd5f3a8c212857acf12027437d1", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-authz-job.yaml", + "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", "is_verified": false, - "line_number": 395, - "type": "Base64 High Entropy String" - }, + "line_number": 38 + } + ], + "kube/services/jobs/indexd-userdb-job.yaml": [ { - "hashed_secret": "2e819c8baa3b0508a32b77de258655b3f3a6f7cb", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-userdb-job.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", "is_verified": false, - "line_number": 396, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "546ed926d58ea5492ab6adb8be94a67aa44ac433", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-userdb-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 397, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "f056f2deceed268e7af6dbdaf2577079c76e006a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-userdb-job.yaml", + "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", "is_verified": false, - "line_number": 398, - "type": "Base64 High Entropy String" - }, + "line_number": 46 + } + ], + "kube/services/jobs/metadata-aggregate-sync-job.yaml": [ { - "hashed_secret": "d75efee28f4798c3a9c6f44b78a8500513ef28b2", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/metadata-aggregate-sync-job.yaml", + "hashed_secret": "e14f65c8ca7f3b27a0f0f5463569954841e162c9", "is_verified": false, - "line_number": 399, - "type": "Base64 High Entropy String" + "line_number": 31 }, { - "hashed_secret": "7803ae08cdc22a5e0b025eff3c9ef0628eedc165", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/metadata-aggregate-sync-job.yaml", + "hashed_secret": "c27babf45eb0ed87329e69c7d47dba611e859c5d", "is_verified": false, - "line_number": 419, - "type": "Base64 High Entropy String" - }, + "line_number": 34 + } + ], + "kube/services/jobs/metadata-delete-expired-objects-job.yaml": [ { - "hashed_secret": "b8b61e87f5b58b0eeb597b2122ea0cea2ccab3d9", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/metadata-delete-expired-objects-job.yaml", + "hashed_secret": "0cc8bac3fabe63722716d1e6fe04a8dded1e3ad0", "is_verified": false, - "line_number": 420, - "type": "Base64 High Entropy String" - }, + "line_number": 24 + } + ], + "kube/services/jobs/remove-objects-from-clouds-job.yaml": [ { - "hashed_secret": "787745fc904c3bd7eddc3d1aab683a376c13890f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml", + "hashed_secret": "deb02468778f4041fb189654698ac948e436732d", "is_verified": false, - "line_number": 423, - "type": "Base64 High Entropy String" + "line_number": 34 }, { - "hashed_secret": "81361d672f238f505a6246ef9b655ee2f48d67e7", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml", + "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6", "is_verified": false, - "line_number": 424, - "type": "Base64 High Entropy String" + "line_number": 37 }, { - "hashed_secret": 
"7c98bff76ac3f273d15ed9bc3dd5294d323ab577", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", "is_verified": false, - "line_number": 425, - "type": "Base64 High Entropy String" - }, + "line_number": 43 + } + ], + "kube/services/jobs/replicate-validation-job.yaml": [ { - "hashed_secret": "46038fc88daceed8dd46817ca45c72ae0270fdd4", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "deb02468778f4041fb189654698ac948e436732d", "is_verified": false, - "line_number": 426, - "type": "Base64 High Entropy String" + "line_number": 34 }, { - "hashed_secret": "acad0c57b4f5cbed1b4863ed06d02784180a9f92", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6", "is_verified": false, - "line_number": 427, - "type": "Base64 High Entropy String" + "line_number": 37 }, { - "hashed_secret": "1b57f49a6ee337c16ecd6aabfc0dff3b3821cd09", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063", "is_verified": false, - "line_number": 428, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "5b688158be36e8b3f265a462ed599dcf69290084", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", "is_verified": false, - "line_number": 429, - "type": "Base64 High Entropy String" - }, + "line_number": 43 + } + ], + "kube/services/jobs/s3sync-cronjob.yaml": [ { - "hashed_secret": "965996e12c8b50b3c325d96003e8984a4ece658a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/s3sync-cronjob.yaml", + "hashed_secret": "27f6dfe15698a3bfaa183c84701cfb2bf4115415", "is_verified": false, - "line_number": 430, - "type": "Base64 High Entropy String" - }, + "line_number": 44 + } + ], + "kube/services/jobs/usersync-job.yaml": [ { - "hashed_secret": "584f0c58e764e948af1a35c9e60447aa0f84c6f5", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 431, - "type": "Base64 High Entropy String" + "line_number": 64 }, { - "hashed_secret": "bcaf897786d060a675ee9d654a84ae8baf96e9d0", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 432, - "type": "Base64 High Entropy String" + "line_number": 67 }, { - "hashed_secret": "0c09277fa183e06d32065f9386a3b4190b445df3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 433, - "type": "Base64 High Entropy String" + "line_number": 70 }, { - "hashed_secret": "5a51be06b305d6664e4afd25f21869b0f8b5039b", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 434, - "type": 
"Base64 High Entropy String" + "line_number": 77 }, { - "hashed_secret": "b38404f8853d734e3d03577b2c1084b4540c8708", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 435, - "type": "Base64 High Entropy String" + "line_number": 80 }, { - "hashed_secret": "126ccc602cffcb8292beb57137f7f6719e317b72", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 436, - "type": "Base64 High Entropy String" + "line_number": 83 }, { - "hashed_secret": "6681c1d7e1d327642a32cb8864ad51e4b8f981e5", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", "is_verified": false, - "line_number": 437, - "type": "Base64 High Entropy String" - }, + "line_number": 86 + } + ], + "kube/services/jobs/useryaml-job.yaml": [ { - "hashed_secret": "7f7b1f316ece195e5f584fe2faf6f9edc6942c6f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 439, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "bb908c7bc655057f2edc42815c5dff82e9dea529", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 440, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "bc2a0d18e3dd142df7b34e95342d47bf8aadabcb", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 441, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": "d60f0bcea109bb6edb6e45fd387f5f2c86e49e1a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 442, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "e549dd40a741557cc1c4e377df0a141354e22688", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 443, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "2dd2486dae84cad50387c20bf687b6fbc6162b58", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 444, - "type": "Base64 High Entropy String" + "line_number": 59 }, { - "hashed_secret": "71622010fc7eb09d9273f59c548bde6a5da5dc0e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", "is_verified": false, - "line_number": 445, - "type": "Base64 High Entropy String" - }, + "line_number": 65 + } + ], + 
"kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml": [ { - "hashed_secret": "6f0115cf53bd49ec990c562ac6cbfc452c83cd46", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 446, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "70dddd534b2f9bb70871fefe0845b79c3b69363f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml", + "hashed_secret": "fb7ea689a364feb7aafbf8d553eb77073fa7ba11", "is_verified": false, - "line_number": 448, - "type": "Base64 High Entropy String" - }, + "line_number": 59 + } + ], + "kube/services/kubecost-standalone/thanos-deploy.yaml": [ { - "hashed_secret": "acf3536b0416aa99608b0be17e87655370ece829", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/kubecost-standalone/thanos-deploy.yaml", + "hashed_secret": "064376809efc3acda5bd341aca977e149b989696", "is_verified": false, - "line_number": 449, - "type": "Base64 High Entropy String" - }, + "line_number": 127 + } + ], + "kube/services/kubecost-standalone/values.yaml": [ { - "hashed_secret": "1d13ee35c7279c1fae1c6474ed47611994273e41", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/kubecost-standalone/values.yaml", + "hashed_secret": "ec9786daee68e3541963a51299160859fe4db663", "is_verified": false, - "line_number": 450, - "type": "Base64 High Entropy String" - }, + "line_number": 30 + } + ], + "kube/services/manifestservice/manifestservice-deploy.yaml": [ { - "hashed_secret": "d38cf89b25bd7378cdb4e00b4b59293001dd500b", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", + "hashed_secret": "3da2c49c267b6c58401bbf05e379b38d20434f78", "is_verified": false, - "line_number": 451, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "1648f34ce2f1b563a8ed1c6d5d55b5e76a395903", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", + "hashed_secret": "469e0c2b1a67aa94955bae023ddc727be31581a7", "is_verified": false, - "line_number": 452, - "type": "Base64 High Entropy String" + "line_number": 64 }, { - "hashed_secret": "9bf63f6f49fb01ff80959bc5a60c8688df92cc02", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 453, - "type": "Base64 High Entropy String" + "line_number": 67 } ], - "kube/services/jobs/indexd-authz-job.yaml": [ + "kube/services/metadata/metadata-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/metadata/metadata-deploy.yaml", + "hashed_secret": "e14f65c8ca7f3b27a0f0f5463569954841e162c9", + "is_verified": false, + "line_number": 61 + }, { - "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/metadata/metadata-deploy.yaml", + "hashed_secret": "c27babf45eb0ed87329e69c7d47dba611e859c5d", "is_verified": false, - "line_number": 87, - "type": "Basic Auth Credentials" + "line_number": 66 } ], "kube/services/monitoring/grafana-values.yaml": [ { + "type": "Secret Keyword", + "filename": "kube/services/monitoring/grafana-values.yaml", 
"hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385", - "is_secret": false, "is_verified": false, - "line_number": 162, - "type": "Secret Keyword" + "line_number": 162 }, { - "hashed_secret": "7a64ff8446b06d38dc271019994f13823a2cbcf4", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/monitoring/grafana-values.yaml", + "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", "is_verified": false, - "line_number": 166, - "type": "Secret Keyword" + "line_number": 331 } ], - "kube/services/revproxy/helpers.js": [ + "kube/services/monitoring/thanos-deploy.yaml": [ { - "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/monitoring/thanos-deploy.yaml", + "hashed_secret": "064376809efc3acda5bd341aca977e149b989696", "is_verified": false, - "line_number": 10, - "type": "Base64 High Entropy String" + "line_number": 130 } ], - "kube/services/revproxy/helpersTest.js": [ + "kube/services/ohif-viewer/ohif-viewer-deploy.yaml": [ { - "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/ohif-viewer/ohif-viewer-deploy.yaml", + "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818", "is_verified": false, - "line_number": 22, - "type": "JSON Web Token" + "line_number": 40 } ], - "kube/services/superset/superset-deploy.yaml": [ + "kube/services/orthanc/orthanc-deploy.yaml": [ { - "hashed_secret": "96e4aceb7cf284be363aa248a32a7cc89785a9f7", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/orthanc/orthanc-deploy.yaml", + "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818", "is_verified": false, - "line_number": 38, - "type": "Secret Keyword" + "line_number": 41 } ], - "kube/services/superset/superset-redis.yaml": [ + "kube/services/peregrine/peregrine-canary-deploy.yaml": [ { - "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3", "is_verified": false, - "line_number": 165, - "type": "Secret Keyword" + "line_number": 61 }, { - "hashed_secret": "9fe1c31809da38c55b2b64bfab47b92bc5f6b7b9", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", + "is_verified": false, + "line_number": 64 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec", + "is_verified": false, + "line_number": 70 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 265, - "type": "Secret Keyword" + "line_number": 73 } ], - "kube/services/superset/values.yaml": [ + "kube/services/peregrine/peregrine-deploy.yaml": [ { - "hashed_secret": "6f803b24314c39062efe38d0c1da8c472f47eab3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3", "is_verified": false, - "line_number": 54, - "type": "Secret Keyword" + "line_number": 67 }, { - "hashed_secret": "6eae3a5b062c6d0d79f070c26e6d62486b40cb46", - 
"is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", "is_verified": false, - "line_number": 86, - "type": "Secret Keyword" + "line_number": 70 }, { - "hashed_secret": "3eb416223e9e69e6bb8ee19793911ad1ad2027d8", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec", "is_verified": false, - "line_number": 212, - "type": "Secret Keyword" + "line_number": 76 }, { - "hashed_secret": "ff55435345834a3fe224936776c2aa15f6ed5358", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 79 + } + ], + "kube/services/pidgin/pidgin-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/pidgin/pidgin-deploy.yaml", + "hashed_secret": "49af232c7adfcd54a40202e06261396a757e4ddd", "is_verified": false, - "line_number": 396, - "type": "Secret Keyword" + "line_number": 59 }, { - "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/pidgin/pidgin-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 503, - "type": "Secret Keyword" + "line_number": 62 } ], - "package-lock.json": [ + "kube/services/portal/portal-deploy.yaml": [ { - "hashed_secret": "0656ad0df3af4633dc369f13d5e8806973c5fd9d", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 1481, - "type": "Base64 High Entropy String" + "line_number": 55 }, { - "hashed_secret": "00091d875d922437c5fc9e6067a08e78c2482e87", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa", "is_verified": false, - "line_number": 1489, - "type": "Base64 High Entropy String" + "line_number": 58 }, { - "hashed_secret": "c4e5cc37e115bf7d86e76e3d799705bf691e4d00", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb", "is_verified": false, - "line_number": 1521, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "0512e37fbedf1d16828680a038a241b4780a5c04", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", "is_verified": false, - "line_number": 1547, - "type": "Base64 High Entropy String" - }, + "line_number": 67 + } + ], + "kube/services/portal/portal-root-deploy.yaml": [ { - "hashed_secret": "01868fd50edbfe6eb91e5b01209b543adc6857af", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 1611, - "type": "Base64 High Entropy String" + "line_number": 55 }, { - "hashed_secret": "a6f48bf1e398deffc7fd31da17c3506b46c97a93", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + 
"hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa", "is_verified": false, - "line_number": 1640, - "type": "Base64 High Entropy String" + "line_number": 58 }, { - "hashed_secret": "85ce358dbdec0996cf3ccd2bf1c6602af68c181e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb", "is_verified": false, - "line_number": 1648, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", "is_verified": false, - "line_number": 1664, - "type": "Base64 High Entropy String" - }, + "line_number": 67 + } + ], + "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml": [ { - "hashed_secret": "7098a3e6d6d2ec0a40f04fe12509c5c6f4c49c0e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 1683, - "type": "Base64 High Entropy String" + "line_number": 74 }, { - "hashed_secret": "1664ad175bba1795a7ecad572bae7e0740b94f56", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 1733, - "type": "Base64 High Entropy String" + "line_number": 77 }, { - "hashed_secret": "1ec4ce2eb945ce2f816dcb6ebdd1e10247f439a3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 1742, - "type": "Base64 High Entropy String" + "line_number": 80 }, { - "hashed_secret": "a7af5768a6d936e36f28e1030d7f894d7aaf555e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 1755, - "type": "Base64 High Entropy String" + "line_number": 90 }, { - "hashed_secret": "6fbc7dd864586173160874f2a86ca7d2d552cb85", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 1769, - "type": "Base64 High Entropy String" + "line_number": 93 }, { - "hashed_secret": "81a961f2c89c6209328b74a8768e30fd76c3ac72", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 1855, - "type": "Base64 High Entropy String" + "line_number": 96 }, { - "hashed_secret": "797d4751c536c421cb82b9f62e0a804af30d78f5", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", "is_verified": false, - "line_number": 1889, - "type": "Base64 High Entropy String" + "line_number": 99 }, { - "hashed_secret": 
"0d55babfa89f240142c0adfc7b560500a1d3ae7c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", "is_verified": false, - "line_number": 1894, - "type": "Base64 High Entropy String" + "line_number": 102 }, { - "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 1921, - "type": "Base64 High Entropy String" - }, + "line_number": 105 + } + ], + "kube/services/qa-dashboard/qa-dashboard-deployment.yaml": [ { - "hashed_secret": "4cf9419259c0ce8eee84b468af3c72db8b001620", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/qa-dashboard/qa-dashboard-deployment.yaml", + "hashed_secret": "253939a955a575ac69f409e5914dd0191b704760", "is_verified": false, - "line_number": 1950, - "type": "Base64 High Entropy String" - }, + "line_number": 63 + } + ], + "kube/services/qabot/qabot-deploy.yaml": [ { - "hashed_secret": "24816e3eb4308e247bde7c1d09ffb7b79c519b71", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/qabot/qabot-deploy.yaml", + "hashed_secret": "a9fa7aa8c08b647c3fb696e6598642d4a63e25be", "is_verified": false, - "line_number": 1983, - "type": "Base64 High Entropy String" - }, + "line_number": 86 + } + ], + "kube/services/requestor/requestor-deploy.yaml": [ { - "hashed_secret": "e9adfe8a333d45f4776fe0eab31608be5d7b6a7d", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/requestor/requestor-deploy.yaml", + "hashed_secret": "15debe4170aa5b89858d939f4c0644307ae7789b", "is_verified": false, - "line_number": 2004, - "type": "Base64 High Entropy String" - }, + "line_number": 61 + } + ], + "kube/services/revproxy/gen3.nginx.conf/indexd-service.conf": [ { - "hashed_secret": "03d6fb388dd1b185129b14221f7127715822ece6", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/revproxy/gen3.nginx.conf/indexd-service.conf", + "hashed_secret": "f89523833036f85fed37ce3ebf25492189bc9397", "is_verified": false, - "line_number": 2013, - "type": "Base64 High Entropy String" - }, + "line_number": 41 + } + ], + "kube/services/revproxy/gen3.nginx.conf/metadata-service.conf": [ { - "hashed_secret": "ee161bb3f899720f95cee50a5f9ef9c9ed96278b", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/revproxy/gen3.nginx.conf/metadata-service.conf", + "hashed_secret": "18c0871af26eb9875c0f840b13211f097c133fd2", "is_verified": false, - "line_number": 2046, - "type": "Base64 High Entropy String" - }, + "line_number": 24 + } + ], + "kube/services/revproxy/helpers.js": [ { - "hashed_secret": "ebeb5b574fa1ed24a40248275e6136759e766466", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "kube/services/revproxy/helpers.js", + "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", "is_verified": false, - "line_number": 2078, - "type": "Base64 High Entropy String" - }, + "line_number": 10 + } + ], + "kube/services/revproxy/helpersTest.js": [ { - "hashed_secret": "a6a555a428522ccf439fd516ce7c7e269274363f", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "kube/services/revproxy/helpersTest.js", + "hashed_secret": "389c3ec21b7325359051e97ff569b078843d2d37", "is_verified": 
false, - "line_number": 2083, - "type": "Base64 High Entropy String" + "line_number": 19 }, { - "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265", - "is_secret": false, + "type": "JSON Web Token", + "filename": "kube/services/revproxy/helpersTest.js", + "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb", "is_verified": false, - "line_number": 2111, - "type": "Base64 High Entropy String" - }, + "line_number": 22 + } + ], + "kube/services/revproxy/revproxy-deploy.yaml": [ { - "hashed_secret": "3f1646b60abe74297d2f37a1eee5dc771ad834fc", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/revproxy/revproxy-deploy.yaml", + "hashed_secret": "c7a87a61893a647e29289845cb51e61afb06800b", "is_verified": false, - "line_number": 2138, - "type": "Base64 High Entropy String" + "line_number": 74 }, { - "hashed_secret": "fd933c71e82d5519ae0cb0779b370d02f6935759", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/revproxy/revproxy-deploy.yaml", + "hashed_secret": "b3a4e2dea4c1fae8c58a07a84065b73b3a2d831c", "is_verified": false, - "line_number": 2143, - "type": "Base64 High Entropy String" + "line_number": 77 }, { - "hashed_secret": "7090aa59cb52ad1f1810b08c4ac1ddf5c8fce523", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/revproxy/revproxy-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 2150, - "type": "Base64 High Entropy String" - }, + "line_number": 80 + } + ], + "kube/services/sftp/sftp-deploy.yaml": [ { - "hashed_secret": "756444bea4ea3d67844d8ddf58ad32356e9c2430", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/sftp/sftp-deploy.yaml", + "hashed_secret": "9fdebf62e477d59d25730744c8b3089c67c3db85", "is_verified": false, - "line_number": 2188, - "type": "Base64 High Entropy String" - }, + "line_number": 39 + } + ], + "kube/services/sheepdog/sheepdog-canary-deploy.yaml": [ { - "hashed_secret": "f74135fdd6b8dafdfb01ebbc61c5e5c24ee27cf8", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", "is_verified": false, - "line_number": 2291, - "type": "Base64 High Entropy String" + "line_number": 58 }, { - "hashed_secret": "56fbae787f4aed7d0632e95840d71bd378d3a36f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 2303, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "81cb6be182eb79444202c4563080aee75296a672", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", "is_verified": false, - "line_number": 2308, - "type": "Base64 High Entropy String" + "line_number": 67 }, { - "hashed_secret": "f0f3f7bce32184893046ac5f8cc80da56c3ca539", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 70 + } + ], + "kube/services/sheepdog/sheepdog-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": 
"ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", "is_verified": false, - "line_number": 2317, - "type": "Base64 High Entropy String" + "line_number": 63 }, { - "hashed_secret": "097893233346336f4003acfb6eb173ee59e648f0", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 2327, - "type": "Base64 High Entropy String" + "line_number": 66 }, { - "hashed_secret": "bb14c3b4ef4a9f2e86ffdd44b88d9b6729419671", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", "is_verified": false, - "line_number": 2332, - "type": "Base64 High Entropy String" + "line_number": 72 }, { - "hashed_secret": "71344a35cff67ef081920095d1406601fb5e9b97", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 75 + } + ], + "kube/services/shiny/shiny-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/shiny/shiny-deploy.yaml", + "hashed_secret": "327a1bbc6dc0ce857472ee9162a3415133862d50", + "is_verified": false, + "line_number": 43 + } + ], + "kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml", + "hashed_secret": "7f932449df74fc78573fea502df8a484aef3f69d", + "is_verified": false, + "line_number": 61 + } + ], + "kube/services/superset/superset-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/superset/superset-deploy.yaml", + "hashed_secret": "3e9d1737117ff62b23e37aedc72b522b0134997a", "is_verified": false, - "line_number": 2340, - "type": "Base64 High Entropy String" + "line_number": 235 }, { - "hashed_secret": "eb3db6990fd43477a35dfeffc90b3f1ffa83c7bd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/superset/superset-deploy.yaml", + "hashed_secret": "6ac08eaa58d425783ff8b5a38fe16ee66c0bce15", + "is_verified": false, + "line_number": 311 + } + ], + "kube/services/superset/superset-redis.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/superset/superset-redis.yaml", + "hashed_secret": "9fe1c31809da38c55b2b64bfab47b92bc5f6b7b9", + "is_verified": false, + "line_number": 265 + } + ], + "kube/services/superset/values.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "9a09d4081ddc128a80384712ce6df3578e6bc58e", "is_verified": false, - "line_number": 2349, - "type": "Base64 High Entropy String" + "line_number": 173 }, { - "hashed_secret": "266288bdc14807b538d1e48a5891e361fa9b4a14", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "118c413f3fc929a1624f4c3e1da1e3d24377a693", "is_verified": false, - "line_number": 2357, - "type": "Base64 High Entropy String" + "line_number": 299 }, { - "hashed_secret": "800477261175fd21f23e7321923e1fba6ae55471", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "d2a8d1ddfa75398366cff06545380c73481ec17d", "is_verified": false, - "line_number": 2369, - "type": "Base64 High Entropy String" + "line_number": 445 }, { - "hashed_secret": 
"3f0c251b9c2c21454445a98fde6915ceacde2136", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc", + "is_verified": false, + "line_number": 459 + } + ], + "kube/services/thor/thor-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/thor/thor-deploy.yaml", + "hashed_secret": "1f3f96a3887209d0dda357e5516231ee9c5cd9a7", + "is_verified": false, + "line_number": 100 + } + ], + "kube/services/tube/tube-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/tube/tube-deploy.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", "is_verified": false, - "line_number": 2387, - "type": "Base64 High Entropy String" + "line_number": 58 } ], - "tf_files/aws/cognito/README.md": [ + "kube/services/ws-storage/ws-storage-deploy.yaml": [ { - "hashed_secret": "f6920f370a30262b7dd70e97293c73ec89739b70", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/ws-storage/ws-storage-deploy.yaml", + "hashed_secret": "ec2d9395e11f353370a4abac21a1565641b35ce9", "is_verified": false, - "line_number": 106, - "type": "Secret Keyword" + "line_number": 66 + } + ], + "kube/services/wts/wts-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/wts/wts-deploy.yaml", + "hashed_secret": "5de687ae886f19c3cb68d4980e3f2e77cca3db9e", + "is_verified": false, + "line_number": 65 + } + ], + "packer/buildAll.sh": [ + { + "type": "Secret Keyword", + "filename": "packer/buildAll.sh", + "hashed_secret": "6e1d66a1596528c308e601c10aa0b92d53606ab9", + "is_verified": false, + "line_number": 15 + } + ], + "packer/variables.example.json": [ + { + "type": "Secret Keyword", + "filename": "packer/variables.example.json", + "hashed_secret": "a3a0648a036bebf78ba1a1eb498a66081059da10", + "is_verified": false, + "line_number": 5 } ], "tf_files/aws/commons/README.md": [ { - "hashed_secret": "d02e53411e8cb4cd709778f173f7bc9a3455f8ed", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/aws/commons/README.md", + "hashed_secret": "5f02a3fb14ab1ce5c18c362b04b8ffc603ea5951", "is_verified": false, - "line_number": 60, - "type": "Secret Keyword" + "line_number": 60 }, { - "hashed_secret": "9dc0da3613af850c5a018b0a88a5626fb8888e4e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/aws/commons/README.md", + "hashed_secret": "49cfceed8aa8df159e53aa5c5951cad48a3f1216", "is_verified": false, - "line_number": 78, - "type": "Secret Keyword" + "line_number": 67 + }, + { + "type": "Secret Keyword", + "filename": "tf_files/aws/commons/README.md", + "hashed_secret": "18ad13589ca5fb3c432d7d9f0fe49f8ed6e2c478", + "is_verified": false, + "line_number": 70 } ], "tf_files/aws/eks/sample.tfvars": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/eks/sample.tfvars", "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, - "line_number": 107, - "type": "Hex High Entropy String" + "line_number": 107 } ], "tf_files/aws/eks/variables.tf": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/eks/variables.tf", "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, - "line_number": 133, - "type": "Hex High Entropy String" + "line_number": 133 } ], "tf_files/aws/modules/common-logging/README.md": [ { + "type": "Base64 High Entropy String", + "filename": 
"tf_files/aws/modules/common-logging/README.md", "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", - "is_secret": false, - "is_verified": false, - "line_number": 57, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_secret": false, "is_verified": false, - "line_number": 59, - "type": "Hex High Entropy String" + "line_number": 57 } ], "tf_files/aws/modules/common-logging/lambda_function.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, - "line_number": 18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, - "line_number": 18, - "type": "Base64 High Entropy String" + "line_number": 18 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", + "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94", + "is_verified": false, + "line_number": 18 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", - "is_secret": false, "is_verified": false, - "line_number": 30, - "type": "Hex High Entropy String" + "line_number": 30 } ], "tf_files/aws/modules/common-logging/testLambda.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, - "line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, - "line_number": 5, - "type": "Base64 High Entropy String" + "line_number": 5 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", + "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94", + "is_verified": false, + "line_number": 5 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 + }, + { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", + "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", + "is_verified": false, + "line_number": 10 } ], "tf_files/aws/modules/eks/variables.tf": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/eks/variables.tf", "hashed_secret": 
"83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, - "line_number": 113, - "type": "Hex High Entropy String" + "line_number": 113 } ], "tf_files/aws/modules/management-logs/README.md": [ { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/README.md", "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", - "is_secret": false, "is_verified": false, - "line_number": 54, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_secret": false, - "is_verified": false, - "line_number": 56, - "type": "Hex High Entropy String" + "line_number": 54 } ], "tf_files/aws/modules/management-logs/lambda_function.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, - "line_number": 18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, - "line_number": 18, - "type": "Base64 High Entropy String" + "line_number": 18 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", + "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94", + "is_verified": false, + "line_number": 18 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", - "is_secret": false, "is_verified": false, - "line_number": 30, - "type": "Hex High Entropy String" + "line_number": 30 } ], "tf_files/aws/modules/management-logs/testLambda.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, - "line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, - "line_number": 5, - "type": "Base64 High Entropy String" + "line_number": 5 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94", "is_verified": false, - "line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 }, { - "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a", - "is_secret": false, + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 6, - "type": 
"Base64 High Entropy String" + "line_number": 5 }, { - "hashed_secret": "51118900cd675df1b44f254057398f3e52902a5d", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a", "is_verified": false, - "line_number": 6, - "type": "Hex High Entropy String" + "line_number": 6 }, { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", "hashed_secret": "60a6dfc8d43cd2f5c6292899fc2f94f2d4fc32c4", - "is_secret": false, "is_verified": false, - "line_number": 6, - "type": "Hex High Entropy String" + "line_number": 6 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "d484ccb4ced21e0149078377f14b913bf5c613d0", + "is_verified": false, + "line_number": 6 } ], "tf_files/aws/slurm/README.md": [ { - "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/aws/slurm/README.md", + "hashed_secret": "c16686250cd583de64e02a47a8b194cd5578b2a1", "is_verified": false, - "line_number": 83, - "type": "Secret Keyword" + "line_number": 83 } ], "tf_files/azure/cloud.tf": [ { - "hashed_secret": "7c1a4b52b64e4106041971c345a1f3eab58fb2a4", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/azure/cloud.tf", + "hashed_secret": "38d930120a56321ceaa147b2bc1f19db53a0b993", "is_verified": false, - "line_number": 424, - "type": "Secret Keyword" + "line_number": 361 } ], "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars": [ { + "type": "Secret Keyword", + "filename": "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars", "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, - "line_number": 231, - "type": "Secret Keyword" + "line_number": 231 } ], "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars": [ { + "type": "Secret Keyword", + "filename": "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars", "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, - "line_number": 231, - "type": "Secret Keyword" + "line_number": 231 } ], "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP": [ { + "type": "Secret Keyword", + "filename": "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP", "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, - "line_number": 262, - "type": "Secret Keyword" + "line_number": 262 } ], - "tf_files/gcp/commons/sample.tfvars": [ + "tf_files/gcp/commons/root.tf": [ { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/gcp/commons/root.tf", + "hashed_secret": "013b6be0bd7ef38a9ee3472cec65c208a19421e6", "is_verified": false, - "line_number": 11, - "type": "Secret Keyword" - }, + "line_number": 65 + } + ], + "tf_files/gcp/commons/sample.tfvars": [ { - "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/gcp/commons/sample.tfvars", + "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce", "is_verified": false, - "line_number": 26, - "type": 
"Secret Keyword" + "line_number": 26 }, { - "hashed_secret": "253c7b5e7c83a86346fc4501495b130813f08105", - "is_secret": false, - "is_verified": false, - "line_number": 37, - "type": "Secret Keyword" - } - ], - "tf_files/shared/modules/k8s_configs/creds.tpl": [ - { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/gcp/commons/sample.tfvars", + "hashed_secret": "791191ef9eafc75f5dd28e37df837b4991556876", "is_verified": false, - "line_number": 8, - "type": "Secret Keyword" + "line_number": 31 } ] }, - "version": "0.13.1", - "word_list": { - "file": null, - "hash": null - } + "generated_at": "2024-03-07T21:26:14Z" } diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index f0da68f696..6eeb8f4fd6 100644 --- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/inbound-agent:jdk11 +FROM jenkins/inbound-agent:jdk21 USER root @@ -116,6 +116,9 @@ RUN curl -sS -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-ke && apt-get -y update \ && apt-get -y install google-chrome-stable +# data-simulator needs "/usr/share/dict/words" to generate data that isn't random strings +RUN apt-get install --reinstall wamerican + # update /etc/sudoers RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ && /bin/echo -e "\n%sudo ALL=(ALL:ALL) NOPASSWD:ALL\n" >> /etc/sudoers.bak \ diff --git a/Docker/jenkins/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile index c824690def..fec6b3203c 100644 --- a/Docker/jenkins/Jenkins-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/inbound-agent:jdk11 +FROM jenkins/inbound-agent:jdk21 USER root diff --git a/Docker/jenkins/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile index ae39ac5740..04ebe5864a 100644 --- a/Docker/jenkins/Jenkins/Dockerfile +++ b/Docker/jenkins/Jenkins/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.415-jdk11 +FROM jenkins/jenkins:2.426.3-lts-jdk21 USER root diff --git a/Docker/jenkins/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile index 9976a07c20..e6b73bc76d 100644 --- a/Docker/jenkins/Jenkins2/Dockerfile +++ b/Docker/jenkins/Jenkins2/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.415-jdk11 +FROM jenkins/jenkins:2.426.3-lts-jdk21 USER root diff --git a/Jenkinsfile b/Jenkinsfile index 4e3470ded6..908c2d01a5 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -99,8 +99,8 @@ spec: resources: requests: cpu: 0.2 - memory: 200Mi - ephemeral-storage: 200Mi + memory: 400Mi + ephemeral-storage: 1Gi env: - name: AWS_DEFAULT_REGION value: us-east-1 @@ -134,8 +134,8 @@ spec: readOnly: true mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" subPath: "ca.pem" - - name: dockersock - mountPath: "/var/run/docker.sock" + - name: containerdsock + mountPath: "/var/run/containerd/containerd.sock" serviceAccount: jenkins-service serviceAccountName: jenkins-service volumes: @@ -145,9 +145,9 @@ spec: - name: ca-volume secret: secretName: "service-ca" - - name: dockersock + - name: containerdsock hostPath: - path: /var/run/docker.sock + path: /var/run/containerd/containerd.sock ''' defaultContainer 'shell' } @@ -293,8 +293,8 @@ spec: script { try { if(!skipUnitTests) { - sh '/usr/bin/pip3 install boto3 --upgrade --user' - sh '/usr/bin/pip3 install kubernetes --upgrade --user' + sh '/usr/local/bin/pip3 install boto3 --upgrade --user' + sh '/usr/local/bin/pip3 
install kubernetes --upgrade --user'
sh 'python3 -m pytest cloud-automation/apis_configs/'
sh 'python3 -m pytest cloud-automation/gen3/lib/dcf/'
sh 'cd cloud-automation/tf_files/aws/modules/common-logging && python3 -m pytest testLambda.py'
diff --git a/doc/s3-to-google-replication.md b/doc/s3-to-google-replication.md
new file mode 100644
index 0000000000..82d0374c7c
--- /dev/null
+++ b/doc/s3-to-google-replication.md
@@ -0,0 +1,68 @@
+# S3 to Google Cloud Storage Replication Pipeline
+
+This document will guide you through setting up a replication pipeline from AWS S3 to Google Cloud Storage (GCS) using VPC Service Controls and Storage Transfer Service. This solution follows security best practices, ensuring that data transfer between AWS S3 and GCS is secure and efficient.
+
+## Table of Contents
+
+- [Prerequisites](#prerequisites)
+- [Step-by-step Guide](#step-by-step-guide)
+  - [Setup VPC Service Controls](#setup-vpc-service-controls)
+  - [Initiate Storage Transfer Service](#initiate-storage-transfer-service)
+- [Compliance Benefits](#compliance-benefits)
+- [Cost Benefit Analysis](#cost-benefit-analysis)
+
+## Prerequisites
+
+1. **AWS account** with access to the S3 bucket.
+2. **Google Cloud account** with permissions to create buckets in GCS and set up VPC Service Controls and Storage Transfer Service.
+3. Familiarity with AWS IAM for S3 bucket access and Google Cloud IAM for GCS access.
+
+## Step-by-step Guide
+
+### Setup VPC Service Controls
+
+1. **Access the VPC Service Controls** in the Google Cloud Console.
+2. **Create a new VPC Service Control perimeter**.
+   - Name the perimeter and choose the desired region.
+   - Add the necessary GCP services. Be sure to include `storagetransfer.googleapis.com` for the Storage Transfer Service.
+3. **Setup VPC Service Control Policy** to allow connections from AWS.
+   - Follow the [documentation](https://cloud.google.com/vpc-service-controls/docs/set-up) for setup guidance.
+
+### Initiate Storage Transfer Service
+
+1. Navigate to **Storage Transfer Service** in the Google Cloud Console.
+2. Click **Create Transfer Job**.
+3. **Select Source**: Choose the Amazon S3 bucket and provide the necessary details.
+   - Ensure you have the necessary permissions for the S3 bucket in AWS IAM.
+4. **Select Destination**: Choose your GCS bucket.
+5. **Schedule & Advanced Settings**: Set the frequency and conditions for the transfer. Consider setting up notifications for job completion or errors.
+6. **Review & Create**: Confirm the details and initiate the transfer job.
+
+## Compliance Benefits
+
+Setting up a secure replication pipeline from AWS S3 to GCS using VPC Service Controls and Storage Transfer Service offers the following compliance benefits:
+
+1. **Data Security**: The VPC Service Controls provide an additional layer of security by ensuring that the transferred data remains within a defined security perimeter, reducing potential data leak risks.
+2. **Auditability**: Both AWS and GCS offer logging and monitoring tools that can provide audit trails for data transfer. This can help in meeting regulatory compliance requirements.
+3. **Consistent Data Replication**: The Storage Transfer Service ensures that data in GCS is up to date with the source S3 bucket, which is essential for consistent backup and disaster recovery strategies.
+
+## Cost Benefit Analysis
+
+**Benefits**:
+
+1. **Data Redundancy**: Having data stored in multiple cloud providers can be a part of a robust disaster recovery strategy.
+2. **Flexibility**: Replicating data to GCS provides flexibility in multi-cloud strategies, enabling seamless migrations or usage of GCP tools and services.
+3. **Security**: Utilizing VPC Service Controls strengthens the security posture.
+
+**Costs**:
+
+1. **Data Transfer Costs**: Both AWS and Google Cloud may charge for data transfer. It's crucial to analyze the cost, especially for large data transfers.
+2. **Storage Costs**: Storing data redundantly incurs additional storage costs in GCS.
+
+**Analysis**:
+
+To stay in compliance, we require multiple copies of our data in separate datacenters or clouds. Our security audit underscored the importance of not keeping data in a single cloud. Transferring data from AWS to GCP and storing it in two clouds simultaneously can be expensive, but when that redundancy is required, this solution is a straightforward way to achieve compliance.
+
+---
+
+Please note that while this guide is based on the provided Google Cloud documentation, it's crucial to refer to the original [documentation](https://cloud.google.com/architecture/transferring-data-from-amazon-s3-to-cloud-storage-using-vpc-service-controls-and-storage-transfer-service) for the most accurate and up-to-date information.
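If you would rather script steps 1-6 above than click through the console, the transfer job can also be created programmatically. Below is a minimal sketch using the `google-cloud-storage-transfer` Python client; the project ID, bucket names, and AWS key pair are placeholders to substitute with your own values, and the daily schedule is an assumption for illustration.

```python
# Minimal sketch: create a daily S3 -> GCS transfer job with the Storage
# Transfer Service Python client (pip install google-cloud-storage-transfer).
# All IDs, bucket names, and credentials below are placeholders.
from datetime import date

from google.cloud import storage_transfer


def create_s3_to_gcs_job(project_id, s3_bucket, gcs_bucket, aws_key_id, aws_key_secret):
    client = storage_transfer.StorageTransferServiceClient()
    start = date.today()
    job = {
        "project_id": project_id,
        "status": storage_transfer.TransferJob.Status.ENABLED,
        # repeats daily starting today; omit an end date to run indefinitely
        "schedule": {
            "schedule_start_date": {
                "year": start.year,
                "month": start.month,
                "day": start.day,
            }
        },
        "transfer_spec": {
            "aws_s3_data_source": {
                "bucket_name": s3_bucket,
                "aws_access_key": {
                    "access_key_id": aws_key_id,
                    "secret_access_key": aws_key_secret,
                },
            },
            "gcs_data_sink": {"bucket_name": gcs_bucket},
        },
    }
    # returns the server-side TransferJob resource, including its assigned name
    return client.create_transfer_job({"transfer_job": job})
```

diff --git a/files/dashboard/maintenance-page/index.html b/files/dashboard/maintenance-page/index.html
index a3e34479b7..fac49e64e1 100644
--- a/files/dashboard/maintenance-page/index.html
+++ b/files/dashboard/maintenance-page/index.html
@@ -16,7 +16,7 @@
@@ -27,12 +27,12 @@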

This site is under maintenance...

Please check back later.

- + A shiba dog looking into the distance
diff --git a/files/scripts/ecr-access-job-requirements.txt b/files/scripts/ecr-access-job-requirements.txt
new file mode 100644
index 0000000000..bb6d4b847d
--- /dev/null
+++ b/files/scripts/ecr-access-job-requirements.txt
@@ -0,0 +1 @@
+boto3<2
diff --git a/files/scripts/ecr-access-job.md b/files/scripts/ecr-access-job.md
new file mode 100644
index 0000000000..9659b186b9
--- /dev/null
+++ b/files/scripts/ecr-access-job.md
@@ -0,0 +1,85 @@
+# ecr-access-job
+
+### How to run
+
+Configure `global.ecr-access-job-role-arn` to the ARN of the `EcrRepoPolicyUpdateRole` role (described below) in the `manifest.json` file.
+
+Run `gen3 kube-setup-ecr-access-cronjob` to set up the ECR access cronjob.
+
+### What does it do?
+
+The job runs the `ecr-access-job.py` script.
+
+This script updates the configuration of ECR repositories so that users can access the repositories that were created for them.
+
+It queries a DynamoDB table which has the following (simplified) structure:
+| user_id            | workspace_type       | account_id |
+| ------------------ | -------------------- | ---------- |
+| user1@username.com | Direct Pay           | 123456     |
+| user2@username.com | Direct Pay           | 789012     |
+| user1@username.com | Other workspace type |            |
+
+and then allows each AWS account to access the appropriate ECR repositories. The users' ECR repositories are based on their username as stored in the table. For example, `user1@username.com`'s ECR repository is assumed to be `nextflow-approved/user1-40username-2ecom` (see the escaping sketch after this section).
+
+### Access needed
+
+- "EcrRepoPolicyUpdateRole" role in the account (Acct1) that contains the ECR repositories:
+
+**Note:** `kube-setup-ecr-access-cronjob.sh` assumes this role already exists.
+
+Permissions:
+```
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Sid": "UpdateEcrRepoPolicy",
+            "Effect": "Allow",
+            "Action": "ecr:SetRepositoryPolicy",
+            "Resource": "arn:aws:ecr:us-east-1::repository/nextflow-approved/*"
+        }
+    ]
+}
+```
+
+Trust policy (allows Acct2):
+```
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Sid": "AllowAssumingRole",
+            "Effect": "Allow",
+            "Principal": {
+                "AWS": "arn:aws:iam:::root"
+            },
+            "Action": "sts:AssumeRole"
+        }
+    ]
+}
+```
+
+- Policy in the account (Acct2) that contains the DynamoDB table (created automatically by `kube-setup-ecr-access-job.sh`):
+```
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Sid": "ReadDynamoDB",
+            "Effect": "Allow",
+            "Action": [
+                "dynamodb:Scan"
+            ],
+            "Resource": "arn:aws:dynamodb:::table/"
+        },
+        {
+            "Sid": "AssumeEcrRole",
+            "Effect": "Allow",
+            "Action": [
+                "sts:AssumeRole"
+            ],
+            "Resource": "arn:aws:iam:::role/"
+        }
+    ]
+}
+```
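To make the repository-name convention concrete, here is a small illustrative sketch (not part of the job itself) that mirrors the `escapism` encoding used by `ecr-access-job.py` below; the sample username and expected output come from the table above.

```python
# Sketch of the repo-name escaping: any character outside [a-z0-9] becomes
# "-" plus its two-digit hex code ("@" -> "-40", "." -> "-2e").
def escapism(string: str) -> str:
    safe = "abcdefghijklmnopqrstuvwxyz0123456789"
    return "".join(
        c if c in safe else "-{0:02x}".format(ord(c)) for c in string
    )

assert escapism("user1@username.com") == "user1-40username-2ecom"
print(f"nextflow-approved/{escapism('user1@username.com')}")
```

diff --git a/files/scripts/ecr-access-job.py b/files/scripts/ecr-access-job.py
new file mode 100644
index 0000000000..828d94c96b
--- /dev/null
+++ b/files/scripts/ecr-access-job.py
@@ -0,0 +1,177 @@
+"""
+See documentation at https://github.com/uc-cdis/cloud-automation/blob/master/files/scripts/ecr-access-job.md
+"""
+
+from decimal import Decimal
+import json
+import os
+from typing import List
+import uuid
+
+import boto3
+from boto3.dynamodb.conditions import Attr
+
+
+REGION = "us-east-1"
+
+# for local testing. in production, use a service account instead of a key.
+MAIN_ACCOUNT_CREDS = {"key_id": os.environ.get("KEY_ID"), "key_secret": os.environ.get("KEY_SECRET")}
+
+
+def escapism(string: str) -> str:
+    """
+    This is a direct translation of Hatchery's `escapism` golang function to python.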
+ We need to escape the username in the same way it's escaped by Hatchery's `escapism` function because + special chars cannot be used in an ECR repo name, and so that the ECR repo generated here matches the + name expected by Hatchery. + """ + safeBytes = "abcdefghijklmnopqrstuvwxyz0123456789" + escaped = "" + for v in string: + if v not in safeBytes: + hexCode = "{0:02x}".format(ord(v)) + escaped += "-" + hexCode + else: + escaped += v + return escaped + + +def get_configs() -> (str, str): + table_name = os.environ.get("PAY_MODELS_DYNAMODB_TABLE") + if not table_name: + raise Exception("Missing 'PAY_MODELS_DYNAMODB_TABLE' environment variable") + + ecr_role_arn = os.environ.get("ECR_ACCESS_JOB_ARN") + if not ecr_role_arn: + raise Exception("Missing 'ECR_ACCESS_JOB_ARN' environment variable") + + return table_name, ecr_role_arn + + +def query_usernames_and_account_ids(table_name: str) -> List[dict]: + """ + Returns: + List[dict]: [ { "user_id": "user1@username.com", "account_id": "123456" } ] + """ + if MAIN_ACCOUNT_CREDS["key_id"]: + session = boto3.Session( + aws_access_key_id=MAIN_ACCOUNT_CREDS["key_id"], + aws_secret_access_key=MAIN_ACCOUNT_CREDS["key_secret"], + ) + else: + session = boto3.Session() + dynamodb = session.resource("dynamodb", region_name=REGION) + table = dynamodb.Table(table_name) + + # get usernames and AWS account IDs from DynamoDB + queried_keys = ["user_id", "account_id"] + filter_expr = Attr("workspace_type").eq("Direct Pay") + proj = ", ".join("#" + key for key in queried_keys) + expr = {"#" + key: key for key in queried_keys} + response = table.scan( + FilterExpression=filter_expr, + ProjectionExpression=proj, + ExpressionAttributeNames=expr, + ) + assert response.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200, response + items = response["Items"] + # if the response is paginated, get the rest of the items + while response["Count"] > 0: + if "LastEvaluatedKey" not in response: + break + response = table.scan( + FilterExpression=filter_expr, + ProjectionExpression=proj, + ExpressionAttributeNames=expr, + ExclusiveStartKey=response["LastEvaluatedKey"], + ) + assert ( + response.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200 + ), response + items.extend(response["Items"]) + + return items + + +def update_access_in_ecr(repo_to_account_ids: List[dict], ecr_role_arn: str) -> None: + # get access to ECR in the account that contains the ECR repos + if MAIN_ACCOUNT_CREDS["key_id"]: + sts = boto3.client( + "sts", + aws_access_key_id=MAIN_ACCOUNT_CREDS["key_id"], + aws_secret_access_key=MAIN_ACCOUNT_CREDS["key_secret"], + ) + else: + sts = boto3.client("sts") + assumed_role = sts.assume_role( + RoleArn=ecr_role_arn, + DurationSeconds=900, # minimum time for aws assume role as per boto docs + RoleSessionName=f"ecr-access-assume-role-{str(uuid.uuid4())[:8]}", + ) + assert "Credentials" in assumed_role, "Unable to assume role" + ecr = boto3.client( + "ecr", + aws_access_key_id=assumed_role["Credentials"]["AccessKeyId"], + aws_secret_access_key=assumed_role["Credentials"]["SecretAccessKey"], + aws_session_token=assumed_role["Credentials"]["SessionToken"], + ) + + # for each ECR repo, whitelist the account IDs so users can access the repo + for repo, account_ids in repo_to_account_ids.items(): + print(f"Allowing AWS accounts {account_ids} to use ECR repository '{repo}'") + policy = { + "Version": "2008-10-17", + "Statement": [ + { + "Sid": "AllowCrossAccountPull", + "Effect": "Allow", + "Principal": { + "AWS": [ + f"arn:aws:iam::{account_id}:root" + for 
account_id in account_ids + ] + }, + "Action": [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", + ], + } + ], + } + # Note that this is overwriting the repo policy, not appending to it. This means we can't have 2 dynamodb + # tables pointing at the same set of ECR repos: the repos would only allow the accounts in the table for + # which the script was run most recently. eg QA and Staging can't use the same ECR repos. + # Appending is not possible since this code will eventually rely on Arborist for authorization information + # and we'll need to overwrite in order to remove expired access. + try: + ecr.set_repository_policy( + repositoryName=repo, + policyText=json.dumps(policy), + ) + except Exception as e: + print(f" Unable to update '{repo}'; skipping it: {e}") + + +def main() -> None: + table_name, ecr_role_arn = get_configs() + items = query_usernames_and_account_ids(table_name) + + # construct mapping: { ECR repo url: [ AWS account IDs with access ] } + ecr_repo_prefix = "nextflow-approved" + repo_to_account_ids = { + f"{ecr_repo_prefix}/{escapism(e['user_id'])}": [e["account_id"]] + for e in items + if "account_id" in e + } + print( + "Mapping of ECR repository to allowed AWS accounts:\n", + json.dumps(repo_to_account_ids, indent=2), + ) + + update_access_in_ecr(repo_to_account_ids, ecr_role_arn) + + +if __name__ == "__main__": + main() diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index e95ab8604f..7b4c638ab4 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -1,4 +1,5 @@ import argparse +import copy import json import sys import requests @@ -12,7 +13,7 @@ "study_metadata.study_type.study_subject_type": "Subject Type", "study_metadata.human_subject_applicability.gender_applicability": "Gender", "study_metadata.human_subject_applicability.age_applicability": "Age", - "research_program": "Research Program" + "research_program": "Research Program", } # Defines how to handle special cases for values in filters @@ -24,12 +25,15 @@ "Questionnaire/Survey/Assessment - unvalidated instrument": "Questionnaire/Survey/Assessment", "Cis Male": "Male", "Cis Female": "Female", - "Trans Male": "Female-to-male transsexual", - "Trans Female": "Male-to-female transsexual", - "Agender, Non-binary, gender non-conforming": "Other", - "Gender Queer": "Other", - "Intersex": "Intersexed", - "Buisness Development": "Business Development" + "Trans Male": "Transgender man/trans man/female-to-male (FTM)", + "Female-to-male transsexual": "Transgender man/trans man/female-to-male (FTM)", + "Trans Female": "Transgender woman/trans woman/male-to-female (MTF)", + "Male-to-female transsexual": "Transgender woman/trans woman/male-to-female (MTF)", + "Agender, Non-binary, gender non-conforming": "Genderqueer/gender nonconforming/neither exclusively male nor female", + "Gender Queer": "Genderqueer/gender nonconforming/neither exclusively male nor female", + "Intersex": "Genderqueer/gender nonconforming/neither exclusively male nor female", + "Intersexed": "Genderqueer/gender nonconforming/neither exclusively male nor female", + "Buisness Development": "Business Development", } # Defines field that we don't want to include in the filters @@ -37,27 +41,38 @@ "study_metadata.human_subject_applicability.gender_applicability": "Not applicable" } +# repository links +REPOSITORY_STUDY_ID_LINK_TEMPLATE = { + "NIDDK 
Central": "https://repository.niddk.nih.gov/studies//", + "NIDA Data Share": "https://datashare.nida.nih.gov/study/", + "NICHD DASH": "https://dash.nichd.nih.gov/study/", + "ICPSR": "https://www.icpsr.umich.edu/web/ICPSR/studies/", + "BioSystics-AP": "https://biosystics-ap.com/assays/assaystudy//", +} + + def is_valid_uuid(uuid_to_test, version=4): """ Check if uuid_to_test is a valid UUID. - + Parameters ---------- uuid_to_test : str version : {1, 2, 3, 4} - + Returns ------- `True` if uuid_to_test is a valid UUID, otherwise `False`. - + """ - + try: uuid_obj = UUID(uuid_to_test, version=version) except ValueError: return False return str(uuid_obj) == uuid_to_test + def update_filter_metadata(metadata_to_update): filter_metadata = [] for metadata_field_key, filter_field_key in FILTER_FIELD_MAPPINGS.items(): @@ -69,22 +84,25 @@ def update_filter_metadata(metadata_to_update): print(filter_field_values) raise TypeError("Neither a string nor a list") for filter_field_value in filter_field_values: - if (metadata_field_key, filter_field_value) in OMITTED_VALUES_MAPPING.items(): + if ( + metadata_field_key, + filter_field_value, + ) in OMITTED_VALUES_MAPPING.items(): continue if filter_field_value in SPECIAL_VALUE_MAPPINGS: filter_field_value = SPECIAL_VALUE_MAPPINGS[filter_field_value] - filter_metadata.append({"key": filter_field_key, "value": filter_field_value}) + filter_metadata.append( + {"key": filter_field_key, "value": filter_field_value} + ) filter_metadata = pydash.uniq(filter_metadata) metadata_to_update["advSearchFilters"] = filter_metadata # Retain these from existing tags save_tags = ["Data Repository"] - tags = [ - tag - for tag in metadata_to_update["tags"] - if tag["category"] in save_tags - ] + tags = [tag for tag in metadata_to_update["tags"] if tag["category"] in save_tags] # Add any new tags from advSearchFilters for f in metadata_to_update["advSearchFilters"]: + if f["key"] == "Gender": + continue tag = {"name": f["value"], "category": f["key"]} if tag not in tags: tags.append(tag) @@ -95,20 +113,57 @@ def update_filter_metadata(metadata_to_update): def get_client_token(client_id: str, client_secret: str): try: token_url = f"http://revproxy-service/user/oauth2/token" - headers = {'Content-Type': 'application/x-www-form-urlencoded'} - params = {'grant_type': 'client_credentials'} - data = 'scope=openid user data' + headers = {"Content-Type": "application/x-www-form-urlencoded"} + params = {"grant_type": "client_credentials"} + data = "scope=openid user data" token_result = requests.post( - token_url, params=params, headers=headers, data=data, + token_url, + params=params, + headers=headers, + data=data, auth=(client_id, client_secret), ) - token = token_result.json()["access_token"] + token = token_result.json()["access_token"] except: raise Exception("Could not get token") return token +def get_related_studies(serial_num, guid, hostname): + related_study_result = [] + + if serial_num: + mds = requests.get( + f"http://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000" + ) + if mds.status_code == 200: + related_study_metadata = mds.json() + + for ( + related_study_metadata_key, + related_study_metadata_value, + ) in related_study_metadata.items(): + if related_study_metadata_key == guid or ( + related_study_metadata_value["_guid_type"] != "discovery_metadata" + and related_study_metadata_value["_guid_type"] + != "unregistered_discovery_metadata" + ): + # do nothing for self, or for archived studies + continue + title = 
( + related_study_metadata_value.get("gen3_discovery", {}) + .get("study_metadata", {}) + .get("minimal_info", {}) + .get("study_name", "") + ) + link = ( + f"https://{hostname}/portal/discovery/{related_study_metadata_key}/" + ) + related_study_result.append({"title": title, "link": link}) + return related_study_result + + parser = argparse.ArgumentParser() parser.add_argument("--directory", help="CEDAR Directory ID for registering ") @@ -139,7 +194,7 @@ def get_client_token(client_id: str, client_secret: str): print("Getting CEDAR client access token") access_token = get_client_token(client_id, client_secret) -token_header = {"Authorization": 'bearer ' + access_token} +token_header = {"Authorization": "bearer " + access_token} limit = 10 offset = 0 @@ -151,36 +206,45 @@ def get_client_token(client_id: str, client_secret: str): print("Directory ID is not in UUID format!") sys.exit(1) -while((limit + offset <= total)): +while limit + offset <= total: # Get the metadata from cedar to register print("Querying CEDAR...") - cedar = requests.get(f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}?limit={limit}&offset={offset}", headers=token_header) + cedar = requests.get( + f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}?limit={limit}&offset={offset}", + headers=token_header, + ) # If we get metadata back now register with MDS if cedar.status_code == 200: metadata_return = cedar.json() if "metadata" not in metadata_return: - print("Got 200 from CEDAR wrapper but no metadata in body, something is not right!") + print( + "Got 200 from CEDAR wrapper but no metadata in body, something is not right!" + ) sys.exit(1) total = metadata_return["metadata"]["totalCount"] returned_records = len(metadata_return["metadata"]["records"]) print(f"Successfully got {returned_records} record(s) from CEDAR directory") for cedar_record in metadata_return["metadata"]["records"]: - # get the appl id from cedar for querying in our MDS - cedar_appl_id = pydash.get(cedar_record, "metadata_location.nih_application_id") - if cedar_appl_id is None: - print("This record doesn't have appl_id, skipping...") + # get the CEDAR instance id from cedar for querying in our MDS + cedar_instance_id = pydash.get( + cedar_record, "metadata_location.cedar_study_level_metadata_template_instance_ID" + ) + if cedar_instance_id is None: + print("This record doesn't have CEDAR instance id, skipping...") continue - # Get the metadata record for the nih_application_id - mds = requests.get(f"http://revproxy-service/mds/metadata?gen3_discovery.study_metadata.metadata_location.nih_application_id={cedar_appl_id}&data=true") + # Get the metadata record for the CEDAR instance id + mds = requests.get( + f"http://revproxy-service/mds/metadata?gen3_discovery.study_metadata.metadata_location.cedar_study_level_metadata_template_instance_ID={cedar_instance_id}&data=true" + ) if mds.status_code == 200: mds_res = mds.json() # the query result key is the record of the metadata. If it doesn't return anything then our query failed. if len(list(mds_res.keys())) == 0 or len(list(mds_res.keys())) > 1: - print("Query returned nothing for", cedar_appl_id, "appl id") + print(f"Query returned nothing for template_instance_ID={cedar_instance_id}&data=true") continue # get the key for our mds record @@ -193,9 +257,13 @@ def get_client_token(client_id: str, client_secret: str): if mds_res["_guid_type"] == "discovery_metadata": print("Metadata is already registered. 
Updating MDS record") elif mds_res["_guid_type"] == "unregistered_discovery_metadata": - print("Metadata has not been registered. Registering it in MDS record") + print( + "Metadata has not been registered. Registering it in MDS record" + ) else: - print(f"This metadata data record has a special GUID type \"{mds_res['_guid_type']}\" and will be skipped") + print( + f"This metadata data record has a special GUID type \"{mds_res['_guid_type']}\" and will be skipped" + ) continue if "clinicaltrials_gov" in cedar_record: @@ -203,11 +271,79 @@ def get_client_token(client_id: str, client_secret: str): del cedar_record["clinicaltrials_gov"] # some special handing for this field, because its parent will be deleted before we merging the CEDAR and MDS SLMD to avoid duplicated values - cedar_record_other_study_websites = cedar_record.get("metadata_location", {}).get("other_study_websites", []) + cedar_record_other_study_websites = cedar_record.get( + "metadata_location", {} + ).get("other_study_websites", []) del cedar_record["metadata_location"] mds_res["gen3_discovery"]["study_metadata"].update(cedar_record) - mds_res["gen3_discovery"]["study_metadata"]["metadata_location"]["other_study_websites"] = cedar_record_other_study_websites + mds_res["gen3_discovery"]["study_metadata"]["metadata_location"][ + "other_study_websites" + ] = cedar_record_other_study_websites + + # setup citations + doi_citation = mds_res["gen3_discovery"]["study_metadata"].get( + "doi_citation", "" + ) + mds_res["gen3_discovery"]["study_metadata"]["citation"][ + "heal_platform_citation" + ] = doi_citation + + # setup repository_study_link + data_repositories = ( + mds_res.get("gen3_discovery", {}) + .get("study_metadata", {}) + .get("metadata_location", {}) + .get("data_repositories", []) + ) + repository_citation = "Users must also include a citation to the data as specified by the local repository." + repository_citation_additional_text = ' The link to the study page at the local repository can be found in the "Data" tab.' 
+ for repository in data_repositories: + if ( + repository["repository_name"] + and repository["repository_name"] + in REPOSITORY_STUDY_ID_LINK_TEMPLATE + and repository["repository_study_ID"] + ): + repository_study_link = REPOSITORY_STUDY_ID_LINK_TEMPLATE[ + repository["repository_name"] + ].replace("", repository["repository_study_ID"]) + repository.update( + {"repository_study_link": repository_study_link} + ) + if ( + repository_citation_additional_text + not in repository_citation + ): + repository_citation += repository_citation_additional_text + if len(data_repositories): + data_repositories[0] = { + **data_repositories[0], + "repository_citation": repository_citation, + } + + mds_res["gen3_discovery"]["study_metadata"]["metadata_location"][ + "data_repositories" + ] = copy.deepcopy(data_repositories) + + # set up related studies + serial_num = None + try: + serial_num = ( + mds_res.get("nih_reporter", {}) + .get("project_num_split", {}) + .get("serial_num", None) + ) + except Exception: + print("Unable to get serial number for study") + + if serial_num is None: + print("Unable to get serial number for study") + + related_study_result = get_related_studies( + serial_num, mds_record_guid, hostname + ) + mds_res["gen3_discovery"]["related_studies"] = copy.deepcopy(related_study_result) # merge data from cedar that is not study level metadata into a level higher deleted_keys = [] @@ -218,29 +354,39 @@ def get_client_token(client_id: str, client_secret: str): for key in deleted_keys: del mds_res["gen3_discovery"]["study_metadata"][key] - mds_discovery_data_body = update_filter_metadata(mds_res["gen3_discovery"]) + mds_discovery_data_body = update_filter_metadata( + mds_res["gen3_discovery"] + ) mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body if mds_clinical_trials: - mds_cedar_register_data_body["clinicaltrials_gov"] = {**mds_cedar_register_data_body.get("clinicaltrials_gov", {}), **mds_clinical_trials} + mds_cedar_register_data_body["clinicaltrials_gov"] = { + **mds_cedar_register_data_body.get("clinicaltrials_gov", {}), + **mds_clinical_trials, + } mds_cedar_register_data_body["_guid_type"] = "discovery_metadata" print(f"Metadata {mds_record_guid} is now being registered.") - mds_put = requests.put(f"http://revproxy-service/mds/metadata/{mds_record_guid}", + mds_put = requests.put( + f"http://revproxy-service/mds/metadata/{mds_record_guid}", headers=token_header, - json = mds_cedar_register_data_body + json=mds_cedar_register_data_body, ) if mds_put.status_code == 200: print(f"Successfully registered: {mds_record_guid}") else: - print(f"Failed to register: {mds_record_guid}. Might not be MDS admin") + print( + f"Failed to register: {mds_record_guid}. 
Might not be MDS admin" + ) print(f"Status from MDS: {mds_put.status_code}") else: print(f"Failed to get information from MDS: {mds.status_code}") - + else: - print(f"Failed to get information from CEDAR wrapper service: {cedar.status_code}") + print( + f"Failed to get information from CEDAR wrapper service: {cedar.status_code}" + ) if offset + limit == total: break @@ -248,3 +394,6 @@ def get_client_token(client_id: str, client_secret: str): offset = offset + limit if (offset + limit) > total: limit = total - offset + + if limit < 0: + break diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 349d1e022f..6896314abb 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -7,6 +7,7 @@ achecker.ca apache.github.io api.epigraphdb.org api.monqcle.com +awslabs.github.io biodata-integration-tests.net marketing.biorender.com clinicaltrials.gov @@ -15,7 +16,6 @@ ctds-planx.atlassian.net data.cityofchicago.org dataguids.org api.login.yahoo.com -api.snapcraft.io apt.kubernetes.io argoproj.github.io archive.cloudera.com @@ -34,6 +34,7 @@ cernvm.cern.ch charts.bitnami.com charts.helm.sh cloud.r-project.org +coredns.github.io coreos.com covidstoplight.org cpan.mirrors.tds.net @@ -76,6 +77,8 @@ go.googlesource.com golang.org gopkg.in grafana.com +grafana.github.io +helm.elastic.co http.us.debian.org ifconfig.io ingress.coralogix.us @@ -144,6 +147,7 @@ repos.sensuapp.org repo.vmware.com repository.cloudera.com resource.metadatacenter.org +rmq.n3c.ncats.io rules.emergingthreats.net rweb.quant.ku.edu sa-update.dnswl.org @@ -162,3 +166,5 @@ www.rabbitmq.com www.uniprot.org vpodc.org yahoo.com +idp.stage.qdr.org +stage.qdr.org \ No newline at end of file diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 44f4680971..1717b44432 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -11,6 +11,7 @@ .bioconductor.org .bionimbus.org .bitbucket.org +.blob.core.windows.net .bloodpac.org .braincommons.org .bsc.es @@ -39,6 +40,7 @@ .dockerproject.org .dph.illinois.gov .elasticsearch.org +.eramba.org .erlang-solutions.com .external-secrets.io .extjs.com @@ -97,9 +99,12 @@ .sks-keyservers.net .slack.com .slack-msgs.com +.snapcraft.io +.snapcraftcontent.com .sourceforge.net .southsideweekly.com .theanvil.io +.tigera.io .twistlock.com .ubuntu.com .ucsc.edu diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh index 05607f3044..812a9f7387 100644 --- a/flavors/squid_auto/squid_running_on_docker.sh +++ b/flavors/squid_auto/squid_running_on_docker.sh @@ -8,6 +8,9 @@ DISTRO=$(awk -F '[="]*' '/^NAME/ { print $2 }' < /etc/os-release) WORK_USER="ubuntu" if [[ $DISTRO == "Amazon Linux" ]]; then WORK_USER="ec2-user" + if [[ $(awk -F '[="]*' '/^VERSION_ID/ { print $2 }' < /etc/os-release) == "2023" ]]; then + DISTRO="al2023" + fi fi HOME_FOLDER="/home/${WORK_USER}" SUB_FOLDER="${HOME_FOLDER}/cloud-automation" @@ -60,6 +63,8 @@ fi function install_basics(){ if [[ $DISTRO == "Ubuntu" ]]; then apt -y install atop + elif [[ $DISTRO == "al2023" ]]; then + sudo dnf install cronie nc -y fi } @@ -69,10 +74,18 @@ function install_docker(){ # Docker ############################################################### # Install docker from sources - curl -fsSL ${DOCKER_DOWNLOAD_URL}/gpg | sudo apt-key add - - add-apt-repository "deb [arch=amd64] ${DOCKER_DOWNLOAD_URL} $(lsb_release -cs) stable" - apt 
update - apt install -y docker-ce + if [[ $DISTRO == "Ubuntu" ]]; then + curl -fsSL ${DOCKER_DOWNLOAD_URL}/gpg | sudo apt-key add - + add-apt-repository "deb [arch=amd64] ${DOCKER_DOWNLOAD_URL} $(lsb_release -cs) stable" + apt update + apt install -y docker-ce + else + sudo yum update -y + sudo yum install -y docker + # Start and enable Docker service + sudo systemctl start docker + sudo systemctl enable docker + fi mkdir -p /etc/docker cp ${SUB_FOLDER}/flavors/squid_auto/startup_configs/docker-daemon.json /etc/docker/daemon.json chmod -R 0644 /etc/docker @@ -201,8 +214,10 @@ function install_awslogs { if [[ $DISTRO == "Ubuntu" ]]; then wget ${AWSLOGS_DOWNLOAD_URL} -O amazon-cloudwatch-agent.deb dpkg -i -E ./amazon-cloudwatch-agent.deb - else + elif [[ $DISTRO == "Amazon Linux" ]]; then sudo yum install amazon-cloudwatch-agent nc -y + elif [[ $DISTRO == "al2023" ]]; then + sudo dnf install amazon-cloudwatch-agent -y fi # Configure the AWS logs @@ -292,6 +307,19 @@ function main(){ --volume ${SQUID_CACHE_DIR}:${SQUID_CACHE_DIR} \ --volume ${SQUID_CONFIG_DIR}:${SQUID_CONFIG_DIR}:ro \ quay.io/cdis/squid:${SQUID_IMAGE_TAG} + + max_attempts=10 + attempt_counter=0 + while [ $attempt_counter -lt $max_attempts ]; do + #((attempt_counter++)) + sleep 10 + if [[ -z "$(sudo lsof -i:3128)" ]]; then + echo "Squid not healthy, restarting." + docker restart squid + else + echo "Squid healthy" + fi + done } main diff --git a/gen3/bin/awsrole.sh b/gen3/bin/awsrole.sh index 144b7a4fea..dd19ea7a48 100644 --- a/gen3/bin/awsrole.sh +++ b/gen3/bin/awsrole.sh @@ -25,16 +25,16 @@ gen3_awsrole_help() { function gen3_awsrole_ar_policy() { local serviceAccount="$1" shift || return 1 - if [[ ! -z $1 ]]; then - local namespace=$1 + if [[ -z $1 ]] || [[ $1 == -* ]]; then + namespace=$(gen3 db namespace) else - local namespace=$(gen3 db namespace) + namespace=$1 + shift fi local issuer_url local account_id local vpc_name - shift || return 1 - local flag=$1 + local flag=$flag vpc_name="$(gen3 api environment)" || return 1 issuer_url="$(aws eks describe-cluster \ @@ -46,7 +46,7 @@ function gen3_awsrole_ar_policy() { local provider_arn="arn:aws:iam::${account_id}:oidc-provider/${issuer_url}" - if [[ "$flag" == "all_namespaces" ]]; then + if [[ "$flag" == "-all_namespaces" ]]; then # Use a trust policy that allows role to be used by multiple namespaces. cat - < config.tfvars @@ -230,10 +226,14 @@ gen3_awsrole_create() { gen3_log_err "use: gen3 awsrole create roleName saName" return 1 fi - if [[ ! -z $1 ]]; then - local namespace=$1 + if [[ -z $1 ]] || [[ $1 == -* ]]; then + namespace=$(gen3 db namespace) else - local namespace=$(gen3 db namespace) + namespace=$1 + shift + fi + if [[ ! -z $1 ]]; then + flag=$1 fi # do simple validation of name local regexp="^[a-z][a-z0-9\-]*$" @@ -247,13 +247,7 @@ EOF gen3_log_err $errMsg return 1 fi - shift || return 1 - local flag="" - # Check if the "all_namespaces" flag is provided - if [[ "$1" == "-f" || "$1" == "--flag" ]]; then - flag="$2" - shift 2 - fi + # check if the name is already used by another entity local entity_type @@ -271,9 +265,11 @@ EOF fi TF_IN_AUTOMATION="true" - if ! _tfplan_role $rolename $saName $namespace -f $flag; then + + if ! _tfplan_role $rolename $saName $namespace $flag; then return 1 fi + if ! 
_tfapply_role $rolename; then return 1 fi @@ -422,4 +418,4 @@ gen3_awsrole() { # Let testsuite source file if [[ -z "$GEN3_SOURCE_ONLY" ]]; then gen3_awsrole "$@" -fi +fi \ No newline at end of file diff --git a/gen3/bin/create-es7-cluster.sh b/gen3/bin/create-es7-cluster.sh index d18c4203f8..553dc26524 100644 --- a/gen3/bin/create-es7-cluster.sh +++ b/gen3/bin/create-es7-cluster.sh @@ -40,6 +40,7 @@ else --vpc-options "SubnetIds=${subnet_ids[*]},SecurityGroupIds=${security_groups[*]}" \ --access-policies "$access_policies" \ --encryption-at-rest-options "Enabled=true,KmsKeyId=$kms_key_id"\ + --node-to-node-encryption-options "Enabled=true" > /dev/null 2>&1 # Wait for the new cluster to be available @@ -60,4 +61,4 @@ else if [ $retry_count -eq $max_retries ]; then echo "New cluster creation may still be in progress. Please check the AWS Management Console for the status." fi -fi \ No newline at end of file +fi diff --git a/gen3/bin/dbbackup.sh b/gen3/bin/dbbackup.sh index 29f267221d..eb9611a907 100644 --- a/gen3/bin/dbbackup.sh +++ b/gen3/bin/dbbackup.sh @@ -173,6 +173,10 @@ db_restore() { gen3 job run psql-db-prep-restore } +va_testing_db_dump() { + gen3 job run psql-db-dump-va-testing +} + # main function to determine whether dump or restore main() { @@ -191,8 +195,15 @@ main() { create_s3_bucket db_restore ;; + va-dump) + gen3_log_info "Running a va-testing DB dump..." + create_policy + create_service_account_and_role + create_s3_bucket + va_testing_db_dump + ;; *) - echo "Invalid command. Usage: gen3 dbbackup [dump|restore]" + echo "Invalid command. Usage: gen3 dbbackup [dump|restore|va-dump]" return 1 ;; esac diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh index 930202a876..36af791ef6 100644 --- a/gen3/bin/ecr.sh +++ b/gen3/bin/ecr.sh @@ -32,6 +32,8 @@ accountList=( 205252583234 885078588865 922467707295 +533267425233 +048463324059 ) principalStr="" diff --git a/gen3/bin/gitops.sh b/gen3/bin/gitops.sh index fda6d4ffa9..bc0358499d 100644 --- a/gen3/bin/gitops.sh +++ b/gen3/bin/gitops.sh @@ -445,8 +445,13 @@ gen3_gitops_sync() { echo "DRYRUN flag detected, not rolling" gen3_log_info "dict_roll: $dict_roll; versions_roll: $versions_roll; portal_roll: $portal_roll; etl_roll: $etl_roll; fence_roll: $fence_roll" else - if [[ ( "$dict_roll" = true ) || ( "$versions_roll" = true ) || ( "$portal_roll" = true )|| ( "$etl_roll" = true ) || ( "$covid_cronjob_roll" = true ) || ("fence_roll" = true) ]]; then + if [[ ( "$dict_roll" = true ) || ( "$versions_roll" = true ) || ( "$portal_roll" = true )|| ( "$etl_roll" = true ) || ( "$covid_cronjob_roll" = true ) || ("$fence_roll" = true) ]]; then echo "changes detected, rolling" + tmpHostname=$(gen3 api hostname) + if [[ $slack = true ]]; then + curl -X POST --data-urlencode "payload={\"text\": \"Gitops-sync Cron: Changes detected on ${tmpHostname} - rolling...\"}" "${slackWebHook}" + fi + # run etl job before roll all so guppy can pick up changes if [[ "$etl_roll" = true ]]; then gen3 update_config etl-mapping "$(gen3 gitops folder)/etlMapping.yaml" @@ -472,7 +477,6 @@ gen3_gitops_sync() { rollRes=$? 
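+          # rollRes holds the exit status of the roll above; the lines below turn it
+          # into the SUCCESS/FAILURE text and color for the Slack webhook payload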
# send result to slack if [[ $slack = true ]]; then - tmpHostname=$(gen3 api hostname) resStr="SUCCESS" color="#1FFF00" if [[ $rollRes != 0 ]]; then diff --git a/gen3/bin/healthcheck.sh b/gen3/bin/healthcheck.sh index 149cb1aaa9..b658ff033b 100644 --- a/gen3/bin/healthcheck.sh +++ b/gen3/bin/healthcheck.sh @@ -137,6 +137,10 @@ gen3_healthcheck() { internetAccessExplicitProxy=false fi + gen3_log_info "Clearing Evicted pods" + sleep 5 + clear_evicted_pods + local healthJson=$(cat - < /dev/null; then - gen3 kube-setup-indexd & -else - gen3_log_info "no manifest entry for indexd" -fi - if g3k_manifest_lookup .versions.arborist 2> /dev/null; then gen3 kube-setup-arborist || gen3_log_err "arborist setup failed?" else gen3_log_info "no manifest entry for arborist" fi +if g3k_manifest_lookup .versions.indexd 2> /dev/null; then + gen3 kube-setup-indexd & +else + gen3_log_info "no manifest entry for indexd" +fi + if g3k_manifest_lookup '.versions["audit-service"]' 2> /dev/null; then gen3 kube-setup-audit-service else @@ -243,6 +243,12 @@ else gen3_log_info "not deploying dicom-viewer - no manifest entry for '.versions[\"dicom-viewer\"]'" fi +if g3k_manifest_lookup '.versions["gen3-discovery-ai"]' 2> /dev/null; then + gen3 kube-setup-gen3-discovery-ai & +else + gen3_log_info "not deploying gen3-discovery-ai - no manifest entry for '.versions[\"gen3-discovery-ai\"]'" +fi + if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> /dev/null; then gen3 kube-setup-ohdsi & else @@ -274,7 +280,7 @@ if [[ "$GEN3_ROLL_FAST" != "true" ]]; then else gen3 kube-setup-autoscaler & fi - gen3 kube-setup-kube-dns-autoscaler & + #gen3 kube-setup-kube-dns-autoscaler & gen3 kube-setup-metrics deploy || true gen3 kube-setup-tiller || true # diff --git a/gen3/bin/kube-setup-argo-wrapper.sh b/gen3/bin/kube-setup-argo-wrapper.sh index 5727a703e0..9f7cc52ce4 100644 --- a/gen3/bin/kube-setup-argo-wrapper.sh +++ b/gen3/bin/kube-setup-argo-wrapper.sh @@ -18,6 +18,26 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then gen3 roll argo-wrapper g3kubectl apply -f "${GEN3_HOME}/kube/services/argo-wrapper/argo-wrapper-service.yaml" + + + if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then + export ARGO_HOST=$(g3k_manifest_lookup .argo.argo_server_service_url) + else + export ARGO_HOST="http://argo-argo-workflows-server.argo.svc.cluster.local:2746" + fi + + if g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json 2> /dev/null; then + export ARGO_NAMESPACE=$(g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) + else + export ARGO_NAMESPACE="argo" + fi + + envsubst <"${GEN3_HOME}/kube/services/argo-wrapper/config.ini" > /tmp/config.ini + + g3kubectl delete configmap argo-wrapper-namespace-config + g3kubectl create configmap argo-wrapper-namespace-config --from-file /tmp/config.ini + + rm /tmp/config.ini gen3_log_info "the argo-wrapper service has been deployed onto the kubernetes cluster" -fi \ No newline at end of file +fi diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index ff2438833c..4c6c55eee0 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -5,10 +5,25 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" gen3_load "gen3/lib/kube-setup-init" +override_namespace=false +force=false + +for arg in "${@}"; do + if [ "$arg" == "--override-namespace" ]; then + override_namespace=true + elif [ "$arg" == "--force" ]; then + 
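+      # --force redeploys the argo helm release even when it is already installed
+      # (see the "helm status argo" check further down)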
force=true + else + #Print usage info and exit + gen3_log_info "Usage: gen3 kube-setup-argo [--override-namespace] [--force]" + exit 1 + fi +done ctx="$(g3kubectl config current-context)" ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" +argo_namespace=$(g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) function setup_argo_buckets { local accountNumber @@ -32,13 +47,13 @@ function setup_argo_buckets { roleName="gen3-argo-${environment//_/-}-role" bucketPolicy="argo-bucket-policy-${nameSpace}" internalBucketPolicy="argo-internal-bucket-policy-${nameSpace}" - if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."s3-bucket"') ]]; then - if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then + if [[ ! -z $(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."downloadable-s3-bucket"') ]]; then + if [[ ! -z $(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" - bucketName=$(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) + bucketName=$(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) else gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" - bucketName=$(g3k_config_lookup '.argo."s3-bucket"') + bucketName=$(g3k_config_lookup '.argo."downloadable-s3-bucket"') fi fi if [[ ! -z $(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."internal-s3-bucket"') ]]; then @@ -131,19 +146,19 @@ EOF g3kubectl create namespace argo || true g3kubectl label namespace argo app=argo || true # Grant admin access within the argo namespace to the default SA in the argo namespace - g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n argo || true + g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n $argo_namespace || true fi gen3_log_info "Creating IAM role ${roleName}" if aws iam get-role --role-name "${roleName}" > /dev/null 2>&1; then gen3_log_info "IAM role ${roleName} already exists.." 
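+    # IRSA: the eks.amazonaws.com/role-arn annotation lets pods running under a
+    # service account assume the IAM role via the cluster OIDC provider, e.g.
+    # (illustrative values only):
+    #   kubectl annotate serviceaccount argo \
+    #     eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/gen3-argo-role --overwrite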
roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text) gen3_log_info "Role annotate" - g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n argo - g3kubectl annotate serviceaccount argo eks.amazonaws.com/role-arn=${roleArn} -n $nameSpace + g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $argo_namespace + g3kubectl annotate serviceaccount argo eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $nameSpace else - gen3 awsrole create $roleName argo $nameSpace -f all_namespaces + gen3 awsrole create $roleName argo $nameSpace -all_namespaces roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text) - g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n argo + g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n $argo_namespace fi # Grant admin access within the current namespace to the argo SA in the current namespace @@ -177,34 +192,47 @@ EOF for serviceName in indexd; do secretName="${serviceName}-creds" # Only delete if secret is found to prevent early exits - if [[ ! -z $(g3kubectl get secrets -n argo | grep $secretName) ]]; then - g3kubectl delete secret "$secretName" -n argo > /dev/null 2>&1 + if [[ ! -z $(g3kubectl get secrets -n $argo_namespace | grep $secretName) ]]; then + g3kubectl delete secret "$secretName" -n $argo_namespace > /dev/null 2>&1 fi done sleep 1 # I think delete is async - give backend a second to finish indexdFencePassword=$(cat $(gen3_secrets_folder)/creds.json | jq -r .indexd.user_db.$indexd_admin_user) - g3kubectl create secret generic "indexd-creds" --from-literal=user=$indexd_admin_user --from-literal=password=$indexdFencePassword -n argo + g3kubectl create secret generic "indexd-creds" --from-literal=user=$indexd_admin_user --from-literal=password=$indexdFencePassword -n $argo_namespace fi } function setup_argo_db() { - if ! secret="$(g3kubectl get secret argo-db-creds -n argo 2> /dev/null)"; then + if ! secret="$(g3kubectl get secret argo-db-creds -n $argo_namespace 2> /dev/null)"; then gen3_log_info "Setting up argo db persistence" gen3 db setup argo || true dbCreds=$(gen3 secrets decode argo-g3auto dbcreds.json) - g3kubectl create secret -n argo generic argo-db-creds --from-literal=db_host=$(echo $dbCreds | jq -r .db_host) --from-literal=db_username=$(echo $dbCreds | jq -r .db_username) --from-literal=db_password=$(echo $dbCreds | jq -r .db_password) --from-literal=db_database=$(echo $dbCreds | jq -r .db_database) + g3kubectl create secret -n $argo_namespace generic argo-db-creds --from-literal=db_host=$(echo $dbCreds | jq -r .db_host) --from-literal=db_username=$(echo $dbCreds | jq -r .db_username) --from-literal=db_password=$(echo $dbCreds | jq -r .db_password) --from-literal=db_database=$(echo $dbCreds | jq -r .db_database) else gen3_log_info "Argo DB setup already completed" fi } - setup_argo_buckets +function setup_argo_template_secret() { + gen3_log_info "Started the template secret process" + downloadable_bucket_name=$(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) + # Check if the secret already exists + if [[ ! 
-z $(g3kubectl get secret argo-template-values-secret -n $argo_namespace) ]]; then + gen3_log_info "Argo template values secret already exists, assuming it's stale and deleting" + g3kubectl delete secret argo-template-values-secret -n $argo_namespace + fi + gen3_log_info "Creating argo template values secret" + g3kubectl create secret generic argo-template-values-secret --from-literal=DOWNLOADABLE_BUCKET=$downloadable_bucket_name -n $argo_namespace +} + +setup_argo_buckets # only do this if we are running in the default namespace -if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then +if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_namespace" == true ]]; then setup_argo_db - if (! helm status argo -n argo > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then - DBHOST=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_host | base64 -d) - DBNAME=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_database | base64 -d) + setup_argo_template_secret + if (! helm status argo -n $argo_namespace > /dev/null 2>&1 ) || [[ "$force" == true ]]; then + DBHOST=$(kubectl get secrets -n $argo_namespace argo-db-creds -o json | jq -r .data.db_host | base64 -d) + DBNAME=$(kubectl get secrets -n $argo_namespace argo-db-creds -o json | jq -r .data.db_database | base64 -d) if [[ -z $internalBucketName ]]; then BUCKET=$bucketName else @@ -218,7 +246,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile} --version 0.29.1 + helm upgrade --install argo argo/argo-workflows -n $argo_namespace -f ${valuesFile} --version 0.29.1 else gen3_log_info "kube-setup-argo exiting - argo already deployed, use --force to redeploy" fi diff --git a/gen3/bin/kube-setup-audit-service.sh b/gen3/bin/kube-setup-audit-service.sh index b7565194cd..92c70f352c 100644 --- a/gen3/bin/kube-setup-audit-service.sh +++ b/gen3/bin/kube-setup-audit-service.sh @@ -65,7 +65,7 @@ EOM } setup_audit_sqs() { - local sqsName="$(gen3 api safe-name audit-sqs)" + local sqsName="audit-sqs" sqsInfo="$(gen3 sqs create-queue-if-not-exist $sqsName)" || exit 1 sqsUrl="$(jq -e -r '.["url"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-url' from output: $sqsInfo"; exit 1; } sqsArn="$(jq -e -r '.["arn"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-arn' from output: $sqsInfo"; exit 1; } diff --git a/gen3/bin/kube-setup-cedar-wrapper.sh b/gen3/bin/kube-setup-cedar-wrapper.sh index 9a899a7700..a56bebc406 100644 --- a/gen3/bin/kube-setup-cedar-wrapper.sh +++ b/gen3/bin/kube-setup-cedar-wrapper.sh @@ -1,6 +1,58 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/lib/kube-setup-init" +create_client_and_secret() { + local hostname=$(gen3 api hostname) + local client_name="cedar_ingest_client" + gen3_log_info "kube-setup-cedar-wrapper" "creating fence ${client_name} for $hostname" + # delete any existing fence cedar clients + g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client ${client_name} > /dev/null 2>&1 + local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client ${client_name} --grant-types client_credentials | tail -1) + # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET') + if [[ ! 
$secrets =~ (\'(.*)\', \'(.*)\') ]]; then + gen3_log_err "kube-setup-cedar-wrapper" "Failed generating ${client_name}" + return 1 + else + local client_id="${BASH_REMATCH[2]}" + local client_secret="${BASH_REMATCH[3]}" + gen3_log_info "Create cedar-client secrets file" + cat - < /dev/null 2>&1; then + local have_cedar_client_secret="1" + else + gen3_log_info "No g3auto cedar-client key present in secret" + fi + + local client_name="cedar_ingest_client" + local client_list=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-list) + local client_count=$(echo "$client_list=" | grep -cE "'name':.*'${client_name}'") + gen3_log_info "CEDAR client count = ${client_count}" + + if [[ -z $have_cedar_client_secret ]] || [[ ${client_count} -lt 1 ]]; then + gen3_log_info "Creating new cedar-ingest client and secret" + local credsPath="$(gen3_secrets_folder)/g3auto/cedar/${cedar_creds_file}" + if ! create_client_and_secret > $credsPath; then + gen3_log_err "Failed to setup cedar-ingest secret" + return 1 + else + gen3 secrets sync + gen3 job run usersync + fi + fi +} + [[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets if ! g3kubectl get secrets/cedar-g3auto > /dev/null 2>&1; then @@ -8,6 +60,13 @@ if ! g3kubectl get secrets/cedar-g3auto > /dev/null 2>&1; then return 1 fi +if [[ -n "$JENKINS_HOME" ]]; then + gen3_log_info "Skipping cedar-client creds setup in non-adminvm environment" +else + gen3_log_info "Checking cedar-client creds" + setup_creds +fi + if ! gen3 secrets decode cedar-g3auto cedar_api_key.txt > /dev/null 2>&1; then gen3_log_err "No CEDAR api key present in cedar-g3auto secret, not rolling CEDAR wrapper" return 1 diff --git a/gen3/bin/kube-setup-dicom.sh b/gen3/bin/kube-setup-dicom.sh index 85114f33fe..e49060ecb6 100644 --- a/gen3/bin/kube-setup-dicom.sh +++ b/gen3/bin/kube-setup-dicom.sh @@ -82,6 +82,19 @@ EOM } EOM fi + + if g3k_manifest_lookup '.versions["dicom-server"]' > /dev/null 2>&1; then + export DICOM_SERVER_URL="/dicom-server" + gen3_log_info "attaching ohif viewer to old dicom-server (orthanc w/ aurora)" + fi + + if g3k_manifest_lookup .versions.orthanc > /dev/null 2>&1; then + export DICOM_SERVER_URL="/orthanc" + gen3_log_info "attaching ohif viewer to new dicom-server (orthanc w/ s3)" + fi + + envsubst <"${GEN3_HOME}/kube/services/ohif-viewer/app-config.js" > "$secretsFolder/app-config.js" + gen3 secrets sync 'setup orthanc-s3-g3auto secrets' } diff --git a/gen3/bin/kube-setup-ecr-access-cronjob.sh b/gen3/bin/kube-setup-ecr-access-cronjob.sh new file mode 100644 index 0000000000..5c645ad35d --- /dev/null +++ b/gen3/bin/kube-setup-ecr-access-cronjob.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" + +setup_ecr_access_job() { + if g3kubectl get configmap manifest-global > /dev/null; then + ecrRoleArn=$(g3kubectl get configmap manifest-global -o jsonpath={.data.ecr-access-job-role-arn}) + fi + if [ -z "$ecrRoleArn" ]; then + gen3_log_err "Missing 'global.ecr-access-job-role-arn' configuration in manifest.json" + return 1 + fi + + local saName="ecr-access-job-sa" + if ! g3kubectl get sa "$saName" > /dev/null 2>&1; then + tempFile="ecr-access-job-policy.json" + cat - > $tempFile < /dev/null 2>&1; then + gen3_log_info "gen3-discovery-ai-g3auto secret already configured" + return 0 + fi + if [[ -n "$JENKINS_HOME" || ! 
-f "$(gen3_secrets_folder)/creds.json" ]]; then + gen3_log_err "skipping db setup in non-adminvm environment" + return 0 + fi + # Setup .env file that gen3-discovery-ai service consumes + if [[ ! -f "$secretsFolder/gen3-discovery-ai.env" || ! -f "$secretsFolder/base64Authz.txt" ]]; then + local secretsFolder="$(gen3_secrets_folder)/g3auto/gen3-discovery-ai" + + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + if ! gen3 db setup gen3-discovery-ai; then + gen3_log_err "Failed setting up database for gen3-discovery-ai service" + return 1 + fi + fi + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + gen3_log_err "dbcreds not present in Gen3Secrets/" + return 1 + fi + + # go ahead and rotate the password whenever we regen this file + local password="$(gen3 random)" + cat - > "$secretsFolder/gen3-discovery-ai.env" < "$secretsFolder/base64Authz.txt" + fi + gen3 secrets sync 'setup gen3-discovery-ai-g3auto secrets' +} + +if ! g3k_manifest_lookup '.versions."gen3-discovery-ai"' 2> /dev/null; then + gen3_log_info "kube-setup-gen3-discovery-ai exiting - gen3-discovery-ai service not in manifest" + exit 0 +fi + +# There's no db for this service *yet* +# +# if ! setup_database; then +# gen3_log_err "kube-setup-gen3-discovery-ai bailing out - database failed setup" +# exit 1 +# fi + +setup_storage() { + local saName="gen3-discovery-ai-sa" + g3kubectl create sa "$saName" > /dev/null 2>&1 || true + + local secret + local secretsFolder="$(gen3_secrets_folder)/g3auto/gen3-discovery-ai" + + secret="$(g3kubectl get secret gen3-discovery-ai-g3auto -o json 2> /dev/null)" + local hasStorageCfg + hasStorageCfg=$(jq -r '.data | has("storage_config.json")' <<< "$secret") + + if [ "$hasStorageCfg" = "false" ]; then + gen3_log_info "setting up storage for gen3-discovery-ai service" + # + # gen3-discovery-ai-g3auto secret still does not exist + # we need to setup an S3 bucket and IAM creds + # let's avoid creating multiple buckets for different + # deployments to the same k8s cluster (dev, etc) + # + local bucketName + local accountNumber + local environment + + if ! accountNumber="$(aws sts get-caller-identity --output text --query 'Account')"; then + gen3_log_err "could not determine account numer" + return 1 + fi + + gen3_log_info "accountNumber: ${accountNumber}" + + if ! environment="$(g3kubectl get configmap manifest-global -o json | jq -r .data.environment)"; then + gen3_log_err "could not determine environment from manifest-global - bailing out of gen3-discovery-ai setup" + return 1 + fi + + gen3_log_info "environment: ${environment}" + + # try to come up with a unique but composable bucket name + bucketName="gen3-discovery-ai-${accountNumber}-${environment//_/-}" + + gen3_log_info "bucketName: ${bucketName}" + + if aws s3 ls --page-size 1 "s3://${bucketName}" > /dev/null 2>&1; then + gen3_log_info "${bucketName} s3 bucket already exists - probably in use by another namespace - copy the creds from there to $(gen3_secrets_folder)/g3auto/gen3-discovery-ai" + # continue on ... + elif ! gen3 s3 create "${bucketName}"; then + gen3_log_err "maybe failed to create bucket ${bucketName}, but maybe not, because the terraform script is flaky" + fi + + local hostname + hostname="$(gen3 api hostname)" + jq -r -n --arg bucket "${bucketName}" --arg hostname "${hostname}" '.bucket=$bucket | .prefix=$hostname' > "${secretsFolder}/storage_config.json" + gen3 secrets sync 'setup gen3-discovery-ai credentials' + + local roleName + roleName="$(gen3 api safe-name gen3-discovery-ai)" || return 1 + + if ! 
gen3 awsrole info "$roleName" > /dev/null; then # setup role + bucketName="$( (gen3 secrets decode 'gen3-discovery-ai-g3auto' 'storage_config.json' || echo ERROR) | jq -r .bucket)" || return 1 + gen3 awsrole create "$roleName" "$saName" || return 1 + gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name "${roleName}" + # try to give the gitops role read/write permissions on the bucket + local gitopsRoleName + gitopsRoleName="$(gen3 api safe-name gitops)" + gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name "${gitopsRoleName}" + fi + fi + + return 0 +} + +if ! setup_storage; then + gen3_log_err "kube-setup-gen3-discovery-ai bailing out - storage failed setup" + exit 1 +fi + +gen3_log_info "Setup complete, syncing configuration to bucket" + +bucketName="$( (gen3 secrets decode 'gen3-discovery-ai-g3auto' 'storage_config.json' || echo ERROR) | jq -r .bucket)" || exit 1 +aws s3 sync "$(dirname $(g3k_manifest_path))/gen3-discovery-ai/knowledge" "s3://$bucketName" --delete + +gen3 roll gen3-discovery-ai +g3kubectl apply -f "${GEN3_HOME}/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml" + +if [[ -z "$GEN3_ROLL_ALL" ]]; then + gen3 kube-setup-networkpolicy + gen3 kube-setup-revproxy +fi + +gen3_log_info "The gen3-discovery-ai service has been deployed onto the kubernetes cluster" +gen3_log_info "test with: curl https://commons-host/ai" diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index 691fb354ab..bdcff8ed0b 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -20,11 +20,81 @@ gen3 jupyter j-namespace setup # (g3k_kv_filter ${GEN3_HOME}/kube/services/hatchery/serviceaccount.yaml BINDING_ONE "name: hatchery-binding1-$namespace" BINDING_TWO "name: hatchery-binding2-$namespace" CURRENT_NAMESPACE "namespace: $namespace" | g3kubectl apply -f -) || true +function exists_or_create_gen3_license_table() { + # Create dynamodb table for gen3-license if it does not exist. 
+ TARGET_TABLE="$1" + echo "Checking for dynamoDB table: ${TARGET_TABLE}" -# cron job to distribute licenses if using Stata workspaces -if [ "$(g3kubectl get configmaps/manifest-hatchery -o yaml | grep "\"image\": .*stata.*")" ]; -then - gen3 job cron distribute-licenses '* * * * *' + FOUND_TABLE=`aws dynamodb list-tables | jq -r .TableNames | jq -c -r '.[]' | grep $TARGET_TABLE` + if [ -n "$FOUND_TABLE" ]; then + echo "Target table already exists in dynamoDB: $FOUND_TABLE" + else + echo "Creating table ${TARGET_TABLE}" + GSI=`g3kubectl get configmaps/manifest-hatchery -o json | jq -r '.data."license-user-maps-global-secondary-index"'` + if [[ -z "$GSI" || "$GSI" == "null" ]]; then + echo "Error: No global-secondary-index in configuration" + return 0 + fi + aws dynamodb create-table \ + --no-cli-pager \ + --table-name "$TARGET_TABLE" \ + --attribute-definitions AttributeName=itemId,AttributeType=S \ + AttributeName=environment,AttributeType=S \ + AttributeName=isActive,AttributeType=S \ + --key-schema AttributeName=itemId,KeyType=HASH \ + AttributeName=environment,KeyType=RANGE \ + --provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5 \ + --global-secondary-indexes \ + "[ + { + \"IndexName\": \"$GSI\", + \"KeySchema\": [{\"AttributeName\":\"environment\",\"KeyType\":\"HASH\"}, + {\"AttributeName\":\"isActive\",\"KeyType\":\"RANGE\"}], + \"Projection\":{ + \"ProjectionType\":\"INCLUDE\", + \"NonKeyAttributes\":[\"itemId\",\"userId\",\"licenseId\",\"licenseType\"] + }, + \"ProvisionedThroughput\": { + \"ReadCapacityUnits\": 5, + \"WriteCapacityUnits\": 3 + } + } + ]" + fi +} + +TARGET_TABLE=`g3kubectl get configmaps/manifest-hatchery -o json | jq -r '.data."license-user-maps-dynamodb-table"'` +if [[ -z "$TARGET_TABLE" || "$TARGET_TABLE" == "null" ]]; then + echo "No gen3-license table in configuration" + # cron job to distribute licenses if using Stata workspaces but not using dynamoDB + if [ "$(g3kubectl get configmaps/manifest-hatchery -o yaml | grep "\"image\": .*stata.*")" ]; + then + gen3 job cron distribute-licenses '* * * * *' + fi +else + echo "Found gen3-license table in configuration: $TARGET_TABLE" + exists_or_create_gen3_license_table "$TARGET_TABLE" +fi + +# if `nextflow-global.imagebuilder-reader-role-arn` is set in hatchery config, allow hatchery +# to assume the configured role +imagebuilderRoleArn=$(g3kubectl get configmap manifest-hatchery -o jsonpath={.data.nextflow-global} | jq -r '."imagebuilder-reader-role-arn"') +assumeImageBuilderRolePolicyBlock="" +if [ -z "$imagebuilderRoleArn" ]; then + gen3_log_info "No 'nextflow-global.imagebuilder-reader-role-arn' in Hatchery configuration, not granting AssumeRole" +else + gen3_log_info "Found 'nextflow-global.imagebuilder-reader-role-arn' in Hatchery configuration, granting AssumeRole" + assumeImageBuilderRolePolicyBlock=$( cat < /dev/null 2>&1; then roleName="$(gen3 api safe-name hatchery-sa)" gen3 awsrole create $roleName $saName @@ -127,7 +204,6 @@ if !
g3kubectl get sa "$saName" -o json | jq -e '.metadata.annotations | ."eks.a # create the new version gen3_aws_run aws iam create-policy-version --policy-arn "$policyArn" --policy-document "$policy" --set-as-default fi - gen3_log_info "Attaching policy '${policyName}' to role '${roleName}'" gen3 awsrole attach-policy ${policyArn} --role-name ${roleName} --force-aws-cli || exit 1 gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${roleName} --force-aws-cli || exit 1 diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh index d0bcff9a4b..b75470f733 100644 --- a/gen3/bin/kube-setup-ingress.sh +++ b/gen3/bin/kube-setup-ingress.sh @@ -232,6 +232,28 @@ gen3_ingress_setup_role() { } } }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags" + ], + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*" + ], + "Condition": { + "StringEquals": { + "elasticloadbalancing:CreateAction": [ + "CreateTargetGroup", + "CreateLoadBalancer" + ] + }, + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "false" + } + } + }, { "Effect": "Allow", "Action": [ @@ -329,4 +351,4 @@ g3kubectl apply -f "${GEN3_HOME}/kube/services/revproxy/revproxy-service.yaml" envsubst <$scriptDir/ingress.yaml | g3kubectl apply -f - if [ "$deployWaf" = true ]; then gen3_ingress_setup_waf -fi \ No newline at end of file +fi diff --git a/gen3/bin/kube-setup-jenkins2.sh b/gen3/bin/kube-setup-jenkins2.sh new file mode 100644 index 0000000000..f5233f978c --- /dev/null +++ b/gen3/bin/kube-setup-jenkins2.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# +# Just a little helper for deploying jenkins onto k8s the first time +# + +set -e + +export WORKSPACE="${WORKSPACE:-$HOME}" +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +gen3 kube-setup-secrets + +# +# Assume Jenkins should use 'jenkins' profile credentials in "${WORKSPACE}"/.aws/credentials +# +aws_access_key_id="$(aws configure get jenkins.aws_access_key_id)" +aws_secret_access_key="$(aws configure get jenkins.aws_secret_access_key)" +google_acct1_email="$(jq -r '.jenkins.google_acct1.email' < $(gen3_secrets_folder)/creds.json)" +google_acct1_password="$(jq -r '.jenkins.google_acct1.password' < $(gen3_secrets_folder)/creds.json)" +google_acct2_email="$(jq -r '.jenkins.google_acct2.email' < $(gen3_secrets_folder)/creds.json)" +google_acct2_password="$(jq -r '.jenkins.google_acct2.password' < $(gen3_secrets_folder)/creds.json)" + +if [ -z "$aws_access_key_id" -o -z "$aws_secret_access_key" ]; then + gen3_log_err 'not configuring jenkins - could not extract secrets from aws configure' + exit 1 +fi +if [[ -z "$google_acct1_email" || -z "$google_acct1_password" || -z "$google_acct2_email" || -z "$google_acct2_password" ]]; then + gen3_log_err "missing google credentials in '.jenkins' of creds.json" + exit 1 +fi + +if ! g3kubectl get secrets jenkins-secret > /dev/null 2>&1; then + # make it easy to rerun kube-setup-jenkins.sh + g3kubectl create secret generic jenkins-secret "--from-literal=aws_access_key_id=$aws_access_key_id" "--from-literal=aws_secret_access_key=$aws_secret_access_key" +fi +if ! g3kubectl get secrets google-acct1 > /dev/null 2>&1; then + g3kubectl create secret generic google-acct1 "--from-literal=email=${google_acct1_email}" "--from-literal=password=${google_acct1_password}" +fi +if ! 
g3kubectl get secrets google-acct2 > /dev/null 2>&1; then + g3kubectl create secret generic google-acct2 "--from-literal=email=${google_acct2_email}" "--from-literal=password=${google_acct2_password}" +fi + +if ! g3kubectl get storageclass gp2 > /dev/null 2>&1; then + g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/10storageclass.yaml" +fi +if ! g3kubectl get persistentvolumeclaim datadir-jenkins > /dev/null 2>&1; then + g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/00pvc.yaml" +fi + +# Note: jenkins service account is configured by `kube-setup-roles` +gen3 kube-setup-roles +# Note: only the 'default' namespace jenkins-service account gets a cluster rolebinding +g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/clusterrolebinding-devops.yaml" + +# Note: requires Jenkins entry in cdis-manifest +gen3 roll jenkins2 +gen3 roll jenkins2-worker +gen3 roll jenkins2-ci-worker + +# +# Get the ARN of the SSL certificate for the commons - +# We'll optimistically assume it's a wildcard cert that +# is appropriate to also attach to the jenkins ELB +# +export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}') +if [[ ! -z $ARN ]]; then + envsubst <"${GEN3_HOME}/kube/services/jenkins/jenkins-service.yaml" | g3kubectl apply -f - +else + gen3_log_info "Global configmap not configured - not launching service (require SSL cert ARN)" +fi diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 8ba8ed9d97..949c1ccd13 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -23,13 +23,15 @@ gen3_deploy_karpenter() { if g3k_config_lookup .global.karpenter_version; then karpenter=$(g3k_config_lookup .global.karpenter_version) fi - export clusterversion=`kubectl version --short -o json | jq -r .serverVersion.minor` - if [ "${clusterversion}" = "24+" ]; then + export clusterversion=`kubectl version -o json | jq -r .serverVersion.minor` + if [ "${clusterversion}" = "25+" ]; then + karpenter=${karpenter:-v0.27.0} + elif [ "${clusterversion}" = "24+" ]; then karpenter=${karpenter:-v0.24.0} else karpenter=${karpenter:-v0.22.0} fi - local queue_name="karpenter-sqs-${vpc_name}" + local queue_name="$(gen3 api safe-name karpenter-sqs)" echo '{ "Statement": [ { @@ -77,6 +79,14 @@ gen3_deploy_karpenter() { "Effect": "Allow", "Resource": "*", "Sid": "ConditionalEC2Termination" + }, + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "kms:*" + ], + "Resource": "*" } ], "Version": "2012-10-17" @@ -192,9 +202,9 @@ gen3_update_karpenter_configs() { } gen3_create_karpenter_sqs_eventbridge() { - local queue_name="karpenter-sqs-${vpc_name}" + local queue_name="$(gen3 api safe-name karpenter-sqs)" local eventbridge_rule_name="karpenter-eventbridge-${vpc_name}" - #gen3 sqs create-queue-if-not-exist $queue_name >> "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" + gen3 sqs create-queue-if-not-exist karpenter-sqs >> "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" local queue_url=$(cat "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.url') local queue_arn=$(cat "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.arn') # Create eventbridge rules diff --git a/gen3/bin/kube-setup-ohdsi.sh b/gen3/bin/kube-setup-ohdsi.sh index d586570dba..3d8165547f 100644 --- a/gen3/bin/kube-setup-ohdsi.sh +++ b/gen3/bin/kube-setup-ohdsi.sh @@ -14,13 +14,8 @@ new_client() { local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client atlas --urls 
https://${atlas_hostname}/WebAPI/user/oauth/callback?client_name=OidcClient --username atlas --allowed-scopes openid profile email user | tail -1) # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET') if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then - # try delete client - g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client atlas > /dev/null 2>&1 - secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client atlas --urls https://${atlas_hostname}/WebAPI/user/oauth/callback?client_name=OidcClient --username atlas --allowed-scopes openid profile email user | tail -1) - if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then - gen3_log_err "kube-setup-ohdsi" "Failed generating oidc client for atlas: $secrets" - return 1 - fi + gen3_log_err "kube-setup-ohdsi" "Failed generating oidc client for atlas: $secrets" + return 1 fi local FENCE_CLIENT_ID="${BASH_REMATCH[2]}" local FENCE_CLIENT_SECRET="${BASH_REMATCH[3]}" @@ -87,6 +82,8 @@ setup_secrets() { export DB_HOST=$(jq -r ".db_host" <<< "$dbcreds") export FENCE_URL="https://${hostname}/user/user" + # get arborist_url from manifest.json: + export ARBORIST_URL=$(g3k_manifest_lookup .global.arborist_url) export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration" export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds") export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds") diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index fcc2ef3b73..fd30b478b3 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -111,15 +111,14 @@ for name in $(g3kubectl get services -o json | jq -r '.items[] | .metadata.name' fi done -if g3kubectl get namespace argo > /dev/null 2>&1; -then - for argo in $(g3kubectl get services -n argo -o jsonpath='{.items[*].metadata.name}'); - do - filePath="$scriptDir/gen3.nginx.conf/${argo}.conf" - if [[ -f "$filePath" ]]; then - confFileList+=("--from-file" "$filePath") - fi - done + +if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then + argo_server_service_url=$(g3k_manifest_lookup .argo.argo_server_service_url) + g3k_kv_filter "${scriptDir}/gen3.nginx.conf/argo-server.conf" SERVICE_URL "${argo_server_service_url}" > /tmp/argo-server-with-url$(gen3 db namespace).conf + filePath="/tmp/argo-server-with-url$(gen3 db namespace).conf" + if [[ -f "$filePath" ]]; then + confFileList+=("--from-file" "$filePath") + fi fi if g3kubectl get namespace argocd > /dev/null 2>&1; diff --git a/gen3/bin/kube-setup-system-services.sh b/gen3/bin/kube-setup-system-services.sh index 609ee01c76..c26a04cb5d 100644 --- a/gen3/bin/kube-setup-system-services.sh +++ b/gen3/bin/kube-setup-system-services.sh @@ -19,7 +19,7 @@ gen3_load "gen3/gen3setup" kubeproxy=${kubeproxy:-1.24.7} coredns=${coredns:-1.8.7} kubednsautoscaler=${kubednsautoscaler:-1.8.6} -cni=${cni:-1.12.2} +cni=${cni:-1.14.1} calico=${calico:-1.7.8} @@ -39,7 +39,7 @@ calico_yaml="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${calico} g3kubectl set image daemonset.apps/kube-proxy -n kube-system kube-proxy=${kube_proxy_image} g3kubectl set image --namespace kube-system deployment.apps/coredns coredns=${coredns_image} -g3k_kv_filter "${GEN3_HOME}/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml" SERVICE "coredns" IMAGE "$kubednsautoscaler_image" | g3kubectl apply -f - +#g3k_kv_filter "${GEN3_HOME}/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml" SERVICE "coredns" IMAGE 
"$kubednsautoscaler_image" | g3kubectl apply -f - g3kubectl apply -f ${cni_image} g3kubectl apply -f ${calico_yaml} diff --git a/gen3/bin/migrate-to-vpc-cni.sh b/gen3/bin/migrate-to-vpc-cni.sh new file mode 100644 index 0000000000..510d9ebeff --- /dev/null +++ b/gen3/bin/migrate-to-vpc-cni.sh @@ -0,0 +1,138 @@ +#!/bin/bash + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +#Get the K8s NS +ctx="$(g3kubectl config current-context)" +ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" + +# Set the cluster name variable +CLUSTER_NAME=`gen3 api environment` + +# Check if in default ns +if [[ ("$ctxNamespace" != "default" && "$ctxNamespace" != "null") ]]; then + gen3_log_err "Namespace must be default" + exit 1 +fi + +# Cd into Cloud-automation repo and pull the latest from master +gen3_log_info "Pulling the latest from Cloud-Auto" +cd /home/$CLUSTER_NAME/cloud-automation || { gen3_log_err "Cloud-automation repo not found"; exit 1; } +#### Change to master +git checkout master || { gen3_log_err "Failed to checkout master branch"; exit 1; } +git pull || { gen3_log_err "Failed to pull from the repository"; exit 1; } + +# Update the Karpenter Node Template +gen3_log_info "Apply new Karpenter Node Template" +if [[ -d $(g3k_manifest_init)/$(g3k_hostname)/manifests/karpenter ]]; then + gen3_log_info "Karpenter setup in manifest. Open a cdismanifest PR and add this line to aws node templates: https://github.com/uc-cdis/cloud-automation/blob/master/kube/services/karpenter/nodeTemplateDefault.yaml#L40" + while true; do + read -p "Have you updated your manifest? (yes/no): " yn + case $yn in + [Yy]* ) + gen3_log_info "Proceeding with Karpenter deployment..." + gen3 kube-setup-karpenter deploy --force || { gen3_log_err "kube-setup-karpenter failed"; exit 1; } + break + ;; + [Nn]* ) + gen3_log_info "Please update the cdismanifest before proceeding." + exit 1 + ;; + * ) + gen3_log_info "Please answer yes or no." + ;; + esac + done +else + gen3 kube-setup-karpenter deploy --force || { gen3_log_err "kube-setup-karpenter failed"; exit 1; } +fi + +# Cordon all the nodes before running gen3 roll all" +gen3_log_info "Cordoning all nodes" +kubectl get nodes --no-headers -o custom-columns=":metadata.name" | grep -v '^fargate' | xargs -I{} kubectl cordon {} + +# Run a "gen3 roll all" so all nodes use the new mounted BPF File System +gen3_log_info "Cycling all the nodes by running gen3 roll all" +gen3 roll all --fast || exit 1 + +# Confirm that all nodes have been rotated +while true; do + read -p "Roll all complete. Have all cordoned nodes been rotated? (yes/no): " yn + case $yn in + [Yy]* ) + gen3_log_info "Continuing with script..." + break + ;; + [Nn]* ) + gen3_log_info "Please drain any remaining nodes with 'kubectl drain --ignore-daemonsets --delete-emptydir-data'" + ;; + * ) + gen3_log_info "Please answer yes or no." 
+ ;; + esac +done + + +# Delete all existing network policies +gen3_log_info "Deleting networkpolicies" +kubectl delete networkpolicies --all + +# Delete all Calico related resources from the "kube-system" namespace +gen3_log_info "Deleting all Calico related resources" +kubectl get deployments -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete deployment -n kube-system +kubectl get daemonsets -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete daemonset -n kube-system +kubectl get services -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete service -n kube-system +kubectl get replicasets -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete replicaset -n kube-system + +# Backup the current VPC CNI configuration in case of rollback +gen3_log_info "Backing up current VPC CNI Configuration..." +kubectl get daemonset aws-node -n kube-system -o yaml > aws-k8s-cni-old.yaml || { gen3_log_err "Error backing up VPC CNI configuration"; exit 1; } + +# Check to ensure we are not using an AWS plugin to manage the VPC CNI Plugin +if aws eks describe-addon --cluster-name "$CLUSTER_NAME" --addon-name vpc-cni --query addon.addonVersion --output text 2>/dev/null; then + gen3_log_err "Error: VPC CNI Plugin is managed by AWS. Please log into the AWS UI and delete the VPC CNI Plugin in Amazon EKS, then re-run this script." + exit 1 +else + gen3_log_info "No managed VPC CNI Plugin found, proceeding with the script." +fi + +# Apply the new VPC CNI Version +gen3_log_info "Applying new version of VPC CNI" +g3kubectl apply -f https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.14.1/config/master/aws-k8s-cni.yaml || { gen3_log_err "Failed to apply new VPC CNI version"; exit 1; } + +# Check the version to make sure it updated +NEW_VERSION=$(kubectl describe daemonset aws-node --namespace kube-system | grep amazon-k8s-cni: | cut -d : -f 3) +gen3_log_info "Current version of aws-k8s-cni is: $NEW_VERSION" +if [ "$NEW_VERSION" != "v1.14.1" ]; then + gen3_log_info "The version of aws-k8s-cni has not been updated correctly." + exit 1 +fi + +# Edit the amazon-vpc-cni configmap to enable network policy controller +gen3_log_info "Enabling NetworkPolicies in VPC CNI Configmap" +kubectl patch configmap -n kube-system amazon-vpc-cni --type merge -p '{"data":{"enable-network-policy-controller":"true"}}' || { gen3_log_err "Configmap patch failed"; exit 1; } + +# Edit the aws-node daemonset +gen3_log_info "Enabling NetworkPolicies in aws-node Daemonset" +kubectl patch daemonset aws-node -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/1/args", "value": ["--enable-network-policy=true", "--enable-ipv6=false", "--enable-cloudwatch-logs=false", "--metrics-bind-addr=:8162", "--health-probe-bind-addr=:8163"]}]' || { gen3_log_err "Daemonset edit failed"; exit 1; } + +# Ensure all the aws-nodes are running +kubectl get pods -n kube-system | grep aws +while true; do + read -p "Do all the aws-node pods in the kube-system ns have 2/2 containers running? (yes/no): " yn + case $yn in + [Yy]* ) + gen3_log_info "Running kube-setup-networkpolicy..." + gen3 kube-setup-networkpolicy || exit 1 + break + ;; + [Nn]* ) + gen3_log_err "Look at aws-node logs to figure out what went wrong. View this document for more details: https://docs.google.com/document/d/1fcBTciQSSwjvHktEnO_7EObY-xR_EvJ2NtgUa70wvL8" + gen3_log_info "Rollback instructions are also available in the above document" + ;; + * ) + gen3_log_info "Please answer yes or no."
+ ;; + esac +done \ No newline at end of file diff --git a/gen3/bin/mutate-guppy-config-for-guppy-test.sh b/gen3/bin/mutate-guppy-config-for-guppy-test.sh index de7da10d5c..151bb71697 100644 --- a/gen3/bin/mutate-guppy-config-for-guppy-test.sh +++ b/gen3/bin/mutate-guppy-config-for-guppy-test.sh @@ -16,7 +16,7 @@ sed -i 's/\(.*\)"index": "\(.*\)_etl",$/\1"index": "jenkins_subject_alias",/' or # for bloodpac-like envs sed -i 's/\(.*\)"index": "\(.*\)_case",$/\1"index": "jenkins_subject_alias",/' original_guppy_config.yaml # the pre-defined Canine index works with subject ONLY (never case) -sed -i 's/\(.*\)"type": "case"$/\1"type": "subject"/' original_guppy_config.yaml +# sed -i 's/\(.*\)"type": "case"$/\1"type": "subject"/' original_guppy_config.yaml sed -i 's/\(.*\)"index": "\(.*\)_file",$/\1"index": "jenkins_file_alias",/' original_guppy_config.yaml sed -i 's/\(.*\)"config_index": "\(.*\)_array-config",$/\1"config_index": "jenkins_configs_alias",/' original_guppy_config.yaml diff --git a/gen3/bin/sqs.sh b/gen3/bin/sqs.sh index dccb1ff7b7..7448437a0c 100644 --- a/gen3/bin/sqs.sh +++ b/gen3/bin/sqs.sh @@ -50,15 +50,15 @@ EOM # @sqsName # gen3_sqs_create_queue() { - local sqsName=$1 - if ! shift || [[ -z "$sqsName" ]]; then - gen3_log_err "Must provide 'sqsName' to 'gen3_sqs_create_queue'" + local serviceName=$1 + if ! shift || [[ -z "$serviceName" ]]; then + gen3_log_err "Must provide 'serviceName' to 'gen3_sqs_create_queue'" return 1 fi + local sqsName="$(gen3 api safe-name $serviceName)" gen3_log_info "Creating SQS '$sqsName'" - local prefix="$(gen3 api safe-name sqs-create)" ( # subshell - do not pollute parent environment - gen3 workon default ${prefix}__sqs 1>&2 + gen3 workon default ${sqsName}__sqs 1>&2 gen3 cd 1>&2 cat << EOF > config.tfvars sqs_name="$sqsName" @@ -76,7 +76,8 @@ EOF # @sqsName # gen3_sqs_create_queue_if_not_exist() { - local sqsName=$1 + local serviceName=$1 + local sqsName="$(gen3 api safe-name $serviceName)" if ! shift || [[ -z "$sqsName" ]]; then gen3_log_err "Must provide 'sqsName' to 'gen3_sqs_create_queue'" return 1 @@ -90,7 +91,7 @@ gen3_sqs_create_queue_if_not_exist() { gen3_log_info "The '$sqsName' SQS already exists" else # create the queue - sqsInfo="$(gen3_sqs_create_queue $sqsName)" || exit 1 + sqsInfo="$(gen3_sqs_create_queue $serviceName)" || exit 1 sqsUrl="$(jq -e -r '.["sqs-url"].value' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-url' from output: $sqsInfo"; exit 1; } sqsArn="$(jq -e -r '.["sqs-arn"].value' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-arn' from output: $sqsInfo"; exit 1; } fi diff --git a/gen3/bin/workon.sh b/gen3/bin/workon.sh index e7b951d1ca..f614cf662d 100644 --- a/gen3/bin/workon.sh +++ b/gen3/bin/workon.sh @@ -113,7 +113,7 @@ if [[ ! 
-f "$bucketCheckFlag" && "$GEN3_FLAVOR" == "AWS" ]]; then } EOM ) - gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" --create-bucket-configuration ‘{“LocationConstraint”:“‘$(aws configure get $GEN3_PROFILE.region)‘“}’ + gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" $([[ $(aws configure get $GEN3_PROFILE.region) = "us-east-1" ]] && echo "" || echo --create-bucket-configuration LocationConstraint="$(aws configure get $GEN3_PROFILE.region)") sleep 5 # Avoid race conditions if gen3_aws_run aws s3api put-bucket-encryption --bucket "$GEN3_S3_BUCKET" --server-side-encryption-configuration "$S3_POLICY"; then touch "$bucketCheckFlag" diff --git a/gen3/lib/logs/snapshot.sh b/gen3/lib/logs/snapshot.sh index 31cb80283b..ae769a2852 100644 --- a/gen3/lib/logs/snapshot.sh +++ b/gen3/lib/logs/snapshot.sh @@ -36,10 +36,11 @@ gen3_logs_snapshot_container() { # Snapshot all the pods # gen3_logs_snapshot_all() { + # For each pod for which we can list the containers, get the pod name and get its list of containers + # (container names + initContainers names). Diplay them as lines of " ". g3kubectl get pods -o json | \ - jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | map( {pod: .metadata.name, containers: .spec.containers | map(.name) } ) | map( .pod as $pod | .containers | map( { pod: $pod, cont: .})[]) | map(select(.cont != "pause" and .cont != "jupyterhub"))[] | .pod + " " + .cont' | \ + jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | .[] | .metadata.name as $pod | (.spec.containers + .spec.initContainers) | map(select(.name != "pause" and .name != "jupyterhub")) | .[] | {pod: $pod, cont: .name} | "\(.pod) \(.cont)"' | \ while read -r line; do gen3_logs_snapshot_container $line done } - diff --git a/gen3/lib/testData/default/expectedFenceResult.yaml b/gen3/lib/testData/default/expectedFenceResult.yaml index f6d76d790f..98c3605311 100644 --- a/gen3/lib/testData/default/expectedFenceResult.yaml +++ b/gen3/lib/testData/default/expectedFenceResult.yaml @@ -44,6 +44,13 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType @@ -136,6 +143,7 @@ spec: ports: - containerPort: 80 - containerPort: 443 + - containerPort: 6567 volumeMounts: # ----------------------------------------------------------------------------- # DEPRECATED! 
diff --git a/gen3/lib/testData/default/expectedFenceResult.yaml b/gen3/lib/testData/default/expectedFenceResult.yaml
index f6d76d790f..98c3605311 100644
--- a/gen3/lib/testData/default/expectedFenceResult.yaml
+++ b/gen3/lib/testData/default/expectedFenceResult.yaml
@@ -44,6 +44,13 @@ spec:
         nodeAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
+            preference:
+              matchExpressions:
+              - key: karpenter.sh/capacity-type
+                operator: In
+                values:
+                - on-demand
+          - weight: 99
             preference:
               matchExpressions:
               - key: eks.amazonaws.com/capacityType
@@ -136,6 +143,7 @@ spec:
           ports:
           - containerPort: 80
           - containerPort: 443
+          - containerPort: 6567
           volumeMounts:
 # -----------------------------------------------------------------------------
 # DEPRECATED! Remove when all commons are no longer using local_settings.py
diff --git a/gen3/lib/testData/default/expectedSheepdogResult.yaml b/gen3/lib/testData/default/expectedSheepdogResult.yaml
index b9db85a36c..a2bd3efcc4 100644
--- a/gen3/lib/testData/default/expectedSheepdogResult.yaml
+++ b/gen3/lib/testData/default/expectedSheepdogResult.yaml
@@ -17,6 +17,7 @@ spec:
   template:
     metadata:
       labels:
+        netnolimit: "yes"
        app: sheepdog
        release: production
        public: "yes"
@@ -39,12 +40,19 @@ spec:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
+           preference:
+             matchExpressions:
+             - key: karpenter.sh/capacity-type
+               operator: In
+               values:
+               - spot
+         - weight: 99
            preference:
              matchExpressions:
              - key: eks.amazonaws.com/capacityType
                operator: In
                values:
-                - ONDEMAND
+                - SPOT
       automountServiceAccountToken: false
       volumes:
         - name: config-volume
diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml
index d4196c0708..adc35ad2f1 100644
--- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml
+++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml
@@ -47,6 +47,13 @@ spec:
         nodeAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
+            preference:
+              matchExpressions:
+              - key: karpenter.sh/capacity-type
+                operator: In
+                values:
+                - on-demand
+          - weight: 99
             preference:
               matchExpressions:
               - key: eks.amazonaws.com/capacityType
diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml
index f54fd3e03b..08407ae52b 100644
--- a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml
+++ b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml
@@ -43,6 +43,13 @@ spec:
         nodeAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
+            preference:
+              matchExpressions:
+              - key: karpenter.sh/capacity-type
+                operator: In
+                values:
+                - spot
+          - weight: 99
             preference:
               matchExpressions:
               - key: eks.amazonaws.com/capacityType
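The four fixture diffs above encode the scheduling convention used throughout this change set: a weight-100 preference for the Karpenter `karpenter.sh/capacity-type` label, with a weight-99 fallback to the EKS managed-node-group label `eks.amazonaws.com/capacityType`, so the same manifest schedules sensibly whichever provisioner created the node. A read-only way to see which label each node in a cluster actually carries:

```bash
# karpenter-provisioned nodes set karpenter.sh/capacity-type; EKS managed node
# groups set eks.amazonaws.com/capacityType — show both as extra columns
kubectl get nodes -L karpenter.sh/capacity-type -L eks.amazonaws.com/capacityType
```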
"ecr_setup modified containerd.sock" fi } diff --git a/gen3/test/jobTest.sh b/gen3/test/jobTest.sh index 84a4d046b6..bb37b4f723 100644 --- a/gen3/test/jobTest.sh +++ b/gen3/test/jobTest.sh @@ -6,7 +6,7 @@ excludeJob() { local jobKey="$1" local excludeList=( - /aws-bucket- /bucket- /covid19- /data-ingestion- /google- /nb-etl- /remove-objects-from- /replicate- /s3sync- /fence-cleanup + /aws-bucket- /bucket- /covid19- /data-ingestion- /google- /nb-etl- /remove-objects-from- /replicate- /s3sync- /fence-cleanup /etl- /indexd- /metadata- ) for exclude in "${excludeList[@]}"; do if [[ "$it" =~ $exclude ]]; then return 0; fi diff --git a/gen3/test/jupyterTest.sh b/gen3/test/jupyterTest.sh index f0e327d717..db6a626188 100644 --- a/gen3/test/jupyterTest.sh +++ b/gen3/test/jupyterTest.sh @@ -30,7 +30,7 @@ test_jupyter_metrics() { } shunit_runtest "test_jupyter_idle" "jupyter" -shunit_runtest "test_jupyter_metrics" "jupyter" +# shunit_runtest "test_jupyter_metrics" "jupyter" shunit_runtest "test_jupyter_prepuller" "local,jupyter" shunit_runtest "test_jupyter_namespace" "local,jupyter" shunit_runtest "test_jupyter_setup" "jupyter" diff --git a/gen3/test/terraformTest.sh b/gen3/test/terraformTest.sh deleted file mode 100644 index 17bcc03c2b..0000000000 --- a/gen3/test/terraformTest.sh +++ /dev/null @@ -1,461 +0,0 @@ -GEN3_TEST_PROFILE="${GEN3_TEST_PROFILE:-cdistest}" -GEN3_TEST_WORKSPACE="gen3test" -GEN3_TEST_ACCOUNT=707767160287 - -# -# TODO - generalize these tests to setup their own test VPC, -# rather than relying on qaplanetv1 or devplanetv1 being there -# - -# -# Little macos/linux stat wrapper -# -file_mode() { - if [[ $(uname -s) == 'Linux' ]]; then - stat -c %a "$1" - else - stat -f %p "$1" - fi -} - -test_workspace() { - gen3 workon $GEN3_TEST_PROFILE $GEN3_TEST_WORKSPACE; because $? "Calling gen3 workon multiple times should be harmless" - [[ $GEN3_PROFILE = $GEN3_TEST_PROFILE ]]; because $? "gen3 workon sets the GEN3_PROFILE env variable: $GEN3_PROFILE" - [[ $GEN3_WORKSPACE = $GEN3_TEST_WORKSPACE ]]; because $? "gen3 workon sets the GEN3_WORKSPACE env variable: $GEN3_WORKSPACE" - [[ $GEN3_FLAVOR = "AWS" || \ - ($GEN3_FLAVOR == "GCP" && $GEN3_PROFILE =~ ^gcp-) || \ - ($GEN3_FLAVOR == "ONPREM" && $GEN3_PROFILE =~ ^onprem-) ]]; because $? "GEN3_FLAVOR is gcp for gcp-* profiles, else AWS" - [[ $GEN3_FLAVOR != "AWS" || $GEN3_S3_BUCKET = "cdis-state-ac${GEN3_TEST_ACCOUNT}-gen3" || $GEN3_S3_BUCKET = "cdis-terraform-state.account-${GEN3_TEST_ACCOUNT}.gen3" ]]; because $? "gen3 workon sets the GEN3_S3_BUCKET env variable: $GEN3_S3_BUCKET" - [[ (! -z $GEN3_WORKDIR) && -d $GEN3_WORKDIR ]]; because $? "gen3 workon sets the GEN3_WORKDIR env variable, and initializes the folder: $GEN3_WORKDIR" - [[ $(file_mode $GEN3_WORKDIR) =~ 700$ ]]; because $? "gen3 workon sets the GEN3_WORKDIR to mode 0700, because secrets are in there" - gen3 cd && [[ $(pwd) = "$GEN3_WORKDIR" ]]; because $? "gen3 cd should take us to the workspace by default: $(pwd) =? $GEN3_WORKDIR" - for fileName in README.md config.tfvars backend.tfvars; do - [[ -f $fileName ]]; because $? "gen3 workon ensures we have a $fileName - local copy || s3 copy || generated from template" - done - [[ ! -z "$MD5" ]]; because $? "commons.sh sets MD5 to $MD5" - - if [[ $GEN3_TEST_WORKSPACE =~ __custom$ ]]; then - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_WORKDIR" ]]; because $? "a __custom workspace loads from the workspace folder" - elif [[ "$GEN3_TEST_PROFILE" =~ ^gcp- ]]; then - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/gcp/commons" ]]; because $? 
"a gcp- profile currently only support a commons workspace" - elif [[ "$GEN3_TEST_PROFILE" =~ ^onprem- ]]; then - for fileName in README.md creds.json 00configmap.yaml kube-setup.sh; do - filePath="onprem_scripts/$fileName" - [[ -f $filePath ]]; because $? "gen3 workon ensures we have a $filePath generated from template" - done - else # aws profile - [[ "$GEN3_TFSCRIPT_FOLDER" =~ ^"$GEN3_HOME/tf_files/aws/" ]]; because $? "an aws workspace references the aws/ folder: $GEN3_TFSCRIPT_FOLDER" - fi -} - -workspace_cleanup() { - # try to avoid accidentally erasing the user's data ... - cd /tmp && [[ -n "$GEN3_WORKDIR" && "$GEN3_WORKDIR" =~ /gen3/ && -f "$GEN3_WORKDIR/config.tfvars" ]] && /bin/rm -rf "$GEN3_WORKDIR"; - because $? "was able to cleanup $GEN3_WORKDIR" -} - -test_uservpc_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_user" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/user_vpc" ]]; because $? "a _user workspace should use the ./aws/user_vpc resources: $GEN3_TFSCRIPT_FOLDER" - workspace_cleanup -} - -test_usergeneric_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_usergeneric" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/user_generic" ]]; because $? "a _usergeneric workspace should use the ./aws/user_generic resources: $GEN3_TFSCRIPT_FOLDER" - cat << EOF > config.tfvars -username="frickjack" -EOF - gen3 tfplan; because $? "_usergeneric tfplan should work"; - workspace_cleanup -} - -test_snapshot_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_snapshot" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/rds_snapshot" ]]; because $? "a _snapshot workspace should use the ./aws/rds_snapshot resources: $GEN3_TFSCRIPT_FOLDER" - workspace_cleanup -} - -test_databucket_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_databucket" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/data_bucket" ]]; because $? "a _databucket workspace should use the ./aws/data_bucket resources: $GEN3_TFSCRIPT_FOLDER" - cat - > config.tfvars < config.tfvars < config.tfvars < @ in password -db_password_fence="whatever" - -db_password_gdcapi="whatever" -db_password_sheepdog="whatever" -db_password_peregrine="whatever" - -db_password_indexd="g6pmYkcoR7qECjGoErzVb5gkX3kum0yo" - -# password for write access to indexd -gdcapi_indexd_password="oYva39mIPV5uXskv7jWnKuVZBUFBQcxd" - -fence_snapshot="" -gdcapi_snapshot="" -indexd_snapshot="" -# mailgun for sending alert e-mails -mailgun_api_key="" -mailgun_api_url="" -mailgun_smtp_host="" - -kube_ssh_key="" -EOM - [[ "$(pwd)" =~ "/$GEN3_WORKSPACE"$ ]]; because $? "commons workspace should have base $GEN3_WORKSPACE - $(pwd)" - gen3 tfplan; because $? "tfplan should run even with some invalid config variables" - [[ -f "$GEN3_WORKDIR/plan.terraform" ]]; because $? "'gen3 tfplan' generates a plan.terraform file used by 'gen3 tfapply'" - workspace_cleanup -} - -test_custom_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}__custom" - test_workspace - - local sourceFolder="../../../../../cloud-automation/tf_files/aws/modules/s3-bucket" - if [[ ! 
-d "$sourceFolder" ]]; then - # Jenkins has a different relative path setup - sourceFolder="../../../../cloud-automation/tf_files/aws/modules/s3-bucket" - fi - cat - > bucket.tf < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars </dev/null 2>&1; then - envsubst < /home/manifests/nodetemplate.yaml | kubectl apply -f - + sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - fi if ! kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - envsubst < /home/manifests/provisioner.yaml | kubectl apply -f - + sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - fi env: - name: WORKFLOW_NAME @@ -76,9 +86,13 @@ spec: configMapKeyRef: name: environment key: environment + - name: PROVISIONER_TEMPLATE + value: /manifests/provisioner.yaml + - name: AWSNODETEMPLATE_TEMPLATE + value: /manifests/nodetemplate.yaml volumeMounts: - name: karpenter-templates-volume - mountPath: /home/manifests + mountPath: /manifests volumes: - name: karpenter-templates-volume configMap: diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml index 65f68d98ab..89ec29ecce 100644 --- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml +++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml @@ -58,7 +58,10 @@ spec: configMap: name: manifest-argo optional: true - + - name: argo-wrapper-namespace-config + configMap: + name: argo-wrapper-namespace-config + containers: - name: argo-wrapper GEN3_ARGO-WRAPPER_IMAGE @@ -70,3 +73,7 @@ spec: readOnly: true mountPath: /argo.json subPath: argo.json + - name: argo-wrapper-namespace-config + readOnly: true + mountPath: /argowrapper/config.ini + subPath: config.ini diff --git a/kube/services/argo-wrapper/config.ini b/kube/services/argo-wrapper/config.ini new file mode 100644 index 0000000000..0693ee2e29 --- /dev/null +++ b/kube/services/argo-wrapper/config.ini @@ -0,0 +1,6 @@ +[DEFAULT] +ARGO_ACCESS_METHOD = access +ARGO_HOST = $ARGO_HOST +ARGO_NAMESPACE = $ARGO_NAMESPACE +COHORT_DEFINITION_BY_SOURCE_AND_TEAM_PROJECT_URL = http://cohort-middleware-service/cohortdefinition-stats/by-source-id/{}/by-team-project?team-project={} +COHORT_MIDDLEWARE_URL = http://cohort-middleware-service diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 67fa05a09d..23dda4a5ad 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -1,5 +1,6 @@ controller: - parallelism: 5 + parallelism: 10 + namespaceParallelism: 5 metricsConfig: # -- Enables prometheus metrics server enabled: true @@ -28,11 +29,11 @@ controller: } ] } - } + } resourceRateLimit: limit: 40 - burst: 4 + burst: 4 # -- enable persistence using postgres persistence: @@ -49,7 +50,7 @@ controller: port: 5432 database: GEN3_ARGO_DB_NAME tableName: argo_workflows - # # the database secrets must be in the same namespace of the controller + # # the database secrets must be in the same namespace of the controller userNameSecret: name: argo-db-creds key: db_username @@ -58,7 +59,7 @@ controller: key: db_password nodeStatusOffLoad: true - workflowDefaults: + workflowDefaults: spec: archiveLogs: true @@ -77,11 +78,11 @@ server: baseHref: "/argo/" # -- Extra arguments to provide to the Argo server binary, such as for disabling authentication. 
 extraArgs:
-  - --auth-mode=server
-  - --auth-mode=client
+  - --auth-mode=server
+  - --auth-mode=client
 extraEnv:
-  - name: ARGO_HTTP1
-    value: "true"
+  - name: ARGO_HTTP1
+    value: "true"
 resources:
   requests:
     memory: 8Gi
diff --git a/kube/services/dicom-viewer/dicom-viewer-service.yaml b/kube/services/dicom-viewer/dicom-viewer-service.yaml
index ea25765845..26f3a21b05 100644
--- a/kube/services/dicom-viewer/dicom-viewer-service.yaml
+++ b/kube/services/dicom-viewer/dicom-viewer-service.yaml
@@ -12,4 +12,4 @@ spec:
       nodePort: null
       name: http
   type: ClusterIP
-
\ No newline at end of file
+
diff --git a/kube/services/gen3-discovery-ai/README.md b/kube/services/gen3-discovery-ai/README.md
new file mode 100644
index 0000000000..4c20678e06
--- /dev/null
+++ b/kube/services/gen3-discovery-ai/README.md
@@ -0,0 +1,42 @@
+# Gen3 Discovery AI Configuration
+
+Expects data in a `gen3-discovery-ai` folder relative to
+where the `manifest.json` is.
+
+Basic setup:
+
+`{{dir where manifest.json is}}/gen3-discovery-ai/knowledge/`
+
+- `tsvs` folder
+    - tsvs with topic_name at beginning of file
+- `markdown` folder
+    - {{topic_name_1}}
+        - markdown file(s)
+    - {{topic_name_2}}
+        - markdown file(s)
+
+The `kube-setup-gen3-discovery-ai` script syncs the above `/knowledge` folder to
+an S3 bucket. The service configuration then pulls from the S3 bucket and runs load commands
+to get the data into chromadb.
+
+> Note: See the `gen3-discovery-ai` service repo docs and README for more details on data load capabilities.
+
+Check the `gen3-discovery-ai-deploy.yaml` for what commands are being run in the automation.
+
+Expects secrets setup in `g3auto/gen3-discovery-ai` folder
+  - `credentials.json`: Google service account key if using a topic with Google Vertex AI
+  - `env`: .env file contents for service configuration (see service repo for a default one)
+
+## Populating Disk for In-Memory Vectordb Chromadb
+
+To set up pre-configured topics, we need to load data
+into Chromadb (an in-memory vectordb with an option to persist to disk).
+
+To load topics consistently, we set up an S3 bucket to house the persisted
+data for the vectordb.
+
+### Getting data from S3 into memory
+
+We specify a path for Chromadb to use for persisted data, and when it sees
+data there, it loads it in. So the deployment automation: 1. runs an `aws s3 sync` against the bucket
+and then 2. calls a script to load the files from there into the in-memory vectorstore.
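A condensed sketch of the publish side and of the load commands the deploy manifest below runs; the bucket name lives in `storage_config.json` under the `g3auto/gen3-discovery-ai` secrets, and the `jq` usage here is illustrative (the manifest itself extracts it with `grep`/`awk`):

```bash
# operator side: push the curated knowledge folder up to the bucket
bucketName="$(jq -r .bucket gen3-discovery-ai/storage_config.json)"
aws s3 sync ./gen3-discovery-ai/knowledge "s3://${bucketName}"

# pod side (init containers): pull it back down, then load chromadb from disk
aws s3 sync "s3://${bucketName}" /gen3discoveryai/knowledge/tmp
poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py tsvs /gen3discoveryai/knowledge/tmp/tsvs
```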
diff --git a/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml b/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml
new file mode 100644
index 0000000000..dcfe03248a
--- /dev/null
+++ b/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml
@@ -0,0 +1,181 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: gen3-discovery-ai-deployment
+spec:
+  selector:
+    # Only select pods based on the 'app' label
+    matchLabels:
+      app: gen3-discovery-ai
+      release: production
+  strategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 0
+  template:
+    metadata:
+      labels:
+        app: gen3-discovery-ai
+        release: production
+        GEN3_DATE_LABEL
+    spec:
+      serviceAccountName: gen3-discovery-ai-sa
+      volumes:
+        - name: gen3-discovery-ai-g3auto-volume
+          secret:
+            secretName: gen3-discovery-ai-g3auto
+        - name: gen3-discovery-ai-knowledge-library-volume
+          emptyDir: {}
+      initContainers:
+        # chromadb's persisted disk support requires the ability to write. We don't technically need this ability
+        # since we're populating the entirety of the database from configured files (no live updates).
+        #
+        # Solution: utilize emptyDir as a writable space.
+        #
+        # Procedure: in init containers, copy files from s3 to writable
+        # temporary space in emptyDir, use files from writable space
+        # to load into the knowledge library, move final knowledge library
+        # files into top-level emptyDir and make available in final container
+        - name: gen3-discovery-ai-aws-init
+          GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 8080
+          env:
+            - name: GEN3_DEBUG
+              GEN3_DEBUG_FLAG|-value: "False"-|
+          volumeMounts:
+            - name: gen3-discovery-ai-g3auto-volume
+              readOnly: true
+              mountPath: /gen3discoveryai/.env
+              subPath: env
+            - name: gen3-discovery-ai-g3auto-volume
+              readOnly: true
+              mountPath: /gen3discoveryai/credentials.json
+              subPath: credentials.json
+            - name: gen3-discovery-ai-g3auto-volume
+              readOnly: true
+              mountPath: /gen3discoveryai/storage_config.json
+              subPath: storage_config.json
+            - name: gen3-discovery-ai-knowledge-library-volume
+              mountPath: /gen3discoveryai/knowledge
+          imagePullPolicy: Always
+          resources:
+            requests:
+              cpu: 1
+            limits:
+              cpu: 2
+              memory: 512Mi
+          command: ["/bin/bash"]
+          args:
+            - "-c"
+            - |
+              bucketName=$(grep -o "\"bucket\": *\"[^\"]*\"" /gen3discoveryai/storage_config.json | awk -F'"' '{print $4}')
+              echo BUCKET: "$bucketName"
+              echo
+              echo BEFORE /gen3discoveryai/knowledge
+              ls -Ra /gen3discoveryai/knowledge
+              echo
+              echo syncing from s3
+              aws s3 sync "s3://${bucketName}" "/gen3discoveryai/knowledge/tmp"
+              echo
+              echo AFTER /gen3discoveryai/knowledge
+              ls -Ra /gen3discoveryai/knowledge
+        - name: gen3-discovery-ai-knowledge-init
+          GEN3_GEN3-DISCOVERY-AI_IMAGE
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 8080
+          env:
+            - name: GEN3_DEBUG
+              GEN3_DEBUG_FLAG|-value: "False"-|
+            - name: ANONYMIZED_TELEMETRY
+              value: "False"
+            - name: GOOGLE_APPLICATION_CREDENTIALS
+              value: /gen3discoveryai/credentials.json
+          volumeMounts:
+            - name: gen3-discovery-ai-g3auto-volume
+              readOnly: true
+              mountPath: /gen3discoveryai/.env
+              subPath: env
+            - name: gen3-discovery-ai-g3auto-volume
+              readOnly: true
+              mountPath: /gen3discoveryai/credentials.json
+              subPath: credentials.json
+            - name: gen3-discovery-ai-g3auto-volume
+              readOnly: true
+              mountPath: /gen3discoveryai/storage_config.json
+              subPath: storage_config.json
+            - name: gen3-discovery-ai-knowledge-library-volume
+              mountPath: /gen3discoveryai/knowledge
+          imagePullPolicy: Always
+          resources:
+            requests:
+              cpu: 1
+            limits:
+              cpu: 2
+              memory: 512Mi
+          command: ["/bin/bash"]
+          args:
+            - "-c"
+            - |
+              echo
+              echo BEFORE /gen3discoveryai/knowledge
+              ls -Ra /gen3discoveryai/knowledge
+              echo running load_into_knowledge_store.py
+              poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py tsvs /gen3discoveryai/knowledge/tmp/tsvs
+
+              if [ -d "/gen3discoveryai/knowledge/tmp/markdown" ]; then
+                for dir in "/gen3discoveryai/knowledge/tmp/markdown"/*; do
+                  if [ -d "$dir" ]; then
+                    dir_name=$(basename "$dir")
+
+                    echo "Processing directory: $dir_name. 
Full path: $dir" + poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py markdown --topic $dir_name $dir + fi + done + else + echo "Not syncing markdown, directory not found: /gen3discoveryai/knowledge/tmp/markdown" + fi + + rm -r /gen3discoveryai/knowledge/tmp/ + echo + echo AFTER /gen3discoveryai/knowledge + ls -Ra /gen3discoveryai/knowledge + containers: + - name: gen3-discovery-ai + GEN3_GEN3-DISCOVERY-AI_IMAGE + imagePullPolicy: Always + ports: + - containerPort: 8080 + env: + - name: GEN3_DEBUG + GEN3_DEBUG_FLAG|-value: "False"-| + - name: ANONYMIZED_TELEMETRY + value: "False" + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /gen3discoveryai/credentials.json + volumeMounts: + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/.env + subPath: env + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/credentials.json + subPath: credentials.json + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/storage_config.json + subPath: storage_config.json + - name: gen3-discovery-ai-knowledge-library-volume + mountPath: /gen3discoveryai/knowledge + imagePullPolicy: Always + resources: + requests: + cpu: 1 + limits: + cpu: 2 + # NOTE: If the configured data for the knowledge library (vector database) is large, you may need to bump this + memory: 512Mi diff --git a/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml b/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml new file mode 100644 index 0000000000..b4734c3b8a --- /dev/null +++ b/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml @@ -0,0 +1,21 @@ +kind: Service +apiVersion: v1 +metadata: + name: gen3-discovery-ai-service +spec: + selector: + app: gen3-discovery-ai + release: production + ports: + - protocol: TCP + port: 80 + targetPort: 8089 + name: http + nodePort: null + - protocol: TCP + port: 443 + targetPort: 443 + name: https + nodePort: null + type: ClusterIP + diff --git a/kube/services/guppy/guppy-deploy.yaml b/kube/services/guppy/guppy-deploy.yaml index 01a8905de7..c3e8d121c4 100644 --- a/kube/services/guppy/guppy-deploy.yaml +++ b/kube/services/guppy/guppy-deploy.yaml @@ -155,6 +155,6 @@ spec: resources: requests: cpu: 100m - memory: 128Mi + memory: 256Mi limits: - memory: 1200Mi + memory: 2000Mi diff --git a/kube/services/ingress/ingress.yaml b/kube/services/ingress/ingress.yaml index 65916679a7..1db08e8ef6 100644 --- a/kube/services/ingress/ingress.yaml +++ b/kube/services/ingress/ingress.yaml @@ -11,7 +11,7 @@ metadata: alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' alb.ingress.kubernetes.io/load-balancer-attributes: idle_timeout.timeout_seconds=600 alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' - alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS13-1-2-2021-06 + alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS13-1-2-FIPS-2023-04 spec: ingressClassName: alb rules: diff --git a/kube/services/jenkins/jenkins-deploy.yaml b/kube/services/jenkins/jenkins-deploy.yaml index c0eae2040b..954e996f21 100644 --- a/kube/services/jenkins/jenkins-deploy.yaml +++ b/kube/services/jenkins/jenkins-deploy.yaml @@ -38,6 +38,10 @@ spec: operator: In values: - on-demand + - key: topology.kubernetes.io/zone + operator: In + values: + - us-east-1a serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git 
a/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml b/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml new file mode 100644 index 0000000000..7f4e58109c --- /dev/null +++ b/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + name: jenkins-agent-service + name: jenkins-agent + namespace: default +spec: + ports: + - name: slavelistener + port: 50000 + protocol: TCP + targetPort: 50000 + selector: + app: jenkins + sessionAffinity: None + type: ClusterIP diff --git a/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml new file mode 100644 index 0000000000..3dea38a5cb --- /dev/null +++ b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml @@ -0,0 +1,149 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jenkins-ci-worker-deployment +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: jenkins-ci-worker + template: + metadata: + labels: + app: jenkins-ci-worker + # for network policy + netnolimit: "yes" + annotations: + "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + - matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + serviceAccountName: jenkins-service + securityContext: + runAsUser: 1000 + fsGroup: 1000 + initContainers: + - args: + - -c + - | + # fix permissions for /var/run/docker.sock + chmod 666 /var/run/docker.sock + echo "done" + command: + - /bin/bash + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + name: awshelper + resources: {} + securityContext: + allowPrivilegeEscalation: false + runAsUser: 0 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/docker.sock + name: dockersock + containers: + # + # See for details on running docker in a pod: + # https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b + # + - name: jenkins-worker + image: "quay.io/cdis/gen3-ci-worker:master" + ports: + - containerPort: 8080 + env: + - name: JENKINS_URL + value: "https://jenkins2.planx-pla.net" + - name: JENKINS_SECRET + valueFrom: + secretKeyRef: + name: jenkins-ci-worker-g3auto + key: jenkins-jnlp-agent-secret + - name: JENKINS_AGENT_NAME + value: "gen3-ci-worker" + - name: JENKINS_TUNNEL + value: "jenkins-agent:50000" + - name: AWS_DEFAULT_REGION + value: us-east-1 + - name: JAVA_OPTS + value: "-Xmx3072m" + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: jenkins-secret + key: aws_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: jenkins-secret + key: aws_secret_access_key + - name: GOOGLE_EMAIL_AUX1 + valueFrom: + secretKeyRef: + name: google-acct1 + key: email + - name: GOOGLE_PASSWORD_AUX1 + valueFrom: + secretKeyRef: + name: google-acct1 + key: password + - name: GOOGLE_EMAIL_AUX2 + valueFrom: + secretKeyRef: + name: google-acct2 + key: email + - name: GOOGLE_PASSWORD_AUX2 + valueFrom: + secretKeyRef: + name: google-acct2 + key: password + - name: GOOGLE_APP_CREDS_JSON + valueFrom: + secretKeyRef: + name: jenkins-g3auto + key: google_app_creds.json + resources: + limits: + cpu: 0.9 + memory: 4096Mi + ephemeral-storage: 500Mi + imagePullPolicy: Always + 
volumeMounts:
+          - name: "cert-volume"
+            readOnly: true
+            mountPath: "/mnt/ssl/service.crt"
+            subPath: "service.crt"
+          - name: "cert-volume"
+            readOnly: true
+            mountPath: "/mnt/ssl/service.key"
+            subPath: "service.key"
+          - name: "ca-volume"
+            readOnly: true
+            mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt"
+            subPath: "ca.pem"
+          - name: dockersock
+            mountPath: "/var/run/docker.sock"
+      volumes:
+        - name: cert-volume
+          secret:
+            secretName: "cert-jenkins-service"
+        - name: ca-volume
+          secret:
+            secretName: "service-ca"
+        - name: dockersock
+          hostPath:
+            path: /var/run/docker.sock
diff --git a/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml
new file mode 100644
index 0000000000..047e4e966e
--- /dev/null
+++ b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml
@@ -0,0 +1,12 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: datadir-jenkins-ci
+  annotations:
+    volume.beta.kubernetes.io/storage-class: gp2
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 200Gi
diff --git a/kube/services/jenkins2/jenkins2-deploy.yaml b/kube/services/jenkins2/jenkins2-deploy.yaml
index ee838bae60..08365f811b 100644
--- a/kube/services/jenkins2/jenkins2-deploy.yaml
+++ b/kube/services/jenkins2/jenkins2-deploy.yaml
@@ -48,7 +48,7 @@ spec:
       # https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b
       #
       - name: jenkins
-        GEN3_JENKINS_IMAGE
+        GEN3_JENKINS2_IMAGE
        ports:
          - containerPort: 8080
            name: http
diff --git a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml
index 29603d27fe..a726237362 100644
--- a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml
+++ b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: arborist-rm-expired-access
diff --git a/kube/services/jobs/covid19-bayes-cronjob.yaml b/kube/services/jobs/covid19-bayes-cronjob.yaml
index 733c17cf71..01e71badeb 100644
--- a/kube/services/jobs/covid19-bayes-cronjob.yaml
+++ b/kube/services/jobs/covid19-bayes-cronjob.yaml
@@ -1,5 +1,5 @@
 # gen3 job run covid19-bayes-cronjob S3_BUCKET
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: covid19-bayes
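The CronJob manifests above and below all move from `batch/v1beta1`, which was removed in Kubernetes 1.25, to `batch/v1`, which has been available since 1.21. A quick read-only audit for stragglers:

```bash
# any manifest still pinned to the removed API?
grep -rn 'batch/v1beta1' kube/services/jobs/ || echo "all CronJobs migrated"

# which batch API versions the current cluster actually serves
kubectl api-versions | grep '^batch/'
```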
diff --git a/kube/services/jobs/ecr-access-job.yaml b/kube/services/jobs/ecr-access-job.yaml
new file mode 100644
index 0000000000..89bb49d6d2
--- /dev/null
+++ b/kube/services/jobs/ecr-access-job.yaml
@@ -0,0 +1,83 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ecr-access
+spec:
+  template:
+    metadata:
+      labels:
+        app: gen3job
+    spec:
+      affinity:
+        nodeAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            preference:
+              matchExpressions:
+              - key: karpenter.sh/capacity-type
+                operator: In
+                values:
+                - on-demand
+          - weight: 99
+            preference:
+              matchExpressions:
+              - key: eks.amazonaws.com/capacityType
+                operator: In
+                values:
+                - ONDEMAND
+      restartPolicy: Never
+      serviceAccountName: ecr-access-job-sa
+      securityContext:
+        fsGroup: 1000
+      containers:
+        - name: awshelper
+          GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: 0.5
+              memory: 1Gi
+          env:
+            - name: SLACK_WEBHOOK
+              valueFrom:
+                configMapKeyRef:
+                  name: global
+                  key: slack_webhook
+                  optional: true
+            - name: HOSTNAME
+              valueFrom:
+                configMapKeyRef:
+                  name: global
+                  key: hostname
+            - name: PAY_MODELS_DYNAMODB_TABLE
+              valueFrom:
+                configMapKeyRef:
+                  name: manifest-hatchery
+                  key: pay-models-dynamodb-table
+                  optional: true
+            - name: ECR_ACCESS_JOB_ARN
+              valueFrom:
+                configMapKeyRef:
+                  name: manifest-global
+                  key: ecr-access-job-role-arn
+                  optional: true
+          command: ["/bin/bash"]
+          args:
+            - "-c"
+            - |
+              cd cloud-automation/files/scripts/
+              echo Installing requirements...
+              pip3 install -r ecr-access-job-requirements.txt
+              python3 ecr-access-job.py
+              exitcode=$?
+
+              if [[ "${SLACK_WEBHOOK}" != 'None' ]]; then
+                if [[ $exitcode == 1 ]]; then
+                  curl -X POST --data-urlencode "payload={\"text\": \"JOBFAIL: ECR access job on ${HOSTNAME}\"}" "${SLACK_WEBHOOK}"
+                else
+                  curl -X POST --data-urlencode "payload={\"text\": \"SUCCESS: ECR access job on ${HOSTNAME}\"}" "${SLACK_WEBHOOK}"
+                fi
+              fi
+
+              echo "Exit code: $exitcode"
+              exit "$exitcode"
diff --git a/kube/services/jobs/etl-cronjob.yaml b/kube/services/jobs/etl-cronjob.yaml
index 463fbfb2e2..95b423debd 100644
--- a/kube/services/jobs/etl-cronjob.yaml
+++ b/kube/services/jobs/etl-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: etl
diff --git a/kube/services/jobs/etl-job.yaml b/kube/services/jobs/etl-job.yaml
index fa201c99aa..6b9b887eca 100644
--- a/kube/services/jobs/etl-job.yaml
+++ b/kube/services/jobs/etl-job.yaml
@@ -2,6 +2,8 @@
 apiVersion: batch/v1
 kind: Job
 metadata:
+  annotations:
+    karpenter.sh/do-not-evict: "true"
   name: etl
 spec:
   backoffLimit: 0
diff --git a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml
index 74d7fc9a4d..93eaf7652d 100644
--- a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml
+++ b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: fence-cleanup-expired-ga4gh-info
diff --git a/kube/services/jobs/fence-visa-update-cronjob.yaml b/kube/services/jobs/fence-visa-update-cronjob.yaml
index 6c58ef291e..eba842ddf5 100644
--- a/kube/services/jobs/fence-visa-update-cronjob.yaml
+++ b/kube/services/jobs/fence-visa-update-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: fence-visa-update
diff --git a/kube/services/jobs/google-delete-expired-access-cronjob.yaml b/kube/services/jobs/google-delete-expired-access-cronjob.yaml
index ce485cce36..2b9e4e49a6 100644
--- a/kube/services/jobs/google-delete-expired-access-cronjob.yaml
+++ b/kube/services/jobs/google-delete-expired-access-cronjob.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: google-delete-expired-access
diff --git a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml
index eb102f5bf4..b40e22624d 100644
--- a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml
+++ b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml
@@ -1,6 +1,6 @@
 ---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: batch/v1 CronJob requires Kubernetes 1.21+
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: google-delete-expired-service-account
diff --git a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml
index 499d6cabd1..6b4fc10aa0 100644
--- a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml
+++ b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml
@@ -1,6 +1,6 @@
 ---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: batch/v1 CronJob requires Kubernetes 1.21+
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: google-init-proxy-groups
diff --git a/kube/services/jobs/google-manage-account-access-cronjob.yaml b/kube/services/jobs/google-manage-account-access-cronjob.yaml
index 4e796cea0d..fd8bba6067 100644
--- a/kube/services/jobs/google-manage-account-access-cronjob.yaml
+++ b/kube/services/jobs/google-manage-account-access-cronjob.yaml
@@ -1,6 +1,6 @@
 ---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: batch/v1 CronJob requires Kubernetes 1.21+
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: google-manage-account-access
diff --git a/kube/services/jobs/google-manage-keys-cronjob.yaml b/kube/services/jobs/google-manage-keys-cronjob.yaml
index ea0bcc45fd..eff76d30ad 100644
--- a/kube/services/jobs/google-manage-keys-cronjob.yaml
+++ b/kube/services/jobs/google-manage-keys-cronjob.yaml
@@ -1,6 +1,6 @@
 ---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: batch/v1 CronJob requires Kubernetes 1.21+
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: google-manage-keys
diff --git a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml
index 57981d813f..49e83374fc 100644
--- a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml
+++ b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml
@@ -1,6 +1,6 @@
 ---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: batch/v1 CronJob requires Kubernetes 1.21+
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: google-verify-bucket-access-group
diff --git a/kube/services/jobs/healthcheck-cronjob.yaml b/kube/services/jobs/healthcheck-cronjob.yaml
index d79274bb7f..1ca71fc8d6 100644
--- a/kube/services/jobs/healthcheck-cronjob.yaml
+++ b/kube/services/jobs/healthcheck-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: healthcheck
diff --git a/kube/services/jobs/psql-db-dump-va-testing-job.yaml b/kube/services/jobs/psql-db-dump-va-testing-job.yaml
new file mode 100644
index 0000000000..8a8037e166
--- /dev/null
+++ b/kube/services/jobs/psql-db-dump-va-testing-job.yaml
@@ -0,0 +1,80 @@
+---
+# NOTE: This job was created specifically to dump all the databases in va-testing, in preparation for a move to a second cluster
+# If you aren't doing that, this probably is not the job you're looking for
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: psql-db-dump-va-testing
+spec:
+  template:
+    metadata:
+      labels:
+        app: gen3job
+    spec:
+      affinity:
+        nodeAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            preference:
+              matchExpressions:
+              - key: karpenter.sh/capacity-type
+                operator: In
+                values:
+                - on-demand
+          - weight: 99
+            preference:
+              matchExpressions:
+              - key: eks.amazonaws.com/capacityType
+                operator: In
+                values:
+                - ONDEMAND
+      serviceAccountName: dbbackup-sa
+      containers:
+        - name: pgdump
+          image: quay.io/cdis/awshelper:master
+          imagePullPolicy: Always
+          env:
+            - name: gen3Env
+              valueFrom:
+                configMapKeyRef:
+                  name: global
+                  key: environment
+            - name: JENKINS_HOME
+              value: "devterm"
+            - name: GEN3_HOME
+              value: /home/ubuntu/cloud-automation
+          command: ["/bin/bash"]
+          args:
+            - "-c"
+            - |
+              source "${GEN3_HOME}/gen3/lib/utils.sh"
+              gen3_load "gen3/gen3setup"
+              account_id=$(aws sts get-caller-identity --query "Account" --output text)
+              default_bucket_name="gen3-db-backups-${account_id}"
+              default_databases=("fence" "indexd" "sheepdog" "peregrine" "arborist" "argo" "atlas" "metadata" "ohdsi" "omop-data" "wts")
+              s3_dir="va-testing-$(date +"%Y-%m-%d-%H-%M-%S")"
+              databases=("${default_databases[@]}")
+              bucket_name=$default_bucket_name
+
+              for database in "${databases[@]}"; do
+                gen3_log_info "Starting database backup for ${database}"
+                gen3 db backup "${database}" > "${database}.sql"
+
+                if [ $? -eq 0 ] && [ -f "${database}.sql" ]; then
+                  gen3_log_info "Uploading backup file ${database}.sql to s3://${bucket_name}/${s3_dir}/${database}.sql"
+                  aws s3 cp "${database}.sql" "s3://${bucket_name}/${s3_dir}/${database}.sql"
+
+                  if [ $? -eq 0 ]; then
+                    gen3_log_info "Successfully uploaded ${database}.sql to S3"
+                  else
+                    gen3_log_err "Failed to upload ${database}.sql to S3"
+                  fi
+                  gen3_log_info "Deleting temporary backup file ${database}.sql"
+                  rm -f "${database}.sql"
+                else
+                  gen3_log_err "Backup operation failed for ${database}"
+                  rm -f "${database}.sql"
+                fi
+              done
+              sleep 600
+      restartPolicy: Never
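The dump loop above writes one `<database>.sql` per service and uploads it under a timestamped `va-testing-*` prefix. A hedged sketch of verifying a run and restoring a single dump into a scratch database (the bucket naming follows the job's convention; the scratch database name is hypothetical):

```bash
# locate the most recent backup prefix and pull one dump out of it
bucket="gen3-db-backups-$(aws sts get-caller-identity --query Account --output text)"
latest="$(aws s3 ls "s3://${bucket}/" | awk '{print $2}' | sort | tail -n 1)"
aws s3 cp "s3://${bucket}/${latest}fence.sql" ./fence.sql

# restore into a throwaway database to confirm the dump is usable
createdb fence_restore_check
psql -d fence_restore_check -f ./fence.sql
```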
diff --git a/kube/services/jobs/s3sync-cronjob.yaml b/kube/services/jobs/s3sync-cronjob.yaml
index f05ab518a4..69d66ec3fa 100644
--- a/kube/services/jobs/s3sync-cronjob.yaml
+++ b/kube/services/jobs/s3sync-cronjob.yaml
@@ -5,7 +5,7 @@
 #####REQUIRED VARIABLE########
 #SOURCE_BUCKET
 #TARGET_BUCKET
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: s3sync
diff --git a/kube/services/karpenter-reconciler/application.yaml b/kube/services/karpenter-reconciler/application.yaml
new file mode 100644
index 0000000000..fb0fab8711
--- /dev/null
+++ b/kube/services/karpenter-reconciler/application.yaml
@@ -0,0 +1,22 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: karpenter-reconciler-application
+  namespace: argocd
+spec:
+  destination:
+    namespace: kube-system
+    server: https://kubernetes.default.svc
+  project: default
+  source:
+    repoURL: https://github.com/uc-cdis/cloud-automation.git
+    targetRevision: master
+    path: kube/services/karpenter-reconciler
+    directory:
+      exclude: "application.yaml"
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+    syncOptions:
+      - CreateNamespace=true
diff --git a/kube/services/karpenter-reconciler/auth.yaml b/kube/services/karpenter-reconciler/auth.yaml
new file mode 100644
index 0000000000..c159028ab3
--- /dev/null
+++ b/kube/services/karpenter-reconciler/auth.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: karpenter-reconciler
+  namespace: argo-events
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: karpenter-admin-binding-reconciler
+subjects:
+  - kind: ServiceAccount
+    name: karpenter-reconciler
+    namespace: argo-events
+roleRef:
+  kind: ClusterRole
+  name: karpenter-admin
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: workflow-viewer-reconciler
+subjects:
+  - kind: ServiceAccount
+    name: karpenter-reconciler
+    namespace: argo-events
+roleRef:
+  kind: ClusterRole
+  name: argo-argo-workflows-view
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: viewer-reconciler
+subjects:
+  - kind: ServiceAccount
+    name: karpenter-reconciler
+    namespace: argo-events
+roleRef:
+  kind: ClusterRole
+  name: system:aggregate-to-view
+  apiGroup: rbac.authorization.k8s.io
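Both reconciler CronJobs below build their worklist with a kubectl jsonpath `range` over Argo workflows; note that a `{range ...}` expression must be closed with `{end}` (added to the scripts below). A quick read-only way to exercise the expression against a live cluster:

```bash
# emits one "<workflow-name> <gen3username>" line per workflow — the same
# shape the reconciler scripts parse with awk
kubectl get workflows -n argo \
  -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{"\n"}{end}'
```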
diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml
new file mode 100644
index 0000000000..aaba57b07a
--- /dev/null
+++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml
@@ -0,0 +1,71 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: karpenter-reconciler-cronjob-va-testing
+  namespace: argo-events
+spec:
+  schedule: "*/5 * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: gen3job
+        spec:
+          serviceAccount: karpenter-reconciler
+          volumes:
+            - name: karpenter-templates-volume
+              configMap:
+                name: karpenter-templates
+          containers:
+            - name: karpenter-reconciler
+              image: quay.io/cdis/awshelper
+              volumeMounts:
+                - name: karpenter-templates-volume
+                  mountPath: /manifests
+              env:
+                - name: PROVISIONER_TEMPLATE
+                  value: /manifests/provisioner.yaml
+                - name: AWSNODETEMPLATE_TEMPLATE
+                  value: /manifests/nodetemplate.yaml
+              command: ["/bin/bash"]
+              args:
+                - "-c"
+                - |
+                  #!/bin/bash
+                  if [ -z "$PROVISIONER_TEMPLATE" ]; then
+                    PROVISIONER_TEMPLATE="provisioner.yaml"
+                  fi
+
+                  if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then
+                    AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml"
+                  fi
+
+                  ENVIRONMENT=$(kubectl -n va-testing get configmap global -o jsonpath="{.data.environment}")
+
+                  WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{"\n"}{end}')
+
+                  WORKFLOW_ARRAY=()
+
+                  while IFS= read -r line; do
+                    WORKFLOW_ARRAY+=("$line")
+                  done <<< "$WORKFLOWS"
+
+                  for workflow in "${WORKFLOW_ARRAY[@]}"
+                  do
+                    echo "Running loop for workflow: $workflow"
+                    workflow_name=$(echo "$workflow" | awk '{print $1}')
+                    workflow_user=$(echo "$workflow" | awk '{print $2}')
+
+                    if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then
+                      echo "No awsnodetemplate found for ${workflow_name}, creating one"
+                      sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f -
+                    fi
+
+                    if ! kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then
+                      echo "No provisioner found for ${workflow_name}, creating one"
+                      sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f -
+
+                    fi
+                  done
+          restartPolicy: OnFailure
diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml
new file mode 100644
index 0000000000..aef5d6c49f
--- /dev/null
+++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml
@@ -0,0 +1,74 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: karpenter-reconciler-cronjob
+  namespace: argo-events
+spec:
+  schedule: "*/5 * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: gen3job
+        spec:
+          serviceAccount: karpenter-reconciler
+          volumes:
+            - name: karpenter-templates-volume
+              configMap:
+                name: karpenter-templates
+          containers:
+            - name: karpenter-reconciler
+              image: quay.io/cdis/awshelper
+              volumeMounts:
+                - name: karpenter-templates-volume
+                  mountPath: /manifests
+              env:
+                - name: PROVISIONER_TEMPLATE
+                  value: /manifests/provisioner.yaml
+                - name: AWSNODETEMPLATE_TEMPLATE
+                  value: /manifests/nodetemplate.yaml
+              command: ["/bin/bash"]
+              args:
+                - "-c"
+                - |
+                  #!/bin/bash
+                  if [ -z "$PROVISIONER_TEMPLATE" ]; then
+                    PROVISIONER_TEMPLATE="provisioner.yaml"
+                  fi
+
+                  if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then
+                    AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml"
+                  fi
+
+                  ENVIRONMENT=$(kubectl -n default get configmap global -o jsonpath="{.data.environment}")
+
+                  WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{"\n"}{end}')
+
+                  WORKFLOW_ARRAY=()
+
+                  while IFS= read -r line; do
+                    WORKFLOW_ARRAY+=("$line")
+                  done <<< "$WORKFLOWS"
+
+                  echo "$WORKFLOWS"
+
+                  for workflow in "${WORKFLOW_ARRAY[@]}"
+                  do
+                    workflow_name=$(echo "$workflow" | awk '{print $1}')
+                    workflow_user=$(echo "$workflow" | awk '{print $2}')
+
+                    if [ ! -z "$workflow_name" ]; then
+                      if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then
+                        echo "No awsnodetemplate found for ${workflow_name}, creating one"
+                        sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f -
+                      fi
+
+                      if ! 
kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then + echo "No provisioner found for ${workflow_name}, creating one" + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - + + fi + fi + done + restartPolicy: OnFailure diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml index a3dbf64802..6ba8b3a0f7 100644 --- a/kube/services/karpenter/nodeTemplateDefault.yaml +++ b/kube/services/karpenter/nodeTemplateDefault.yaml @@ -38,7 +38,14 @@ spec: # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + # --BOUNDARY + # Content-Type: text/cloud-config; charset="us-ascii" + + # mounts: + # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] + --BOUNDARY + Content-Type: text/cloud-config; charset="us-ascii" power_state: diff --git a/kube/services/karpenter/nodeTemplateGPU.yaml b/kube/services/karpenter/nodeTemplateGPU.yaml index 5270b697f3..925e7a9a08 100644 --- a/kube/services/karpenter/nodeTemplateGPU.yaml +++ b/kube/services/karpenter/nodeTemplateGPU.yaml @@ -38,6 +38,12 @@ spec: # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + # --BOUNDARY + # Content-Type: text/cloud-config; charset="us-ascii" + + # mounts: + # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] + --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" diff --git a/kube/services/karpenter/nodeTemplateJupyter.yaml b/kube/services/karpenter/nodeTemplateJupyter.yaml index 74f24926ac..1c8970ad64 100644 --- a/kube/services/karpenter/nodeTemplateJupyter.yaml +++ b/kube/services/karpenter/nodeTemplateJupyter.yaml @@ -38,6 +38,12 @@ spec: # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + # --BOUNDARY + # Content-Type: text/cloud-config; charset="us-ascii" + + # mounts: + # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] + --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" diff --git a/kube/services/karpenter/nodeTemplateWorkflow.yaml b/kube/services/karpenter/nodeTemplateWorkflow.yaml index ec2b81a60c..6e47b22f97 100644 --- a/kube/services/karpenter/nodeTemplateWorkflow.yaml +++ b/kube/services/karpenter/nodeTemplateWorkflow.yaml @@ -38,6 +38,12 @@ spec: # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + # --BOUNDARY + # Content-Type: text/cloud-config; charset="us-ascii" + + # mounts: + # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] + --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" diff --git a/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml b/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml index 7ad51cacad..93c2de3c33 100644 --- a/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml +++ b/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml @@ -3,7 +3,6 @@ kind: NetworkPolicy metadata: name: netpolicy-sowerjob spec: - spec: podSelector: matchLabels: app: sowerjob diff --git a/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml b/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml index 7b1f85c291..bd6e03f051 100644 --- a/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml +++ b/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml @@ -3,7 +3,6 @@ kind: NetworkPolicy metadata: name: netpolicy-ssjdispatcherjob spec: - spec: podSelector: matchLabels: 
app: ssjdispatcherjob diff --git a/kube/services/node-monitors/fenceshib-jenkins-test.yaml b/kube/services/node-monitors/fenceshib-jenkins-test.yaml new file mode 100644 index 0000000000..e9e27af983 --- /dev/null +++ b/kube/services/node-monitors/fenceshib-jenkins-test.yaml @@ -0,0 +1,40 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fenceshib-service-check + namespace: default +spec: + schedule: "0 */4 * * *" + jobTemplate: + spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: node-monitor + containers: + - name: kubectl + image: quay.io/cdis/awshelper + env: + - name: SLACK_WEBHOOK_URL + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + command: ["/bin/bash"] + args: + - "-c" + - | + #!/bin/bash + + fenceshib=$(kubectl get services -A | grep "fenceshib-service" | awk '{print $2}') + + # Check if there are any fenceshib services + if [[ ! -z "$fenceshib" ]]; then + echo "Alert: Service fenceshib-service found with output: $fenceshib" + curl -X POST -H 'Content-type: application/json' --data "{\"text\": \"WARNING: Fenceshib service discovered in qaplanetv1 cluster. This could cause issues with future CI runs. Please delete this service if it is not needed. Run the following in qaplanetv1 to see which namespace it is in: \`kubectl get services -A | grep "fenceshib-service"\`\"}" $SLACK_WEBHOOK_URL + else + echo "Fenceshib Service Not Found" + fi + restartPolicy: OnFailure diff --git a/kube/services/node-monitors/node-not-ready.yaml b/kube/services/node-monitors/node-not-ready.yaml index 6626b55075..500832fc34 100644 --- a/kube/services/node-monitors/node-not-ready.yaml +++ b/kube/services/node-monitors/node-not-ready.yaml @@ -22,6 +22,11 @@ spec: configMapKeyRef: name: global key: slack_webhook + - name: ENVIRONMENT + valueFrom: + configMapKeyRef: + name: global + key: environment command: ["/bin/bash"] args: @@ -36,7 +41,7 @@ spec: echo "Nodes reporting 'NodeStatusNeverUpdated', sending an alert:" echo "$NODES" # Send alert to Slack - curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODES}\` is stuck in "NotReady"!\"}" $SLACK_WEBHOOK_URL + curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODES}\` is stuck in "NotReady" in \`${ENVIRONMENT}\`! 
\"}" $SLACK_WEBHOOK_URL else echo "No nodes reporting 'NodeStatusNeverUpdated'" fi diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 258aa8f875..a729ae7c41 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -59,13 +59,6 @@ spec: containers: - name: ohdsi-webapi GEN3_OHDSI-WEBAPI_IMAGE|-image: quay.io/cdis/ohdsi-webapi:latest-| - env: - - name: ARBORIST_URL - valueFrom: - configMapKeyRef: - name: manifest-global - key: arborist_url - optional: true livenessProbe: httpGet: path: /WebAPI/info/ @@ -90,7 +83,7 @@ spec: limits: memory: 4Gi - name: ohdsi-webapi-reverse-proxy - image: nginx:1.23 + image: 707767160287.dkr.ecr.us-east-1.amazonaws.com/gen3/nginx:1.23 ports: - containerPort: 80 volumeMounts: @@ -104,4 +97,4 @@ spec: cpu: 100m memory: 100Mi limits: - memory: 500Mi \ No newline at end of file + memory: 500Mi diff --git a/kube/services/ohif-viewer/app-config.js b/kube/services/ohif-viewer/app-config.js new file mode 100644 index 0000000000..6768726f4c --- /dev/null +++ b/kube/services/ohif-viewer/app-config.js @@ -0,0 +1,209 @@ +window.config = { + routerBasename: '/ohif-viewer/', + // whiteLabeling: {}, + extensions: [], + modes: [], + customizationService: { + // Shows a custom route -access via http://localhost:3000/custom + // helloPage: '@ohif/extension-default.customizationModule.helloPage', + }, + showStudyList: true, + // some windows systems have issues with more than 3 web workers + maxNumberOfWebWorkers: 3, + // below flag is for performance reasons, but it might not work for all servers + omitQuotationForMultipartRequest: true, + showWarningMessageForCrossOrigin: true, + showCPUFallbackMessage: true, + showLoadingIndicator: true, + strictZSpacingForVolumeViewport: true, + maxNumRequests: { + interaction: 100, + thumbnail: 75, + // Prefetch number is dependent on the http protocol. For http 2 or + // above, the number of requests can be go a lot higher. + prefetch: 25, + }, + // filterQueryParam: false, + defaultDataSourceName: 'dicomweb', + /* Dynamic config allows user to pass "configUrl" query string this allows to load config without recompiling application. The regex will ensure valid configuration source */ + // dangerouslyUseDynamicConfig: { + // enabled: true, + // // regex will ensure valid configuration source and default is /.*/ which matches any character. To use this, setup your own regex to choose a specific source of configuration only. + // // Example 1, to allow numbers and letters in an absolute or sub-path only. + // // regex: /(0-9A-Za-z.]+)(\/[0-9A-Za-z.]+)*/ + // // Example 2, to restricts to either hosptial.com or othersite.com. 
+ // // regex: /(https:\/\/hospital.com(\/[0-9A-Za-z.]+)*)|(https:\/\/othersite.com(\/[0-9A-Za-z.]+)*)/ + // regex: /.*/, + // }, + dataSources: [ + { + friendlyName: 'dcmjs DICOMWeb Server', + namespace: '@ohif/extension-default.dataSourcesModule.dicomweb', + sourceName: 'dicomweb', + configuration: { + name: 'dicomweb', + wadoUriRoot: '$DICOM_SERVER_URL/wado', + qidoRoot: '$DICOM_SERVER_URL/dicom-web', + wadoRoot: '$DICOM_SERVER_URL/dicom-web', + + qidoSupportsIncludeField: false, + supportsReject: false, + imageRendering: 'wadors', + thumbnailRendering: 'wadors', + enableStudyLazyLoad: true, + supportsFuzzyMatching: false, + supportsWildcard: true, + staticWado: true, + singlepart: 'bulkdata,video', + // whether the data source should use retrieveBulkData to grab metadata, + // and in case of relative path, what would it be relative to, options + // are in the series level or study level (some servers like series some study) + bulkDataURI: { + enabled: true, + relativeResolution: 'studies', + }, + }, + }, + { + friendlyName: 'dicomweb delegating proxy', + namespace: '@ohif/extension-default.dataSourcesModule.dicomwebproxy', + sourceName: 'dicomwebproxy', + configuration: { + name: 'dicomwebproxy', + }, + }, + { + friendlyName: 'dicom json', + namespace: '@ohif/extension-default.dataSourcesModule.dicomjson', + sourceName: 'dicomjson', + configuration: { + name: 'json', + }, + }, + { + friendlyName: 'dicom local', + namespace: '@ohif/extension-default.dataSourcesModule.dicomlocal', + sourceName: 'dicomlocal', + configuration: {}, + }, + ], + httpErrorHandler: error => { + // This is 429 when rejected from the public idc sandbox too often. + console.warn(error.status); + + // Could use services manager here to bring up a dialog/modal if needed. + console.warn('test, navigate to https://ohif.org/'); + }, + // whiteLabeling: { + // /* Optional: Should return a React component to be rendered in the "Logo" section of the application's Top Navigation bar */ + // createLogoComponentFn: function (React) { + // return React.createElement( + // 'a', + // { + // target: '_self', + // rel: 'noopener noreferrer', + // className: 'text-purple-600 line-through', + // href: '/', + // }, + // React.createElement('img', + // { + // src: './assets/customLogo.svg', + // className: 'w-8 h-8', + // } + // )) + // }, + // }, + hotkeys: [ + { + commandName: 'incrementActiveViewport', + label: 'Next Viewport', + keys: ['right'], + }, + { + commandName: 'decrementActiveViewport', + label: 'Previous Viewport', + keys: ['left'], + }, + { commandName: 'rotateViewportCW', label: 'Rotate Right', keys: ['r'] }, + { commandName: 'rotateViewportCCW', label: 'Rotate Left', keys: ['l'] }, + { commandName: 'invertViewport', label: 'Invert', keys: ['i'] }, + { + commandName: 'flipViewportHorizontal', + label: 'Flip Horizontally', + keys: ['h'], + }, + { + commandName: 'flipViewportVertical', + label: 'Flip Vertically', + keys: ['v'], + }, + { commandName: 'scaleUpViewport', label: 'Zoom In', keys: ['+'] }, + { commandName: 'scaleDownViewport', label: 'Zoom Out', keys: ['-'] }, + { commandName: 'fitViewportToWindow', label: 'Zoom to Fit', keys: ['='] }, + { commandName: 'resetViewport', label: 'Reset', keys: ['space'] }, + { commandName: 'nextImage', label: 'Next Image', keys: ['down'] }, + { commandName: 'previousImage', label: 'Previous Image', keys: ['up'] }, + // { + // commandName: 'previousViewportDisplaySet', + // label: 'Previous Series', + // keys: ['pagedown'], + // }, + // { + // commandName: 
diff --git a/kube/services/ohif-viewer/ohif-viewer-deploy.yaml b/kube/services/ohif-viewer/ohif-viewer-deploy.yaml
index fc45434cad..e2df93cd06 100644
--- a/kube/services/ohif-viewer/ohif-viewer-deploy.yaml
+++ b/kube/services/ohif-viewer/ohif-viewer-deploy.yaml
@@ -86,7 +86,7 @@ spec:
             periodSeconds: 60
             timeoutSeconds: 30
           ports:
-            - containerPort: 80
+            - containerPort: 8080
           volumeMounts:
            - name: config-volume-g3auto
              readOnly: true
diff --git a/kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf b/kube/services/revproxy/gen3.nginx.conf/argo-server.conf
similarity index 86%
rename from kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf
rename to kube/services/revproxy/gen3.nginx.conf/argo-server.conf
index cb8def3aa5..1cdd4608c6 100644
--- a/kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/argo-server.conf
@@ -7,7 +7,7 @@
   auth_request /gen3-authz;

   set $proxy_service "argo";
-  set $upstream http://argo-argo-workflows-server.argo.svc.cluster.local:2746;
+  set $upstream SERVICE_URL;

   rewrite ^/argo/(.*) /$1 break;
diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf
new file mode 100644
index 0000000000..42e9a3758b
--- /dev/null
+++ b/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf
@@ -0,0 +1,12 @@
+  location /ai {
+      if ($csrf_check !~ ^ok-\S.+$) {
+        return 403 "failed csrf check";
+      }
+
+      set $proxy_service "gen3-discovery-ai-service";
+      set $upstream http://gen3-discovery-ai-service$des_domain;
+      rewrite ^/ai/(.*) /$1 break;
+      proxy_pass $upstream;
+      proxy_redirect http://$host/ https://$host/ai/;
+      client_max_body_size 0;
+  }
diff --git a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf
index db2de58861..e6d66ec12e 100644
--- a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf
@@ -1,4 +1,8 @@
 location /guppy/ {
+    if ($csrf_check !~ ^ok-\S.+$) {
+        return 403 "failed csrf check, make sure data-portal version >= 2023.12 or >= 5.19.0";
+    }
+
     proxy_connect_timeout 600s;
     proxy_send_timeout 600s;
     proxy_read_timeout 600s;
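Guppy now gets the same `$csrf_check` guard used elsewhere in the revproxy. A hedged sketch of exercising it — how `$csrf_check` is computed lives elsewhere in nginx.conf, and the cookie/header pairing below is an assumption based on common CSRF double-submit setups, not something shown in this diff:

```bash
# Hypothetical probe against a placeholder commons host: send a matching
# csrftoken cookie and x-csrf-token header so the guard should pass.
CSRF="$(openssl rand -hex 16)"
curl -s "https://example-commons.org/guppy/graphql" \
  -H "content-type: application/json" \
  -H "x-csrf-token: ${CSRF}" \
  --cookie "csrftoken=${CSRF}" \
  --data '{"query":"{ _aggregation { subject { _totalCount } } }"}'
# A missing or mismatched token should now return:
#   403 failed csrf check, make sure data-portal version >= 2023.12 or >= 5.19.0
```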
diff --git a/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf b/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf
index 9a20bc832f..22926bcf05 100644
--- a/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf
@@ -3,11 +3,12 @@ location /ohif-viewer/ {
   #     return 403 "failed csrf check";
   # }

-  set $authz_resource "/services/ohif-viewer";
-  set $authz_method "read";
-  set $authz_service "ohif-viewer";
+  # see if this can be fixed in the future for anonymous access
+  # set $authz_resource "/services/ohif-viewer";
+  # set $authz_method "read";
+  # set $authz_service "ohif-viewer";

-  auth_request /gen3-authz;
+  # auth_request /gen3-authz;

  set $proxy_service "ohif-viewer";
  set $upstream http://ohif-viewer-service.$namespace.svc.cluster.local;
diff --git a/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf
index 2eb77b1790..ed736189c4 100644
--- a/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf
@@ -7,11 +7,6 @@ location /orthanc/ {
   set $authz_method "create";
   set $authz_service "orthanc";

-  if ($request_uri ~ "^/orthanc/dicom-web/studies/") {
-    set $authz_method "read";
-    set $authz_resource "/services/orthanc/studies";
-  }
-
   auth_request /gen3-authz;

   proxy_set_header Authorization "Basic cHVibGljOmhlbGxv";
@@ -24,3 +19,23 @@ location /orthanc/ {
   # no limit to payload size so we can upload large DICOM files
   client_max_body_size 0;
 }
+
+location /orthanc/dicom-web/studies/ {
+  set $authz_method "read";
+  set $authz_resource "/services/orthanc/studies";
+  set $authz_service "orthanc";
+
+  auth_request /gen3-authz;
+  if ($request_method = POST) {
+    return 403;
+  }
+  proxy_set_header Authorization "Basic cHVibGljOmhlbGxv";
+
+  set $proxy_service "orthanc";
+  set $upstream http://orthanc-service.$namespace.svc.cluster.local;
+  rewrite ^/orthanc/(.*) /$1 break;
+  proxy_pass $upstream;
+
+  # no limit to payload size so we can upload large DICOM files
+  client_max_body_size 0;
+}
diff --git a/kube/services/revproxy/nginx.conf b/kube/services/revproxy/nginx.conf
index 2e3a3b1512..d0e14f49b6 100644
--- a/kube/services/revproxy/nginx.conf
+++ b/kube/services/revproxy/nginx.conf
@@ -236,7 +236,7 @@ server {
     # This overrides the individual services
     # set $allow_origin "*";
-    if ($http_origin) {
+    if ($http_origin = "https://$host") {
       set $allow_origin "$http_origin";
     }
diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml
index 9d5caab1b2..9f10ce90bd 100644
--- a/kube/services/revproxy/revproxy-deploy.yaml
+++ b/kube/services/revproxy/revproxy-deploy.yaml
@@ -21,6 +21,7 @@ spec:
         app: revproxy
         # allow access from workspaces
         userhelper: "yes"
+        internet: "yes"
         GEN3_DATE_LABEL
     spec:
       affinity:
diff --git a/kube/services/workflow-age-monitor/application.yaml b/kube/services/workflow-age-monitor/application.yaml
new file mode 100644
index 0000000000..99798bb2b8
--- /dev/null
+++ b/kube/services/workflow-age-monitor/application.yaml
@@ -0,0 +1,22 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: argo-workflow-age-monitor-application
+  namespace: argocd
+spec:
+  destination:
+    namespace: default
+    server: https://kubernetes.default.svc
+  project: default
+  source:
+    repoURL: https://github.com/uc-cdis/cloud-automation.git
+    targetRevision: master
+    path: kube/services/workflow-age-monitor/
+    directory:
+      exclude: "application.yaml"
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+    syncOptions:
+      - CreateNamespace=true
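The nginx.conf change above tightens CORS: an arbitrary `Origin` header is no longer reflected into `$allow_origin`; only an origin exactly equal to `https://$host` is. A hedged sketch of verifying that behavior from the outside — the hostname is a placeholder, and this assumes `$allow_origin` feeds an `Access-Control-Allow-Origin` response header later in the config:

```bash
# Cross-origin request: the allow-origin header should no longer be echoed.
curl -sI "https://example-commons.org/" -H "Origin: https://evil.example.com" \
  | grep -i '^access-control-allow-origin' || echo "foreign origin not reflected (expected)"

# Same-origin request: the header should still be present.
curl -sI "https://example-commons.org/" -H "Origin: https://example-commons.org" \
  | grep -i '^access-control-allow-origin'
```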
diff --git a/kube/services/workflow-age-monitor/argo-workflow-age.yaml b/kube/services/workflow-age-monitor/argo-workflow-age.yaml
new file mode 100644
index 0000000000..0d0c29115b
--- /dev/null
+++ b/kube/services/workflow-age-monitor/argo-workflow-age.yaml
@@ -0,0 +1,55 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: argo-workflow-age
+  namespace: default
+spec:
+  schedule: "*/5 * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: gen3job
+        spec:
+          serviceAccountName: argo-workflow-monitor
+          containers:
+            - name: kubectl
+              image: quay.io/cdis/awshelper
+              env:
+                # This is 3 * 3600, or 3 hours
+                - name: THRESHOLD_TIME
+                  value: "10800"
+                - name: SLACK_WEBHOOK_URL
+                  valueFrom:
+                    configMapKeyRef:
+                      name: global
+                      key: slack_webhook
+
+              command: ["/bin/bash"]
+              args:
+                - "-c"
+                - |
+                  #!/bin/bash
+                  # Get all workflows and check their age
+                  kubectl get workflows --all-namespaces -o json | jq -c '.items[] | {name: .metadata.name, creationTimestamp: .metadata.creationTimestamp}' | while read workflow_info; do
+                    WORKFLOW_NAME=$(echo $workflow_info | jq -r '.name')
+                    CREATION_TIMESTAMP=$(echo $workflow_info | jq -r '.creationTimestamp')
+
+                    # Convert creation timestamp to Unix Epoch time
+                    CREATION_EPOCH=$(date -d "$CREATION_TIMESTAMP" +%s)
+
+                    # Get current Unix Epoch time
+                    CURRENT_EPOCH=$(date +%s)
+
+                    # Calculate workflow age in seconds
+                    WORKFLOW_AGE=$(($CURRENT_EPOCH - $CREATION_EPOCH))
+
+                    # Check if workflow age is greater than threshold
+                    if [ "$WORKFLOW_AGE" -gt "$THRESHOLD_TIME" ]; then
+                      echo "Workflow $WORKFLOW_NAME has been running for over $THRESHOLD_TIME seconds, sending an alert"
+                      # Send alert to Slack
+                      curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Workflow \`${WORKFLOW_NAME}\` has been running longer than $THRESHOLD_TIME seconds\"}" $SLACK_WEBHOOK_URL
+                    fi
+                  done
+          restartPolicy: OnFailure
diff --git a/kube/services/workflow-age-monitor/auth.yaml b/kube/services/workflow-age-monitor/auth.yaml
new file mode 100644
index 0000000000..fb7970a3ea
--- /dev/null
+++ b/kube/services/workflow-age-monitor/auth.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: argo-workflow-monitor
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: argo-workflow-monitor-binding
+subjects:
+  - kind: ServiceAccount
+    name: argo-workflow-monitor
+    namespace: default
+roleRef:
+  kind: ClusterRole
+  name: argo-argo-workflows-view
+  apiGroup: rbac.authorization.k8s.io
diff --git a/packer/configs/web_wildcard_whitelist b/packer/configs/web_wildcard_whitelist
index c58eeefe80..621dec3d58 100644
--- a/packer/configs/web_wildcard_whitelist
+++ b/packer/configs/web_wildcard_whitelist
@@ -44,4 +44,5 @@
 .yahooapis.com
 .cloudfront.net
 .docker.io
+.blob.core.windows.net
 .googleapis.com
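The age check in the CronJob above is plain epoch arithmetic. A hedged sketch for testing that calculation locally before deploying — the timestamp is a made-up example, and GNU `date -d` (available in the Debian-based awshelper image) is assumed:

```bash
#!/bin/bash
# Reproduce the CronJob's age math with a fixed creation timestamp.
THRESHOLD_TIME=10800                              # 3 hours, matching the manifest
CREATION_TIMESTAMP="2023-11-01T12:00:00Z"         # hypothetical workflow creationTimestamp
CREATION_EPOCH=$(date -d "$CREATION_TIMESTAMP" +%s)
CURRENT_EPOCH=$(date +%s)
WORKFLOW_AGE=$((CURRENT_EPOCH - CREATION_EPOCH))
echo "age: ${WORKFLOW_AGE}s (alert threshold: ${THRESHOLD_TIME}s)"
[ "$WORKFLOW_AGE" -gt "$THRESHOLD_TIME" ] && echo "would send Slack alert"
```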