diff --git a/.github/workflows/image_build_push_squid.yaml b/.github/workflows/image_build_push_squid.yaml index ce1761d3c2..0645fb8b23 100644 --- a/.github/workflows/image_build_push_squid.yaml +++ b/.github/workflows/image_build_push_squid.yaml @@ -1,6 +1,7 @@ name: Build Squid images on: + workflow_dispatch: push: paths: - .github/workflows/image_build_push_squid.yaml diff --git a/doc/README.md b/doc/README.md index c3c6602b3f..4fc8939358 100644 --- a/doc/README.md +++ b/doc/README.md @@ -85,7 +85,7 @@ For example - `gen3 help aws` opens `aws.md` * [utility vm](../tf_files/aws/modules/utility-vm/README.md) * [explorer infrastructure](https://github.com/uc-cdis/cdis-wiki/blob/master/dev/gen3/data_explorer/README.md) * [automation for gcp](../tf_files/gcp/commons/README.md) -* [gcp bucket access flows for DCF](https://github.com/uc-cdis/fence/blob/master/docs/google_architecture.md) +* [gcp bucket access flows for DCF](https://github.com/uc-cdis/fence/blob/master/docs/additional_documentation/google_architecture.md) * [authn and authz with fence](https://github.com/uc-cdis/fence/blob/master/README.md) * [jenkins](../kube/services/jenkins/README.md) * [jupyterhub configuration](../kube/services/jupyterhub/README.md) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 0507652f39..42095986a9 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -33,6 +33,7 @@ centos.mirrors.hoobly.com centos.mirrors.tds.net centos.mirrors.wvstateu.edu cernvm.cern.ch +charts.authelia.com charts.helm.sh cloud.r-project.org coredns.github.io @@ -124,6 +125,7 @@ neuro.debian.net neurodeb.pirsquared.org nginx.org nvidia.github.io +ohsu-comp-bio.github.io opportunityinsights.org orcid.org pgp.mit.edu diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 1717b44432..7c8d7f2333 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ 
b/files/squid_whitelist/web_wildcard_whitelist @@ -59,6 +59,7 @@ .immport.org .jenkins.io .jenkins-ci.org +.jetstack.io .k8s.io .kegg.jp .kidsfirstdrc.org diff --git a/gen3/bin/kube-setup-aws-es-proxy.sh b/gen3/bin/kube-setup-aws-es-proxy.sh index 1ce80fd8e4..5a1f5ac0e7 100644 --- a/gen3/bin/kube-setup-aws-es-proxy.sh +++ b/gen3/bin/kube-setup-aws-es-proxy.sh @@ -21,7 +21,7 @@ if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${esDomain}" --query "DomainStatusList[*].Endpoints" --output text)" \ && [[ -n "${ES_ENDPOINT}" && -n "${esDomain}" ]]; then gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" - g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" + g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml" gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster." 
else @@ -33,8 +33,8 @@ if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then gen3 kube-setup-networkpolicy service aws-es-proxy g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true fi - elif [ "$es7" = true ]; then - if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata-2 --query "DomainStatusList[*].Endpoints" --output text)" \ + elif [ "$es7" = false ]; then + if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \ && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" @@ -50,9 +50,10 @@ if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true fi else - if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \ + if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata-2 --query "DomainStatusList[*].Endpoints" --output text)" \ && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" + g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml" gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster." 
else diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh index b75470f733..df5731cf11 100644 --- a/gen3/bin/kube-setup-ingress.sh +++ b/gen3/bin/kube-setup-ingress.sh @@ -15,23 +15,6 @@ ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.na scriptDir="${GEN3_HOME}/kube/services/ingress" gen3_ingress_setup_waf() { - gen3_log_info "Starting GPE-312 waf setup" - #variable to see if WAF already exists - export waf=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains(env.vpc_name)).Name'` -if [[ -z $waf ]]; then - gen3_log_info "Creating Web ACL. This may take a few minutes." - aws wafv2 create-web-acl\ - --name $vpc_name-waf \ - --scope REGIONAL \ - --default-action Allow={} \ - --visibility-config SampledRequestsEnabled=true,CloudWatchMetricsEnabled=true,MetricName=GPE-312WebAclMetrics \ - --rules file://${GEN3_HOME}/gen3/bin/waf-rules-GPE-312.json \ - --region us-east-1 - #Need to sleep to avoid "WAFUnavailableEntityException" error since the waf takes a bit to spin up - sleep 300 -else - gen3_log_info "WAF already exists. Skipping..." -fi gen3_log_info "Attaching ACL to ALB." 
export acl_arn=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains(env.vpc_name)).ARN'` export alb_name=`kubectl get ingress gen3-ingress | awk '{print $4}' | tail +2 | sed 's/^\([A-Za-z0-9]*-[A-Za-z0-9]*-[A-Za-z0-9]*\).*/\1/;q'` diff --git a/gen3/bin/waf-rules-GPE-312.json b/gen3/bin/waf-rules-GPE-312.json deleted file mode 100644 index b8cdccabe8..0000000000 --- a/gen3/bin/waf-rules-GPE-312.json +++ /dev/null @@ -1,153 +0,0 @@ -[ - { - "Name": "AWS-AWSManagedRulesAdminProtectionRuleSet", - "Priority": 0, - "Statement": { - "ManagedRuleGroupStatement": { - "VendorName": "AWS", - "Name": "AWSManagedRulesAdminProtectionRuleSet", - "RuleActionOverrides": [ - { - "Name": "AdminProtection_URIPATH", - "ActionToUse": { - "Challenge": {} - } - } - ] - } - }, - "OverrideAction": { - "None": {} - }, - "VisibilityConfig": { - "SampledRequestsEnabled": true, - "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesAdminProtectionRuleSet" - } - }, - { - "Name": "AWS-AWSManagedRulesAmazonIpReputationList", - "Priority": 1, - "Statement": { - "ManagedRuleGroupStatement": { - "VendorName": "AWS", - "Name": "AWSManagedRulesAmazonIpReputationList", - "RuleActionOverrides": [ - { - "Name": "AWSManagedReconnaissanceList", - "ActionToUse": { - "Count": {} - } - } - ] - } - }, - "OverrideAction": { - "None": {} - }, - "VisibilityConfig": { - "SampledRequestsEnabled": true, - "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesAmazonIpReputationList" - } - }, - { - "Name": "AWS-AWSManagedRulesCommonRuleSet", - "Priority": 2, - "Statement": { - "ManagedRuleGroupStatement": { - "VendorName": "AWS", - "Name": "AWSManagedRulesCommonRuleSet", - "Version": "Version_1.4", - "RuleActionOverrides": [ - { - "Name": "EC2MetaDataSSRF_BODY", - "ActionToUse": { - "Count": {} - } - }, - { - "Name": "GenericLFI_BODY", - "ActionToUse": { - "Allow": {} - } - }, - { - "Name": "SizeRestrictions_QUERYSTRING", - "ActionToUse": { - "Count": {} 
- } - }, - { - "Name": "SizeRestrictions_BODY", - "ActionToUse": { - "Allow": {} - } - }, - { - "Name": "CrossSiteScripting_BODY", - "ActionToUse": { - "Count": {} - } - }, - { - "Name": "SizeRestrictions_URIPATH", - "ActionToUse": { - "Allow": {} - } - }, - { - "Name": "SizeRestrictions_Cookie_HEADER", - "ActionToUse": { - "Allow": {} - } - } - ] - } - }, - "OverrideAction": { - "None": {} - }, - "VisibilityConfig": { - "SampledRequestsEnabled": true, - "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesCommonRuleSet" - } - }, - { - "Name": "AWS-AWSManagedRulesKnownBadInputsRuleSet", - "Priority": 3, - "Statement": { - "ManagedRuleGroupStatement": { - "VendorName": "AWS", - "Name": "AWSManagedRulesKnownBadInputsRuleSet" - } - }, - "OverrideAction": { - "None": {} - }, - "VisibilityConfig": { - "SampledRequestsEnabled": true, - "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesKnownBadInputsRuleSet" - } - }, - { - "Name": "AWS-AWSManagedRulesLinuxRuleSet", - "Priority": 4, - "Statement": { - "ManagedRuleGroupStatement": { - "VendorName": "AWS", - "Name": "AWSManagedRulesLinuxRuleSet" - } - }, - "OverrideAction": { - "None": {} - }, - "VisibilityConfig": { - "SampledRequestsEnabled": true, - "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesLinuxRuleSet" - } - } -] \ No newline at end of file diff --git a/kube/services/audit-service/audit-service-deploy.yaml b/kube/services/audit-service/audit-service-deploy.yaml index b7081a7f51..b0cf5c6615 100644 --- a/kube/services/audit-service/audit-service-deploy.yaml +++ b/kube/services/audit-service/audit-service-deploy.yaml @@ -94,6 +94,11 @@ spec: readOnly: true mountPath: "/src/audit-service-config.yaml" subPath: "audit-service-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. 
+ - name: "config-volume" + readOnly: true + mountPath: "/audit/audit-service-config.yaml" + subPath: "audit-service-config.yaml" resources: requests: cpu: 100m @@ -109,6 +114,11 @@ spec: readOnly: true mountPath: "/src/audit-service-config.yaml" subPath: "audit-service-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. + - name: "config-volume" + readOnly: true + mountPath: "/audit/audit-service-config.yaml" + subPath: "audit-service-config.yaml" resources: limits: cpu: 0.8 @@ -117,4 +127,5 @@ spec: args: - "-c" - | - /env/bin/alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || /env/bin/alembic upgrade head diff --git a/kube/services/jobs/indexd-userdb-job.yaml b/kube/services/jobs/indexd-userdb-job.yaml index 676307481b..228a1989df 100644 --- a/kube/services/jobs/indexd-userdb-job.yaml +++ b/kube/services/jobs/indexd-userdb-job.yaml @@ -74,7 +74,7 @@ spec: # Script always succeeds if it runs (echo exits with 0) # indexd image does not include jq, so use python - | - eval $(python 2> /dev/null < /dev/null || poetry run python 2> /dev/null) < /dev/null || poetry run python /indexd/bin/index_admin.py create --username "$user" --password "${user_db[$user]}") done echo "Exit code: $?" 
restartPolicy: Never diff --git a/kube/services/jobs/psql-db-copy-aurora-job.yaml b/kube/services/jobs/psql-db-copy-aurora-job.yaml index 8fd6e899aa..a29274146e 100644 --- a/kube/services/jobs/psql-db-copy-aurora-job.yaml +++ b/kube/services/jobs/psql-db-copy-aurora-job.yaml @@ -168,11 +168,11 @@ spec: gen3_log_info "Source DB: $source_db_database, Username: $db_username, Current DB: $db_database, Target DB: $target_db" # DB commands - gen3 psql aurora -c "GRANT $db_username TO $aurora_master_username" + gen3 psql aurora -c "GRANT \"$db_username\" TO \"$aurora_master_username\"" gen3 psql aurora -c "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$source_db_database' AND pid <> pg_backend_pid()" - gen3 psql aurora -c "CREATE DATABASE $target_db WITH TEMPLATE $source_db_database OWNER $db_username" - pg_command="DO \$\$ DECLARE tbl record; BEGIN FOR tbl IN (SELECT table_schema || '.' || table_name AS full_table_name FROM information_schema.tables WHERE table_schema = 'public') LOOP EXECUTE 'ALTER TABLE ' || tbl.full_table_name || ' OWNER TO $db_username;'; END LOOP; END \$\$;" - PGPASSWORD=${aurora_master_password} psql -h $aurora_host_name -U $aurora_master_username -d "$target_db" -c "$pg_command" + gen3 psql aurora -c "CREATE DATABASE \"$target_db\" WITH TEMPLATE \"$source_db_database\" OWNER \"$db_username\"" + pg_command="DO \$\$ DECLARE tbl record; BEGIN FOR tbl IN (SELECT table_schema || '.' || table_name AS full_table_name FROM information_schema.tables WHERE table_schema = 'public') LOOP EXECUTE 'ALTER TABLE ' || tbl.full_table_name || ' OWNER TO \"$db_username\";'; END LOOP; END \$\$;" + PGPASSWORD=${aurora_master_password} psql -h $aurora_host_name -U "$aurora_master_username" -d "$target_db" -c "$pg_command" if [ $? 
-eq 0 ]; then gen3_log_info "Successfully processed $database" new_databases+=("$target_db") diff --git a/kube/services/metadata/metadata-deploy.yaml b/kube/services/metadata/metadata-deploy.yaml index 68a83078e6..71ab7b484f 100644 --- a/kube/services/metadata/metadata-deploy.yaml +++ b/kube/services/metadata/metadata-deploy.yaml @@ -117,6 +117,11 @@ spec: readOnly: true mountPath: /src/.env subPath: metadata.env + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. + - name: config-volume-g3auto + readOnly: true + mountPath: /mds/.env + subPath: metadata.env - name: config-volume readOnly: true mountPath: /aggregate_config.json @@ -140,6 +145,11 @@ spec: readOnly: true mountPath: /src/.env subPath: metadata.env + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. + - name: config-volume-g3auto + readOnly: true + mountPath: /mds/.env + subPath: metadata.env resources: limits: cpu: 0.8 @@ -148,4 +158,6 @@ spec: args: - "-c" - | - /env/bin/alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || /env/bin/alembic upgrade head + diff --git a/kube/services/node-affinity-daemonset/README.md b/kube/services/node-affinity-daemonset/README.md new file mode 100644 index 0000000000..3de3bae061 --- /dev/null +++ b/kube/services/node-affinity-daemonset/README.md @@ -0,0 +1,5 @@ +# Prerequisites + +This service needs certmanager to work. Please install certmanager before deploying this service. Once certmanager is installed, you can deploy this service by applying the manifests in this directory. 
+ +Code lives in https://github.com/uc-cdis/node-affinity-webhook/ diff --git a/kube/services/node-affinity-daemonset/deployment.yaml b/kube/services/node-affinity-daemonset/deployment.yaml new file mode 100644 index 0000000000..027dd690cb --- /dev/null +++ b/kube/services/node-affinity-daemonset/deployment.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: node-affinity-daemonset + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: node-affinity-daemonset + template: + metadata: + labels: + app: node-affinity-daemonset + spec: + containers: + - name: node-affinity-daemonset + image: quay.io/cdis/node-affinity-daemonset:master + ports: + - containerPort: 8443 + volumeMounts: + - name: webhook-certs + mountPath: /etc/webhook/certs + readOnly: true + volumes: + - name: webhook-certs + secret: + secretName: webhook-certs #pragma: allowlist secret diff --git a/kube/services/node-affinity-daemonset/service.yaml b/kube/services/node-affinity-daemonset/service.yaml new file mode 100644 index 0000000000..022ca443f1 --- /dev/null +++ b/kube/services/node-affinity-daemonset/service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: node-affinity-daemonset + namespace: kube-system +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + app: node-affinity-daemonset diff --git a/kube/services/node-affinity-daemonset/webhook.yaml b/kube/services/node-affinity-daemonset/webhook.yaml new file mode 100644 index 0000000000..b1c92dbb9a --- /dev/null +++ b/kube/services/node-affinity-daemonset/webhook.yaml @@ -0,0 +1,44 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: node-affinity-daemonset + annotations: + cert-manager.io/inject-ca-from: kube-system/node-affinity-daemonset-cert +webhooks: + - name: node-affinity-daemonset.k8s.io + clientConfig: + service: + name: node-affinity-daemonset + namespace: kube-system + path: "/mutate" + rules: + - operations:
["CREATE"] + apiGroups: ["apps"] + apiVersions: ["v1"] + resources: ["daemonsets"] + admissionReviewVersions: ["v1"] + sideEffects: None + +--- + +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: node-affinity-daemonset-cert + namespace: kube-system +spec: + secretName: webhook-certs #pragma: allowlist secret + dnsNames: + - node-affinity-daemonset.kube-system.svc + issuerRef: + name: selfsigned + +--- + +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned + namespace: kube-system +spec: + selfSigned: {} diff --git a/kube/services/requestor/requestor-deploy.yaml b/kube/services/requestor/requestor-deploy.yaml index 2ed8866384..954cb847c4 100644 --- a/kube/services/requestor/requestor-deploy.yaml +++ b/kube/services/requestor/requestor-deploy.yaml @@ -91,6 +91,11 @@ spec: readOnly: true mountPath: "/src/requestor-config.yaml" subPath: "requestor-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibilit + - name: "config-volume" + readOnly: true + mountPath: "/requestor/requestor-config.yaml" + subPath: "requestor-config.yaml" resources: requests: cpu: 100m @@ -106,6 +111,11 @@ spec: readOnly: true mountPath: "/src/requestor-config.yaml" subPath: "requestor-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibilit + - name: "config-volume" + readOnly: true + mountPath: "/requestor/requestor-config.yaml" + subPath: "requestor-config.yaml" resources: limits: cpu: 0.8 @@ -114,4 +124,5 @@ spec: args: - "-c" - | - /env/bin/alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || /env/bin/alembic upgrade head diff --git a/kube/services/wts/wts-deploy.yaml b/kube/services/wts/wts-deploy.yaml index 06f43fe01c..c6c4ffe74e 100644 
--- a/kube/services/wts/wts-deploy.yaml +++ b/kube/services/wts/wts-deploy.yaml @@ -163,10 +163,11 @@ spec: args: - "-c" - | - if hash alembic 2>/dev/null; then + if hash alembic 2>/dev/null || poetry run alembic --version >/dev/null 2>&1; then echo "Running DB migration" cd /wts - alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || alembic upgrade head else # WTS < 0.3.0 does not have the DB migration setup echo "Alembic not installed - not running DB migration"