diff --git a/.eth/charon/.empty b/.eth/charon/.empty
new file mode 100755
index 00000000..e69de29b
diff --git a/.eth/lido-ejector/.empty b/.eth/lido-ejector/.empty
new file mode 100755
index 00000000..e69de29b
diff --git a/.gitignore b/.gitignore
index c418707a..eac3f087 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,10 +4,13 @@ ext-network.yml
 ext-network.yml.original
 ext-network.yml.bak
 .eth/*
+.eth_backup*
 !.eth/README.md
 !.eth/validator_keys/.empty
 !.eth/exit_messages/.empty
 !.eth/dkg_output/.empty
+!.eth/charon/.empty
+!.eth/lido-ejector/.empty
 !.eth/ethdo/README.md
 !.eth/ethdo/create-withdrawal-change.sh
 *.swp
@@ -23,4 +26,5 @@ ssv-config/config.yaml
 ssv-config/config.yaml.original
 ssv-config/dkg-config.yaml
 ssv-config/dkg-config.yaml.original
+commit-boost/cb-config.toml
 .nada
diff --git a/README.md b/README.md
index f51d5c79..21e41802 100644
--- a/README.md
+++ b/README.md
@@ -34,4 +34,4 @@ Eth Docker uses a "semver-ish" scheme.
   large.
 - Second through fourth digit, [semver](https://semver.org/).

-This is Eth Docker v2.12.0.0
+This is Eth Docker v2.12.3.0
diff --git a/besu.yml b/besu.yml
index a7cd8956..740685e6 100644
--- a/besu.yml
+++ b/besu.yml
@@ -27,8 +27,10 @@ services:
       - EL_EXTRAS=${EL_EXTRAS:-}
       - ARCHIVE_NODE=${ARCHIVE_NODE:-}
       - NETWORK=${NETWORK}
+      - IPV6=${IPV6:-false}
     volumes:
-      - besu-eth1-data:/var/lib/besu
+      - besu-el-data:/var/lib/besu
+      - besu-eth1-data:/var/lib/besu-og
       - /etc/localtime:/etc/localtime:ro
       - jwtsecret:/var/lib/besu/ee-secret
     ports:
@@ -43,8 +45,6 @@ services:
     entrypoint:
       - docker-entrypoint.sh
       - /opt/besu/bin/besu
-      - --data-path
-      - /var/lib/besu
       - --p2p-port
       - ${EL_P2P_PORT:-30303}
       - --rpc-http-enabled
@@ -92,6 +92,7 @@ services:
     command: /bin/sh

 volumes:
+  besu-el-data:
   besu-eth1-data:
   jwtsecret:
diff --git a/besu/Dockerfile.binary b/besu/Dockerfile.binary
index ff024867..113c2f86 100644
--- a/besu/Dockerfile.binary
+++ b/besu/Dockerfile.binary
@@ -23,6 +23,7 @@ RUN set -eux; \
         gosu nobody true

 # Create data mount point with permissions
+RUN mkdir -p /var/lib/besu-og && chown -R ${USER}:${USER} /var/lib/besu-og && chmod -R 700 /var/lib/besu-og
 RUN mkdir -p /var/lib/besu/ee-secret && chown -R ${USER}:${USER} /var/lib/besu && chmod -R 700 /var/lib/besu && chmod 777 /var/lib/besu/ee-secret

 # Cannot assume buildkit, hence no chmod
diff --git a/besu/Dockerfile.source b/besu/Dockerfile.source
index 054236d3..9ca521c4 100644
--- a/besu/Dockerfile.source
+++ b/besu/Dockerfile.source
@@ -1,5 +1,5 @@
 # Build Besu in a stock Ubuntu container
-FROM eclipse-temurin:21-jdk-jammy AS builder
+FROM eclipse-temurin:21-jdk-noble AS builder

 # This is here to avoid build-time complaints
 ARG DOCKER_TAG
@@ -14,7 +14,7 @@ WORKDIR /usr/src
 RUN bash -c "git clone --recurse-submodules -j8 ${SRC_REPO} besu && cd besu && git config advice.detachedHead false && git fetch --all --tags && if [[ ${BUILD_TARGET} =~ pr-.+ ]]; then git fetch origin pull/$(echo ${BUILD_TARGET} | cut -d '-' -f 2)/head:besu-pr; git checkout besu-pr; else git checkout ${BUILD_TARGET}; fi && ./gradlew installDist"

 # Pull all binaries into a second stage deploy Ubuntu container
-FROM eclipse-temurin:21-jre-jammy
+FROM eclipse-temurin:21-jre-noble

 ARG USER=besu
 ARG UID=10001
@@ -28,6 +28,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get install
     libjemalloc-dev \
     git \
     wget \
+    adduser \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

@@ -46,6 +47,7 @@ RUN adduser \
     --ingroup "${USER}" \
     "${USER}"

+RUN mkdir -p /var/lib/besu-og && chown -R ${USER}:${USER} /var/lib/besu-og && chmod -R 700 /var/lib/besu-og
 RUN mkdir -p /var/lib/besu/ee-secret && chown -R besu:besu /var/lib/besu && chmod -R 700 /var/lib/besu && chmod 777 /var/lib/besu/ee-secret

 # Cannot assume buildkit, hence no chmod
diff --git a/besu/docker-entrypoint.sh b/besu/docker-entrypoint.sh
index aa5ec239..2aee7d17 100755
--- a/besu/docker-entrypoint.sh
+++ b/besu/docker-entrypoint.sh
@@ -48,14 +48,14 @@ if [[ "${NETWORK}" =~ ^https?:// ]]; then
   __network="--genesis-file=/var/lib/besu/testnet/${config_dir}/besu.json --bootnodes=${bootnodes} \
 --Xfilter-on-enr-fork-id=true --rpc-http-api=ADMIN,CLIQUE,MINER,ETH,NET,DEBUG,TXPOOL,ENGINE,TRACE,WEB3"
 else
-  __network="--network ${NETWORK} --rpc-http-api WEB3,ETH,NET"
+  __network="--network ${NETWORK}"
 fi

 if [ "${ARCHIVE_NODE}" = "true" ]; then
   echo "Besu archive node without pruning"
   __prune="--data-storage-format=FOREST --sync-mode=FULL"
 else
-  __prune="--data-storage-format=BONSAI --sync-mode=SNAP"
+  __prune=""
 fi

 __memtotal=$(awk '/MemTotal/ {printf "%d", int($2/1024/1024)}' /proc/meminfo)
@@ -65,6 +65,21 @@ else
   __spec=""
 fi

+# New or old datadir
+if [ -d /var/lib/besu-og/database ]; then
+  __datadir="--data-path /var/lib/besu-og"
+else
+  __datadir="--data-path /var/lib/besu"
+fi
+
+# DiscV5 for IPV6
+if [ "${IPV6:-false}" = "true" ]; then
+  echo "Configuring Besu for discv5 for IPv6 advertisements"
+  __ipv6="--Xv5-discovery-enabled"
+else
+  __ipv6=""
+fi
+
 if [ -f /var/lib/besu/prune-marker ]; then
   rm -f /var/lib/besu/prune-marker
   if [ "${ARCHIVE_NODE}" = "true" ]; then
@@ -73,9 +88,9 @@ if [ -f /var/lib/besu/prune-marker ]; then
   fi
 # Word splitting is desired for the command line parameters
 # shellcheck disable=SC2086
-  exec "$@" ${__network} ${__prune} ${EL_EXTRAS} storage trie-log prune
+  exec "$@" ${__datadir} ${__network} ${__prune} ${EL_EXTRAS} storage trie-log prune
 else
 # Word splitting is desired for the command line parameters
 # shellcheck disable=SC2086
-  exec "$@" ${__network} ${__prune} ${__spec} ${EL_EXTRAS}
+  exec "$@" ${__datadir} ${__network} ${__ipv6} ${__prune} ${__spec} ${EL_EXTRAS}
 fi
diff --git a/central-metrics.yml b/central-metrics.yml
index fee0bf0b..17857f58 100644
--- a/central-metrics.yml
+++ b/central-metrics.yml
@@ -10,7 +10,7 @@ x-logging: &logging
 services:
   ethereum-metrics-exporter:
     restart: "unless-stopped"
-    image: samcm/ethereum-metrics-exporter:0.23.0-debian
+    image: samcm/ethereum-metrics-exporter:0.24.0-debian
     entrypoint:
       - /ethereum-metrics-exporter
       - --consensus-url=${CL_NODE}
diff --git a/commit-boost-pbs.yml b/commit-boost-pbs.yml
new file mode 100644
index 00000000..e9d11024
--- /dev/null
+++ b/commit-boost-pbs.yml
@@ -0,0 +1,22 @@
+x-logging: &logging
+  logging:
+    driver: json-file
+    options:
+      max-size: 100m
+      max-file: "3"
+      tag: '{{.ImageName}}|{{.Name}}|{{.ImageFullID}}|{{.FullID}}'
+
+services:
+  cb-pbs:
+    image: ${CB_PBS_DOCKER_REPO:-ghcr.io/commit-boost/pbs}:${CB_PBS_DOCKER_TAG:-latest}
+    environment:
+      CB_CONFIG: /cb-config.toml
+      CB_METRICS_PORT: 10000
+    volumes:
+      - ./commit-boost/cb-config.toml:/cb-config.toml:ro
+    labels:
+      - metrics.scrape=true
+      - metrics.path=/metrics
+      - metrics.port=10000
+      - metrics.instance=cb-pbs
+      - metrics.network=${NETWORK}
diff --git a/commit-boost/cb-config.toml.sample b/commit-boost/cb-config.toml.sample
new file mode 100644
index 00000000..934ad9ee
--- /dev/null
+++ b/commit-boost/cb-config.toml.sample
@@ -0,0 +1,17 @@
+chain = "Holesky"
+
+[pbs]
+port = 18550
+
+[[relays]]
+id = "bloxroute"
+url = "https://0x821f2a65afb70e7f2e820a925a9b4c80a159620582c1766b1b09729fec178b11ea22abb3a51f07b288be815a1a2ff516@bloxroute.holesky.blxrbdn.com"
+[[relays]]
+id = "aestus"
+url = "https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7c0406754376c2c8285a36c630346aa5c5f833@holesky.aestus.live"
+[[relays]]
+id = "titan"
+url = "https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz"
+[[relays]]
+id = "flashbots"
+url = "https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net"
diff --git a/default.env b/default.env
index d1653952..ef926943 100644
--- a/default.env
+++ b/default.env
@@ -8,7 +8,7 @@ FEE_RECIPIENT=
 # If "true" and used with a CL, it also requires :mev-boost.yml in COMPOSE_FILE
 MEV_BOOST=false
 # For relay information, please see https://ethstaker.cc/mev-relay-list/
-MEV_RELAYS=https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net,https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz,https://0xb1d229d9c21298a87846c7022ebeef277dfc321fe674fa45312e20b5b6c400bfde9383f801848d7837ed5fc449083a12@relay-holesky.edennetwork.io,https://0x821f2a65afb70e7f2e820a925a9b4c80a159620582c1766b1b09729fec178b11ea22abb3a51f07b288be815a1a2ff516@bloxroute.holesky.blxrbdn.com
+MEV_RELAYS=https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net,https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz,https://0x821f2a65afb70e7f2e820a925a9b4c80a159620582c1766b1b09729fec178b11ea22abb3a51f07b288be815a1a2ff516@bloxroute.holesky.blxrbdn.com
 # Set a minimum MEV bid (e.g. 0.05), used by mev-boost.yml. If empty, no minimum is used.
 MEV_MIN_BID=
 # Graffiti to use for validator
@@ -79,6 +79,9 @@ HOST_IP=
 # IP address to use when host-mapping a port through *-shared.yml. Set this to 127.0.0.1 to restrict the share to localhost
 SHARE_IP=

+# Relays for the Charon node to connect to
+OBOL_P2P_RELAYS=
+
 # P2P ports you will forward to your staking node. Adjust here if you are
 # going to use something other than defaults.
 EL_P2P_PORT=30303
@@ -86,6 +89,8 @@ CL_P2P_PORT=9000
 PRYSM_PORT=9000
 PRYSM_UDP_PORT=9000
 CL_QUIC_PORT=9001
+# Some clients need a separate port for IPv6
+CL_IPV6_P2P_PORT=9090
 # Local grafana dashboard port. Do not expose to Internet, it is insecure http
 GRAFANA_PORT=3000
 # Local Siren UI port
@@ -115,6 +120,8 @@ SSV_P2P_PORT=13001
 SSV_P2P_PORT_UDP=12001
 # SSV DKG port
 SSV_DKG_PORT=3030
+# Obol node P2P port
+OBOL_P2P_PORT=3610
 # Engine port. Only for distributed setups, this should otherwise be left alone
 EE_PORT=8551
 # Consensus layer REST port. Only for distributed setups, this should otherwise be left alone
@@ -160,10 +167,14 @@ EL_NODE=http://execution:8551
 CL_NODE=http://consensus:5052
 # MEV-boost address. This would only be changed for Vouch setups
 MEV_NODE=http://mev-boost:18550
+# Consensus client address for Charon and Lido Validator Ejector in Obol setup
+OBOL_CL_NODE=http://consensus:5052
+# Execution client address (RPC) for Lido Validator Ejector in Obol setup
+OBOL_EL_NODE=http://execution:8545

 # You can set specific version targets and choose binary or compiled from source builds below,
 # via "Dockerfile.binary" or "Dockerfile.source"
-# These settings are only migrated when running "./ethd update --keep-targets"
+# These settings can be reset to defaults with "./ethd update --refresh-targets"

 # The default source build targets build from the latest github tag
 # Eth Docker updates its code to latest by default.
@@ -172,11 +183,25 @@ ETH_DOCKER_TAG=

 # SSV
 SSV_NODE_TAG=latest
+SSV_NODE_REPO=bloxstaking/ssv-node
 SSV_DKG_TAG=latest
-
+SSV_DKG_REPO=bloxstaking/ssv-dkg
+
+# Lido OBOL
+CHARON_VERSION=latest
+VE_OPERATOR_ID=
+VE_STAKING_MODULE_ID=
+VE_LOCATOR_ADDRESS=
+VE_ORACLE_ADDRESSES_ALLOWLIST=
+ENABLE_DIST_ATTESTATION_AGGR=
+LIDO_DV_EXIT_EXIT_EPOCH=
+
+# Commit-Boost
+CB_PBS_DOCKER_TAG=latest
+CB_PBS_DOCKER_REPO=ghcr.io/commit-boost/pbs
 # MEV-Boost
 # SRC build target can be a tag, a branch, or a pr as "pr-ID"
-MEV_SRC_BUILD_TARGET=develop
+MEV_SRC_BUILD_TARGET=stable
 MEV_SRC_REPO=https://github.com/flashbots/mev-boost
 MEV_DOCKER_TAG=latest
 MEV_DOCKER_REPO=flashbots/mev-boost
@@ -257,8 +282,8 @@ BESU_DOCKERFILE=Dockerfile.binary
 # SRC build target can be a tag, a branch, or a pr as "pr-ID"
 ERIGON_SRC_BUILD_TARGET='$(git describe --tags $(git rev-list --tags --max-count=1))'
 ERIGON_SRC_REPO=https://github.com/ledgerwatch/erigon
-ERIGON_DOCKER_TAG=v2.60.4
-ERIGON_DOCKER_REPO=thorax/erigon
+ERIGON_DOCKER_TAG=latest
+ERIGON_DOCKER_REPO=erigontech/erigon
 ERIGON_DOCKERFILE=Dockerfile.binary

 # Nethermind
@@ -301,11 +326,11 @@ DEPCLI_SRC_REPO=https://github.com/ethereum/staking-deposit-cli
 DEPCLI_DOCKER_TAG=nonesuch

 # traefik and ddns-updater
-TRAEFIK_TAG=v3.0
+TRAEFIK_TAG=v3.1
 DDNS_TAG=v2

 # For the Node Dashboard, define a regex of mount points to ignore for the diskspace check.
 NODE_EXPORTER_IGNORE_MOUNT_REGEX='^/(dev|proc|sys|run|var/lib/docker/.+)($|/)'

 # Used by ethd update - please do not adjust
-ENV_VERSION=13
+ENV_VERSION=17
diff --git a/erigon.yml b/erigon.yml
index 4fe00f8c..4e3ae7ce 100644
--- a/erigon.yml
+++ b/erigon.yml
@@ -15,8 +15,8 @@ services:
       args:
         - BUILD_TARGET=${ERIGON_SRC_BUILD_TARGET:-'$(git describe --tags $(git rev-list --tags --max-count=1))'}
         - SRC_REPO=${ERIGON_SRC_REPO:-https://github.com/ledgerwatch/erigon}
-        - DOCKER_TAG=${ERIGON_DOCKER_TAG:-stable}
-        - DOCKER_REPO=${ERIGON_DOCKER_REPO:-thorax/erigon}
+        - DOCKER_TAG=${ERIGON_DOCKER_TAG:-latest}
+        - DOCKER_REPO=${ERIGON_DOCKER_REPO:-erigontech/erigon}
     stop_grace_period: 5m
     image: erigon:local
     pull_policy: never
@@ -27,7 +27,7 @@ services:
       - ARCHIVE_NODE=${ARCHIVE_NODE:-}
       - NETWORK=${NETWORK}
       - IPV6=${IPV6:-false}
-      - DOCKER_TAG=${ERIGON_DOCKER_TAG:-stable}
+      - DOCKER_TAG=${ERIGON_DOCKER_TAG:-latest}
      - COMPOSE_FILE=${COMPOSE_FILE}
      - CL_P2P_PORT=${CL_P2P_PORT:-9000}
      - CL_REST_PORT=${CL_REST_PORT:-5052}
diff --git a/erigon/Dockerfile.binary b/erigon/Dockerfile.binary
index 13d7706f..26da6dff 100644
--- a/erigon/Dockerfile.binary
+++ b/erigon/Dockerfile.binary
@@ -1,5 +1,5 @@
-ARG DOCKER_TAG=stable
-ARG DOCKER_REPO=thorax/erigon
+ARG DOCKER_TAG=latest
+ARG DOCKER_REPO=erigontech/erigon

 FROM ${DOCKER_REPO}:${DOCKER_TAG}
diff --git a/erigon/Dockerfile.source b/erigon/Dockerfile.source
index 87fbd92b..4abb9b64 100644
--- a/erigon/Dockerfile.source
+++ b/erigon/Dockerfile.source
@@ -1,5 +1,5 @@
 # Build Erigon in a stock Go build container
-FROM golang:1.22-alpine AS builder
+FROM golang:1.23-alpine AS builder

 # Unused, this is here to avoid build time complaints
 ARG DOCKER_TAG
diff --git a/erigon/docker-entrypoint.sh b/erigon/docker-entrypoint.sh
index cb294ad4..c560bcc8 100755
--- a/erigon/docker-entrypoint.sh
+++ b/erigon/docker-entrypoint.sh
@@ -56,8 +56,39 @@ fi

 __caplin=""
 __db_params=""
-#if [[ "${DOCKER_TAG}" =~ "v3" || "${DOCKER_TAG}" = "latest" || "${DOCKER_TAG}" = "stable" ]]; then # No stable yet
-if [[ "${DOCKER_TAG}" =~ "v3" || "${DOCKER_TAG}" = "latest" ]]; then
+# Literal match intended
+# shellcheck disable=SC2076
+if [[ "${DOCKER_TAG}" =~ "2." || "${DOCKER_TAG}" = "latest" ]]; then
+# Check for network, and set prune accordingly
+  if [ "${ARCHIVE_NODE}" = "true" ]; then
+    echo "Erigon archive node without pruning"
+    __prune=""
+  else
+    if [[ "${NETWORK}" = "mainnet" ]]; then
+      echo "mainnet: Running with prune.r.before=11052984 for eth deposit contract"
+      __prune="--prune=htc --prune.r.before=11052984"
+    elif [[ "${NETWORK}" = "goerli" ]]; then
+      echo "goerli: Running with prune.r.before=4367322 for eth deposit contract"
+      __prune="--prune=htc --prune.r.before=4367322"
+    elif [[ "${NETWORK}" = "sepolia" ]]; then
+      echo "sepolia: Running with prune.r.before=1273020 for eth deposit contract"
+      __prune="--prune=htc --prune.r.before=1273020"
+    elif [[ "${NETWORK}" = "gnosis" ]]; then
+      echo "gnosis: Running with prune.r.before=19469077 for gno deposit contract"
+      __prune="--prune=htc --prune.r.before=19469077"
+    elif [[ "${NETWORK}" = "holesky" ]]; then
+      echo "holesky: Running without prune.r for eth deposit contract"
+      __prune="--prune=htc"
+    elif [[ "${NETWORK}" =~ ^https?:// ]]; then
+      echo "Custom testnet: Running without prune.r for eth deposit contract"
+      __prune="--prune=htc"
+    else
+      echo "Unable to determine eth deposit contract, running without prune.r"
+      __prune="--prune=htc"
+    fi
+  fi
+  __db_params="--db.pagesize 16K --db.size.limit 8TB"
+else # Erigon v3
   if [ "${ARCHIVE_NODE}" = "true" ]; then
     echo "Erigon archive node without pruning"
     __prune="--prune.mode=archive"
@@ -77,47 +108,19 @@ if [[ "${DOCKER_TAG}" =~ "v3" || "${DOCKER_TAG}" = "latest" ]]; then
     __caplin+=" --beacon.api.addr=0.0.0.0 --beacon.api.port=${CL_REST_PORT} --beacon.api.cors.allow-origins=*"
     if [ "${MEV_BOOST}" = "true" ]; then
       __caplin+=" --caplin.mev-relay-url=${MEV_NODE}"
+      echo "MEV Boost enabled"
     fi
     if [ "${ARCHIVE_NODE}" = "true" ]; then
       __caplin+=" --caplin.archive=true"
     fi
     if [ -n "${RAPID_SYNC_URL}" ]; then
       __caplin+=" --caplin.checkpoint-sync-url=${RAPID_SYNC_URL}"
+      echo "Checkpoint sync enabled"
     else
       __caplin+=" --caplin.checkpoint-sync.disable=true"
     fi
     echo "Caplin parameters: ${__caplin}"
   fi
-else
-# Check for network, and set prune accordingly
-  if [ "${ARCHIVE_NODE}" = "true" ]; then
-    echo "Erigon archive node without pruning"
-    __prune=""
-  else
-    if [[ "${NETWORK}" = "mainnet" ]]; then
-      echo "mainnet: Running with prune.r.before=11052984 for eth deposit contract"
-      __prune="--prune=htc --prune.r.before=11052984"
-    elif [[ "${NETWORK}" = "goerli" ]]; then
-      echo "goerli: Running with prune.r.before=4367322 for eth deposit contract"
-      __prune="--prune=htc --prune.r.before=4367322"
-    elif [[ "${NETWORK}" = "sepolia" ]]; then
-      echo "sepolia: Running with prune.r.before=1273020 for eth deposit contract"
-      __prune="--prune=htc --prune.r.before=1273020"
-    elif [[ "${NETWORK}" = "gnosis" ]]; then
-      echo "gnosis: Running with prune.r.before=19469077 for gno deposit contract"
-      __prune="--prune=htc --prune.r.before=19469077"
-    elif [[ "${NETWORK}" = "holesky" ]]; then
-      echo "holesky: Running without prune.r for eth deposit contract"
-      __prune="--prune=htc"
-    elif [[ "${NETWORK}" =~ ^https?:// ]]; then
-      echo "Custom testnet: Running without prune.r for eth deposit contract"
-      __prune="--prune=htc"
-    else
-      echo "Unable to determine eth deposit contract, running without prune.r"
-      __prune="--prune=htc"
-    fi
-  fi
-  __db_params="--db.pagesize 16K --db.size.limit 8TB"
 fi

 if [ "${IPV6}" = "true" ]; then
diff --git a/ethd b/ethd
index 3842e24d..b16f9c91 100755
--- a/ethd
+++ b/ethd
@@ -9,19 +9,19 @@
 __compose_exe="docker compose"
__compose_upgraded=0 -dodocker() { +__dodocker() { $__docker_sudo $__docker_exe "$@" } -docompose() { +__docompose() { # I want word splitting here # shellcheck disable=SC2086 $__docker_sudo $__compose_exe "$@" } -determine_distro() { +__determine_distro() { # Determine OS platform __uname=$(uname | tr "[:upper:]" "[:lower:]") # If Linux, try to determine specific distribution @@ -43,25 +43,39 @@ determine_distro() { __distro=$(echo "$__distro" | tr "[:upper:]" "[:lower:]") if [[ "$__distro" = "ubuntu" ]]; then - if ! dpkg-query -W -f='${Status}' lsb-release 2>/dev/null | grep -q "ok installed"; then - ${__auto_sudo} apt-get update && ${__auto_sudo} apt-get -y install lsb-release + if [ "$__cannot_sudo" -eq 0 ]; then + if ! dpkg-query -W -f='${Status}' lsb-release 2>/dev/null | grep -q "ok installed"; then + echo "Installing lsb-release" + ${__auto_sudo} apt-get update && ${__auto_sudo} apt-get -y install lsb-release + fi + fi + if [ -n "$(command -v lsb_release 2>/dev/null)" ]; then + __os_major_version=$(lsb_release -r | cut -d: -f2 | sed s/'^\t'// | cut -d. -f1) + else + __os_major_version=24 # Without sudo and lsb_release let's just skip the check fi - __os_major_version=$(lsb_release -r | cut -d: -f2 | sed s/'^\t'// | cut -d. -f1) elif [[ "$__distro" =~ "debian" ]]; then - if ! dpkg-query -W -f='${Status}' lsb-release 2>/dev/null | grep -q "ok installed"; then - ${__auto_sudo} apt-get update && ${__auto_sudo} apt-get -y install lsb-release + if [ "$__cannot_sudo" -eq 0 ]; then + if ! dpkg-query -W -f='${Status}' lsb-release 2>/dev/null | grep -q "ok installed"; then + echo "Installing lsb-release" + ${__auto_sudo} apt-get update && ${__auto_sudo} apt-get -y install lsb-release + fi + fi + if [ -n "$(command -v lsb_release 2>/dev/null)" ]; then + __os_major_version=$(lsb_release -r | cut -f2) + else + __os_major_version=12 # Without sudo and lsb_release let's just skip the check fi - __os_major_version=$(lsb_release -r | cut -f2) fi } -handle_docker_sudo() { +__handle_docker_sudo() { set +e if [[ "$__distro" =~ "debian" || "$__distro" = "ubuntu" ]]; then systemctl status docker >/dev/null - result=$? - if [ ! "${result}" -eq 0 ]; then + __result=$? + if [ ! "${__result}" -eq 0 ]; then echo "The Docker daemon is not running. Please check Docker installation." echo "\"sudo systemctl status docker\" and \"sudo journalctl -fu docker\" will be helpful." echo "Aborting." @@ -80,24 +94,34 @@ handle_docker_sudo() { fi __docker_sudo="" if ! docker images >/dev/null 2>&1; then + if [ "$__cannot_sudo" -eq 1 ]; then + echo "Cannot call Docker and cannot use sudo. Please make your user part of the docker group" + exit 1 + fi echo "Will use sudo to access Docker" __docker_sudo="sudo" fi } -handle_root() { +__handle_root() { + __cannot_sudo=0 if [ "${EUID}" -eq 0 ]; then __as_owner="sudo -u ${OWNER}" __auto_sudo="" else __as_owner="" - __auto_sudo="sudo" + if groups | grep -q '\bsudo\b' || groups | grep -q '\badmin\b'; then + __auto_sudo="sudo" + else + __auto_sudo="" + __cannot_sudo=1 + fi fi } -upgrade_compose() { +__upgrade_compose() { if ! 
type -P docker-compose >/dev/null 2>&1; then echo "Docker Compose has already been updated to V2" return @@ -153,7 +177,7 @@ upgrade_compose() { } -check_compose_version() { +__check_compose_version() { # Check for Compose V2 (docker compose) vs Compose V1 (docker-compose) if docker compose version >/dev/null 2>&1; then __compose_version=$($__docker_sudo docker compose version | sed -n -E -e "s/.*version [v]?([0-9.-]*).*/\1/ip") @@ -175,10 +199,10 @@ check_compose_version() { echo echo "It is recommended that you replace Compose V1 with Compose V2." while true; do - read -rp "Do you want to update Docker Compose to V2? (yes/no) " yn - case $yn in + read -rp "Do you want to update Docker Compose to V2? (yes/no) " __yn + case $__yn in [Nn]* ) echo "Please be sure to update Docker Compose yourself!"; break;; - * ) upgrade_compose; break;; + * ) __upgrade_compose; break;; esac done fi @@ -186,7 +210,7 @@ check_compose_version() { } -prep_conffiles() { +__prep_conffiles() { # Create custom-prom.yml if it doesn't exist if [ ! -f "./prometheus/custom-prom.yml" ]; then ${__as_owner} touch "./prometheus/custom-prom.yml" @@ -203,18 +227,31 @@ prep_conffiles() { ${__as_owner} cp ssv-config/dkg-config-sample.yaml ssv-config/dkg-config.yaml fi # Make sure local user owns the dkg output dir and everything in it - if find .eth/dkg_output \! -user "${OWNER}" -o \! -group "${OWNER_GROUP}" -o \! -perm 755 | grep -q .; then - ${__auto_sudo} chown -R "${OWNER}:${OWNER_GROUP}" .eth/dkg_output - ${__auto_sudo} chmod -R 755 .eth/dkg_output + if find .eth/dkg_output \! -user "${OWNER}" -o \! -group "${OWNER_GROUP}" | grep -q .; then + if [ "$__cannot_sudo" -eq 0 ]; then + echo "Fixing ownership of .eth/dkg_output" + ${__auto_sudo} chown -R "${OWNER}:${OWNER_GROUP}" .eth/dkg_output + ${__auto_sudo} chmod -R 755 .eth/dkg_output + else + echo "Ownership of .eth/dkg_output should be fixed, but this user can't sudo" + fi + fi +# Make sure the dkg output dir and its contents are mod 0755 + if find .eth/dkg_output \! -perm 755 | grep -q .; then + chmod -R 755 .eth/dkg_output fi # Create ext-network.yml if it doesn't exist if [ ! -f "ext-network.yml" ]; then ${__as_owner} cp ext-network.yml.sample ext-network.yml fi +# Create cb-config.toml if it doesn't exist + if [ ! -f "commit-boost/cb-config.toml" ]; then + ${__as_owner} cp commit-boost/cb-config.toml.sample commit-boost/cb-config.toml + fi } -check_for_snap() { +__check_for_snap() { if [[ "$__distro" = "ubuntu" && -n "$(command -v snap)" ]] && snap list 2>/dev/null | grep -qw 'docker'; then echo echo "WARNING! Snap Docker package detected. This WILL result in issues." @@ -234,7 +271,7 @@ check_for_snap() { } -install-bash-completions() { +__install_bash_completions() { if [[ "$OSTYPE" == "darwin"* ]]; then echo "Skipping installation of tab completions (not supported on macOS)" else @@ -248,7 +285,10 @@ install-bash-completions() { install() { - + if [ "$__cannot_sudo" -eq 1 ]; then + echo "The install command requires the user to be part of the sudo group, or on macOS the admin group" + exit 1 + fi if [[ "$__distro" = "ubuntu" ]]; then ${__auto_sudo} apt-get update ${__auto_sudo} apt-get install -y ca-certificates curl gnupg whiptail chrony pkg-config @@ -261,8 +301,8 @@ install() { exit 1 fi read -rp "This will attempt to install Docker and make your user part of the docker group. Do you wish to \ -continue? (no/yes) " yn - case $yn in +continue? 
(no/yes) " __yn + case $__yn in [Yy]* ) ;; * ) echo "Aborting, no changes made"; return 0;; esac @@ -300,8 +340,8 @@ continue? (no/yes) " yn exit 1 fi read -rp "This will attempt to install Docker and make your user part of the docker group. Do you wish to \ -continue? (no/yes) " yn - case $yn in +continue? (no/yes) " __yn + case $__yn in [Yy]* ) ;; * ) echo "Aborting, no changes made"; return 0;; esac @@ -336,11 +376,11 @@ continue? (no/yes) " yn fi # We only get here on Ubuntu or Debian - install-bash-completions + __install_bash_completions __install_base=$(basename "$(dirname "$(realpath "${BASH_SOURCE[0]}")")") if [ "${__install_base}" = "eth-docker" ]; then - read -rp "Do you want to be able to call 'ethd' from anywhere? (yes/no) " yn - case $yn in + read -rp "Do you want to be able to call 'ethd' from anywhere? (yes/no) " __yn + case $__yn in [Nn]* ) return 0;; * ) ;; esac @@ -362,17 +402,17 @@ continue? (no/yes) " yn __get_docker_free_space() { # set __free_space to what's available to Docker if [[ "$OSTYPE" == "darwin"* ]]; then # macOS doesn't expose docker root dir to the OS - __free_space=$(dodocker run --rm -v macos-space-check:/dummy busybox df -P /dummy | awk '/[0-9]%/{print $(NF-2)}') + __free_space=$(__dodocker run --rm -v macos-space-check:/dummy busybox df -P /dummy | awk '/[0-9]%/{print $(NF-2)}') else - __docker_dir=$(dodocker system info --format '{{.DockerRootDir}}') + __docker_dir=$(__dodocker system info --format '{{.DockerRootDir}}') __free_space=$(df -P "${__docker_dir}" | awk '/[0-9]%/{print $(NF-2)}') fi - re='^[0-9]+$' - if ! [[ "${__free_space}" =~ $re ]] ; then + __regex='^[0-9]+$' + if ! [[ "${__free_space}" =~ $__regex ]] ; then echo "Unable to determine free disk space. This is likely a bug." if [[ "$OSTYPE" == "darwin"* ]]; then - echo "df reports $(dodocker run --rm -v macos-space-check:/dummy busybox df -P /dummy) and __free_space is ${__free_space}" + echo "df reports $(__dodocker run --rm -v macos-space-check:/dummy busybox df -P /dummy) and __free_space is ${__free_space}" else echo "df reports $(df -P "${__docker_dir}") and __free_space is ${__free_space}" fi @@ -384,7 +424,7 @@ __get_docker_free_space() { # set __free_space to what's available to Docker __display_docker_dir() { if [[ "$OSTYPE" == "darwin"* ]]; then # macOS doesn't expose docker root dir to the OS echo "Here's total and used space on Docker's virtual volume" - dodocker run --rm -v macos-space-check:/dummy busybox df -h /dummy + __dodocker run --rm -v macos-space-check:/dummy busybox df -h /dummy else echo "Here's total and used space on ${__docker_dir}" df -h "${__docker_dir}" @@ -394,12 +434,12 @@ __display_docker_dir() { __display_docker_volumes() { echo - if [ -z "$(dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+")" ]; then + if [ -z "$(__dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+")" ]; then echo "There are no Docker volumes for this copy of ${__project_name}" echo else echo "Here are the Docker volumes used by this copy of ${__project_name} and their space usage:" - dodocker system df -v | grep -A 50 "VOLUME NAME" | grep "^$(basename "$(dirname "$(realpath "${BASH_SOURCE[0]}")")")" + __dodocker system df -v | grep -A 50 "VOLUME NAME" | grep "^$(basename "$(dirname "$(realpath "${BASH_SOURCE[0]}")")")" echo echo "If your Consensus Layer client takes more than 300 GiB, you can resync it with" echo "\"${__me} resync-consensus\"." 
@@ -425,15 +465,15 @@ space() { # Warn user if space is low, so they can prune -check_disk_space() { +__check_disk_space() { __get_docker_free_space - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - var="AUTOPRUNE_NM" - auto_prune=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - var="NETWORK" - NETWORK=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + __var="AUTOPRUNE_NM" + __auto_prune=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + __var="NETWORK" + NETWORK=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ "${NETWORK}" = "mainnet" ] || [ "${NETWORK}" = "gnosis" ]; then __min_free=314572800 @@ -447,10 +487,10 @@ check_disk_space() { # Literal match intended # shellcheck disable=SC2076 - if [[ "${value}" =~ "nethermind.yml" ]] && [[ "${__free_space}" -lt "${__min_free}" ]]; then + if [[ "${__value}" =~ "nethermind.yml" ]] && [[ "${__free_space}" -lt "${__min_free}" ]]; then echo echo "You are running Nethermind and have less than ${__min_gib} GiB of free disk space." - if [ "${auto_prune}" = true ]; then + if [ "${__auto_prune}" = true ]; then echo "It should currently be auto-pruning, check logs with \"$__me logs -f --tail 500 execution | grep \ Full\". Free space:" else @@ -460,7 +500,7 @@ Full\". Free space:" echo __display_docker_dir __display_docker_volumes - elif [[ "${value}" =~ "geth.yml" ]] && [[ "${__free_space}" -lt 104857600 ]]; then + elif [[ "${__value}" =~ "geth.yml" ]] && [[ "${__free_space}" -lt 104857600 ]]; then echo echo "You are running Geth and have less than 100 GiB of free disk space." echo "You may resync from scratch to use PBSS and slow on-disk DB growth, if you haven't done so already, with \"$__me resync-execution\"." @@ -468,7 +508,7 @@ Full\". Free space:" echo __display_docker_dir __display_docker_volumes - elif [[ "${value}" =~ "besu.yml" ]] && [[ "${__free_space}" -lt 52428800 ]]; then + elif [[ "${__value}" =~ "besu.yml" ]] && [[ "${__free_space}" -lt 52428800 ]]; then echo echo "You are running Besu and have less than 50 GiB of free disk space." echo @@ -488,158 +528,158 @@ Full\". 
Free space:" } -source_build() { +__source_build() { # Check whether there's a source-built client and if so, force it with --no-cache - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) - case "${value}" in + case "${__value}" in *deposit-cli.yml* ) - docompose --profile tools build --pull --no-cache deposit-cli-new + __docompose --profile tools build --pull --no-cache deposit-cli-new ;; esac - case "${value}" in + case "${__value}" in *mev-boost.yml* ) - var="MEV_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache mev-boost + __var="MEV_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache mev-boost fi ;; esac - case "${value}" in + case "${__value}" in *reth.yml* ) - var="RETH_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache execution + __var="RETH_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; *geth.yml* ) - var="GETH_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache execution + __var="GETH_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; *besu.yml* ) - var="BESU_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache execution + __var="BESU_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; *nethermind.yml* ) - var="NM_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache execution + __var="NM_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; *erigon.yml* ) - var="ERIGON_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache execution + __var="ERIGON_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; *nimbus-el.yml* ) - var="NIMEL_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache execution + __var="NIMEL_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; esac - case "${value}" in + case "${__value}" in *lighthouse.yml* | *lighthouse-cl-only.yml* ) - 
var="LH_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="LH_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; *teku.yml* | *teku-allin1.yml* | *teku-cl-only.yml* ) - var="TEKU_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="TEKU_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; *lodestar.yml* | *lodestar-cl-only.yml* ) - var="LS_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="LS_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; *nimbus.yml* | *nimbus-allin1.yml* | *nimbus-cl-only.yml* ) - var="NIM_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="NIM_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; *prysm.yml* | *prysm-cl-only.yml* ) - var="PRYSM_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="PRYSM_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; *grandine.yml* | *grandine-allin1.yml* | *grandine-cl-only.yml* ) - var="GRANDINE_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="GRANDINE_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; esac } -migrate_compose_file() { -# When this gets called $var is COMPOSE_FILE and $value is what is set in .env for it +__migrate_compose_file() { +# When this gets called $__var is COMPOSE_FILE and $__value is what is set in .env for it # Some files have been renamed and others removed altogether - FROM_YML=( ) - TO_YML=( ) + __from_yml=( ) + __to_yml=( ) IFS=":" set -o noglob # Globbing is off # shellcheck disable=SC2206 - __ymlarray=($value) # split+glob with glob disabled, and split using : as delimiter + __ymlarray=($__value) # split+glob with glob disabled, and split using : as delimiter set +o noglob # Unset restores default unset IFS - value="" - for n in "${!__ymlarray[@]}"; do - __ymlfile="${__ymlarray[n]}" - for index in "${!FROM_YML[@]}"; do - if [ "${FROM_YML[index]}" = "${__ymlfile}" ]; then - __ymlfile=${TO_YML[index]} + __value="" + for __n in "${!__ymlarray[@]}"; do + __ymlfile="${__ymlarray[__n]}" + for __index in "${!__from_yml[@]}"; do 
+ if [ "${__from_yml[__index]}" = "${__ymlfile}" ]; then + __ymlfile=${__to_yml[__index]} break fi done if [ -n "${__ymlfile}" ]; then - if [ -z "${value}" ]; then - value="${__ymlfile}" + if [ -z "${__value}" ]; then + __value="${__ymlfile}" else - value="${value}:${__ymlfile}" + __value="${__value}:${__ymlfile}" fi fi done } -ssv_switch() { +__ssv_switch() { echo "Detected legacy SSV Node. Migrating config to new testnet." echo echo "Stopping SSV Node container" - __node=$(dodocker ps --format '{{.Names}}' | grep 'ssv2-node') - dodocker stop "${__node}" && dodocker rm -f "${__node}" - dodocker volume rm "$(dodocker volume ls -q | grep "$(basename "$(realpath .)")"_ssv2-data)" + __node=$(__dodocker ps --format '{{.Names}}' | grep 'ssv2-node') + __dodocker stop "${__node}" && __dodocker rm -f "${__node}" + __dodocker volume rm "$(__dodocker volume ls -q | grep "$(basename "$(realpath .)")"_ssv2-data)" echo echo "SSV Node stopped and database deleted." echo @@ -648,9 +688,9 @@ ssv_switch() { rm blox-ssv-config.yaml echo "Backup copy blox-ssv-config.yaml.bak created" echo "Making changes to ssv-config/config.yaml" - var="NETWORK" - NETWORK=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - sed -i'.original' 's/blox-ssv2.yml/ssv.yml/' "${ENV_FILE}".source + __var="NETWORK" + NETWORK=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + sed -i'.original' 's/blox-ssv2.yml/ssv.yml/' "${__env_file}".source if ! grep -q "LogFilePath:" ssv-config/config.yaml; then # macOS-isms: Newline for sed add sed -i'.original' '/global:/a\ @@ -686,31 +726,38 @@ MetricsAPIPort: 15000 } -delete_reth() { +__delete_reth() { # Check for Reth - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "reth.yml" ]]; then + if [[ ! "${__value}" =~ "reth.yml" ]]; then return 0 fi # Check Reth version, only continue if not on alpha - var="RETH_DOCKER_TAG" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="RETH_DOCKER_TAG" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 - if [[ "${value}" =~ "alpha" ]]; then + if [[ "${__value}" =~ "alpha" ]]; then + return 0 + fi + + if [ -z "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")" ]; then # No Reth volume return 0 fi - if [ -z "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")" ]; then # No Reth volume +# Has db been initialized? + __db_exists=$(__dodocker run --rm -v "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")":"/var/lib/reth" \ + alpine:3 sh -c 'if [ -f "/var/lib/reth/db/database.version" ]; then echo true; else echo false; fi') + if [ "$__db_exists" = "false" ]; then return 0 fi # Check Reth db version - __db_version="$(dodocker run --rm -v "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")":"/var/lib/reth" \ + __db_version="$(__dodocker run --rm -v "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")":"/var/lib/reth" \ alpine:3 cat /var/lib/reth/db/database.version)" if [ "${__db_version}" -ne "1" ]; then return 0 @@ -720,8 +767,8 @@ delete_reth() { echo if [ "${__non_interactive:-0}" -eq 0 ]; then while true; do - read -rp "WARNING - About to delete the Reth database. 
Do you wish to continue? (Y/n) " yn - case $yn in + read -rp "WARNING - About to delete the Reth database. Do you wish to continue? (Y/n) " __yn + case $__yn in [Nn]o | [Nn] ) echo "No changes made"; return 0;; * ) break;; esac @@ -729,40 +776,41 @@ delete_reth() { fi echo "Stopping Reth container" - docompose stop execution && docompose rm -f execution - dodocker volume rm "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")" + __docompose stop execution && __docompose rm -f execution + __dodocker volume rm "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")" echo echo "Reth stopped and database deleted." echo } -delete_erigon() { +__delete_erigon() { # Check for Erigon - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "erigon.yml" ]]; then + if [[ ! "${__value}" =~ "erigon.yml" ]]; then return 0 fi # Check Erigon version, only continue if v3 - var="ERIGON_DOCKER_TAG" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="ERIGON_DOCKER_TAG" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + __var="ERIGON_DOCKER_REPO" + __repo=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 -# if [[ ! ("${value}" =~ "v3" || "${value}" = "latest" || "${value}" = "stable") ]]; then # No stable yet - if [[ ! ("${value}" =~ "v3" || "${value}" = "latest") ]]; then + if [[ ! ("${__value}" =~ "v3" || ( "${__value}" = "latest" && "${__repo}" =~ "thorax" ) || "${__value}" = "main-latest") ]]; then return 0 fi - if [ -z "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")" ]; then # No Erigon volume + if [ -z "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")" ]; then # No Erigon volume return 0 fi # Detect Erigon v3 by directory caplin/latest - __erigon_v3=$(dodocker run --rm -v "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")":"/var/lib/erigon" \ + __erigon_v3=$(__dodocker run --rm -v "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")":"/var/lib/erigon" \ alpine:3 sh -c 'if [ -d "/var/lib/erigon/caplin/latest" ]; then echo true; else echo false; fi') if [ "$__erigon_v3" = "true" ]; then return 0 @@ -771,41 +819,41 @@ delete_erigon() { echo "Detected Erigon. For Erigon v3, it will need to be re-synced from scratch." echo while true; do - read -rp "WARNING - About to delete the Erigon database. Do you wish to continue? (Y/n) " yn - case $yn in + read -rp "WARNING - About to delete the Erigon database. Do you wish to continue? (Y/n) " __yn + case $__yn in [Nn]o | [Nn] ) echo "Aborting, no changes made"; exit 130;; * ) break;; esac done echo "Stopping Erigon container" - docompose stop execution && docompose rm -f execution - dodocker volume rm "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")" + __docompose stop execution && __docompose rm -f execution + __dodocker volume rm "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")" echo echo "Erigon stopped and database deleted." 
echo } -upgrade_postgres() { +__upgrade_postgres() { # Check for web3signer - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "web3signer.yml" ]]; then + if [[ ! "${__value}" =~ "web3signer.yml" ]]; then return 0 fi __source_vol="$(basename "$(pwd)")_web3signer-slashing-data" - if [ -z "$(dodocker volume ls -q -f "name=${__source_vol}")" ]; then + if [ -z "$(__dodocker volume ls -q -f "name=${__source_vol}")" ]; then return 0 fi __target_pg=16 __during_postgres=1 - __source_pg="$(dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ + __source_pg="$(__dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ alpine:3 cat /var/lib/postgresql/data/PG_VERSION)" if [ "${__source_pg}" -lt "${__target_pg}" ]; then @@ -813,8 +861,8 @@ upgrade_postgres() { echo if [ "${__non_interactive:-0}" -eq 0 ]; then while true; do - read -rp "Would you like to migrate to PostgreSQL ${__target_pg}? (Y/n) " yn - case $yn in + read -rp "Would you like to migrate to PostgreSQL ${__target_pg}? (Y/n) " __yn + case $__yn in [Nn]o | [Nn] ) echo "Keeping PostgreSQL at version ${__source_pg}"; return 0;; * ) break;; esac @@ -824,11 +872,11 @@ upgrade_postgres() { return 0 fi - __source_size="$(dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ + __source_size="$(__dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ alpine:3 du -s /var/lib/postgresql/data/ | awk '{print $1}')" - re='^[0-9]+$' - if ! [[ "${__source_size}" =~ $re ]] ; then + __regex='^[0-9]+$' + if ! [[ "${__source_size}" =~ $__regex ]] ; then echo "Unable to determine database size. This is likely a bug." echo "__source_size is ${__source_size}" return 70 @@ -850,9 +898,9 @@ upgrade_postgres() { __backup_vol="$(basename "$(pwd)")_web3signer-slashing-data-pg${__source_pg}-backup" echo "Stopping Web3signer" - docompose stop web3signer && docompose rm -f web3signer + __docompose stop web3signer && __docompose rm -f web3signer echo "Stopping PostgreSQL" - docompose stop postgres && docompose rm -f postgres + __docompose stop postgres && __docompose rm -f postgres echo echo "Migrating database from PostgreSQL ${__source_pg} to PostgreSQL ${__target_pg}" @@ -860,48 +908,48 @@ upgrade_postgres() { echo "In failure case, do not start Web3signer again, instead seek help on Ethstaker Discord." echo - dodocker pull "pats22/postgres-upgrade:${__source_pg}-to-${__target_pg}" - dodocker volume create "${__migrated_vol}" - dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/${__source_pg}/data" \ + __dodocker pull "pats22/postgres-upgrade:${__source_pg}-to-${__target_pg}" + __dodocker volume create "${__migrated_vol}" + __dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/${__source_pg}/data" \ -v "${__migrated_vol}":"/var/lib/postgresql/${__target_pg}/data" \ "pats22/postgres-upgrade:${__source_pg}-to-${__target_pg}" # Adjust ownership. 
We use 70; postgres-upgrade creates it with 999 - dodocker run --rm -v "${__migrated_vol}":"/var/lib/postgres" \ + __dodocker run --rm -v "${__migrated_vol}":"/var/lib/postgres" \ alpine:3 chown -R 70:70 /var/lib/postgres # Conversion can leave us with a pg_hba.conf that does not allow connections - dodocker run --rm -v "${__migrated_vol}":"/var/lib/postgres" \ + __dodocker run --rm -v "${__migrated_vol}":"/var/lib/postgres" \ alpine:3 sh -c 'grep -qxE "host\s+all\s+all\s+all\s+scram-sha-256" /var/lib/postgres/pg_hba.conf \ || echo "host all all all scram-sha-256" \ >> /var/lib/postgres/pg_hba.conf' echo echo "Migration complete, copying data in web3signer-slashing-data volume to backup" - dodocker volume create "${__backup_vol}" - dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ + __dodocker volume create "${__backup_vol}" + __dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ -v "${__backup_vol}":"/var/lib/postgresql/${__source_pg}/data" \ alpine:3 cp -a /var/lib/postgresql/data/. "/var/lib/postgresql/${__source_pg}/data/" __during_migrate=1 echo "Moving migrated data to web3signer-slashing-data volume" - dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ + __dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ alpine:3 rm -rf /var/lib/postgresql/data/* - dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ + __dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ -v "${__migrated_vol}":"/var/lib/postgresql/${__target_pg}/data" \ alpine:3 cp -a "/var/lib/postgresql/${__target_pg}/data/." /var/lib/postgresql/data/ __migrated=1 - dodocker volume remove "${__migrated_vol}" + __dodocker volume remove "${__migrated_vol}" echo echo "Adjusting PostgreSQL Docker tag" - if [ ! -f "${ENV_FILE}.source" ]; then # update() didn't migrate env, let's make sure .env.source exists - cp "${ENV_FILE}" "${ENV_FILE}.source" + if [ ! -f "${__env_file}.source" ]; then # update() didn't migrate env, let's make sure .env.source exists + cp "${__env_file}" "${__env_file}.source" fi - var="PG_DOCKER_TAG" + __var="PG_DOCKER_TAG" # This gets used, but shellcheck doesn't recognize that # shellcheck disable=SC2034 PG_DOCKER_TAG=${__target_pg}-bookworm # To bookworm to avoid collation errors - also a faster PostgreSQL - set_value_in_env + __set_value_in_env echo "Web3signer has been stopped. You'll need to run \"$__me up\" to start it again." echo echo "A copy of your old slashing protection database is in the Docker volume ${__backup_vol}." @@ -911,12 +959,12 @@ upgrade_postgres() { __lookup_cf_zone() { # Migrates traefik-cf setup to use Zone ID - __compose_ymls=$(sed -n -e "s/^COMPOSE_FILE=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - __dns_token=$(sed -n -e "s/^CF_DNS_API_TOKEN=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - __zone_token=$(sed -n -e "s/^CF_ZONE_API_TOKEN=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - __domain=$(sed -n -e "s/^DOMAIN=\(.*\)/\1/p" "${ENV_FILE}.source" || true) + __compose_ymls=$(sed -n -e "s/^COMPOSE_FILE=\(.*\)/\1/p" "${__env_file}.source" || true) + __dns_token=$(sed -n -e "s/^CF_DNS_API_TOKEN=\(.*\)/\1/p" "${__env_file}.source" || true) + __zone_token=$(sed -n -e "s/^CF_ZONE_API_TOKEN=\(.*\)/\1/p" "${__env_file}.source" || true) + __domain=$(sed -n -e "s/^DOMAIN=\(.*\)/\1/p" "${__env_file}.source" || true) if [[ ! 
$__compose_ymls =~ traefik-cf.yml ]]; then - value="" + __value="" return elif [[ -n $__dns_token ]]; then if [[ -n $__zone_token ]]; then @@ -925,79 +973,110 @@ __lookup_cf_zone() { # Migrates traefik-cf setup to use Zone ID __token=$__dns_token fi set +e - value=$(docompose run --rm curl-jq sh -c \ + __value=$(__docompose run --rm curl-jq sh -c \ "curl -s \"https://api.cloudflare.com/client/v4/zones?name=${__domain}\" -H \"Authorization: Bearer ${__token}\" \ -H \"Content-Type: application/json\" | jq -r '.result[0].id'" | tail -n 1) __code=$? if [[ "$__code" -ne 0 ]]; then - value="" + __value="" return fi - __success=$(docompose run --rm curl-jq sh -c \ + __success=$(__docompose run --rm curl-jq sh -c \ "curl -s \"https://api.cloudflare.com/client/v4/zones?name=${__domain}\" -H \"Authorization: Bearer ${__token}\" \ -H \"Content-Type: application/json\" | jq -r '.success'" | tail -n 1) set -e if [ "${__success}" = "true" ]; then return else - value="" + __value="" return fi else - value="" + __value="" return fi } -envmigrate() { - if [ ! -f "${ENV_FILE}" ]; then +__enable_v6() { + if [ "${__docker_major_version}" -lt 27 ]; then + return + fi + + __var="IPV6" + IPV6=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${IPV6}" = "true" ]; then + return + fi + + echo "Testing IPv6 host connectivity" + if ! ping -c1 2001:4860:4860::8888 >/dev/null; then + return + fi + + echo "Testing IPv6 Docker connectivity" + __dodocker network create --ipv6 ip6net_ethd_test + __v6_works=$(__dodocker run --rm --network ip6net_ethd_test busybox sh -c \ + "if ping -c1 -6 2001:4860:4860::8888 >/dev/null; then echo true; else echo false; fi") + __dodocker network rm ip6net_ethd_test + + if [ "${__v6_works}" = "true" ]; then + echo "Enabling IPv4/6 dual-stack for your Eth Docker setup" + IPV6="true" + __set_value_in_env + __enabled_v6=1 + fi +} + + +__env_migrate() { + if [ ! 
-f "${__env_file}" ]; then
     return 0
   fi

-  ALL_VARS=( COMPOSE_FILE FEE_RECIPIENT EL_NODE GRAFFITI DEFAULT_GRAFFITI NETWORK MEV_BOOST MEV_RELAYS MEV_MIN_BID \
+  __all_vars=( COMPOSE_FILE FEE_RECIPIENT EL_NODE OBOL_EL_NODE GRAFFITI DEFAULT_GRAFFITI NETWORK MEV_BOOST MEV_RELAYS MEV_MIN_BID \
     MEV_NODE CL_MAX_PEER_COUNT CL_MIN_PEER_COUNT EL_MAX_PEER_COUNT EL_MIN_PEER_COUNT DOMAIN ACME_EMAIL ANCIENT_DIR \
     AUTOPRUNE_NM LOGS_LABEL CF_DNS_API_TOKEN CF_ZONE_API_TOKEN CF_ZONE_ID AWS_PROFILE AWS_HOSTED_ZONE_ID \
     GRAFANA_HOST SIREN_HOST DISTRIBUTED BESU_HEAP TEKU_HEAP PROM_HOST HOST_IP SHARE_IP PRYSM_HOST EE_HOST \
     EL_HOST EL_LB EL_WS_HOST EL_WS_LB CL_HOST CL_LB VC_HOST DDNS_SUBDOMAIN IPV6 DDNS_PROXY RAPID_SYNC_URL \
-    CL_NODE BEACON_STATS_API BEACON_STATS_MACHINE EL_P2P_PORT CL_P2P_PORT WEB3SIGNER PRYSM_PORT DOPPELGANGER \
+    CL_NODE OBOL_CL_NODE BEACON_STATS_API BEACON_STATS_MACHINE EL_P2P_PORT CL_P2P_PORT WEB3SIGNER PRYSM_PORT DOPPELGANGER \
     PRYSM_UDP_PORT CL_QUIC_PORT GRAFANA_PORT SIREN_PORT PROMETHEUS_PORT KEY_API_PORT TRAEFIK_WEB_PORT \
     TRAEFIK_WEB_HTTP_PORT CL_REST_PORT EL_RPC_PORT EL_WS_PORT EE_PORT ERIGON_TORRENT_PORT LOG_LEVEL JWT_SECRET \
-    EL_EXTRAS CL_EXTRAS VC_EXTRAS ARCHIVE_NODE SSV_P2P_PORT SSV_P2P_PORT_UDP ERIGON_P2P_PORT_2 \
+    EL_EXTRAS CL_EXTRAS VC_EXTRAS ARCHIVE_NODE SSV_P2P_PORT SSV_P2P_PORT_UDP OBOL_P2P_PORT ERIGON_P2P_PORT_2 \
     ERIGON_P2P_PORT_3 LODESTAR_HEAP SSV_DKG_PORT SIREN_PASSWORD )
-  TARGET_VARS=( ETH_DOCKER_TAG NIM_SRC_BUILD_TARGET NIM_SRC_REPO NIM_DOCKER_TAG NIM_DOCKER_VC_TAG NIM_DOCKER_REPO \
+  __target_vars=( ETH_DOCKER_TAG NIM_SRC_BUILD_TARGET NIM_SRC_REPO NIM_DOCKER_TAG NIM_DOCKER_VC_TAG NIM_DOCKER_REPO \
     NIM_DOCKER_VC_REPO NIM_DOCKERFILE TEKU_SRC_BUILD_TARGET TEKU_SRC_REPO TEKU_DOCKER_TAG TEKU_DOCKER_REPO \
-    TEKU_DOCKERFILE LH_SRC_BUILD_TARGET LH_SRC_REPO LH_DOCKER_TAG LH_DOCKER_REPO LH_DOCKERFILE \
-    PRYSM_SRC_BUILD_TARGET PRYSM_SRC_REPO PRYSM_DOCKER_TAG PRYSM_DOCKER_VC_TAG PRYSM_DOCKER_CTL_TAG \
+    TEKU_DOCKERFILE LH_SRC_BUILD_TARGET LH_SRC_REPO LH_DOCKER_TAG LH_DOCKER_REPO LH_DOCKERFILE SSV_NODE_REPO \
+    PRYSM_SRC_BUILD_TARGET PRYSM_SRC_REPO PRYSM_DOCKER_TAG PRYSM_DOCKER_VC_TAG PRYSM_DOCKER_CTL_TAG SSV_DKG_REPO \
     PRYSM_DOCKER_REPO PRYSM_DOCKER_VC_REPO PRYSM_DOCKER_CTL_REPO PRYSM_DOCKERFILE ERIGON_SRC_BUILD_TARGET \
     ERIGON_SRC_REPO ERIGON_DOCKER_TAG ERIGON_DOCKER_REPO ERIGON_DOCKERFILE MEV_SRC_BUILD_TARGET MEV_SRC_REPO \
     MEV_DOCKERFILE MEV_DOCKER_TAG MEV_DOCKER_REPO NIMEL_SRC_BUILD_TARGET NIMEL_SRC_REPO NIMEL_DOCKER_TAG \
     NIMEL_DOCKER_REPO NIMEL_DOCKERFILE LS_SRC_BUILD_TARGET LS_SRC_REPO LS_DOCKER_TAG LS_DOCKER_REPO LS_DOCKERFILE \
-    GETH_SRC_BUILD_TARGET GETH_SRC_REPO GETH_DOCKER_TAG GETH_DOCKER_REPO TRAEFIK_TAG DDNS_TAG \
-    GETH_DOCKERFILE NM_SRC_BUILD_TARGET NM_SRC_REPO NM_DOCKER_TAG NM_DOCKER_REPO NM_DOCKERFILE \
-    BESU_SRC_BUILD_TARGET BESU_SRC_REPO BESU_DOCKER_TAG BESU_DOCKER_REPO BESU_DOCKERFILE SSV_NODE_TAG \
+    GETH_SRC_BUILD_TARGET GETH_SRC_REPO GETH_DOCKER_TAG GETH_DOCKER_REPO TRAEFIK_TAG DDNS_TAG CB_PBS_DOCKER_TAG \
+    GETH_DOCKERFILE NM_SRC_BUILD_TARGET NM_SRC_REPO NM_DOCKER_TAG NM_DOCKER_REPO NM_DOCKERFILE CB_PBS_DOCKER_REPO \
+    BESU_SRC_BUILD_TARGET BESU_SRC_REPO BESU_DOCKER_TAG BESU_DOCKER_REPO BESU_DOCKERFILE SSV_NODE_TAG CHARON_VERSION \
     DEPCLI_SRC_BUILD_TARGET DEPCLI_SRC_REPO DEPCLI_DOCKER_TAG W3S_DOCKER_TAG W3S_DOCKER_REPO \
     PG_DOCKER_TAG RETH_SRC_BUILD_TARGET RETH_SRC_REPO RETH_DOCKER_TAG RETH_DOCKER_REPO RETH_DOCKERFILE \
     GRANDINE_SRC_BUILD_TARGET GRANDINE_SRC_REPO GRANDINE_DOCKER_TAG GRANDINE_DOCKER_REPO GRANDINE_DOCKERFILE \
     SIREN_DOCKER_TAG 
SIREN_DOCKER_REPO SSV_DKG_TAG NODE_EXPORTER_IGNORE_MOUNT_REGEX ) - OLD_VARS=( ) - NEW_VARS=( ) + __old_vars=( ) + __new_vars=( ) # Always make sure we have a SIREN password - var="SIREN_PASSWORD" - SIREN_PASSWORD=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="SIREN_PASSWORD" + SIREN_PASSWORD=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ -z "${SIREN_PASSWORD}" ]; then SIREN_PASSWORD=$(head -c 8 /dev/urandom | od -A n -t u8 | tr -d '[:space:]' | sha256sum | head -c 32) - set_value_in_env + __set_value_in_env fi - var=ENV_VERSION - __target_ver=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "default.env" || true) - __source_ver=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var=ENV_VERSION + __target_ver=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "default.env" || true) + __source_ver=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Aggressive prune to work around Docker grabbing old clients. Here so it doesn't get called during config if [[ "${__source_ver}" -lt "9" ]]; then - dodocker system prune --force -a + __dodocker system prune --force -a fi if [[ "${__keep_targets}" -eq 1 && "${__target_ver}" -le "${__source_ver}" ]]; then # No changes in template, do nothing @@ -1005,111 +1084,121 @@ envmigrate() { fi if [ "${__keep_targets}" -eq 0 ]; then - echo "Refreshing build targets in ${ENV_FILE}" + echo "Refreshing build targets in ${__env_file}" else - echo "Migrating ${ENV_FILE} to version ${__target_ver}" + echo "Migrating ${__env_file} to version ${__target_ver}" fi - ${__as_owner} cp "${ENV_FILE}" "${ENV_FILE}".source + + ${__as_owner} cp "${__env_file}" "${__env_file}".source __during_migrate=1 __migrated=1 - ${__as_owner} cp default.env "${ENV_FILE}" + ${__as_owner} cp default.env "${__env_file}" - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}.source" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}.source" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ "${value}" =~ "blox-ssv2.yml" ]]; then - ssv_switch + if [[ "${__value}" =~ "blox-ssv2.yml" ]]; then + __ssv_switch fi # Migrate over user settings - for var in "${ALL_VARS[@]}"; do - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - if [ -n "${value}" ] || [ "${var}" = "GRAFFITI" ] || [ "${var}" = "MEV_RELAYS" ] \ - || [ "${var}" = "ETH_DOCKER_TAG" ] || [ "${var}" = "RAPID_SYNC_URL" ]; then - if [ "${var}" = "COMPOSE_FILE" ]; then - migrate_compose_file + for __var in "${__all_vars[@]}"; do + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}.source" || true) + if [ -n "${__value}" ] || [ "${__var}" = "GRAFFITI" ] || [ "${__var}" = "MEV_RELAYS" ] \ + || [ "${__var}" = "ETH_DOCKER_TAG" ] || [ "${__var}" = "RAPID_SYNC_URL" ]; then + if [ "${__var}" = "COMPOSE_FILE" ]; then + __migrate_compose_file + fi + if [[ "${__source_ver}" -lt "17" && "${__var}" = "IPV6" ]]; then # One-time attempt; remove after Pectra + __enable_v6 + if [ "${__enabled_v6}" -eq 1 ]; then + __value="true" + fi fi - if [ "${var}" = "CL_QUIC_PORT" ]; then - __cl_port=$(sed -n -e "s/^CL_P2P_PORT=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - if [ -n "${__cl_port}" ] && [ "${__cl_port}" = "${value}" ]; then - value=$((value + 1)) - echo "Adjusted CL_QUIC_PORT to ${value} so it does not conflict with CL_P2P_PORT" + if [ "${__var}" = "CL_QUIC_PORT" ]; then + __cl_port=$(sed -n -e "s/^CL_P2P_PORT=\(.*\)/\1/p" "${__env_file}.source" || true) + if [ -n "${__cl_port}" ] && [ "${__cl_port}" 
= "${__value}" ]; then + __value=$((__value + 1)) + echo "Adjusted CL_QUIC_PORT to ${__value} so it does not conflict with CL_P2P_PORT" fi - __prysm_port=$(sed -n -e "s/^PRYSM_UDP_PORT=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - if [ -n "${__prysm_port}" ] && [ "${__prysm_port}" = "${value}" ]; then # just in case this is one ahead - value=$((value + 1)) - echo "Adjusted CL_QUIC_PORT to ${value} so it does not conflict with PRYSM_UDP_PORT" + __prysm_port=$(sed -n -e "s/^PRYSM_UDP_PORT=\(.*\)/\1/p" "${__env_file}.source" || true) + if [ -n "${__prysm_port}" ] && [ "${__prysm_port}" = "${__value}" ]; then # just in case this is one ahead + __value=$((__value + 1)) + echo "Adjusted CL_QUIC_PORT to ${__value} so it does not conflict with PRYSM_UDP_PORT" fi fi # Literal match intended # shellcheck disable=SC2076 - if [[ "${var}" = "RAPID_SYNC_URL" && "${value}" =~ "eth2-beacon-mainnet.infura.io" ]]; then - value="https://beaconstate.info" + if [[ "${__var}" = "RAPID_SYNC_URL" && "${__value}" =~ "eth2-beacon-mainnet.infura.io" ]]; then + __value="https://beaconstate.info" fi - if [[ "${var}" = "HOST_IP" && "${value: -1}" = ":" ]]; then - value="${value%:}" # Undo Compose V1 accommodation + if [[ "${__var}" = "HOST_IP" && "${__value: -1}" = ":" ]]; then + __value="${__value%:}" # Undo Compose V1 accommodation fi - if [[ "${var}" = "SHARE_IP" && "${value: -1}" = ":" ]]; then - value="${value%:}" # Undo Compose V1 accommodation + if [[ "${__var}" = "SHARE_IP" && "${__value: -1}" = ":" ]]; then + __value="${__value%:}" # Undo Compose V1 accommodation fi # Handle & in GRAFFITI gracefully - sed -i'.original' -e "s~^\(${var}\s*=\s*\).*\$~\1${value//&/\\&}~" "${ENV_FILE}" - else # empty value - if [ "${var}" = "CF_ZONE_ID" ]; then + sed -i'.original' -e "s~^\(${__var}\s*=\s*\).*\$~\1${__value//&/\\&}~" "${__env_file}" + else # empty __value + if [ "${__var}" = "CF_ZONE_ID" ]; then __lookup_cf_zone - if [ -n "${value}" ]; then - sed -i'.original' -e "s~^\(${var}\s*=\s*\).*\$~\1${value//&/\\&}~" "${ENV_FILE}" + if [ -n "${__value}" ]; then + sed -i'.original' -e "s~^\(${__var}\s*=\s*\).*\$~\1${__value//&/\\&}~" "${__env_file}" fi fi fi done if [ "${__keep_targets}" -eq 1 ]; then # Migrate over build targets - for var in "${TARGET_VARS[@]}"; do - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - if [ -n "${value}" ]; then - if [[ "${var}" = "DDNS_TAG" && "${__source_ver}" -lt "8" ]]; then # Switch to ddns-updater - value="v2" + for __var in "${__target_vars[@]}"; do + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}.source" || true) + if [ -n "${__value}" ]; then + if [[ "${__var}" = "DDNS_TAG" && "${__source_ver}" -lt "8" ]]; then # Switch to ddns-updater + __value="v2" fi - if [[ "${var}" = "LH_DOCKER_TAG" && "${value}" = "latest-modern" ]]; then # LH 5.2 ditched latest-modern - value="latest" + if [[ "${__var}" = "LH_DOCKER_TAG" && "${__value}" = "latest-modern" ]]; then # LH 5.2 ditched latest-modern + __value="latest" fi - if [[ "${var}" = "ERIGON_DOCKER_TAG" && "${value}" = "stable" ]]; then # Erigon ditched stable - value="v2.60.1" - fi - sed -i'.original' -e "s~^\(${var}\s*=\s*\).*$~\1${value}~" "${ENV_FILE}" +# if [[ "${__var}" = "ERIGON_DOCKER_TAG" && "${__value}" = "stable" ]]; then # Erigon switched to latest +# __value="latest" +# fi +# if [[ "${__var}" = "ERIGON_DOCKER_REPO" && "${__value}" = "thorax/erigon" ]]; then # Erigon new repo +# __value="erigontech/erigon" +# fi + sed -i'.original' -e "s~^\(${__var}\s*=\s*\).*$~\1${__value}~" "${__env_file}" fi 
done fi # Move value from old variable name(s) to new one(s) - for index in "${!OLD_VARS[@]}"; do - var=${OLD_VARS[index]} - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - if [ -n "${value}" ]; then - sed -i'.original' -e "s~^\(${NEW_VARS[index]}\s*=\s*\).*$~\1${value}~" "${ENV_FILE}" + for __index in "${!__old_vars[@]}"; do + __var=${__old_vars[__index]} + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}.source" || true) + if [ -n "${__value}" ]; then + sed -i'.original' -e "s~^\(${__new_vars[__index]}\s*=\s*\).*$~\1${__value}~" "${__env_file}" fi done # Check whether we run a CL or VC, if so nag about FEE_RECIPIENT - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # It's CL&VC, CL-only, or VC-only # I do mean to match literally # shellcheck disable=SC2076 - if [[ "${value}" =~ "prysm.yml" || "${value}" =~ "lighthouse.yml" || "${value}" =~ "teku.yml" \ - || "${value}" =~ "nimbus.yml" || "${value}" =~ "lodestar.yml" || "${value}" =~ "-cl-only.yml" \ - || "${value}" =~ "-allin1.yml" || "${value}" =~ "-vc-only.yml" ]]; then + if [[ "${__value}" =~ "prysm.yml" || "${__value}" =~ "lighthouse.yml" || "${__value}" =~ "teku.yml" \ + || "${__value}" =~ "nimbus.yml" || "${__value}" =~ "lodestar.yml" || "${__value}" =~ "-cl-only.yml" \ + || "${__value}" =~ "-allin1.yml" || "${__value}" =~ "-vc-only.yml" ]]; then # Check for rewards - var="FEE_RECIPIENT" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ -z "${value}" || ${value} != 0x* || ${#value} -ne 42 ]]; then + __var="FEE_RECIPIENT" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ -z "${__value}" || ${__value} != 0x* || ${#__value} -ne 42 ]]; then if [ "${__non_interactive:-0}" -eq 0 ]; then whiptail --msgbox "A fee recipient ETH wallet address is required in order to start the client. This is \ for priority fees and, optionally, MEV. Please enter a valid ETH address in the next screen. Refer to \ Eth Docker docs (https://ethdocker.com/About/Rewards) for more information.\n\nCAUTION: \"$__me up\" will fail if no \ valid address is set" 12 75 - query_coinbase - set_value_in_env + __query_coinbase + __set_value_in_env else echo "A fee recipient ETH wallet address is required in order to start the client. Please set one in \".env\"." echo "CAUTION: \"$__me up\" will fail if no valid address is set." @@ -1118,53 +1207,56 @@ envmigrate() { fi # User signals it's a distributed setup and not to nag - var="DISTRIBUTED" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ "${value}" = "true" || "${__non_interactive:-0}" -eq 1 ]]; then - ${__as_owner} rm "${ENV_FILE}".original + __var="DISTRIBUTED" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ "${__value}" = "true" || "${__non_interactive:-0}" -eq 1 ]]; then + ${__as_owner} rm "${__env_file}".original __during_migrate=0 return 0 fi # Check for CL and EL, nag if we have only one without the other - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Case 1 ... CL, do we have an EL? 
# I do mean to match literally # shellcheck disable=SC2076 - if [[ "${value}" =~ "prysm.yml" || "${value}" =~ "lighthouse.yml" || "${value}" =~ "teku.yml" \ - || "${value}" =~ "nimbus.yml" || "${value}" =~ "lodestar.yml" || "${value}" =~ "-cl-only.yml" \ - || "${value}" =~ "-allin1.yml" ]]; then - if [[ ! "${value}" =~ "geth.yml" && ! "${value}" =~ "besu.yml" && ! "${value}" =~ "erigon.yml" \ - && ! "${value}" =~ "nethermind.yml" && ! "${value}" =~ "nimbus-el.yml" \ - && ! "${value}" =~ "reth.yml" ]]; then + if [[ "${__value}" =~ "prysm.yml" || "${__value}" =~ "lighthouse.yml" || "${__value}" =~ "teku.yml" \ + || "${__value}" =~ "nimbus.yml" || "${__value}" =~ "lodestar.yml" || "${__value}" =~ "-cl-only.yml" \ + || "${__value}" =~ "-allin1.yml" ]]; then + if [[ ! "${__value}" =~ "geth.yml" && ! "${__value}" =~ "besu.yml" && ! "${__value}" =~ "erigon.yml" \ + && ! "${__value}" =~ "nethermind.yml" && ! "${__value}" =~ "nimbus-el.yml" \ + && ! "${__value}" =~ "reth.yml" ]]; then whiptail --msgbox "An Execution Layer client is required alongside your Consensus Layer client since \ Ethereum Merge.\n\nIf you run a distributed setup, you can shut off this nag screen by setting DISTRIBUTED=true in \ -${ENV_FILE}" 12 75 +${__env_file}" 12 75 fi # Case 2 ... EL, do we have a CL? - elif [[ "${value}" =~ "geth.yml" || "${value}" =~ "besu.yml" || "${value}" =~ "erigon.yml" \ - || "${value}" =~ "nethermind.yml" || "${value}" =~ "nimbus-el.yml" || "${value}" =~ "reth.yml" ]]; then - if [[ ! "${value}" =~ "prysm.yml" && ! "${value}" =~ "lighthouse.yml" && ! "${value}" =~ "teku.yml" \ - && ! "${value}" =~ "nimbus.yml" && ! "${value}" =~ "lodestar.yml" && ! "${value}" =~ "-cl-only.yml" \ - && ! "${value}" =~ "-allin1.yml" ]]; then + elif [[ "${__value}" =~ "geth.yml" || "${__value}" =~ "besu.yml" || "${__value}" =~ "erigon.yml" \ + || "${__value}" =~ "nethermind.yml" || "${__value}" =~ "nimbus-el.yml" || "${__value}" =~ "reth.yml" ]]; then + if [[ ! "${__value}" =~ "prysm.yml" && ! "${__value}" =~ "lighthouse.yml" && ! "${__value}" =~ "teku.yml" \ + && ! "${__value}" =~ "nimbus.yml" && ! "${__value}" =~ "lodestar.yml" && ! "${__value}" =~ "-cl-only.yml" \ + && ! "${__value}" =~ "-allin1.yml" ]]; then whiptail --msgbox "A Consensus Layer client is required alongside your Execution Layer client since \ Ethereum Merge.\n\nIf you run a distributed setup, you can shut off this nag screen by setting DISTRIBUTED=true in \ -${ENV_FILE}" 12 75 +${__env_file}" 12 75 fi fi - ${__as_owner} rm "${ENV_FILE}".original + ${__as_owner} rm "${__env_file}".original __during_migrate=0 - echo "${ENV_FILE} updated successfully" + echo "${__env_file} updated successfully" } -nag_os_version() { +__nag_os_version() { if [[ "$__distro" = "ubuntu" ]]; then if [ "${__os_major_version}" -lt 22 ]; then echo echo "Ubuntu ${__os_major_version} is older than the recommended 24.04 or 22.04 version." echo + echo "Updating is neither urgent nor required, merely recommended." + echo + echo "Guide to upgrading to 24.04: https://gist.github.com/yorickdowne/94f1e5538007f4c9d3da7b22b0dc28a4" fi fi @@ -1173,16 +1265,19 @@ nag_os_version() { echo echo "Debian ${__os_major_version} is older than the recommended 12 or 11 version." echo + echo "Updating is neither urgent nor required, merely recommended." 
+ echo + echo "Guide to upgrading to 12: https://gist.github.com/yorickdowne/ec9e2c6f4f8a2ee93193469d285cd54c" fi fi } -pull_and_build() { - dodocker system prune --force - docompose --profile tools pull - source_build - docompose --profile tools build --pull +__pull_and_build() { + __dodocker system prune --force + __docompose --profile tools pull + __source_build + __docompose --profile tools build --pull } @@ -1190,6 +1285,7 @@ pull_and_build() { # shellcheck disable=SC2120 update() { __during_update=1 + __enabled_v6=0 # Remove after Pectra if [[ $(${__as_owner} git status --porcelain) ]]; then __dirty=1 @@ -1199,8 +1295,8 @@ update() { __free_space=$(df -P "$(pwd)" | awk '/[0-9]%/{print $(NF-2)}') - re='^[0-9]+$' - if ! [[ "${__free_space}" =~ $re ]] ; then + __regex='^[0-9]+$' + if ! [[ "${__free_space}" =~ $__regex ]] ; then echo "Unable to determine free disk space. This is likely a bug." echo "df reports $(df -P "$(pwd)") and __free_space is ${__free_space}" exit 70 @@ -1220,9 +1316,9 @@ update() { if [ -z "${ETHDSECUNDO-}" ]; then set +e ${__as_owner} git config pull.rebase false - var="ETH_DOCKER_TAG" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ -z "${value}" ] || [ "${value}" = "latest" ]; then + __var="ETH_DOCKER_TAG" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ -z "${__value}" ] || [ "${__value}" = "latest" ]; then export ETHDPINNED="" __branch=$(git rev-parse --abbrev-ref HEAD) if [[ "${__branch}" =~ ^tag-v[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then @@ -1240,9 +1336,9 @@ update() { ${__as_owner} git update-index --assume-unchanged ext-network.yml fi else - export ETHDPINNED="${value}" + export ETHDPINNED="${__value}" ${__as_owner} git fetch --tags - ${__as_owner} git checkout -B "tag-${value}" "tags/${value}" + ${__as_owner} git checkout -B "tag-${__value}" "tags/${__value}" fi export GITEXITCODE=$? set -e @@ -1292,38 +1388,43 @@ update() { __non_interactive=1 fi -# envmigrate used to be called w/ arguments and checks for that +# __env_migrate used to be called w/ arguments and checks for that # shellcheck disable=SC2119 - envmigrate - pull_and_build + __env_migrate + __pull_and_build - delete_erigon - delete_reth - upgrade_postgres + __delete_erigon + __delete_reth + __upgrade_postgres echo - if [ "${__migrated}" -eq 1 ] && ! cmp -s "${ENV_FILE}" "${ENV_FILE}".source; then - ${__as_owner} cp "${ENV_FILE}".source "${ENV_FILE}".bak - ${__as_owner} rm "${ENV_FILE}".source - echo "Your ${ENV_FILE} configuration settings have been migrated to a fresh copy. You can \ -find the original contents in ${ENV_FILE}.bak." + if [ "${__migrated}" -eq 1 ] && ! cmp -s "${__env_file}" "${__env_file}".source; then + ${__as_owner} cp "${__env_file}".source "${__env_file}".bak + ${__as_owner} rm "${__env_file}".source + echo "Your ${__env_file} configuration settings have been migrated to a fresh copy. You can \ +find the original contents in ${__env_file}.bak." if [ "${__keep_targets}" -eq 0 ]; then echo "NB: If you made changes to the source or binary build targets, these have been \ reset to defaults." 
fi echo - echo "List of changes made to ${ENV_FILE} during migration - current on left, original on right:" + echo "List of changes made to ${__env_file} during migration - current on left, original on right:" echo - diff -y --suppress-common-lines "${ENV_FILE}" "${ENV_FILE}".bak || true + diff -y --suppress-common-lines "${__env_file}" "${__env_file}".bak || true else - echo "No changes made to ${ENV_FILE} during update" - if [ -f "${ENV_FILE}".source ]; then - ${__as_owner} rm "${ENV_FILE}".source || true + echo "No changes made to ${__env_file} during update" + if [ -f "${__env_file}".source ]; then + ${__as_owner} rm "${__env_file}".source || true fi fi echo if [ -z "${GITEXITCODE+x}" ] || [ "${GITEXITCODE}" -eq 0 ]; then - echo "An \"$__me up\" command will start using the new images and configuration." + if [ "${__enabled_v6}" -eq 0 ]; then # Remove after Pectra + echo "An \"$__me up\" command will start using the new images and configuration." + else + echo "IPv4/6 dual-stack support has been enabled." + echo "An \"$__me restart\" command will start using the new images and configuration." + fi else echo "WARNING" echo @@ -1334,7 +1435,7 @@ reset to defaults." echo "The current partial update risks startup failure." fi - nag_os_version + __nag_os_version unset ETHDSECUNDO unset GITEXITCODE @@ -1358,34 +1459,48 @@ reset to defaults." resync-execution() { # Check for EL client - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) - case "${value}" in + case "${__value}" in *erigon.yml* ) __el_volume='erigon-el-data'; __el_client="erigon";; - *geth.yml* ) __el_volume='geth-eth1-data'; __el_client="geth";; + *geth.yml* ) __el_volume='geth-el-data'; __el_client="geth";; *reth.yml* ) __el_volume='reth-el-data'; __el_client="reth";; - *besu.yml* ) __el_volume='besu-eth1-data'; __el_client="besu";; - *nethermind.yml* ) __el_volume='nm-eth1-data'; __el_client="nethermind";; + *besu.yml* ) __el_volume='besu-el-data'; __el_client="besu";; + *nethermind.yml* ) __el_volume='nethermind-el-data'; __el_client="nethermind";; * ) echo "You do not appear to be running an execution layer client. Nothing to do."; return 0;; esac - if ! dodocker volume ls -q | grep -q "$(basename "$(realpath .)")[_-]${__el_volume}"; then + if ! __dodocker volume ls -q | grep -q "$(basename "$(realpath .)")[_-]${__el_volume}"; then echo "Did not find Docker volume for ${__el_client}. Nothing to do." return 0 fi echo "This will stop ${__el_client} and delete its database to force a resync." - read -rp "WARNING - resync may take days. Do you wish to continue? (No/yes) " yn - case $yn in + read -rp "WARNING - resync may take days. Do you wish to continue? 
(No/yes) " __yn + case $__yn in [Yy][Ee][Ss] ) ;; * ) echo "Aborting."; exit 130;; esac __el_volume="$(basename "$(realpath .)")_${__el_volume}" echo "Stopping ${__el_client} container" - docompose stop execution && docompose rm -f execution - dodocker volume rm "$(dodocker volume ls -q -f "name=${__el_volume}")" + __docompose stop execution && __docompose rm -f execution + __dodocker volume rm "$(__dodocker volume ls -q -f "name=${__el_volume}")" + __volume_id="" + if [[ "${__el_volume}" =~ geth-el-data ]]; then + __legacy_volume="$(basename "$(realpath .)")_geth-eth1-data" + __volume_id="$(__dodocker volume ls -q -f "name=${__legacy_volume}")" + elif [[ "${__el_volume}" =~ besu-el-data ]]; then + __legacy_volume="$(basename "$(realpath .)")_besu-eth1-data" + __volume_id="$(__dodocker volume ls -q -f "name=${__legacy_volume}")" + elif [[ "${__el_volume}" =~ nethermind-el-data ]]; then + __legacy_volume="$(basename "$(realpath .)")_nm-eth1-data" + __volume_id="$(__dodocker volume ls -q -f "name=${__legacy_volume}")" + fi + if [ -n "${__volume_id}" ]; then + __dodocker volume rm "${__volume_id}" + fi echo echo "${__el_client} stopped and database deleted." echo @@ -1396,10 +1511,10 @@ resync-execution() { resync-consensus() { # Check for CL client - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) - case "${value}" in + case "${__value}" in *lighthouse.yml* | *lighthouse-cl-only.yml* ) __cl_volume='lhconsensus-data'; __cl_client="lighthouse";; *teku-allin1.yml* ) __cl_volume='wipe-db'; __cl_client="teku";; *teku.yml* | *teku-cl-only.yml* ) __cl_volume='tekuconsensus-data'; __cl_client="teku";; @@ -1412,43 +1527,43 @@ resync-consensus() { * ) echo "You do not appear to be running a consensus layer client. Nothing to do."; return;; esac - if [ ! "${__cl_volume}" = "wipe-db" ] && ! dodocker volume ls -q \ + if [ ! "${__cl_volume}" = "wipe-db" ] && ! __dodocker volume ls -q \ | grep -q "$(basename "$(realpath .)")[_-]${__cl_volume}"; then echo "Did not find Docker volume for ${__cl_client}. Nothing to do." return 0 fi # Can we checkpoint sync? - var="RAPID_SYNC_URL" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="RAPID_SYNC_URL" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) echo "This will stop ${__cl_client} and delete its database to force a resync." - if [ -z "${value}" ]; then - read -rp "WARNING - RAPID_SYNC_URL not set, resync may take days. Do you wish to continue? (No/yes) " yn + if [ -z "${__value}" ]; then + read -rp "WARNING - RAPID_SYNC_URL not set, resync may take days. Do you wish to continue? (No/yes) " __yn else - read -rp "RAPID_SYNC_URL set, resync should finish in minutes. Do you wish to continue? (No/yes) " yn + read -rp "RAPID_SYNC_URL set, resync should finish in minutes. Do you wish to continue? 
(No/yes) " __yn fi - case $yn in + case $__yn in [Yy][Ee][Ss] ) ;; * ) echo "Aborting."; exit 130;; esac echo "Stopping ${__cl_client} container" - docompose stop consensus && docompose rm -f consensus + __docompose stop consensus && __docompose rm -f consensus if [ "${__cl_volume}" = "wipe-db" ]; then - docompose run --rm wipe-db + __docompose run --rm wipe-db else __cl_volume="$(basename "$(realpath .)")_${__cl_volume}" - dodocker volume rm "$(dodocker volume ls -q -f "name=${__cl_volume}")" + __dodocker volume rm "$(__dodocker volume ls -q -f "name=${__cl_volume}")" __volume_id="" if [[ "${__cl_volume}" =~ lhconsensus-data ]]; then __legacy_volume="$(basename "$(realpath .)")_lhbeacon-data" - __volume_id="$(dodocker volume ls -q -f "name=${__legacy_volume}")" + __volume_id="$(__dodocker volume ls -q -f "name=${__legacy_volume}")" elif [[ "${__cl_volume}" =~ prysmconsensus-data ]]; then __legacy_volume="$(basename "$(realpath .)")_prysmbeacon-data" - __volume_id="$(dodocker volume ls -q -f "name=${__legacy_volume}")" + __volume_id="$(__dodocker volume ls -q -f "name=${__legacy_volume}")" fi if [ -n "${__volume_id}" ]; then - dodocker volume rm "${__volume_id}" + __dodocker volume rm "${__volume_id}" fi fi echo @@ -1460,17 +1575,24 @@ resync-consensus() { attach-geth() { - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - if ! grep -q '^COMPOSE_FILE=.*geth\.yml' "${ENV_FILE}" 2>/dev/null ; then + if ! grep -q '^COMPOSE_FILE=.*geth\.yml' "${__env_file}" 2>/dev/null ; then echo "You do not appear to be using Geth, aborting." exit 1 fi + __legacy_datadir=$(__dodocker run --rm -v "$(__dodocker volume ls -q -f \ + "name=$(basename "$(realpath .)")[_-]geth-eth1-data")":"/var/lib/goethereum" \ + alpine:3 sh -c 'if [ -d "/var/lib/goethereum/geth/chaindata" ]; then echo true; else echo false; fi') - docompose exec -it execution bash -c "geth attach /var/lib/goethereum/geth.ipc" + if [ "${__legacy_datadir}" = "true" ]; then + __docompose exec -it execution bash -c "geth attach /var/lib/goethereum/geth.ipc" + else + __docompose exec -it execution bash -c "geth attach /var/lib/geth/geth.ipc" + fi } @@ -1496,48 +1618,48 @@ prune-besu() { __non_interactive=1 fi - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - if ! grep -q '^COMPOSE_FILE=.*besu\.yml' "${ENV_FILE}" 2>/dev/null ; then + if ! grep -q '^COMPOSE_FILE=.*besu\.yml' "${__env_file}" 2>/dev/null ; then echo "You do not appear to be using Besu, aborting." exit 1 fi # Check for archive node - var="ARCHIVE_NODE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ "${value}" = "true" ]]; then + __var="ARCHIVE_NODE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ "${__value}" = "true" ]]; then echo "Besu is an archive node: Aborting." exit 1 fi - rpc_line=$(grep '^EL_RPC_PORT=' "${ENV_FILE}") - regex='^EL_RPC_PORT=([0-9]+)' - if [[ ! "${rpc_line}" =~ ${regex} ]]; then + __rpc_line=$(grep '^EL_RPC_PORT=' "${__env_file}") + __regex='^EL_RPC_PORT=([0-9]+)' + if [[ ! "${__rpc_line}" =~ ${__regex} ]]; then echo "Unable to determine EL_RPC_PORT, aborting." 
exit 1 else - rpc_port="${BASH_REMATCH[1]}" + __rpc_port="${BASH_REMATCH[1]}" fi set +e - sync_status=$(docompose exec -T execution wget -qO- "http://localhost:$rpc_port" \ + __sync_status=$(__docompose exec -T execution wget -qO- "http://localhost:$__rpc_port" \ --header 'Content-Type: application/json' --post-data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}') - exitstatus=$? + __exitstatus=$? set -e - if [ $exitstatus -ne 0 ]; then + if [ $__exitstatus -ne 0 ]; then echo "Unable to connect to Besu: Is it running?" - echo "Output: ${sync_status}" + echo "Output: ${__sync_status}" echo "Aborting." exit 1 fi - if [[ ! "${sync_status}" =~ "false" ]]; then + if [[ ! "${__sync_status}" =~ "false" ]]; then echo "Besu is not done syncing yet. Sync status:" - echo "${sync_status}" + echo "${__sync_status}" echo echo "Aborting." exit 1 @@ -1545,8 +1667,8 @@ prune-besu() { if [ $__non_interactive = 0 ]; then while true; do - read -rp "WARNING - this will stop Besu and prune its trie-logs. Do you wish to continue? (No/Yes) " yn - case $yn in + read -rp "WARNING - this will stop Besu and prune its trie-logs. Do you wish to continue? (No/Yes) " __yn + case $__yn in [Yy][Ee][Ss] ) break;; * ) echo "Aborting, no changes made"; exit 130;; esac @@ -1556,8 +1678,8 @@ prune-besu() { echo echo "Starting Besu prune" echo - docompose run --rm set-prune-marker "touch /var/lib/besu/prune-marker" - docompose stop execution && docompose rm -f execution + __docompose run --rm set-prune-marker "touch /var/lib/besu/prune-marker" + __docompose stop execution && __docompose rm -f execution start echo echo "Prune is running, you can observe it with '$__me logs -f execution'" @@ -1589,48 +1711,48 @@ prune-reth() { __non_interactive=1 fi - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - if ! grep -q '^COMPOSE_FILE=.*reth\.yml' "${ENV_FILE}" 2>/dev/null ; then + if ! grep -q '^COMPOSE_FILE=.*reth\.yml' "${__env_file}" 2>/dev/null ; then echo "You do not appear to be using Reth, aborting." exit 1 fi # Check for archive node - var="ARCHIVE_NODE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ "${value}" = "true" ]]; then + __var="ARCHIVE_NODE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ "${__value}" = "true" ]]; then echo "Reth is an archive node: Aborting." exit 1 fi - rpc_line=$(grep '^EL_RPC_PORT=' "${ENV_FILE}") - regex='^EL_RPC_PORT=([0-9]+)' - if [[ ! "${rpc_line}" =~ ${regex} ]]; then + __rpc_line=$(grep '^EL_RPC_PORT=' "${__env_file}") + __regex='^EL_RPC_PORT=([0-9]+)' + if [[ ! "${__rpc_line}" =~ ${__regex} ]]; then echo "Unable to determine EL_RPC_PORT, aborting." exit 1 else - rpc_port="${BASH_REMATCH[1]}" + __rpc_port="${BASH_REMATCH[1]}" fi set +e - sync_status=$(docompose exec -T execution wget -qO- "http://localhost:$rpc_port" \ + __sync_status=$(__docompose exec -T execution wget -qO- "http://localhost:$__rpc_port" \ --header 'Content-Type: application/json' --post-data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}') - exitstatus=$? + __exitstatus=$? set -e - if [ $exitstatus -ne 0 ]; then + if [ $__exitstatus -ne 0 ]; then echo "Unable to connect to Reth: Is it running?" - echo "Output: ${sync_status}" + echo "Output: ${__sync_status}" echo "Aborting." exit 1 fi - if [[ ! "${sync_status}" =~ "false" ]]; then + if [[ ! 
"${__sync_status}" =~ "false" ]]; then echo "Reth is not done syncing yet. Sync status:" - echo "${sync_status}" + echo "${__sync_status}" echo echo "Aborting." exit 1 @@ -1638,8 +1760,8 @@ prune-reth() { if [ $__non_interactive = 0 ]; then while true; do - read -rp "WARNING - this will stop Reth and prune its database. Do you wish to continue? (No/Yes) " yn - case $yn in + read -rp "WARNING - this will stop Reth and prune its database. Do you wish to continue? (No/Yes) " __yn + case $__yn in [Yy][Ee][Ss] ) break;; * ) echo "Aborting, no changes made"; exit 130;; esac @@ -1649,8 +1771,8 @@ prune-reth() { echo echo "Starting Reth prune" echo - docompose run --rm set-prune-marker "touch /var/lib/reth/prune-marker" - docompose stop execution && docompose rm -f execution + __docompose run --rm set-prune-marker "touch /var/lib/reth/prune-marker" + __docompose stop execution && __docompose rm -f execution start echo echo "Prune is running, you can observe it with '$__me logs -f execution'" @@ -1682,28 +1804,28 @@ prune-nethermind() { __non_interactive=1 fi - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - if ! grep -q '^COMPOSE_FILE=.*nethermind\.yml' "${ENV_FILE}" 2>/dev/null ; then + if ! grep -q '^COMPOSE_FILE=.*nethermind\.yml' "${__env_file}" 2>/dev/null ; then echo "You do not appear to be using Nethermind, aborting." exit 1 fi # Check for archive node - var="ARCHIVE_NODE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ "${value}" = "true" ]]; then + __var="ARCHIVE_NODE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ "${__value}" = "true" ]]; then echo "Nethermind is an archive node: Aborting." exit 1 fi __get_docker_free_space - var="NETWORK" - NETWORK=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="NETWORK" + NETWORK=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ -n "$(dodocker run --rm -v "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]nm-eth1-data")":"/var/lib/nethermind" \ alpine:3 ls "/var/lib/nethermind/nethermind_db/${NETWORK}/pathState")" ]; then @@ -1728,52 +1850,52 @@ prune-nethermind() { exit 1 fi - rpc_line=$(grep '^EL_RPC_PORT=' "${ENV_FILE}") - regex='^EL_RPC_PORT=([0-9]+)' - if [[ ! "${rpc_line}" =~ ${regex} ]]; then + __rpc_line=$(grep '^EL_RPC_PORT=' "${__env_file}") + __regex='^EL_RPC_PORT=([0-9]+)' + if [[ ! "${__rpc_line}" =~ ${__regex} ]]; then echo "Unable to determine EL_RPC_PORT, aborting." exit 1 else - rpc_port="${BASH_REMATCH[1]}" + __rpc_port="${BASH_REMATCH[1]}" fi set +e - sync_status=$(docompose exec -T execution wget -qO- "http://localhost:$rpc_port" --header \ + __sync_status=$(__docompose exec -T execution wget -qO- "http://localhost:$__rpc_port" --header \ 'Content-Type: application/json' --post-data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}') - exitstatus=$? + __exitstatus=$? set -e - if [ $exitstatus -ne 0 ]; then + if [ $__exitstatus -ne 0 ]; then echo "Unable to connect to Nethermind: Is it running?" - echo "Output: ${sync_status}" + echo "Output: ${__sync_status}" echo "Aborting." exit 1 fi - if [[ ! "${sync_status}" =~ "false" ]]; then + if [[ ! "${__sync_status}" =~ "false" ]]; then echo "Nethermind is not done syncing yet. Sync status:" - echo "${sync_status}" + echo "${__sync_status}" echo echo "Aborting." 
exit 1 fi - var="AUTOPRUNE_NM" - auto_prune=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="AUTOPRUNE_NM" + __auto_prune=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ $__non_interactive = 0 ]; then while true; do - if [ "${auto_prune}" = true ]; then + if [ "${__auto_prune}" = true ]; then if [ "${NETWORK}" = "mainnet" ] || [ "${NETWORK}" = "gnosis" ]; then - threshold="350" + __threshold="350" else - threshold="50" + __threshold="50" fi - echo "Nethermind should auto-prune below ${threshold} GiB free. Check logs with \"$__me logs -f --tail 500 \ + echo "Nethermind should auto-prune below ${__threshold} GiB free. Check logs with \"$__me logs -f --tail 500 \ execution | grep Full\" to see whether it is." fi echo "Instead of pruning, consider resyncing Nethermind to use path-based storage, with \"$__me resync-execution\"." - read -rp "WARNING - this will prune Nethermind's database in the background. Do you wish to continue? (No/Yes) " yn - case $yn in + read -rp "WARNING - this will prune Nethermind's database in the background. Do you wish to continue? (No/Yes) " __yn + case $__yn in [Yy][Ee][Ss] ) break;; * ) echo "Aborting, no changes made"; exit 130;; esac @@ -1785,21 +1907,21 @@ execution | grep Full\" to see whether it is." echo set +e - prune_result=$(docompose exec -T execution wget -qO- "http://localhost:1337" --header \ + __prune_result=$(__docompose exec -T execution wget -qO- "http://localhost:1337" --header \ 'Content-Type: application/json' --post-data '{"jsonrpc":"2.0","method":"admin_prune","params":[],"id":1}') - exitstatus=$? + __exitstatus=$? set -e - if [ $exitstatus -ne 0 ]; then - echo "Unable to start prune, error code ${exitstatus}. This is likely a bug." - echo "An attempt to run it returned this: ${prune_result}" + if [ $__exitstatus -ne 0 ]; then + echo "Unable to start prune, error code ${__exitstatus}. This is likely a bug." + echo "An attempt to run it returned this: ${__prune_result}" # shellcheck disable=SC2028 echo 'The command attempted was: docker compose run --rm set-prune-marker "curl -s \ --data {\\\"method\\\":\\\"admin_prune\\\",\\\"params\\\":[],\\\"id\\\":1,\\\"jsonrpc\\\":\\\"2.0\\\"} \ -H Content-Type:\ application/json http://execution:8545"' - exit ${exitstatus} + exit ${__exitstatus} fi - echo "Nethermind returns ${prune_result}" - if [[ ! "${prune_result}" =~ [Ss]tarting ]]; then + echo "Nethermind returns ${__prune_result}" + if [[ ! "${__prune_result}" =~ [Ss]tarting ]]; then echo "Unable to start prune. This is likely a bug." exit 70 fi @@ -1835,51 +1957,51 @@ prune-lighthouse() { __non_interactive=1 fi - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "lighthouse.yml" && ! "${value}" =~ "lighthouse-cl-only.yml" ]]; then + if [[ ! "${__value}" =~ "lighthouse.yml" && ! "${__value}" =~ "lighthouse-cl-only.yml" ]]; then echo "You do not appear to be using Lighthouse, aborting." 
exit 1 fi # Check for archive node - var="ARCHIVE_NODE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ "${value}" = "true" ]]; then + __var="ARCHIVE_NODE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ "${__value}" = "true" ]]; then echo "Lighthouse is an archive node: Aborting." exit 1 fi - rpc_line=$(grep '^CL_REST_PORT=' "${ENV_FILE}") - regex='^CL_REST_PORT=([0-9]+)' - if [[ ! "${rpc_line}" =~ ${regex} ]]; then + __rpc_line=$(grep '^CL_REST_PORT=' "${__env_file}") + __regex='^CL_REST_PORT=([0-9]+)' + if [[ ! "${__rpc_line}" =~ ${__regex} ]]; then echo "Unable to determine CL_REST_PORT, aborting." exit 1 else - rpc_port="${BASH_REMATCH[1]}" + __rpc_port="${BASH_REMATCH[1]}" fi set +e - sync_status=$(docompose exec -T consensus wget -qO- "http://localhost:$rpc_port/eth/v1/node/syncing") - exitstatus=$? + __sync_status=$(__docompose exec -T consensus wget -qO- "http://localhost:$__rpc_port/eth/v1/node/syncing") + __exitstatus=$? set -e - if [ $exitstatus -ne 0 ]; then + if [ $__exitstatus -ne 0 ]; then echo "Unable to connect to Lighthouse: Is it running?" - echo "Output: ${sync_status}" + echo "Output: ${__sync_status}" echo "Aborting." exit 1 fi - if [[ "${sync_status}" =~ "true" ]]; then # Avoid jq - if el_offline or is_optimistic or is_syncing, don't proceed + if [[ "${__sync_status}" =~ "true" ]]; then # Avoid jq - if el_offline or is_optimistic or is_syncing, don't proceed echo "Lighthouse is not done syncing yet. Sync status:" - echo "${sync_status}" + echo "${__sync_status}" echo echo "Aborting." exit 1 @@ -1887,8 +2009,8 @@ prune-lighthouse() { if [ $__non_interactive = 0 ]; then while true; do - read -rp "WARNING - this will stop Lighthouse and prune its state. Do you wish to continue? (No/Yes) " yn - case $yn in + read -rp "WARNING - this will stop Lighthouse and prune its state. Do you wish to continue? (No/Yes) " __yn + case $__yn in [Yy][Ee][Ss] ) break;; * ) echo "Aborting, no changes made"; exit 130;; esac @@ -1898,8 +2020,8 @@ prune-lighthouse() { echo echo "Starting Lighthouse prune" echo - docompose run --rm set-cl-prune-marker "touch /var/lib/lighthouse/beacon/prune-marker" - docompose stop consensus && docompose rm -f consensus + __docompose run --rm set-cl-prune-marker "touch /var/lib/lighthouse/beacon/prune-marker" + __docompose stop consensus && __docompose rm -f consensus start echo echo "Prune is running, you can observe it with '$__me logs -f consensus'" @@ -1909,19 +2031,19 @@ prune-lighthouse() { } -prep-keyimport() { - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." +__prep-keyimport() { + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "prysm.yml" ]] && [[ ! "${value}" =~ "lighthouse.yml" ]] && [[ ! "${value}" =~ "teku.yml" ]] \ - && [[ ! "${value}" =~ "nimbus.yml" ]] && [[ ! "${value}" =~ "lodestar.yml" ]] && \ - [[ ! "${value}" =~ "-allin1.yml" ]] && [[ ! "${value}" =~ "vc-only.yml" ]]; then + if [[ ! "${__value}" =~ "prysm.yml" ]] && [[ ! "${__value}" =~ "lighthouse.yml" ]] && [[ ! "${__value}" =~ "teku.yml" ]] \ + && [[ ! "${__value}" =~ "nimbus.yml" ]] && [[ ! "${__value}" =~ "lodestar.yml" ]] && \ + [[ ! 
"${__value}" =~ "-allin1.yml" ]] && [[ ! "${__value}" =~ "vc-only.yml" ]]; then echo "You do not appear to be running a validator client. Aborting." exit 1 fi @@ -1949,18 +2071,18 @@ prep-keyimport() { continue fi IFS=$'\n' - files=$(find "$2" -maxdepth 1 -name '*.json') + __files=$(find "$2" -maxdepth 1 -name '*.json') # Unset restores default unset IFS - if [ -z "$files" ]; then + if [ -z "$__files" ]; then echo "No .json files found in $2, aborting" exit 1 fi IFS=$'\n' - files=$(find ./.eth/validator_keys -maxdepth 1 -name '*.json') + __files=$(find ./.eth/validator_keys -maxdepth 1 -name '*.json') # Unset restores default unset IFS - if [ -n "$files" ]; then + if [ -n "$__files" ]; then ${__as_owner} mkdir -p ./.eth/validator_keys/keybackup ${__as_owner} mv -uf ./.eth/validator_keys/*.json ./.eth/validator_keys/keybackup ${__as_owner} rm -f ./.eth/validator_keys/*.json @@ -1994,70 +2116,70 @@ prep-keyimport() { __i_haz_ethdo() { - if [ ! -f "${ENV_FILE}" ]; then + if [ ! -f "${__env_file}" ]; then echo "${__project_name} has not been configured. Please run $__me config first." exit 0 fi - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "ethdo.yml" ]]; then - echo "Please edit the ${ENV_FILE} file and make sure \":ethdo.yml\" is added to the \"COMPOSE_FILE\" line" - echo "For example, \"nano ${ENV_FILE}\" will open the nano text editor with the \"${ENV_FILE}\" file loaded." + if [[ ! "${__value}" =~ "ethdo.yml" ]]; then + echo "Please edit the ${__env_file} file and make sure \":ethdo.yml\" is added to the \"COMPOSE_FILE\" line" + echo "For example, \"nano ${__env_file}\" will open the nano text editor with the \"${__env_file}\" file loaded." echo "Without it, this step cannot be run" echo - read -rp "Do you want me to make this change for you? (n/y)" yn - case $yn in + read -rp "Do you want me to make this change for you? (n/y)" __yn + case $__yn in [Yy] );; * ) exit 130;; esac - if [ -n "${value}" ]; then - COMPOSE_FILE="${value}:ethdo.yml" + if [ -n "${__value}" ]; then + COMPOSE_FILE="${__value}:ethdo.yml" else COMPOSE_FILE="ethdo.yml" - echo "You do not have a CL in ${__project_name}. Please make sure CL_NODE in ${ENV_FILE} points at an available one" + echo "You do not have a CL in ${__project_name}. Please make sure CL_NODE in ${__env_file} points at an available one" fi - set_value_in_env + __set_value_in_env echo "Your COMPOSE_FILE now reads ${COMPOSE_FILE}" fi } __i_haz_web3signer() { - if [ ! -f "${ENV_FILE}" ]; then + if [ ! -f "${__env_file}" ]; then echo "${__project_name} has not been configured. Please run $__me config first." exit 0 fi - var="WEB3SIGNER" - __w3s=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="WEB3SIGNER" + __w3s=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ ! "${__w3s}" = "true" ]; then return 0 fi - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ ! 
"${value}" =~ "web3signer.yml" ]]; then - echo "WEB3SIGNER=true in ${ENV_FILE}, but web3signer.yml is not in use" - echo "Please edit the ${ENV_FILE} file and make sure \":web3signer.yml\" is added to the \"COMPOSE_FILE\" line" - echo "For example, \"nano ${ENV_FILE}\" will open the nano text editor with the \"${ENV_FILE}\" file loaded." + if [[ ! "${__value}" =~ "web3signer.yml" ]]; then + echo "WEB3SIGNER=true in ${__env_file}, but web3signer.yml is not in use" + echo "Please edit the ${__env_file} file and make sure \":web3signer.yml\" is added to the \"COMPOSE_FILE\" line" + echo "For example, \"nano ${__env_file}\" will open the nano text editor with the \"${__env_file}\" file loaded." echo "Without it, $__me keys cannot be run" echo - read -rp "Do you want me to make this change for you? (n/y)" yn - case $yn in + read -rp "Do you want me to make this change for you? (n/y)" __yn + case $__yn in [Yy] );; * ) exit 130;; esac - if [ -n "${value}" ]; then - COMPOSE_FILE="${value}:web3signer.yml" + if [ -n "${__value}" ]; then + COMPOSE_FILE="${__value}:web3signer.yml" else echo "You do not have a validator client in ${__project_name}. web3signer cannot be used without one." exit 1 fi - set_value_in_env + __set_value_in_env echo "Your COMPOSE_FILE now reads ${COMPOSE_FILE}" fi } @@ -2065,7 +2187,7 @@ __i_haz_web3signer() { __i_haz_keys_service() { # This caused issues and is currently not being called - if ! docompose --profile tools config --services | grep -q validator-keys; then + if ! __docompose --profile tools config --services | grep -q validator-keys; then if [[ "${1:-}" = "silent" ]]; then return 1 fi @@ -2083,8 +2205,12 @@ __i_haz_keys_service() { __keys_usage() { echo "Call keymanager with an ACTION, one of:" + echo " create-for-csm" + echo " Create keys for Lido CSM" echo " list" - echo " Lists the public keys of all validators currently loaded into your validator client" + echo " Lists the public keys of all validators currently loaded into your validator client" + echo " count" + echo " Counts the keys currently loaded into your validator client" echo " import" echo " Import all keystore*.json in .eth/validator_keys while loading slashing protection data" echo " in slashing_protection*.json files that match the public key(s) of the imported validator(s)" @@ -2097,7 +2223,7 @@ __keys_usage() { echo echo " get-recipient 0xPUBKEY" echo " List fee recipient set for the validator with public key 0xPUBKEY" - echo " Validators will use FEE_RECIPIENT in ${ENV_FILE} by default, if not set individually" + echo " Validators will use FEE_RECIPIENT in ${__env_file} by default, if not set individually" echo " set-recipient 0xPUBKEY 0xADDRESS" echo " Set individual fee recipient for the validator with public key 0xPUBKEY" echo " delete-recipient 0xPUBKEY" @@ -2160,52 +2286,56 @@ keys() { if [ "${1:-}" = "import" ]; then #__i_haz_keys_service shift - prep-keyimport "$@" - docompose run --rm -e OWNER_UID="${__owner_uid}" validator-keys import "${__args}" + __prep-keyimport "$@" + __docompose run --rm -e OWNER_UID="${__owner_uid}" validator-keys import "${__args}" elif [ "${1:-}" = "create-prysm-wallet" ]; then - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "prysm.yml" ]] && [[ ! "${value}" =~ "prysm-vc-only.yml" ]]; then + if [[ ! "${__value}" =~ "prysm.yml" ]] && [[ ! 
"${__value}" =~ "prysm-vc-only.yml" ]]; then echo "You do not appear to be using a Prysm validator. Aborting." exit 1 fi - if docompose run --rm create-wallet; then - docompose stop validator - docompose rm --force validator + if __docompose run --rm create-wallet; then + __docompose stop validator + __docompose rm --force validator up fi + elif [ "${1:-}" = "create-for-csm" ]; then + var="NETWORK" + NETWORK=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __query_lido_keys_generation elif [ "${1:-}" = "prepare-address-change" ]; then __i_haz_ethdo echo "Generating offline prep file" set +e - docompose run --rm ethdo validator credentials set --prepare-offline - exitstatus=$? + __docompose run --rm ethdo validator credentials set --prepare-offline + __exitstatus=$? set -e - if [ "${exitstatus}" -ne 0 ]; then + if [ "${__exitstatus}" -ne 0 ]; then echo "Running ethdo failed, unfortunately. Is the CL running and synced?" echo "Please try again after fixing root cause. Aborting." exit 1 fi echo echo "Downloading ethdo" - REPO="wealdtech/ethdo"; \ - wget -q -O- https://api.github.com/repos/${REPO}/releases/latest | grep "browser_download_url.*linux-amd64.tar.gz" \ + __repo="wealdtech/ethdo"; \ + wget -q -O- https://api.github.com/repos/${__repo}/releases/latest | grep "browser_download_url.*linux-amd64.tar.gz" \ | head -1 \ | cut -d : -f 2,3 \ | tr -d \" \ | wget -qi- -O- \ | ${__as_owner} tar zxf - -C ./.eth/ethdo/ \ - || echo "-> Could not download the latest version of '${REPO}' for amd64." + || echo "-> Could not download the latest version of '${__repo}' for amd64." ${__as_owner} mkdir -p ./.eth/ethdo/arm64 - wget -q -O- https://api.github.com/repos/${REPO}/releases/latest | grep "browser_download_url.*linux-arm64.tar.gz" \ + wget -q -O- https://api.github.com/repos/${__repo}/releases/latest | grep "browser_download_url.*linux-arm64.tar.gz" \ | head -1 \ | cut -d : -f 2,3 \ | tr -d \" \ | wget -qi- -O- \ | ${__as_owner} tar zxf - -C ./.eth/ethdo/arm64 \ - || echo "-> Could not download the latest version of '${REPO}' for arm64." + || echo "-> Could not download the latest version of '${__repo}' for arm64." ${__as_owner} mv ./.eth/ethdo/arm64/ethdo ./.eth/ethdo/ethdo-arm64 ${__as_owner} rm -rf ./.eth/ethdo/arm64 echo @@ -2213,7 +2343,7 @@ keys() { echo "Please see https://ethdocker.com/Support/ChangingWithdrawalCredentials for details" elif [ "${1:-}" = "send-address-change" ]; then __i_haz_ethdo - docompose run --rm ethdo validator credentials set + __docompose run --rm ethdo validator credentials set elif [ "${1:-}" = "sign-exit" ] && [ "${2:-}" = "from-keystore" ]; then __i_haz_ethdo @@ -2244,8 +2374,8 @@ keys() { if [ "$__num_files" -gt 1 ]; then while true; do - read -rp "Do all validator keys have the same password? (y/n) " yn - case $yn in + read -rp "Do all validator keys have the same password? 
(y/n) " __yn + case $__yn in [Yy]* ) __justone=1; break;; [Nn]* ) __justone=0; break;; * ) echo "Please answer yes or no.";; @@ -2271,8 +2401,8 @@ keys() { fi fi - created=0 - failed=0 + __created=0 + __failed=0 for __keyfile in .eth/validator_keys/keystore-*.json; do [ -f "${__keyfile}" ] || continue # Should always evaluate true - just in case if [ "${__justone}" -eq 0 ]; then @@ -2301,40 +2431,40 @@ keys() { set +e # __offline may be empty, don't quote it # shellcheck disable=SC2086 - __json=$(docompose run --rm ethdo validator exit --validator "${__keyfile}" --json --timeout 2m \ + __json=$(__docompose run --rm ethdo validator exit --validator "${__keyfile}" --json --timeout 2m \ --passphrase "${__password}" ${__offline}) - exitstatus=$? - if [ "${exitstatus}" -eq 0 ]; then + __exitstatus=$? + if [ "${__exitstatus}" -eq 0 ]; then echo "${__json}" >".eth/exit_messages/${__pubkey::10}--${__pubkey:90}-exit.json" # shellcheck disable=SC2320 - exitstatus=$? - if [ "${exitstatus}" -eq 0 ]; then + __exitstatus=$? + if [ "${__exitstatus}" -eq 0 ]; then echo "Creating an exit message for validator ${__pubkey} into file \ ./.eth/exit_messages/${__pubkey::10}--${__pubkey:90}-exit.json succeeded" - (( created++ )) + (( __created++ )) else echo "Error writing exit json to file ./.eth/exit_messages/${__pubkey::10}--${__pubkey:90}-exit.json" - (( failed++ )) + (( __failed++ )) fi else echo "Creating an exit message for validator ${__pubkey} from file ${__keyfile} failed" - (( failed++ )) + (( __failed++ )) fi set -e done echo - echo "Created pre-signed exit messages for ${created} validators" - if [ "${created}" -gt 0 ]; then + echo "Created pre-signed exit messages for ${__created} validators" + if [ "${__created}" -gt 0 ]; then echo "You can find them in ./.eth/exit_messages" fi - if [ "${failed}" -gt 0 ]; then - echo "Failed for ${failed} validators" + if [ "${__failed}" -gt 0 ]; then + echo "Failed for ${__failed} validators" fi #elif [ "${1:-}" = "send-exit" ] && ! __i_haz_keys_service silent; then elif [ "${1:-}" = "send-exit" ]; then - var="CL_NODE" - CL_NODE=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - network_name="$(docompose config | awk ' + __var="CL_NODE" + CL_NODE=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + __network_name="$(__docompose config | awk ' BEGIN { found_networks=0; found_default=0; @@ -2352,19 +2482,19 @@ keys() { exit; } ')" - if ! dodocker image ls --format "{{.Repository}}:{{.Tag}}" | grep -q "vc-utils:local"; then + if ! __dodocker image ls --format "{{.Repository}}:{{.Tag}}" | grep -q "vc-utils:local"; then if ! dpkg-query -W -f='${Status}' docker-ce 2>/dev/null | grep -q "ok installed"; then - dodocker build -t vc-utils:local ./vc-utils + __dodocker build -t vc-utils:local ./vc-utils else if ! 
dpkg-query -W -f='${Status}' docker-buildx-plugin 2>/dev/null | grep -q "ok installed"; then ${__auto_sudo} apt-get update && ${__auto_sudo} apt-get install -y docker-buildx-plugin fi - dodocker buildx build -t vc-utils:local ./vc-utils + __dodocker buildx build -t vc-utils:local ./vc-utils fi fi - dodocker run --rm \ + __dodocker run --rm \ -u 1000:1000 \ - --network "${network_name}" \ + --network "${__network_name}" \ --name send-exit \ -v "$(pwd)/.eth/exit_messages:/exit_messages" \ -v "/etc/localtime:/etc/localtime:ro" \ @@ -2373,10 +2503,10 @@ keys() { vc-utils:local /var/lib/lighthouse/nonesuch.txt eth2 send-exit else #__i_haz_keys_service - docompose run --rm -e OWNER_UID="${__owner_uid}" validator-keys "$@" + __docompose run --rm -e OWNER_UID="${__owner_uid}" validator-keys "$@" fi - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) } @@ -2386,7 +2516,7 @@ upgrade() { start() { - docompose up -d --remove-orphans "$@" + __docompose up -d --remove-orphans "$@" } # Passed by user @@ -2402,7 +2532,7 @@ run() { stop() { - docompose down --remove-orphans "$@" + __docompose down --remove-orphans "$@" } @@ -2418,17 +2548,17 @@ restart() { logs() { - docompose logs "$@" + __docompose logs "$@" } cmd() { - docompose "$@" + __docompose "$@" } terminate() { - if [ -z "$(dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+")" ]; then + if [ -z "$(__dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+")" ]; then echo "There are no data stores - Docker volumes - left to remove for this Ethereum node." stop return 0 @@ -2436,8 +2566,8 @@ terminate() { while true; do read -rp "WARNING - this action will destroy all data stores for this Ethereum node. Do you wish to continue? \ -(No/Yes) " yn - case $yn in +(No/Yes) " __yn + case $__yn in [Yy][Ee][Ss] ) break;; * ) echo "Aborting, no changes made"; exit 130;; esac @@ -2446,16 +2576,16 @@ terminate() { stop # In this case I want the word splitting, so rm can remove all volumes # shellcheck disable=SC2046 - dodocker volume rm $(dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+") + __dodocker volume rm $(__dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+") echo echo "All containers stopped and all volumes deleted" echo } -query_network() { - var="NETWORK" - __prev_network=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) +__query_network() { + __var="NETWORK" + __prev_network=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) NETWORK=$(whiptail --notags --title "Select Network" --menu \ "Which network do you want to run on?" 13 65 6 \ "holesky" "Holešovice Testnet" \ @@ -2496,7 +2626,7 @@ screen.\n\nCustom testnets only work with a URL to fetch their configuration fro } -query_deployment() { +__query_deployment() { if [ "${NETWORK}" = "gnosis" ]; then if uname -m | grep -q riscv64; then echo "Gnosis network has no available client combos on RISC-V. Aborting." @@ -2509,9 +2639,10 @@ query_deployment() { "validator" "Validator client only" 3>&1 1>&2 2>&3) elif uname -m | grep -q aarch64 || uname -m | grep -q arm64; then __deployment=$(whiptail --notags --title "Select deployment type" --menu \ - "What kind of deployment do you want to run?" 10 65 3 \ + "What kind of deployment do you want to run?" 
10 65 4 \ "node" "Ethereum node - consensus, execution and validator client" \ "rpc" "Ethereum RPC node - consensus and execution client" \ + "lido_comp" "Lido-compatible node (Community Staking / Simple DVT)" \ "rocket" "Validator client only - integrate with RocketPool" 3>&1 1>&2 2>&3) elif uname -m | grep -q riscv64; then __deployment=$(whiptail --notags --title "Select deployment type" --menu \ @@ -2521,9 +2652,10 @@ query_deployment() { "rocket" "Validator client only - integrate with RocketPool" 3>&1 1>&2 2>&3) elif uname -m | grep -q x86_64; then __deployment=$(whiptail --notags --title "Select deployment type" --menu \ - "What kind of deployment do you want to run?" 11 65 4 \ + "What kind of deployment do you want to run?" 11 65 5 \ "node" "Ethereum node - consensus, execution and validator client" \ "rpc" "Ethereum RPC node - consensus and execution client" \ + "lido_comp" "Lido-compatible node (Community Staking / Simple DVT)" \ "rocket" "Validator client only - integrate with RocketPool" \ "ssv" "SSV node - consensus, execution and ssv-node" 3>&1 1>&2 2>&3) else @@ -2532,11 +2664,20 @@ query_deployment() { uname -m exit 1 fi + + if [ "${__deployment}" = "lido_comp" ]; then + __deployment=$(whiptail --notags --title "Select deployment type for Lido" --menu \ + "What kind of deployment to participate in Lido protocol do you want to run?" 13 90 3 \ + "lido_csm" "[Community Staking] CSM node - Consensus, execution and validator client" \ + "lido_ssv" "[Simple DVT] SSV node - Consensus, execution and ssv-node" \ + "lido_obol" "[Simple DVT] Obol node - Nodes, validator client and charon node (obol middleware)" 3>&1 1>&2 2>&3) + fi + echo "Your deployment choice is: ${__deployment}" } -query_validator_client() { +__query_validator_client() { if [ "${NETWORK}" = "gnosis" ]; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select validator client" --menu \ "Which validator client do you want to run?" 11 65 4 \ @@ -2566,7 +2707,8 @@ query_validator_client() { "lighthouse-vc-only.yml" "Lighthouse validator client" \ "teku-vc-only.yml" "Teku validator client" \ "lodestar-vc-only.yml" "Lodestar validator client" \ - "nimbus-vc-only.yml" "Nimbus validator client" 3>&1 1>&2 2>&3) + "nimbus-vc-only.yml" "Nimbus validator client" \ + "prysm-vc-only.yml" "Prysm validator client" 3>&1 1>&2 2>&3) else CONSENSUS_CLIENT=$(whiptail --notags --title "Select validator client" --menu \ "Which validator client do you want to run?" 12 65 5 \ @@ -2581,12 +2723,12 @@ query_validator_client() { } -query_consensus_client() { +__query_consensus_client() { if [ "${NETWORK}" = "gnosis" ]; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select consensus client" --menu \ "Which consensus client do you want to run?" 
11 65 4 \ - "lighthouse.yml" "Lighthouse (Rust) - consensus and validator client" \ "teku.yml" "Teku (Java) - consensus and validator client" \ + "lighthouse.yml" "Lighthouse (Rust) - consensus and validator client" \ "lodestar.yml" "Lodestar (Javascript) - consensus and validator client" \ "nimbus.yml" "Nimbus (Nim) - consensus and validator client" 3>&1 1>&2 2>&3) elif uname -m | grep -q aarch64 || uname -m | grep -q arm64; then @@ -2595,8 +2737,10 @@ query_consensus_client() { "nimbus.yml" "Nimbus (Nim) - consensus and validator client" \ "grandine-allin1.yml" "Grandine (Rust) - consensus with built-in validator client" \ "lodestar.yml" "Lodestar (Javascript) - consensus and validator client" \ + "teku.yml" "Teku (Java) - consensus and validator client" \ "lighthouse.yml" "Lighthouse (Rust) - consensus and validator client" \ - "teku.yml" "Teku (Java) - consensus and validator client" 3>&1 1>&2 2>&3) + "prysm.yml" "Prysm (Go) - consensus and validator client" \ + 3>&1 1>&2 2>&3) elif uname -m | grep -q riscv64; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select consensus client" --menu \ "Which consensus client do you want to run?" 11 65 4 \ @@ -2606,17 +2750,18 @@ query_consensus_client() { "Which consensus client do you want to run?" 13 65 6 \ "teku.yml" "Teku (Java) - consensus and validator client" \ "grandine-allin1.yml" "Grandine (Rust) - consensus with built-in validator client" \ - "lighthouse.yml" "Lighthouse (Rust) - consensus and validator client" \ - "nimbus.yml" "Nimbus (Nim) - consensus and validator client" \ "lodestar.yml" "Lodestar (Javascript) - consensus and validator client" \ - "prysm.yml" "Prysm (Go) - consensus and validator client" 3>&1 1>&2 2>&3) + "nimbus.yml" "Nimbus (Nim) - consensus and validator client" \ + "lighthouse.yml" "Lighthouse (Rust) - consensus and validator client" \ + "prysm.yml" "Prysm (Go) - consensus and validator client" \ + 3>&1 1>&2 2>&3) fi echo "Your consensus client file is:" "${CONSENSUS_CLIENT}" } -query_consensus_only_client() { +__query_consensus_only_client() { if [ "${NETWORK}" = "gnosis" ]; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select consensus client" --menu \ "Which consensus client do you want to run?" 11 65 4 \ @@ -2631,7 +2776,9 @@ query_consensus_only_client() { "grandine-cl-only.yml" "Grandine (Rust) - consensus client" \ "lodestar-cl-only.yml" "Lodestar (Javascript) - consensus client" \ "lighthouse-cl-only.yml" "Lighthouse (Rust) - consensus client" \ - "teku-cl-only.yml" "Teku (Java) - consensus client" 3>&1 1>&2 2>&3) + "teku-cl-only.yml" "Teku (Java) - consensus client" \ + "prysm-cl-only.yml" "Prysm (Go) - consensus client" \ + 3>&1 1>&2 2>&3) elif uname -m | grep -q riscv64; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select consensus client" --menu \ "Which consensus client do you want to run?" 
11 65 4 \ @@ -2651,15 +2798,15 @@ query_consensus_only_client() { } -query_custom_execution_client() { +__query_custom_execution_client() { if [ "${__minty_fresh}" -eq 1 ]; then EL_CUSTOM_NODE="" JWT_SECRET="" else - var="EL_NODE" - EL_CUSTOM_NODE=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - var="JWT_SECRET" - JWT_SECRET=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="EL_NODE" + EL_CUSTOM_NODE=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + __var="JWT_SECRET" + JWT_SECRET=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) fi EL_CUSTOM_NODE=$(whiptail --title "Configure custom execution client" --inputbox "What is the URL for your custom \ execution client? (right-click to paste)" 10 65 "${EL_CUSTOM_NODE}" 3>&1 1>&2 2>&3) @@ -2681,12 +2828,13 @@ again or Cancel on the next screen." 10 65 } -query_execution_client() { +__query_execution_client() { if [ "${NETWORK}" = "gnosis" ]; then if uname -m | grep -q aarch64 || uname -m | grep -q arm64; then EXECUTION_CLIENT=$(whiptail --notags --title "Select execution client" --menu \ "Which execution client do you want to run?" 9 65 2 \ "nethermind.yml" "Nethermind (.NET)" \ + "erigon.yml" "Erigon (Go)" \ "NONE" "Custom - Distributed" 3>&1 1>&2 2>&3) else EXECUTION_CLIENT=$(whiptail --notags --title "Select execution client" --menu \ @@ -2700,6 +2848,7 @@ query_execution_client() { "Which execution client do you want to run?" 11 65 4 \ "besu.yml" "Besu (Java)" \ "nethermind.yml" "Nethermind (.NET)" \ + "erigon.yml" "Erigon (Go)" \ "geth.yml" "Geth (Go)" \ "NONE" "Custom - Distributed" 3>&1 1>&2 2>&3) elif uname -m | grep -q riscv64; then @@ -2713,14 +2862,14 @@ query_execution_client() { "reth.yml" "Reth (Rust)" \ "besu.yml" "Besu (Java)" \ "nethermind.yml" "Nethermind (.NET)" \ - "geth.yml" "Geth (Go)" \ "erigon.yml" "Erigon (Go)" \ + "geth.yml" "Geth (Go)" \ "NONE" "Custom - Distributed" 3>&1 1>&2 2>&3) fi if [ "${EXECUTION_CLIENT}" == "NONE" ]; then unset EXECUTION_CLIENT - query_custom_execution_client + __query_custom_execution_client EL_NODE="${EL_CUSTOM_NODE}" else echo "Your execution client file is:" "${EXECUTION_CLIENT}" @@ -2734,7 +2883,7 @@ query_execution_client() { } -query_grafana() { +__query_grafana() { if (whiptail --title "Grafana" --yesno "Do you want to use Grafana dashboards?" 10 65) then if [[ "$OSTYPE" == "darwin"* ]]; then # macOS doesn't do well with / bind mount - leave node-exporter, cadvisor and loki/promtail off by default @@ -2748,7 +2897,7 @@ query_grafana() { } -query_remote_beacon() { +__query_remote_beacon() { if [ "${__minty_fresh}" -eq 1 ]; then if [ "${__deployment}" = "rocket" ]; then REMOTE_BEACON="http://eth2:5052" @@ -2756,8 +2905,8 @@ query_remote_beacon() { REMOTE_BEACON="" fi else - var="CL_NODE" - REMOTE_BEACON=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="CL_NODE" + REMOTE_BEACON=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) fi REMOTE_BEACON=$(whiptail --title "Configure remote consensus client" --inputbox "What is the URL for your remote \ consensus client? (right-click to paste)" 10 60 "${REMOTE_BEACON}" 3>&1 1>&2 2>&3) @@ -2766,12 +2915,12 @@ consensus client? 
(right-click to paste)" 10 60 "${REMOTE_BEACON}" 3>&1 1>&2 2>& } -query_checkpoint_beacon() { +__query_checkpoint_beacon() { if [ "${__minty_fresh}" -eq 1 ] || [ "${__network_change}" -eq 1 ]; then RAPID_SYNC_URL="" else - var="RAPID_SYNC_URL" - RAPID_SYNC_URL=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="RAPID_SYNC_URL" + RAPID_SYNC_URL=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) fi if [ -z "${RAPID_SYNC_URL}" ]; then case "${NETWORK}" in @@ -2800,9 +2949,9 @@ checkpoint sync provider? (right-click to paste)" 10 65 "${RAPID_SYNC_URL}" 3>&1 } -query_graffiti() { - var="GRAFFITI" - GRAFFITI=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) +__query_graffiti() { + __var="GRAFFITI" + GRAFFITI=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) while true; do GRAFFITI=$(whiptail --title "Configure Graffiti" --inputbox "What Graffiti do you want to send with your blocks? \ @@ -2819,22 +2968,34 @@ query_graffiti() { } -query_rapid_sync() { +__query_rapid_sync() { if [[ "${NETWORK}" =~ ^https?:// ]]; then RAPID_SYNC_URL="" return fi - query_checkpoint_beacon + __query_checkpoint_beacon } -query_coinbase() { - var="FEE_RECIPIENT" - FEE_RECIPIENT=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) +__query_coinbase() { + __var="FEE_RECIPIENT" + FEE_RECIPIENT=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) while true; do set +e # Can't rely on the error handler here because of the special-casing below for update() - if [ "${__during_update}" -eq 1 ] || [ ! "${__deployment}" = rpc ]; then + if [[ "${__deployment}" =~ "lido_" ]]; then + case "${NETWORK}" in + "mainnet") + FEE_RECIPIENT="0x388c818ca8b9251b393131c08a736a67ccb19297" + ;; + "holesky") + FEE_RECIPIENT="0xE73a3602b99f1f913e72F8bdcBC235e206794Ac8" + ;; + *) + FEE_RECIPIENT="0x0000000000000000000000000000000000000000" + ;; + esac + elif [ "${__during_update}" -eq 1 ] || [ ! "${__deployment}" = rpc ]; then FEE_RECIPIENT=$(whiptail --title "Configure rewards address" --inputbox "What is the address you want \ transaction rewards to be sent to by default? (right-click to paste, CANNOT be an ENS)" 10 65 "${FEE_RECIPIENT}" \ 3>&1 1>&2 2>&3) @@ -2844,9 +3005,9 @@ address? Yes even on an RPC node. Can be any address at all. (right-click to pas "${FEE_RECIPIENT}" 3>&1 1>&2 2>&3) fi - exitstatus=$? + __exitstatus=$? set -e - if [ $exitstatus -eq 0 ]; then + if [ $__exitstatus -eq 0 ]; then if [[ ${FEE_RECIPIENT} == 0x* && ${#FEE_RECIPIENT} -eq 42 ]]; then echo "Your rewards address is: ${FEE_RECIPIENT}" break @@ -2860,7 +3021,7 @@ screen.\n\nThe client will not start successfully until a valid ETH rewards addr echo "Please make requested changes manually or run \"$__me update\" again" echo "before running \"$__me up\"." echo - echo "Without a FEE_RECIPIENT set in \"${ENV_FILE}\", containers will not" + echo "Without a FEE_RECIPIENT set in \"${__env_file}\", containers will not" echo "start successfully. Already running containers will keep running with the" echo "old configuration until you are ready to restart them." 
else @@ -2873,7 +3034,7 @@ screen.\n\nThe client will not start successfully until a valid ETH rewards addr } -query_mev() { +__query_mev() { if [ "${NETWORK}" = "gnosis" ]; then return 0 fi @@ -2883,14 +3044,12 @@ query_mev() { "holesky") MEV_RELAYS="https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net,\ https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz,\ -https://0xb1d229d9c21298a87846c7022ebeef277dfc321fe674fa45312e20b5b6c400bfde9383f801848d7837ed5fc449083a12@relay-holesky.edennetwork.io,\ https://0x821f2a65afb70e7f2e820a925a9b4c80a159620582c1766b1b09729fec178b11ea22abb3a51f07b288be815a1a2ff516@bloxroute.holesky.blxrbdn.com,\ https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money" ;; "mainnet") MEV_RELAYS="https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,\ https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,\ -https://0xb3ee7afcf27f1f1259ac1787876318c6584ee353097a50ed84f51a1f21a323b3736f271a895c7ce918c038e4265918be@relay.edennetwork.io,\ https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,\ https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,\ https://0x98650451ba02064f7b000f5768cf0cf4d4e492317d82871bdc87ef841a0743f69f0f1eea11168503240ac35d101c9135@mainnet-relay.securerpc.com" @@ -2898,8 +3057,8 @@ https://0x98650451ba02064f7b000f5768cf0cf4d4e492317d82871bdc87ef841a0743f69f0f1e esac return 0 fi - var="MEV_BOOST" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="MEV_BOOST" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 if [[ "${CONSENSUS_CLIENT}" =~ "-vc-only.yml" ]]; then @@ -2910,11 +3069,94 @@ want to use MEV Boost?" 
10 65); then fi return 0 fi + if [[ "${__deployment}" =~ "lido_" ]]; then + MEV_BOOST="true" + while true; do + MEV_RELAYS="" + __selected="" + declare -A relays=() + declare -A optional_relays=() + case "${NETWORK}" in + "mainnet") + relays=( + ['Agnostic']="https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net" + ['bloXroute']="https://0xb0b07cd0abef743db4260b0ed50619cf6ad4d82064cb4fbec9d3ec530f7c5e6793d9f286c4e082c0244ffb9f2658fe88@bloxroute.regulated.blxrbdn.com" + ['Aestus']="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live" + ['bloXroute Max-Profit']="https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com" + ['Flashbots']="https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net" + ['Eden Network']="https://0xb3ee7afcf27f1f1259ac1787876318c6584ee353097a50ed84f51a1f21a323b3736f271a895c7ce918c038e4265918be@relay.edennetwork.io" + ['Ultra Sound']="https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money" + ) + optional_relays=( + ['Manifold Finance']="https://0x98650451ba02064f7b000f5768cf0cf4d4e492317d82871bdc87ef841a0743f69f0f1eea11168503240ac35d101c9135@mainnet-relay.securerpc.com/" + ) + __selected=$(whiptail --title "Relays list" --checklist \ + "Choose relays" 15 50 9 \ + "Agnostic" "" ON \ + "bloXroute" "" ON \ + "Aestus" "" ON \ + "bloXroute Max-Profit" "" ON \ + "Flashbots" "" ON \ + "Eden Network" "" ON \ + "Manifold Finance" "(optional)" ON \ + "Ultra Sound" "" ON 3>&1 1>&2 2>&3) + ;; + "holesky") + relays=( + ['Aestus']="https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7c0406754376c2c8285a36c630346aa5c5f833@holesky.aestus.live" + ['Titan']="https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz" + ['Flashbots Boost']="https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net" + ['Ultrasound']="https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money" + ) + __selected=$(whiptail --title "Relays list" --checklist \ + "Choose relays" 12 30 5 \ + "Aestus" "" ON \ + "Titan" "" ON \ + "Flashbots Boost" "" ON \ + "Ultrasound" "" ON 3>&1 1>&2 2>&3) + ;; + *) + echo "No MEV RELAYS configured for ${NETWORK}" + return + ;; + esac + for i in "${!relays[@]}"; do + if [[ ${__selected} =~ ${i} ]]; then + if [ -z "${MEV_RELAYS}" ]; then + MEV_RELAYS="${relays[$i]}" + else + MEV_RELAYS="${MEV_RELAYS},${relays[$i]}" + fi + fi + done + exitstatus=$? + if [ $exitstatus -eq 0 ]; then + if [ -z "${MEV_RELAYS}" ]; then + whiptail --msgbox "At least one mandatory relay should be chosen" 10 75 + continue + fi + else + echo "You chose Cancel." + exit 1 + fi + for i in "${!optional_relays[@]}"; do + if [[ ${__selected} =~ ${i} ]]; then + if [ -z "${MEV_RELAYS}" ]; then + MEV_RELAYS="${optional_relays[$i]}" + else + MEV_RELAYS="${MEV_RELAYS},${optional_relays[$i]}" + fi + fi + done + break + done + return 0 + fi if (whiptail --title "MEV Boost" --yesno "Do you want to use MEV Boost?" 
10 65) then MEV_BOOST="true" - if [ "${value}" = "true" ]; then - var="MEV_RELAYS" - MEV_RELAYS=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + if [ "${__value}" = "true" ]; then + __var="MEV_RELAYS" + MEV_RELAYS=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) else case "${NETWORK}" in "sepolia") @@ -2923,14 +3165,13 @@ want to use MEV Boost?" 10 65); then "holesky") MEV_RELAYS="https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net,\ https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz,\ -https://0xb1d229d9c21298a87846c7022ebeef277dfc321fe674fa45312e20b5b6c400bfde9383f801848d7837ed5fc449083a12@relay-holesky.edennetwork.io,\ https://0x821f2a65afb70e7f2e820a925a9b4c80a159620582c1766b1b09729fec178b11ea22abb3a51f07b288be815a1a2ff516@bloxroute.holesky.blxrbdn.com,\ https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money" ;; "mainnet") MEV_RELAYS=https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,\ https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,\ -https://0xb3ee7afcf27f1f1259ac1787876318c6584ee353097a50ed84f51a1f21a323b3736f271a895c7ce918c038e4265918be@relay.edennetwork.io,\ +https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz,\ https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,\ https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,\ https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,\ @@ -2944,7 +3185,6 @@ https://0x8c7d33605ecef85403f8b7289c8058f440cbb6bf72b055dfe2f3e2c6695b6a1ea5a9cd fi MEV_RELAYS=$(whiptail --title "Configure MEV relays" --inputbox "What MEV relay(s) do you want to use? \ (right-click to paste)" 10 65 "${MEV_RELAYS}" 3>&1 1>&2 2>&3) - echo "Your MEV relay(s): ${MEV_RELAYS}" else MEV_BOOST="false" @@ -2952,11 +3192,154 @@ https://0x8c7d33605ecef85403f8b7289c8058f440cbb6bf72b055dfe2f3e2c6695b6a1ea5a9cd fi } -query_dkg() { +__lido_withdrawal_credentials_address() { + __lido_address="" + case "${NETWORK}" in + "mainnet") + __lido_address="0xB9D7934878B5FB9610B3fE8A5e441e8fad7E293f" + ;; + "holesky") + __lido_address="0xF0179dEC45a37423EAD4FaD5fCb136197872EAd9" + ;; + *) + __lido_address="0x0000000000000000000000000000000000000000" + ;; + esac + echo "${__lido_address}" +} + +__lido_keys_attention_message() { + whiptail --title "Attention" --msgbox "Please, make sure that you set 32 ETH when generated deposit data\nAnd right execution address for your validator keys: $(__lido_withdrawal_credentials_address)\nOtherwise, your keys will not be valid!" 10 80 +} + +__query_lido_keys_generation() { + if [ "${NETWORK}" = "mainnet" ]; then + if (whiptail --title "Security warning" --yesno "Key generation is not recommended on MAINNET for security reasons.\n\nIt is recommended to Select 'No' to skip the step and generate keys in a more secure way later (ex. 
on an airgapped live USB)\n\nOtherwise, select 'Yes' to proceed with key generation on this machine" 13 85) then + echo "Proceeding with key generation on MAINNET." + else + __lido_keys_attention_message + return 0 + fi + fi + + __num_validators="1" + __keystore_password="" + __keystore_password_confirm="" + __num_validators=$(whiptail --title "Validator count" --inputbox "Enter the number of validators" 8 60 "${__num_validators}" 3>&1 1>&2 2>&3) + while true; do + __keystore_password=$(whiptail --title "Keystore password" --passwordbox "Enter the validator keystore password (at least 8 chars)" 8 60 "${__keystore_password}" 3>&1 1>&2 2>&3) + + exitstatus=$? + if [ $exitstatus -eq 0 ]; then + if [[ ${#__keystore_password} -ge 8 ]]; then + __keystore_password_confirm=$(whiptail --title "Keystore password" --passwordbox "Confirm the validator keystore password" 8 60 "${__keystore_password_confirm}" 3>&1 1>&2 2>&3) + if [ "${__keystore_password}" = "${__keystore_password_confirm}" ]; then + echo "Keystore password set." + break + else + whiptail --msgbox "Passwords do not match. Please try again." 10 60 + fi + else + whiptail --msgbox "The keystore password secret needs to be at least 8 characters long. You can try \ +again or Cancel on the next screen." 10 75 + fi + else + echo "You chose Cancel." + exit 1 + fi + done + + exitstatus=$? + if [ $exitstatus -eq 0 ]; then + echo "Your number of validators is:" "${__num_validators}" + __mnemonic="existing" + if (whiptail --title "Mnemonic" --yesno "Do you want to generate a new mnemonic?" 8 60) then + __mnemonic="new" + fi + export NETWORK=${NETWORK} && __docompose --profile tools run --rm deposit-cli-${__mnemonic} \ + --uid "$(id -u)" \ + --execution_address "$(__lido_withdrawal_credentials_address)" \ + --num_validators "${__num_validators}" \ + --keystore_password "${__keystore_password}" \ + --non_interactive + else + echo "You chose Cancel." + exit 1 + fi +} + + +__query_lido_obol_enr() { + ${__as_owner} mkdir -p ./.eth/charon + __outcome__=$(__docompose -f ./lido-obol.yml run -u "$(id -u)":"$(id -g)" --rm charon-create-enr) + if [[ "${__outcome__}" =~ "Created ENR private key:" ]]; then + __lido_obol_operator_enr=$(echo "${__outcome__}" | grep -e 'enr:') + else + echo "Something went wrong. Please try again." + exit 1 + fi + + echo "Your created ENR is:" "${__lido_obol_operator_enr}" + echo "${__lido_obol_operator_enr}" >> "./.eth/charon-enr-public-key" + whiptail --title "Lido Obol operator ENR creation outcome" --msgbox "Your ENR has been created!\n\n1. Back up your private key (path: .eth/charon-enr-private-key)!\n2. Copy your public ENR for the further steps\n\nYour public ENR is:\n\n${__lido_obol_operator_enr}" 16 80 +} + +__query_lido_obol_cluster_definition() { + __cluster_definition_url=$(whiptail --title "Lido Obol cluster creation" --inputbox "\nEnter your cluster definition link below:" 10 80 "https://api.obol.tech/dv/example_link_to_your_definition" 3>&1 1>&2 2>&3) + if [ "${__cluster_definition_url}" = "" ]; then + echo "Cluster definition URL can't be empty" + exit 1 + fi + exitstatus=$?
+ if [ $exitstatus -eq 0 ]; then + ${__as_owner} curl -o ./.eth/cluster_definition.tmp -s "${__cluster_definition_url}" -H "Accept: application/json" +# shellcheck disable=SC2086 + __cluster_definition_is_valid=$(__docompose -f ./lido-obol.yml run --rm -v "$(pwd)"/.eth/cluster_definition.tmp:/cluster_definition.json:ro curl-jq sh -c \ + "cat /cluster_definition.json | jq -r 'all(.validators[]; .fee_recipient_address == \"'${FEE_RECIPIENT}'\" and .withdrawal_address == \"'$(__lido_withdrawal_credentials_address)'\")'" | tail -n 1) + set -e + if [ "${__cluster_definition_is_valid}" = "true" ]; then + echo "Your cluster definition url is:" "${__cluster_definition_url}" + ${__as_owner} mv ./.eth/cluster_definition.tmp ./.eth/cluster-definition.json + else + whiptail --title "Lido Obol cluster creation" --msgbox "Your cluster definition is not valid.\n\nCheck that every validator has \`fee_recipient_address\` and \`withdrawal_address\` equal to Lido contracts and try again.\n\nLido fee recipient: '${FEE_RECIPIENT}'\nLido withdrawal credentials: '$(__lido_withdrawal_credentials_address)'" 14 90 + echo "Your cluster definition is NOT valid." + ${__as_owner} rm ./.eth/cluster_definition.tmp + exit 1 + fi + else + echo "You chose Cancel." + exit 1 + fi +} + +__query_lido_obol_cluster_dkg() { + if [ -d ./.eth/validator_keys ]; then + __folder_postfix=${EPOCHSECONDS} + ${__as_owner} mkdir ./.eth_backup_"$__folder_postfix" + ${__as_owner} cp -vr ./.eth/validator_keys ./.eth_backup_"$__folder_postfix"/validator_keys + ${__as_owner} rm -rf ./.eth/validator_keys + fi + if (whiptail --title "DKG ceremony" --yesno "Do you want to start DKG ceremony?\n\nMake sure all participants are ready!" 10 60) then + __outcome__=$(__docompose -f ./lido-obol.yml run -u "$(id -u)":"$(id -g)" --rm charon-run-dkg) + exitstatus=$? + if [ $exitstatus -ne 0 ]; then + echo "Something went wrong. Please, try again." + exit 1 + fi + echo "DKG ceremony finished successfully" + whiptail --title "Finish" --msgbox "\nThe DKG is finished!" 10 40 + else + whiptail --title "DKG ceremony" --msgbox "You should start DKG ceremony before proceeding further" 8 60 + echo "DKG ceremony starting is canceled" + exit 1 + fi +} + +__query_dkg() { __ssv_operator_id=-1 if (whiptail --title "DKG ceremony" --yesno "Do you want to participate in DKG ceremonies as an operator?" 10 60); then __key_file_content=$(${__auto_sudo} cat ./ssv-config/encrypted_private_key.json) - __public_key=$(docompose -f ./ssv-dkg.yml run --rm curl-jq sh -c \ + __public_key=$(__docompose -f ./ssv-dkg.yml run --rm curl-jq sh -c \ "echo '${__key_file_content}' | jq -r '.pubKey'" | tail -n 1) echo "Your SSV node public key is: ${__public_key}" __ssv_operator_id=$(whiptail --title "Register SSV operator" --inputbox "\n1. Your SSV node public key:\n\n${__public_key}\n\n2. Register your operator in the SSV network with the public key\n\n3. Input your Operator ID \ @@ -2972,43 +3355,43 @@ query_dkg() { rm -f ssv-config/dkg-config.yaml.original } -set_value_in_env() { -# Assumes that "var" has been set to the name of the variable to be changed - if [ "${!var+x}" ]; then - if ! grep -qF "${var}" "${ENV_FILE}" 2>/dev/null ; then - echo "${var}=${!var}" >> "${ENV_FILE}" +__set_value_in_env() { +# Assumes that "__var" has been set to the name of the variable to be changed + if [ "${!__var+x}" ]; then + if ! 
grep -qF "${__var}" "${__env_file}" 2>/dev/null ; then + echo "${__var}=${!__var}" >> "${__env_file}" else # Handle & in GRAFFITI gracefully - sed -i'.original' -e "s~^\(${var}\s*=\s*\).*\$~\1${!var//&/\\&}~" "${ENV_FILE}" + sed -i'.original' -e "s~^\(${__var}\s*=\s*\).*\$~\1${!__var//&/\\&}~" "${__env_file}" fi fi } -handle_error() { +__handle_error() { if [[ ! $- =~ e ]]; then # set +e, do nothing return 0 fi - local exit_code=$1 + local __exit_code=$1 echo - if [ "$exit_code" -eq 130 ]; then + if [ "$__exit_code" -eq 130 ]; then echo "$__me terminated by user" - elif [ "$__during_config" -eq 1 ] && [ "$exit_code" -eq 1 ]; then + elif [ "$__during_config" -eq 1 ] && [ "$__exit_code" -eq 1 ]; then echo "Canceled config wizard." else - echo "$__me terminated with exit code $exit_code on line $2" + echo "$__me terminated with exit code $__exit_code on line $2" if [ -n "${__command}" ]; then echo "This happened during $__me ${__command} ${__params}" fi fi if [ "$__during_update" -eq 1 ] && [ "$__during_migrate" -eq 1 ]; then - cp "${ENV_FILE}" "${ENV_FILE}".partial - cp "${ENV_FILE}".source "${ENV_FILE}" + cp "${__env_file}" "${__env_file}".partial + cp "${__env_file}".source "${__env_file}" echo - echo "Restored your ${ENV_FILE} file, to undo partial migration. Please verify it looks correct." - echo "The partially migrated file is in ${ENV_FILE}.partial for troubleshooting." + echo "Restored your ${__env_file} file, to undo partial migration. Please verify it looks correct." + echo "The partially migrated file is in ${__env_file}.partial for troubleshooting." fi if [ "$__during_postgres" -eq 1 ]; then echo @@ -3019,7 +3402,7 @@ handle_error() { echo "Starting the node again could get you slashed." echo echo "Marking Web3signer as unsafe to start." - dodocker run --rm -v "$(dodocker volume ls -q -f "name=web3signer-keys")":/var/lib/web3signer \ + __dodocker run --rm -v "$(__dodocker volume ls -q -f "name=web3signer-keys")":/var/lib/web3signer \ alpine:3 touch /var/lib/web3signer/.migration_fatal_error elif [ "$__migrated" -eq 1 ]; then echo "Web3signer slashing protection database migration failed, after switching to the migrated data." @@ -3036,18 +3419,18 @@ handle_error() { } -check_legacy() { - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) +__check_legacy() { + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ "${value}" =~ "-allin1.yml" && ! "${value}" =~ "grandine-allin1.yml" ]]; then # Warn re Grandine once VC - if [[ "${value}" =~ "teku-allin1.yml" ]]; then + if [[ "${__value}" =~ "-allin1.yml" && ! "${__value}" =~ "grandine-allin1.yml" ]]; then # Warn re Grandine once VC + if [[ "${__value}" =~ "teku-allin1.yml" ]]; then __client="Teku" - elif [[ "${value}" =~ "nimbus-allin1.yml" ]]; then + elif [[ "${__value}" =~ "nimbus-allin1.yml" ]]; then __client="Nimbus" - elif [[ "${value}" =~ "grandine-allin1.yml" ]]; then + elif [[ "${__value}" =~ "grandine-allin1.yml" ]]; then __client="Grandine" else __client="Mystery" @@ -3063,8 +3446,8 @@ config() { # Do not track changes to ext-network.yml ${__as_owner} git update-index --assume-unchanged ext-network.yml # Create ENV file if needed - if ! [[ -f "${ENV_FILE}" ]]; then - ${__as_owner} cp default.env "${ENV_FILE}" + if ! 
[[ -f "${__env_file}" ]]; then + ${__as_owner} cp default.env "${__env_file}" __minty_fresh=1 else __minty_fresh=0 @@ -3072,17 +3455,20 @@ config() { __during_config=1 - check_legacy - query_network - query_deployment + __check_legacy + __query_network + __query_deployment case "${__deployment}" in - "node") - query_consensus_client + "node" | "lido_csm") + __query_consensus_client + ;; + "lido_obol") + __query_consensus_client ;; "validator" | "rocket") - query_validator_client + __query_validator_client ;; - "ssv") + "ssv" | "lido_ssv") if [ "${NETWORK}" = "holesky" ]; then sed -i'.original' 's/ Network: .*/ Network: holesky/' ssv-config/config.yaml elif [ "${NETWORK}" = "mainnet" ]; then @@ -3102,17 +3488,17 @@ config() { fi if [ ! -f "./ssv-config/encrypted_private_key.json" ]; then echo "Creating encrypted operator private key" - dodocker run --name ssv-node-key-generation -v "$(pwd)/ssv-config/password.pass":/password.pass \ + __dodocker run --name ssv-node-key-generation -v "$(pwd)/ssv-config/password.pass":/password.pass \ -it bloxstaking/ssv-node:latest /go/bin/ssvnode generate-operator-keys \ - --password-file=/password.pass && dodocker cp ssv-node-key-generation:/encrypted_private_key.json \ - ./ssv-config/encrypted_private_key.json && dodocker rm ssv-node-key-generation + --password-file=/password.pass && __dodocker cp ssv-node-key-generation:/encrypted_private_key.json \ + ./ssv-config/encrypted_private_key.json && __dodocker rm ssv-node-key-generation ${__auto_sudo} chown 12000:12000 ./ssv-config/encrypted_private_key.json fi - query_dkg - query_consensus_only_client + __query_dkg + __query_consensus_only_client ;; "rpc") - query_consensus_only_client + __query_consensus_only_client ;; *) echo "Unknown deployment ${__deployment}, this is a bug." @@ -3126,34 +3512,150 @@ config() { if [[ ! "${CONSENSUS_CLIENT}" =~ "-vc-only.yml" ]]; then CL_NODE="http://consensus:5052" - query_execution_client - query_rapid_sync - query_mev - query_grafana - query_coinbase - if [ "${__deployment}" = "node" ]; then - query_graffiti + __query_execution_client + __query_rapid_sync + __query_mev + __query_grafana + __query_coinbase + if [[ "${__deployment}" = "node" || "${__deployment}" = "lido_csm" ]]; then + __query_graffiti + fi + if [ "${__deployment}" = "lido_csm" ]; then + if (whiptail --title "Keys generation" --yesno "Do you want to generate validator keys?" 10 60) then + __query_lido_keys_generation + else + __lido_keys_attention_message + fi + if [ "${NETWORK}" = "holesky" ]; then + __link="https://csm.testnet.fi" + else + __link="https://csm.lido.fi" + fi + whiptail --title "Finish" --msgbox "Final steps!\n\n1. Run your node './ethd start'\n\n2. Wait until your node is fully synchronized\n\n4. Open ${__link} to submit your keys with '.eth/validator_keys/deposit-data-*.json' file content\n\n5. Wait for keys validation\n\n6. 
Import your keys by './ethd keys import'" 19 85 fi else unset EXECUTION_CLIENT unset GRAFANA_CLIENT - query_remote_beacon + __query_remote_beacon # This gets used, but shellcheck doesn't recognize that # shellcheck disable=SC2034 CL_NODE="${REMOTE_BEACON}" - query_mev - query_coinbase - query_graffiti + __query_mev + __query_coinbase + __query_graffiti fi __during_config=0 + if [ "${__deployment}" = "lido_obol" ]; then + CL_NODE="http://charon:3600" + case "${NETWORK}" in + "mainnet") +# We are using the variable +# shellcheck disable=SC2034 + VE_LOCATOR_ADDRESS="0xC1d0b3DE6792Bf6b4b37EccdcC24e45978Cfd2Eb" +# We are using the variable +# shellcheck disable=SC2034 + VE_ORACLE_ADDRESSES_ALLOWLIST='["0x140Bd8FbDc884f48dA7cb1c09bE8A2fAdfea776E","0xA7410857ABbf75043d61ea54e07D57A6EB6EF186","0x404335BcE530400a5814375E7Ec1FB55fAff3eA2","0x946D3b081ed19173dC83Cd974fC69e1e760B7d78","0x007DE4a5F7bc37E2F26c0cb2E8A95006EE9B89b5","0xEC4BfbAF681eb505B94E4a7849877DC6c600Ca3A","0x61c91ECd902EB56e314bB2D5c5C07785444Ea1c8","0x1Ca0fEC59b86F549e1F1184d97cb47794C8Af58d","0xc79F702202E3A6B0B6310B537E786B9ACAA19BAf"]' +# We are using the variable +# shellcheck disable=SC2034 + VE_STAKING_MODULE_ID="2" +# We are using the variable +# shellcheck disable=SC2034 + LIDO_DV_EXIT_EXIT_EPOCH="194048" # capella + ;; + "holesky") +# We are using the variable +# shellcheck disable=SC2034 + VE_LOCATOR_ADDRESS="0x28FAB2059C713A7F9D8c86Db49f9bb0e96Af1ef8" +# We are using the variable +# shellcheck disable=SC2034 + VE_ORACLE_ADDRESSES_ALLOWLIST='["0x12A1D74F8697b9f4F1eEBb0a9d0FB6a751366399","0xD892c09b556b547c80B7d8c8cB8d75bf541B2284","0xf7aE520e99ed3C41180B5E12681d31Aa7302E4e5","0x31fa51343297FFce0CC1E67a50B2D3428057D1b1","0x81E411f1BFDa43493D7994F82fb61A415F6b8Fd4","0x4c75FA734a39f3a21C57e583c1c29942F021C6B7","0xD3b1e36A372Ca250eefF61f90E833Ca070559970","0xF0F23944EfC5A63c53632C571E7377b85d5E6B6f","0xb29dD2f6672C0DFF2d2f173087739A42877A5172","0x3799bDA7B884D33F79CEC926af21160dc47fbe05"]' +# We are using the variable +# shellcheck disable=SC2034 + VE_STAKING_MODULE_ID="2" +# We are using the variable +# shellcheck disable=SC2034 + LIDO_DV_EXIT_EXIT_EPOCH="256" # capella + ;; + *) + ;; + esac + + if [ -f "./.eth/cluster-lock.json" ]; then + if (whiptail --title "Lido Obol cluster exists" --yesno "Your cluster has already been created. Continue with it?" 10 60); then +# shellcheck disable=SC2086 + __cluster_lock_is_valid=$(__docompose -f ./lido-obol.yml run --rm -v "$(pwd)"/.eth/cluster-lock.json:/cluster-lock.json:ro curl-jq sh -c \ + "cat /cluster-lock.json | jq -r 'all(.cluster_definition.validators[]; .fee_recipient_address == \"'${FEE_RECIPIENT}'\" and .withdrawal_address == \"'$(__lido_withdrawal_credentials_address)'\")'" | tail -n 1) + if [[ "${__cluster_lock_is_valid}" =~ "true" ]]; then + echo "Your cluster lock is valid." + else + whiptail --title "Lido Obol cluster definition" --msgbox "Your cluster lock file './.eth/cluster-lock.json' is not valid.\n\nCheck that every validator has \`fee_recipient_address\` and \`withdrawal_address\` equal to Lido contracts and try again.\n\nLido fee recipient: '${FEE_RECIPIENT}'\nLido withdrawal credentials: '$(__lido_withdrawal_credentials_address)'" 14 90 + echo "Your cluster lock is NOT valid." + exit 1 + fi + elif (whiptail --title "Lido Obol cluster creation" --yesno "Backup a previously created cluster to create a new one?" 
10 80); then + ${__as_owner} cp -vr ./.eth ./.eth_backup_"$EPOCHSECONDS" + ${__as_owner} rm -rf ./.eth + __query_lido_obol_enr + __query_lido_obol_cluster_definition + __query_lido_obol_cluster_dkg + else + whiptail --title "Lido Obol cluster creation" --msgbox "The \`.eth\` folder must be empty or non-existent to continue" 10 80 + echo "The \`.eth\` folder must be empty to create a new cluster" + exit 1 + fi + else + if [ -f "./.eth/charon-enr-private-key" ] && [ -f "./.eth/charon-enr-public-key" ]; then + if (whiptail --title "Lido Obol operator ENR creation" --yesno "You already have ENR. Use it?" 8 50); then + echo "Use existing ENR" + else + ${__as_owner} cp -vr ./.eth ./.eth_backup_"$EPOCHSECONDS" + ${__as_owner} rm -rf ./.eth + __query_lido_obol_enr + fi + else + __query_lido_obol_enr + fi + + if [ -f "./.eth/cluster-definition.json" ]; then + if (whiptail --title "Lido Obol cluster creation in process" --yesno "You already have cluster definition. Use it?" 10 60); then +# shellcheck disable=SC2086 + __cluster_definition_is_valid=$(__docompose -f ./lido-obol.yml run --rm -v "$(pwd)"/.eth/cluster-definition.json:/cluster-definition.json:ro curl-jq sh -c \ + "cat /cluster-definition.json | jq -r 'all(.validators[]; .fee_recipient_address == \"'${FEE_RECIPIENT}'\" and .withdrawal_address == \"'$(__lido_withdrawal_credentials_address)'\")'" | tail -n 1) + if [ "${__cluster_definition_is_valid}" = "true" ]; then + echo "Your cluster definition is valid." + else + whiptail --title "Lido Obol cluster creation" --msgbox "Your cluster definition is not valid.\n\nCheck that every validator has \`fee_recipient_address\` and \`withdrawal_address\` equal to Lido contracts and try again.\n\nLido fee recipient: '${FEE_RECIPIENT}'\nLido withdrawal credentials: '$(__lido_withdrawal_credentials_address)'" 14 90 + echo "Your cluster definition is NOT valid." + exit 1 + fi + else + __query_lido_obol_cluster_definition + fi + else + __query_lido_obol_cluster_definition + fi + __query_lido_obol_cluster_dkg + fi + +# We are using the variable +# shellcheck disable=SC2034 + VE_OPERATOR_ID=$(whiptail --title "Lido Operator ID" --inputbox "Put your Operator ID from Lido Operators dashboard \ +(right-click to paste)" 10 60 3>&1 1>&2 2>&3) + __obol_prom_remote_token=$(whiptail --title "Obol prometheus" --inputbox "Put Obol Prometheus remote write token \ +(right-click to paste)" 10 60 3>&1 1>&2 2>&3) + cat ./prometheus/obol-prom.yml > ./prometheus/custom-prom.yml + sed -i'.original' "s| credentials: OBOL_PROM_REMOTE_WRITE_TOKEN| credentials: ${__obol_prom_remote_token}|" ./prometheus/custom-prom.yml + rm -f ./prometheus/custom-prom.yml.original + fi + COMPOSE_FILE="${CONSENSUS_CLIENT}" if [ -n "${EXECUTION_CLIENT+x}" ]; then COMPOSE_FILE="${COMPOSE_FILE}:${EXECUTION_CLIENT}" fi - if [ "${__deployment}" = "ssv" ]; then + if [[ "${__deployment}" = "ssv" || "${__deployment}" = "lido_ssv" ]]; then COMPOSE_FILE="${COMPOSE_FILE}:ssv.yml" if [[ -n "${__ssv_operator_id}" && ! "${__ssv_operator_id}" = "-1" ]]; then COMPOSE_FILE="${COMPOSE_FILE}:ssv-dkg.yml" @@ -3165,10 +3667,16 @@ config() { if [ "${MEV_BOOST}" = "true" ] && [ ! 
"${__deployment}" = "rocket" ]; then COMPOSE_FILE="${COMPOSE_FILE}:mev-boost.yml" fi + if [ "${__deployment}" = "lido_obol" ]; then + COMPOSE_FILE="${COMPOSE_FILE}:lido-obol.yml" + fi if { [ "${__deployment}" = "node" ] || [ "${__deployment}" = "rocket" ]; } \ && [ "${NETWORK}" = "holesky" ]; then COMPOSE_FILE="${COMPOSE_FILE}:deposit-cli.yml" fi + if [ "${__deployment}" = "lido_csm" ]; then + COMPOSE_FILE="${COMPOSE_FILE}:deposit-cli.yml" + fi # Not multi-arch, this would break on ARM64 # COMPOSE_FILE="${COMPOSE_FILE}:ethdo.yml" if [ "${__deployment}" = "rocket" ]; then @@ -3178,59 +3686,78 @@ config() { echo "Your COMPOSE_FILE is:" "${COMPOSE_FILE}" - var=FEE_RECIPIENT - set_value_in_env - var=GRAFFITI - set_value_in_env - var=CL_NODE - set_value_in_env - var=RAPID_SYNC_URL - set_value_in_env - var=COMPOSE_FILE - set_value_in_env - var=EL_NODE - set_value_in_env - var=JWT_SECRET - set_value_in_env - var=NETWORK - set_value_in_env - var=MEV_BOOST - set_value_in_env - var=MEV_RELAYS - set_value_in_env + __var=FEE_RECIPIENT + __set_value_in_env + __var=GRAFFITI + __set_value_in_env + __var=CL_NODE + __set_value_in_env + __var=RAPID_SYNC_URL + __set_value_in_env + __var=COMPOSE_FILE + __set_value_in_env + __var=EL_NODE + __set_value_in_env + __var=JWT_SECRET + __set_value_in_env + __var=NETWORK + __set_value_in_env + __var=MEV_BOOST + __set_value_in_env + __var=MEV_RELAYS + __set_value_in_env + if [ "${__deployment}" = "lido_obol" ]; then + var=LIDO_DV_EXIT_EXIT_EPOCH + __set_value_in_env + var=VE_OPERATOR_ID + __set_value_in_env + var=VE_LOCATOR_ADDRESS + __set_value_in_env + var=VE_ORACLE_ADDRESSES_ALLOWLIST + __set_value_in_env + var=VE_STAKING_MODULE_ID + __set_value_in_env +# We are using the variable +# shellcheck disable=SC2034 + ENABLE_DIST_ATTESTATION_AGGR="true" + var=ENABLE_DIST_ATTESTATION_AGGR + __set_value_in_env + fi if [[ "${NETWORK}" = "gnosis" ]] && [[ "${CONSENSUS_CLIENT}" =~ "nimbus" ]] ; then # We are using the variable # shellcheck disable=SC2034 NIM_DOCKERFILE=Dockerfile.sourcegnosis - var=NIM_DOCKERFILE - set_value_in_env + __var=NIM_DOCKERFILE + __set_value_in_env fi if uname -m | grep -q riscv64; then # We are using the variable # shellcheck disable=SC2034 NIM_DOCKERFILE=Dockerfile.source - var=NIM_DOCKERFILE - set_value_in_env + __var=NIM_DOCKERFILE + __set_value_in_env # We are using the variable # shellcheck disable=SC2034 GETH_DOCKERFILE=Dockerfile.source - var=GETH_DOCKERFILE - set_value_in_env + __var=GETH_DOCKERFILE + __set_value_in_env fi - var="SIREN_PASSWORD" - SIREN_PASSWORD=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="SIREN_PASSWORD" + SIREN_PASSWORD=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ -z "${SIREN_PASSWORD}" ]; then SIREN_PASSWORD=$(head -c 8 /dev/urandom | od -A n -t u8 | tr -d '[:space:]' | sha256sum | head -c 32) - set_value_in_env + __set_value_in_env fi + __enable_v6 + ${__as_owner} rm .env.original - pull_and_build - nag_os_version + __pull_and_build + __nag_os_version echo - echo "Your configuration file is: $(dirname "$(realpath "${BASH_SOURCE[0]}")")/${ENV_FILE}" + echo "Your configuration file is: $(dirname "$(realpath "${BASH_SOURCE[0]}")")/${__env_file}" echo "You can change advanced config items with \"nano .env\" when in the $(dirname "$(realpath "${BASH_SOURCE[0]}")") directory." 
echo } @@ -3239,108 +3766,112 @@ config() { version() { grep "^This is" README.md echo - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Client versions - case "${value}" in + case "${__value}" in + *lido-obol.yml* ) + __docompose exec charon charon version + echo + ;;& *ssv.yml* ) - docompose exec ssv-node /go/bin/ssvnode --version + __docompose exec ssv-node /go/bin/ssvnode --version echo ;;& *lighthouse.yml* | *lighthouse-cl-only* ) - docompose exec consensus lighthouse --version + __docompose exec consensus lighthouse --version echo ;;& *lighthouse-vc-only* ) - docompose exec validator lighthouse --version + __docompose exec validator lighthouse --version echo ;;& *lodestar.yml* | *lodestar-cl-only* ) - docompose exec consensus node /usr/app/node_modules/.bin/lodestar --version + __docompose exec consensus node /usr/app/node_modules/.bin/lodestar --version echo ;;& *lodestar-vc-only* ) - docompose exec validator node /usr/app/node_modules/.bin/lodestar --version + __docompose exec validator node /usr/app/node_modules/.bin/lodestar --version echo ;;& *prysm.yml* ) - docompose exec consensus beacon-chain --version + __docompose exec consensus beacon-chain --version echo - docompose exec validator validator --version + __docompose exec validator validator --version echo ;;& *prysm-cl-only* ) - docompose exec consensus beacon-chain --version + __docompose exec consensus beacon-chain --version echo ;;& *prysm-vc-only* ) - docompose exec validator validator --version + __docompose exec validator validator --version echo ;;& *nimbus.yml* | *nimbus-allin1.yml* | *nimbus-cl-only* ) - docompose exec consensus nimbus_beacon_node --version + __docompose exec consensus nimbus_beacon_node --version echo ;;& *nimbus-vc-only* ) - docompose exec validator nimbus_validator_client --version + __docompose exec validator nimbus_validator_client --version echo ;;& *teku.yml* | *teku-allin1.yml* | *teku-cl-only* ) - docompose exec consensus /opt/teku/bin/teku --version + __docompose exec consensus /opt/teku/bin/teku --version echo ;;& *teku-vc-only* ) - docompose exec validator /opt/teku/bin/teku --version + __docompose exec validator /opt/teku/bin/teku --version echo ;;& *grandine.yml* | *grandine-allin1.yml* | *grandine-cl-only* ) - docompose exec consensus grandine --version + __docompose exec consensus grandine --version echo ;;& *grandine-vc-only* ) - docompose exec validator grandine --version + __docompose exec validator grandine --version echo ;;& *geth.yml* ) - docompose exec execution geth version + __docompose exec execution geth version echo ;;& *reth.yml* ) - docompose exec execution reth --version + __docompose exec execution reth --version echo ;;& *besu.yml* ) - docompose exec execution /opt/besu/bin/besu --version + __docompose exec execution /opt/besu/bin/besu --version echo ;;& *nethermind.yml* ) - docompose exec execution /nethermind/nethermind --version + __docompose exec execution /nethermind/nethermind --version echo ;;& *erigon.yml* ) - docompose exec execution erigon --version + __docompose exec execution erigon --version echo ;;& *web3signer.yml* ) - docompose exec web3signer /opt/web3signer/bin/web3signer --version + __docompose exec web3signer /opt/web3signer/bin/web3signer --version echo - docompose exec postgres pg_config --version + __docompose exec postgres pg_config --version echo ;;& *mev-boost.yml* ) - docompose exec mev-boost 
/app/mev-boost -version + __docompose exec mev-boost /app/mev-boost -version echo ;;& *grafana.yml* ) - docompose exec prometheus /bin/prometheus --version + __docompose exec prometheus /bin/prometheus --version echo echo -n "Grafana " - docompose exec grafana /run.sh -v + __docompose exec grafana /run.sh -v echo ;;& *traefik-*.yml* ) echo "Traefik" - docompose exec traefik traefik version + __docompose exec traefik traefik version echo ;;& esac @@ -3372,11 +3903,11 @@ __full_help() { echo " config" echo " configures ${__project_name} with your choice of Ethereum clients" echo " keys ACTION [--non-interactive]" - echo " list, delete, import keys; their fee recipients; and gas fees" + echo " list, count, delete, import keys; their fee recipients; and gas fees" echo " Run without ACTION to get help text" echo " update [--refresh-targets] [--non-interactive]" echo " updates all client versions and ${__project_name} itself" - echo " --refresh-targets will reset your custom build targets in ${ENV_FILE} to defaults" + echo " --refresh-targets will reset your custom build targets in ${__env_file} to defaults" echo " up|start [service-name]" echo " starts the Ethereum node, or restarts containers that had their image or" echo " configuration changed. Can also start a specific service by name" @@ -3433,7 +3964,7 @@ help() { } # Main body from here -ENV_FILE=.env +__env_file=.env __during_config=0 __during_update=0 __during_postgres=0 @@ -3445,7 +3976,7 @@ if [ ! -f ~/.profile ] || ! grep -q "alias ethd" ~/.profile; then __me="./$__me" fi -trap 'handle_error $? $LINENO' ERR +trap '__handle_error $? $LINENO' ERR if [[ "$#" -eq 0 || "$*" =~ "-h" ]]; then # Lazy match for -h and --help but also --histogram, so careful here help "$@" @@ -3467,11 +3998,11 @@ __command="$1" shift __params=$* -handle_root -determine_distro -prep_conffiles +__handle_root +__determine_distro +__prep_conffiles -check_for_snap +__check_for_snap # Don't check for Docker before it's installed if [ "$__command" = "install" ]; then @@ -3479,8 +4010,8 @@ if [ "$__command" = "install" ]; then exit "$?" fi -handle_docker_sudo -check_compose_version +__handle_docker_sudo +__check_compose_version if [ "${__old_compose}" -eq 1 ]; then echo @@ -3508,12 +4039,12 @@ if ! type -P whiptail >/dev/null 2>&1; then exit 0 fi -if ! dodocker images >/dev/null 2>&1; then +if ! __dodocker images >/dev/null 2>&1; then echo "Please ensure you can call $__docker_exe before running ${__project_name}." exit 0 fi -if ! docompose --help >/dev/null 2>&1; then +if ! __docompose --help >/dev/null 2>&1; then echo "Please ensure you can call $__compose_exe before running ${__project_name}." 
exit 0 fi @@ -3528,7 +4059,7 @@ case "$__command" in ;; esac -check_disk_space +__check_disk_space if [ "${__compose_upgraded}" -eq 1 ]; then echo diff --git a/flashbots/Dockerfile.source b/flashbots/Dockerfile.source index 5bce47be..653e596c 100644 --- a/flashbots/Dockerfile.source +++ b/flashbots/Dockerfile.source @@ -1,5 +1,5 @@ # Build in a stock Go build container -FROM golang:1.22-alpine AS builder +FROM golang:1.23-alpine AS builder # Unused, this is here to avoid build time complaints ARG DOCKER_TAG diff --git a/geth.yml b/geth.yml index 638f8b1c..b0c7ad5d 100644 --- a/geth.yml +++ b/geth.yml @@ -31,9 +31,10 @@ services: - ANCIENT_DIR=${ANCIENT_DIR:-} volumes: - geth-eth1-data:/var/lib/goethereum + - geth-el-data:/var/lib/geth - ${ANCIENT_DIR:-.nada}:/var/lib/ancient - /etc/localtime:/etc/localtime:ro - - jwtsecret:/var/lib/goethereum/ee-secret + - jwtsecret:/var/lib/geth/ee-secret ports: - ${HOST_IP:-}:${EL_P2P_PORT:-30303}:${EL_P2P_PORT:-30303}/tcp - ${HOST_IP:-}:${EL_P2P_PORT:-30303}:${EL_P2P_PORT:-30303}/udp @@ -51,8 +52,6 @@ services: - 0.0.0.0 - --http.vhosts=* - --http.corsdomain=* - - --datadir - - /var/lib/goethereum - --port - ${EL_P2P_PORT:-30303} - --http.port @@ -68,7 +67,7 @@ services: - --pprof.addr - 0.0.0.0 - --authrpc.jwtsecret - - /var/lib/goethereum/ee-secret/jwtsecret + - /var/lib/geth/ee-secret/jwtsecret - --authrpc.addr - 0.0.0.0 - --authrpc.port @@ -84,6 +83,7 @@ services: - metrics.network=${NETWORK} volumes: + geth-el-data: geth-eth1-data: jwtsecret: diff --git a/geth/Dockerfile.binary b/geth/Dockerfile.binary index 9d2263e2..a30f82a3 100644 --- a/geth/Dockerfile.binary +++ b/geth/Dockerfile.binary @@ -27,7 +27,8 @@ RUN adduser \ --ingroup "${USER}" \ "${USER}" -RUN mkdir -p /var/lib/goethereum/ee-secret && chown -R ${USER}:${USER} /var/lib/goethereum && chmod -R 700 /var/lib/goethereum && chmod 777 /var/lib/goethereum/ee-secret +RUN mkdir -p /var/lib/goethereum && chown -R ${USER}:${USER} /var/lib/goethereum && chmod -R 700 /var/lib/goethereum +RUN mkdir -p /var/lib/geth/ee-secret && chown -R ${USER}:${USER} /var/lib/geth && chmod -R 700 /var/lib/geth && chmod 777 /var/lib/geth/ee-secret # Cannot assume buildkit, hence no chmod COPY --chown=${USER}:${USER} ./docker-entrypoint.sh /usr/local/bin/ diff --git a/geth/Dockerfile.source b/geth/Dockerfile.source index a9ead3c3..69f55a81 100644 --- a/geth/Dockerfile.source +++ b/geth/Dockerfile.source @@ -1,5 +1,5 @@ # Build Geth in a stock Go build container -FROM golang:1.22-alpine AS builder +FROM golang:1.23-alpine AS builder # Unused, this is here to avoid build time complaints ARG DOCKER_TAG @@ -38,7 +38,8 @@ RUN adduser \ --ingroup "${USER}" \ "${USER}" -RUN mkdir -p /var/lib/goethereum/ee-secret && chown -R ${USER}:${USER} /var/lib/goethereum && chmod -R 700 /var/lib/goethereum && chmod 777 /var/lib/goethereum/ee-secret +RUN mkdir -p /var/lib/goethereum && chown -R ${USER}:${USER} /var/lib/goethereum && chmod -R 700 /var/lib/goethereum +RUN mkdir -p /var/lib/geth/ee-secret && chown -R ${USER}:${USER} /var/lib/geth && chmod -R 700 /var/lib/geth && chmod 777 /var/lib/geth/ee-secret # Cannot assume buildkit, hence no chmod COPY --from=builder --chown=${USER}:${USER} /src/go-ethereum/build/bin/geth /usr/local/bin/ diff --git a/geth/docker-entrypoint.sh b/geth/docker-entrypoint.sh index 1238801c..a16a09b6 100755 --- a/geth/docker-entrypoint.sh +++ b/geth/docker-entrypoint.sh @@ -2,28 +2,28 @@ set -euo pipefail if [ "$(id -u)" = '0' ]; then - chown -R geth:geth /var/lib/goethereum + chown -R geth:geth 
/var/lib/geth exec su-exec geth docker-entrypoint.sh "$@" fi if [ -n "${JWT_SECRET}" ]; then - echo -n "${JWT_SECRET}" > /var/lib/goethereum/ee-secret/jwtsecret + echo -n "${JWT_SECRET}" > /var/lib/geth/ee-secret/jwtsecret echo "JWT secret was supplied in .env" fi -if [[ ! -f /var/lib/goethereum/ee-secret/jwtsecret ]]; then +if [[ ! -f /var/lib/geth/ee-secret/jwtsecret ]]; then echo "Generating JWT secret" __secret1=$(head -c 8 /dev/urandom | od -A n -t u8 | tr -d '[:space:]' | sha256sum | head -c 32) __secret2=$(head -c 8 /dev/urandom | od -A n -t u8 | tr -d '[:space:]' | sha256sum | head -c 32) - echo -n "${__secret1}""${__secret2}" > /var/lib/goethereum/ee-secret/jwtsecret + echo -n "${__secret1}""${__secret2}" > /var/lib/geth/ee-secret/jwtsecret fi -if [[ -O "/var/lib/goethereum/ee-secret" ]]; then +if [[ -O "/var/lib/geth/ee-secret" ]]; then # In case someone specifies JWT_SECRET but it's not a distributed setup - chmod 777 /var/lib/goethereum/ee-secret + chmod 777 /var/lib/geth/ee-secret fi -if [[ -O "/var/lib/goethereum/ee-secret/jwtsecret" ]]; then - chmod 666 /var/lib/goethereum/ee-secret/jwtsecret +if [[ -O "/var/lib/geth/ee-secret/jwtsecret" ]]; then + chmod 666 /var/lib/geth/ee-secret/jwtsecret fi __ancient="" @@ -41,26 +41,33 @@ if [[ "${NETWORK}" =~ ^https?:// ]]; then echo "This appears to be the ${repo} repo, branch ${branch} and config directory ${config_dir}." # For want of something more amazing, let's just fail if git fails to pull this set -e - if [ ! -d "/var/lib/goethereum/testnet/${config_dir}" ]; then - mkdir -p /var/lib/goethereum/testnet - cd /var/lib/goethereum/testnet + if [ ! -d "/var/lib/geth/testnet/${config_dir}" ]; then + mkdir -p /var/lib/geth/testnet + cd /var/lib/geth/testnet git init --initial-branch="${branch}" git remote add origin "${repo}" git config core.sparseCheckout true echo "${config_dir}" > .git/info/sparse-checkout git pull origin "${branch}" fi - bootnodes="$(paste -s -d, "/var/lib/goethereum/testnet/${config_dir}/bootnode.txt")" - networkid="$(jq -r '.config.chainId' "/var/lib/goethereum/testnet/${config_dir}/genesis.json")" + bootnodes="$(paste -s -d, "/var/lib/geth/testnet/${config_dir}/bootnode.txt")" + networkid="$(jq -r '.config.chainId' "/var/lib/geth/testnet/${config_dir}/genesis.json")" set +e __network="--bootnodes=${bootnodes} --networkid=${networkid} --http.api=eth,net,web3,debug,admin,txpool" - if [ ! -d "/var/lib/goethereum/geth/chaindata/" ]; then - geth init --state.scheme path --datadir /var/lib/goethereum "/var/lib/goethereum/testnet/${config_dir}/genesis.json" + if [ ! 
-d "/var/lib/geth/geth/chaindata/" ]; then + geth init --datadir /var/lib/geth "/var/lib/geth/testnet/${config_dir}/genesis.json" fi else __network="--${NETWORK}" fi +# New or old datadir +if [ -d /var/lib/goethereum/geth/chaindata ]; then + __datadir="--datadir /var/lib/goethereum" +else + __datadir="--datadir /var/lib/geth" +fi + # Set verbosity shopt -s nocasematch case ${LOG_LEVEL} in @@ -92,13 +99,6 @@ else __prune="" fi -if [ "${IPV6}" = "true" ]; then - echo "Configuring Geth for discv5 for IPv6 advertisements" - __ipv6="--discv5" -else - __ipv6="" -fi - # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 -exec "$@" ${__ancient} ${__ipv6} ${__network} ${__prune} ${__verbosity} ${EL_EXTRAS} +exec "$@" ${__datadir} ${__ancient} ${__network} ${__prune} ${__verbosity} ${EL_EXTRAS} diff --git a/grafana-cloud.yml b/grafana-cloud.yml index 97f78b45..a0591e3f 100644 --- a/grafana-cloud.yml +++ b/grafana-cloud.yml @@ -43,7 +43,7 @@ services: ethereum-metrics-exporter: restart: "unless-stopped" - image: samcm/ethereum-metrics-exporter:0.23.0-debian + image: samcm/ethereum-metrics-exporter:0.24.0-debian entrypoint: - /ethereum-metrics-exporter - --consensus-url=${CL_NODE} @@ -67,10 +67,10 @@ services: pid: host restart: unless-stopped volumes: - - '/:/host:ro,rslave' + - /:/host:ro,rslave - /etc/hostname:/etc/nodename:ro - - /proc:/host/proc:ro - - /sys:/host/sys:ro + - /proc:/host/proc:ro,rslave + - /sys:/host/sys:ro,rslave - /etc/localtime:/etc/localtime:ro <<: *logging labels: @@ -112,10 +112,10 @@ services: image: gcr.io/cadvisor/cadvisor:v0.49.1 volumes: - /var/run/docker.sock:/var/run/docker.sock:ro - - /:/rootfs:ro + - /:/rootfs:ro,rslave - /var/run:/var/run - - /sys:/sys:ro - - /var/lib/docker:/var/lib/docker:ro + - /sys:/sys:ro,rslave + - /var/lib/docker:/var/lib/docker:ro,rslave command: - --docker_only - --housekeeping_interval=30s @@ -134,7 +134,7 @@ services: - /etc/machine-id:/etc/machine-id:ro - ./promtail:/etc/promtail - promtail-data:/tmp - - /var/lib/docker/containers:/var/lib/docker/containers:ro + - /var/lib/docker/containers:/var/lib/docker/containers:ro,rslave entrypoint: ./etc/promtail/entrypoint.sh command: ["/usr/bin/promtail"] environment: diff --git a/grafana-rootless.yml b/grafana-rootless.yml index 4fc2f2c9..18408291 100644 --- a/grafana-rootless.yml +++ b/grafana-rootless.yml @@ -34,7 +34,7 @@ services: ethereum-metrics-exporter: restart: "unless-stopped" - image: samcm/ethereum-metrics-exporter:0.23.0-debian + image: samcm/ethereum-metrics-exporter:0.24.0-debian entrypoint: - /ethereum-metrics-exporter - --consensus-url=${CL_NODE} diff --git a/grafana.yml b/grafana.yml index a7a8bab4..30dc9d03 100644 --- a/grafana.yml +++ b/grafana.yml @@ -35,7 +35,7 @@ services: ethereum-metrics-exporter: restart: "unless-stopped" - image: samcm/ethereum-metrics-exporter:0.23.0-debian + image: samcm/ethereum-metrics-exporter:0.24.0-debian entrypoint: - /ethereum-metrics-exporter - --consensus-url=${CL_NODE} @@ -60,10 +60,10 @@ services: pid: host restart: unless-stopped volumes: - - '/:/host:ro,rslave' + - /:/host:ro,rslave - /etc/hostname:/etc/nodename:ro - - /proc:/host/proc:ro - - /sys:/host/sys:ro + - /proc:/host/proc:ro,rslave + - /sys:/host/sys:ro,rslave - /etc/localtime:/etc/localtime:ro <<: *logging labels: @@ -105,10 +105,10 @@ services: image: gcr.io/cadvisor/cadvisor:v0.49.1 volumes: - /var/run/docker.sock:/var/run/docker.sock:ro - - /:/rootfs:ro + - /:/rootfs:ro,rslave - /var/run:/var/run - - /sys:/sys:ro - - 
/var/lib/docker:/var/lib/docker:ro + - /sys:/sys:ro,rslave + - /var/lib/docker:/var/lib/docker:ro,rslave command: - --docker_only - --housekeeping_interval=30s @@ -127,7 +127,7 @@ services: - /etc/machine-id:/etc/machine-id:ro - ./promtail:/etc/promtail - promtail-data:/tmp - - /var/lib/docker/containers:/var/lib/docker/containers:ro + - /var/lib/docker/containers:/var/lib/docker/containers:ro,rslave entrypoint: ./etc/promtail/entrypoint.sh command: ["/usr/bin/promtail"] environment: diff --git a/grafana/provision.sh b/grafana/provision.sh index 65be2f84..50f14d4b 100755 --- a/grafana/provision.sh +++ b/grafana/provision.sh @@ -27,15 +27,18 @@ case "$CLIENT" in # lighthouse_summary __url='https://raw.githubusercontent.com/sigp/lighthouse-metrics/master/dashboards/Summary.json' __file='/etc/grafana/provisioning/dashboards/lighthouse_summary.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Summary"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Summary"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" # lighthouse_validator_client __url='https://raw.githubusercontent.com/sigp/lighthouse-metrics/master/dashboards/ValidatorClient.json' __file='/etc/grafana/provisioning/dashboards/lighthouse_validator_client.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Validator Client"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Validator Client"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" # lighthouse_validator_monitor __url='https://raw.githubusercontent.com/sigp/lighthouse-metrics/master/dashboards/ValidatorMonitor.json' __file='/etc/grafana/provisioning/dashboards/lighthouse_validator_monitor.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Validator Monitor"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Validator Monitor"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" ;;& *teku* ) # teku_overview @@ -49,13 +52,15 @@ case "$CLIENT" in # nimbus_dashboard __url='https://raw.githubusercontent.com/status-im/nimbus-eth2/master/grafana/beacon_nodes_Grafana_dashboard.json' __file='/etc/grafana/provisioning/dashboards/nimbus_dashboard.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Nimbus Dashboard"' | jq 'walk(if . == "${DS_PROMETHEUS-PROXY}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Nimbus Dashboard"' \ + | jq 'walk(if . == "${DS_PROMETHEUS-PROXY}" then "Prometheus" else . end)' >"${__file}" ;;& *lodestar* ) # lodestar summary __url='https://raw.githubusercontent.com/ChainSafe/lodestar/stable/dashboards/lodestar_summary.json' __file='/etc/grafana/provisioning/dashboards/lodestar_summary.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lodestar Dashboard"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' \ + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lodestar Dashboard"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' \ | jq '.templating.list[3].query |= "consensus" | .templating.list[4].query |= "validator"' \ | jq 'walk(if . == "prometheus_local" then "Prometheus" else . 
end)' >"${__file}" ;;& @@ -63,7 +68,8 @@ case "$CLIENT" in # geth_dashboard __url='https://gist.githubusercontent.com/karalabe/e7ca79abdec54755ceae09c08bd090cd/raw/3a400ab90f9402f2233280afd086cb9d6aac2111/dashboard.json' __file='/etc/grafana/provisioning/dashboards/geth_dashboard.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Geth Dashboard"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Geth Dashboard"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" ;;& *erigon* ) # erigon_dashboard @@ -77,7 +83,8 @@ case "$CLIENT" in __revision=$(wget -t 3 -T 10 -qO - https://grafana.com/api/dashboards/${__id} | jq .revision) __url="https://grafana.com/api/dashboards/${__id}/revisions/${__revision}/download" __file='/etc/grafana/provisioning/dashboards/besu_dashboard.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Besu Dashboard"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Besu Dashboard"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" ;;& *reth* ) # reth_dashboard @@ -106,16 +113,29 @@ case "$CLIENT" in ;;& *ssv.yml* ) # SSV Operator Dashboard - __url='https://raw.githubusercontent.com/bloxapp/ssv/main/monitoring/grafana/dashboard_ssv_operator_performance.json' + __url='https://raw.githubusercontent.com/ssvlabs/ssv/main/monitoring/grafana/dashboard_ssv_operator_performance.json' __file='/etc/grafana/provisioning/dashboards/ssv_operator_dashboard.json' wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "SSV Operator Performance Dashboard"' \ - | jq '.templating.list[0].current |= {selected: false, text: "ssv-node", value: "ssv-node"} | .templating.list[0].options = [ { "selected": true, "text": "ssv-node", "value": "ssv-node" } ] | .templating.list[0].query = "ssv-node"' \ - | sed 's/eXfXfqH7z/Prometheus/g' >"${__file}" - __url='https://raw.githubusercontent.com/bloxapp/ssv/main/monitoring/grafana/dashboard_ssv_node.json' + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + __url='https://raw.githubusercontent.com/ssvlabs/ssv/main/monitoring/grafana/dashboard_ssv_node.json' __file='/etc/grafana/provisioning/dashboards/ssv_node_dashboard.json' wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "SSV Node Dashboard"' \ - | jq '.templating.list[0].current |= {selected: false, text: "ssv-node", value: "ssv-node"} | .templating.list[0].options = [ { "selected": true, "text": "ssv-node", "value": "ssv-node" } ] | .templating.list[0].query = "ssv-node"' \ - | sed 's/eXfXfqH7z/Prometheus/g' >"${__file}" + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . 
end)' >"${__file}" + ;;& + *lido-obol.yml* ) + # Lido Obol Dashboard + __url_charon='https://raw.githubusercontent.com/ObolNetwork/lido-charon-distributed-validator-node/main/grafana/dashboards/dash_charon_overview.json' + __file_charon='/etc/grafana/provisioning/dashboards/charon.json' + wget -t 3 -T 10 -qcO - "${__url_charon}" | sed 's/"uid": "prometheus"/"uid": "PBFA97CFB590B2093"/g' >"${__file_charon}" + __url_single_node='https://raw.githubusercontent.com/ObolNetwork/lido-charon-distributed-validator-node/main/grafana/dashboards/single_node_dashboard.json' + __file_single_node='/etc/grafana/provisioning/dashboards/single_node_dashboard.json' + wget -t 3 -T 10 -qcO - "${__url_single_node}" | sed 's/"uid": "prometheus"/"uid": "PBFA97CFB590B2093"/g' >"${__file_single_node}" + __url_validator_ejector='https://raw.githubusercontent.com/ObolNetwork/lido-charon-distributed-validator-node/main/grafana/dashboards/validator_ejector_overview.json' + __file_validator_ejector='/etc/grafana/provisioning/dashboards/validator_ejector_overview.json' + wget -t 3 -T 10 -qcO - "${__url_validator_ejector}" | sed 's/"uid": "prometheus"/"uid": "PBFA97CFB590B2093"/g' >"${__file_validator_ejector}" + __url_logs='https://raw.githubusercontent.com/ObolNetwork/lido-charon-distributed-validator-node/main/grafana/dashboards/logs_dashboard.json' + __file_logs='/etc/grafana/provisioning/dashboards/logs_dashboard.json' + wget -t 3 -T 10 -qcO - "${__url_logs}" | sed 's/"uid": "loki"/"uid": "P8E80F9AEF21F6940"/g' >"${__file_logs}" ;;& !(*grafana-rootless*) ) # cadvisor and node exporter dashboard diff --git a/grandine/docker-entrypoint.sh b/grandine/docker-entrypoint.sh index 8aafac91..5a0f62ee 100755 --- a/grandine/docker-entrypoint.sh +++ b/grandine/docker-entrypoint.sh @@ -83,6 +83,14 @@ if [ "${IPV6}" = "true" ]; then echo "Configuring Grandine to listen on IPv6 ports" __ipv6="--listen-address-ipv6 :: --libp2p-port-ipv6 ${CL_P2P_PORT:-9000} --discovery-port-ipv6 ${CL_P2P_PORT:-9000} \ --quic-port-ipv6 ${CL_QUIC_PORT:-9001}" +# ENR discovery on v6 is not yet working, likely too few peers. Manual for now + __ipv6_pattern="^[0-9A-Fa-f]{1,4}:" # Sufficient to check the start + set +e + __public_v6=$(curl -s -6 ifconfig.me) + set -e + if [[ "$__public_v6" =~ $__ipv6_pattern ]]; then + __ipv6+=" --enr-address-ipv6 ${__public_v6} --enr-tcp-port-ipv6 ${CL_P2P_PORT:-9000} --enr-udp-port-ipv6 ${CL_P2P_PORT:-9000}" + fi else __ipv6="" fi diff --git a/lido-obol.yml b/lido-obol.yml new file mode 100644 index 00000000..21cac711 --- /dev/null +++ b/lido-obol.yml @@ -0,0 +1,104 @@ +x-logging: &logging + logging: + driver: json-file + options: + max-size: 100m + max-file: "3" + tag: '{{.ImageName}}|{{.Name}}|{{.ImageFullID}}|{{.FullID}}' + +services: + charon: + restart: "unless-stopped" + image: obolnetwork/charon:${CHARON_VERSION:-latest} + volumes: + - .eth:/opt/charon/.charon + <<: *logging + environment: + - CHARON_BEACON_NODE_ENDPOINTS=${OBOL_CL_NODE:-http://consensus:5052} + - CHARON_LOG_LEVEL=${OBOL_LOG_LEVEL:-debug} + - CHARON_LOG_FORMAT=${OBOL_LOG_FORMAT:-console} + - CHARON_P2P_RELAYS=${OBOL_P2P_RELAYS:-https://0.relay.obol.tech,https://1.relay.obol.tech/} + - CHARON_P2P_EXTERNAL_HOSTNAME=${OBOL_P2P_EXTERNAL_HOSTNAME:-} # Empty default required to avoid warnings. 
+ - CHARON_P2P_TCP_ADDRESS=0.0.0.0:${OBOL_P2P_PORT:-3610} + - CHARON_VALIDATOR_API_ADDRESS=0.0.0.0:3600 + - CHARON_MONITORING_ADDRESS=0.0.0.0:3620 + - CHARON_BUILDER_API=${BUILDER_API_ENABLED:-true} + - CHARON_FEATURE_SET_ENABLE=eager_double_linear,consensus_participate + - CHARON_LOKI_ADDRESSES=${CHARON_LOKI_ADDRESSES:-http://loki:3100/loki/api/v1/push} + - CHARON_LOKI_SERVICE=charon + ports: + - ${OBOL_P2P_PORT:-3610}:${OBOL_P2P_PORT:-3610}/tcp # P2P TCP libp2p + healthcheck: + test: wget -qO- http://localhost:3620/readyz + labels: + - metrics.scrape=true + - metrics.path=/metrics + - metrics.port=3620 + - metrics.instance=charon + + charon-create-enr: + profiles: ["tools"] + restart: "no" + image: obolnetwork/charon:${CHARON_VERSION:-latest} + command: create enr + volumes: + - .eth:/opt/charon/.charon + charon-run-dkg: + profiles: ["tools"] + restart: "no" + image: obolnetwork/charon:${CHARON_VERSION:-latest} + volumes: + - .eth:/opt/charon/.charon + command: dkg --publish + curl-jq: + image: curl-jq:local + pull_policy: build + build: + context: ./traefik + dockerfile: Dockerfile.jq + restart: "no" + profiles: [ "tools" ] + + validator-ejector: + platform: linux/amd64 + image: lidofinance/validator-ejector:${VALIDATOR_EJECTOR_VERSION:-1.5.0} + user: ":" + volumes: + - .eth/lido-ejector:/exitmessages + restart: unless-stopped + environment: + - EXECUTION_NODE=${OBOL_EL_NODE:-http://execution:8545} + - CONSENSUS_NODE=${OBOL_CL_NODE:-http://consensus:5052} + - LOCATOR_ADDRESS=${VE_LOCATOR_ADDRESS:-0x28FAB2059C713A7F9D8c86Db49f9bb0e96Af1ef8} + - STAKING_MODULE_ID=${VE_STAKING_MODULE_ID:-2} + - OPERATOR_ID=${VE_OPERATOR_ID} + - ORACLE_ADDRESSES_ALLOWLIST=${VE_ORACLE_ADDRESSES_ALLOWLIST:-["0x140Bd8FbDc884f48dA7cb1c09bE8A2fAdfea776E","0xA7410857ABbf75043d61ea54e07D57A6EB6EF186","0x404335BcE530400a5814375E7Ec1FB55fAff3eA2","0x946D3b081ed19173dC83Cd974fC69e1e760B7d78","0x007DE4a5F7bc37E2F26c0cb2E8A95006EE9B89b5","0xEC4BfbAF681eb505B94E4a7849877DC6c600Ca3A","0x61c91ECd902EB56e314bB2D5c5C07785444Ea1c8","0x1Ca0fEC59b86F549e1F1184d97cb47794C8Af58d","0xc79F702202E3A6B0B6310B537E786B9ACAA19BAf"]} + - MESSAGES_LOCATION=/exitmessages + - RUN_METRICS=true + - HTTP_PORT=8989 + - DISABLE_SECURITY_DONT_USE_IN_PRODUCTION=${VE_DISABLE_SECURITY:-false} + - FORCE_DENCUN_FORK_MODE=true + labels: + - metrics.scrape=true + - metrics.path=/metrics + - metrics.port=8989 + - metrics.instance=validator-ejector + + lido-dv-exit: + image: obolnetwork/lido-dv-exit:${LIDO_DV_EXIT_VERSION:-e8bee1f} + user: ":" + volumes: + - .eth/lido-ejector:/exitmessages + - .eth:/charon + environment: + - LIDODVEXIT_BEACON_NODE_URL=${OBOL_CL_NODE:-http://consensus:5052} + - LIDODVEXIT_CHARON_RUNTIME_DIR=/charon + - LIDODVEXIT_EJECTOR_EXIT_PATH=/exitmessages + - LIDODVEXIT_EXIT_EPOCH=${LIDO_DV_EXIT_EXIT_EPOCH:-194048} + - LIDODVEXIT_LOG_LEVEL=${LIDO_DV_EXIT_LOG_LEVEL:-info} + - LIDODVEXIT_VALIDATOR_QUERY_CHUNK_SIZE=${LIDO_DV_EXIT_VALIDATOR_QUERY_CHUNK_SIZE:-5} + restart: on-failure + +networks: + default: + enable_ipv6: ${IPV6:-false} diff --git a/lighthouse-cl-only.yml b/lighthouse-cl-only.yml index 11dcd912..56918b66 100644 --- a/lighthouse-cl-only.yml +++ b/lighthouse-cl-only.yml @@ -63,6 +63,9 @@ services: - 0.0.0.0 - --http-port - ${CL_REST_PORT:-5052} + - --http-allow-origin=* + - --listen-address + - 0.0.0.0 - --port - ${CL_P2P_PORT:-9000} - --quic-port diff --git a/lighthouse-vc-only.yml b/lighthouse-vc-only.yml index 172c0292..499f3327 100644 --- a/lighthouse-vc-only.yml +++ b/lighthouse-vc-only.yml @@ -32,6 +32,7 @@ services: 
- GRAFFITI=${GRAFFITI:-} - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} volumes: - lhvalidator-data:/var/lib/lighthouse - /etc/localtime:/etc/localtime:ro diff --git a/lighthouse.yml b/lighthouse.yml index 1eacac90..504224cb 100644 --- a/lighthouse.yml +++ b/lighthouse.yml @@ -109,6 +109,7 @@ services: - GRAFFITI=${GRAFFITI:-} - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} volumes: - lhvalidator-data:/var/lib/lighthouse - /etc/localtime:/etc/localtime:ro diff --git a/lighthouse/docker-entrypoint-vc.sh b/lighthouse/docker-entrypoint-vc.sh index e97ae5ca..6531e35e 100755 --- a/lighthouse/docker-entrypoint-vc.sh +++ b/lighthouse/docker-entrypoint-vc.sh @@ -52,12 +52,19 @@ else __doppel="" fi +# Distributed attestation aggregation +if [ "${ENABLE_DIST_ATTESTATION_AGGR}" = "true" ]; then + __att_aggr="--distributed" +else + __att_aggr="" +fi + if [ "${DEFAULT_GRAFFITI}" = "true" ]; then # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} ${__mev_boost} ${__beacon_stats} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} ${__mev_boost} ${__beacon_stats} ${__doppel} ${__att_aggr} ${VC_EXTRAS} else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__mev_boost} ${__beacon_stats} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__mev_boost} ${__beacon_stats} ${__doppel} ${__att_aggr} ${VC_EXTRAS} fi diff --git a/lighthouse/docker-entrypoint.sh b/lighthouse/docker-entrypoint.sh index 324f21ef..0dd9137a 100755 --- a/lighthouse/docker-entrypoint.sh +++ b/lighthouse/docker-entrypoint.sh @@ -87,7 +87,15 @@ fi if [ "${IPV6}" = "true" ]; then echo "Configuring Lighthouse to listen on IPv6 ports" - __ipv6="--listen-address :: --port6 ${CL_P2P_PORT:-9000} --quic-port6 ${CL_QUIC_PORT:-9001}" + __ipv6="--listen-address :: --port6 ${CL_P2P_PORT:-9000} --enr-udp6-port ${CL_P2P_PORT:-9000} --quic-port6 ${CL_QUIC_PORT:-9001}" +# ENR discovery on v6 is not yet working, likely too few peers. 
Manual for now + __ipv6_pattern="^[0-9A-Fa-f]{1,4}:" # Sufficient to check the start + set +e + __public_v6=$(wget -6 -q -O- ifconfig.me) + set -e + if [[ "$__public_v6" =~ $__ipv6_pattern ]]; then + __ipv6+=" --enr-address ${__public_v6}" + fi else __ipv6="" fi diff --git a/lodestar-cl-only.yml b/lodestar-cl-only.yml index 9509fdeb..06e69b4a 100644 --- a/lodestar-cl-only.yml +++ b/lodestar-cl-only.yml @@ -39,11 +39,13 @@ services: - ARCHIVE_NODE=${ARCHIVE_NODE:-} - NETWORK=${NETWORK} - IPV6=${IPV6:-false} - - CL_P2P_PORT=${CL_P2P_PORT:-9000} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} - NODE_OPTIONS=${LODESTAR_HEAP:---max-old-space-size=8192} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/lodestar-vc-only.yml b/lodestar-vc-only.yml index 53adb2f4..70d5e9a5 100644 --- a/lodestar-vc-only.yml +++ b/lodestar-vc-only.yml @@ -33,6 +33,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} volumes: - lsvalidator-data:/var/lib/lodestar/validators - /etc/localtime:/etc/localtime:ro diff --git a/lodestar.yml b/lodestar.yml index e24aa32f..4ec4204b 100644 --- a/lodestar.yml +++ b/lodestar.yml @@ -39,11 +39,13 @@ services: - ARCHIVE_NODE=${ARCHIVE_NODE:-} - NETWORK=${NETWORK} - IPV6=${IPV6:-false} - - CL_P2P_PORT=${CL_P2P_PORT:-9000} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} - NODE_OPTIONS=${LODESTAR_HEAP:---max-old-space-size=8192} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: @@ -104,6 +106,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} volumes: - lsvalidator-data:/var/lib/lodestar/validators - /etc/localtime:/etc/localtime:ro diff --git a/lodestar/Dockerfile.binary b/lodestar/Dockerfile.binary index 2323f00a..e5cf2a03 100644 --- a/lodestar/Dockerfile.binary +++ b/lodestar/Dockerfile.binary @@ -7,7 +7,8 @@ FROM ${DOCKER_REPO}:${DOCKER_TAG} ARG BUILD_TARGET ARG SRC_REPO -RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates tzdata bash gosu git && apt-get clean && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates tzdata bash gosu git wget \ + && apt-get clean && rm -rf /var/lib/apt/lists/* ARG USER=lsconsensus ARG UID=10002 diff --git a/lodestar/Dockerfile.source b/lodestar/Dockerfile.source index dc5a2f3f..401eeadd 100644 --- a/lodestar/Dockerfile.source +++ b/lodestar/Dockerfile.source @@ -1,4 +1,4 @@ -FROM node:22.4-slim AS builder +FROM node:22-slim AS builder # Here only to avoid build-time errors ARG DOCKER_TAG @@ -16,9 +16,10 @@ RUN bash -c "cd .. 
&& rm -rf app && git clone ${SRC_REPO} app && cd app && git c && if [[ ${BUILD_TARGET} =~ pr-.+ ]]; then git fetch origin pull/$(echo ${BUILD_TARGET} | cut -d '-' -f 2)/head:ls-pr; git checkout ls-pr; else git checkout ${BUILD_TARGET}; fi \ && yarn install --non-interactive --frozen-lockfile && yarn build" -FROM node:22.4-slim +FROM node:22-slim -RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates tzdata bash gosu git && apt-get clean && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates tzdata bash gosu git wget \ + && apt-get clean && rm -rf /var/lib/apt/lists/* ARG USER=lsconsensus ARG UID=10002 diff --git a/lodestar/docker-entrypoint-vc.sh b/lodestar/docker-entrypoint-vc.sh index 4814e71b..3ebf5e17 100755 --- a/lodestar/docker-entrypoint-vc.sh +++ b/lodestar/docker-entrypoint-vc.sh @@ -60,12 +60,19 @@ else __w3s_url="" fi +# Distributed attestation aggregation +if [ "${ENABLE_DIST_ATTESTATION_AGGR}" = "true" ]; then + __att_aggr="--distributed" +else + __att_aggr="" +fi + if [ "${DEFAULT_GRAFFITI}" = "true" ]; then # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} ${__mev_boost} ${__beacon_stats} ${__doppel} ${__w3s_url} ${VC_EXTRAS} + exec "$@" ${__network} ${__mev_boost} ${__beacon_stats} ${__doppel} ${__w3s_url} ${__att_aggr} ${VC_EXTRAS} else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__mev_boost} ${__beacon_stats} ${__doppel} ${__w3s_url} ${VC_EXTRAS} + exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__mev_boost} ${__beacon_stats} ${__doppel} ${__w3s_url} ${__att_aggr} ${VC_EXTRAS} fi diff --git a/lodestar/docker-entrypoint.sh b/lodestar/docker-entrypoint.sh index 2b2b915f..7660cc99 100755 --- a/lodestar/docker-entrypoint.sh +++ b/lodestar/docker-entrypoint.sh @@ -85,7 +85,15 @@ fi if [ "${IPV6}" = "true" ]; then echo "Configuring Lodestar to listen on IPv6 ports" - __ipv6="--listenAddress6 :: --port6 ${CL_P2P_PORT:-9000}" + __ipv6="--listenAddress 0.0.0.0 --listenAddress6 :: --port6 ${CL_IPV6_P2P_PORT:-9090}" +# ENR discovery on v6 is not yet working, likely too few peers. Manual for now + __ipv6_pattern="^[0-9A-Fa-f]{1,4}:" # Sufficient to check the start + set +e + __public_v6=$(wget -6 -q -O- ifconfig.me) + set -e + if [[ "$__public_v6" =~ $__ipv6_pattern ]]; then + __ipv6+=" --enr.ip6 ${__public_v6}" + fi else __ipv6="" fi diff --git a/nethermind.yml b/nethermind.yml index f85d92b7..0a233b2c 100644 --- a/nethermind.yml +++ b/nethermind.yml @@ -29,7 +29,8 @@ services: - AUTOPRUNE_NM=${AUTOPRUNE_NM:-true} - NETWORK=${NETWORK} volumes: - - nm-eth1-data:/var/lib/nethermind + - nethermind-el-data:/var/lib/nethermind + - nm-eth1-data:/var/lib/nethermind-og - /etc/localtime:/etc/localtime:ro - jwtsecret:/var/lib/nethermind/ee-secret ports: @@ -44,8 +45,6 @@ services: entrypoint: - docker-entrypoint.sh - /nethermind/nethermind - - --datadir - - /var/lib/nethermind - --Init.WebSocketsEnabled - "true" - --Network.DiscoveryPort @@ -92,6 +91,7 @@ services: - metrics.network=${NETWORK} volumes: + nethermind-el-data: nm-eth1-data: jwtsecret: diff --git a/nethermind/Dockerfile.binary b/nethermind/Dockerfile.binary index 24bad15d..a2724458 100644 --- a/nethermind/Dockerfile.binary +++ b/nethermind/Dockerfile.binary @@ -36,6 +36,8 @@ RUN adduser \ # This only goes so far. 
keystore, logs and nethermind_db are volumes and need to be chown'd in the entrypoint RUN chown -R ${USER}:${USER} /nethermind +RUN mkdir -p /var/lib/nethermind-og && chown -R ${USER}:${USER} /var/lib/nethermind-og \ +&& chmod -R 700 /var/lib/nethermind-og RUN mkdir -p /var/lib/nethermind/ee-secret && chown -R ${USER}:${USER} /var/lib/nethermind \ && chmod -R 700 /var/lib/nethermind && chmod 777 /var/lib/nethermind/ee-secret diff --git a/nethermind/Dockerfile.source b/nethermind/Dockerfile.source index d86f92be..c27891c8 100644 --- a/nethermind/Dockerfile.source +++ b/nethermind/Dockerfile.source @@ -49,6 +49,8 @@ WORKDIR /nethermind COPY --from=builder --chown=${USER}:${USER} /nethermind/out . RUN chown -R ${USER}:${USER} /nethermind +RUN mkdir -p /var/lib/nethermind-og && chown -R ${USER}:${USER} /var/lib/nethermind-og \ +&& chmod -R 700 /var/lib/nethermind-og RUN mkdir -p /var/lib/nethermind/ee-secret && chown -R ${USER}:${USER} /var/lib/nethermind \ && chmod -R 700 /var/lib/nethermind && chmod 777 /var/lib/nethermind/ee-secret diff --git a/nethermind/docker-entrypoint.sh b/nethermind/docker-entrypoint.sh index 159f6916..63d94037 100755 --- a/nethermind/docker-entrypoint.sh +++ b/nethermind/docker-entrypoint.sh @@ -79,7 +79,7 @@ else fi fi if [ "${__memtotal}" -ge 30 ]; then - __prune="${__prune} --Pruning.CacheMb=4096 --Pruning.FullPruningMemoryBudgetMb=16384" + __prune="${__prune} --Pruning.FullPruningMemoryBudgetMb=16384" fi fi if [ -n "${__prune}" ]; then @@ -88,6 +88,13 @@ else fi fi +# New or old datadir +if [ -d /var/lib/nethermind-og/nethermind_db ]; then + __datadir="--datadir /var/lib/nethermind-og" +else + __datadir="--datadir /var/lib/nethermind" +fi + # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 -exec "$@" ${__network} ${__prune} ${EL_EXTRAS} +exec "$@" ${__datadir} ${__network} ${__prune} ${EL_EXTRAS} diff --git a/nimbus-allin1.yml b/nimbus-allin1.yml index 65ce944d..0d670be4 100644 --- a/nimbus-allin1.yml +++ b/nimbus-allin1.yml @@ -47,6 +47,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} - EMBEDDED_VC=true + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp diff --git a/nimbus-vc-only.yml b/nimbus-vc-only.yml index b5e12ca8..89440d5e 100644 --- a/nimbus-vc-only.yml +++ b/nimbus-vc-only.yml @@ -38,6 +38,7 @@ services: - GRAFFITI=${GRAFFITI:-} - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: diff --git a/nimbus.yml b/nimbus.yml index dabbf36f..951a9159 100644 --- a/nimbus.yml +++ b/nimbus.yml @@ -102,6 +102,7 @@ services: - GRAFFITI=${GRAFFITI:-} - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: diff --git a/nimbus/docker-entrypoint-vc.sh b/nimbus/docker-entrypoint-vc.sh index d9f6ae5a..5be960b4 100755 --- a/nimbus/docker-entrypoint-vc.sh +++ b/nimbus/docker-entrypoint-vc.sh @@ -50,12 +50,19 @@ else __w3s_url="" fi +# Distributed attestation aggregation +if [ "${ENABLE_DIST_ATTESTATION_AGGR}" = "true" ]; then + __att_aggr="--distributed" +else + __att_aggr="" +fi + if [ "${DEFAULT_GRAFFITI}" = "true" ]; then # Word splitting is desired for the command line parameters # shellcheck 
disable=SC2086 - exec "$@" ${__w3s_url} ${__log_level} ${__doppel} ${__mev_boost} ${VC_EXTRAS} + exec "$@" ${__w3s_url} ${__log_level} ${__doppel} ${__mev_boost} ${__att_aggr} ${VC_EXTRAS} else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__w3s_url} "--graffiti=${GRAFFITI}" ${__log_level} ${__doppel} ${__mev_boost} ${VC_EXTRAS} + exec "$@" ${__w3s_url} "--graffiti=${GRAFFITI}" ${__log_level} ${__doppel} ${__mev_boost} ${__att_aggr} ${VC_EXTRAS} fi diff --git a/prometheus/docker-entrypoint.sh b/prometheus/docker-entrypoint.sh index 6dee85a0..80529bbc 100755 --- a/prometheus/docker-entrypoint.sh +++ b/prometheus/docker-entrypoint.sh @@ -39,6 +39,10 @@ select_clients() { *ssv.yml* ) cp ./rootless/ssv-prom.yml ./rootless.d ;; esac + case "$CLIENT" in + *lido-obol.yml* ) cp ./rootless/lido-obol-prom.yml ./rootless.d ;; + esac + case "$CLIENT" in *traefik-* ) cp ./rootless/traefik-prom.yml ./rootless.d ;; esac diff --git a/prometheus/obol-prom.yml b/prometheus/obol-prom.yml new file mode 100644 index 00000000..a129ec02 --- /dev/null +++ b/prometheus/obol-prom.yml @@ -0,0 +1,8 @@ +remote_write: + - url: https://vm.monitoring.gcp.obol.tech/write + authorization: + credentials: OBOL_PROM_REMOTE_WRITE_TOKEN + write_relabel_configs: + - source_labels: [job] + regex: "charon" + action: keep # Keeps charon metrics and drop metrics from other containers. \ No newline at end of file diff --git a/prometheus/rootless/lido-obol-prom.yml b/prometheus/rootless/lido-obol-prom.yml new file mode 100644 index 00000000..6af549bb --- /dev/null +++ b/prometheus/rootless/lido-obol-prom.yml @@ -0,0 +1,11 @@ +scrape_configs: + - job_name: charon + metrics_path: /metrics + static_configs: + - targets: + - charon:3620 + - job_name: validator-ejector + metrics_path: /metrics + static_configs: + - targets: + - validator-ejector:8989 \ No newline at end of file diff --git a/prysm-cl-only.yml b/prysm-cl-only.yml index 1d12996a..0fc75795 100644 --- a/prysm-cl-only.yml +++ b/prysm-cl-only.yml @@ -87,7 +87,6 @@ services: - "8008" - --suggested-fee-recipient - ${FEE_RECIPIENT} - - --enable-debug-rpc-endpoints labels: - metrics.scrape=true - metrics.path=/metrics diff --git a/prysm-vc-only.yml b/prysm-vc-only.yml index e17fbcec..8c0b4785 100644 --- a/prysm-vc-only.yml +++ b/prysm-vc-only.yml @@ -37,6 +37,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: @@ -60,7 +61,6 @@ services: - 0.0.0.0 - --monitoring-port - "8009" - - --web - --grpc-gateway-host - 0.0.0.0 - --grpc-gateway-port @@ -70,8 +70,9 @@ services: - consensus:5052 - --suggested-fee-recipient - ${FEE_RECIPIENT} - - --wallet-password-file - - /var/lib/prysm/password.txt + - --enable-beacon-rest-api + - --beacon-rest-api-provider + - ${CL_NODE:-http://consensus:5052} labels: - traefik.enable=true - traefik.http.routers.prysm.entrypoints=web,websecure @@ -113,7 +114,9 @@ services: - validator - exit - --wallet-dir=/var/lib/prysm/ - - --beacon-rpc-provider=${CL_NODE:-http://consensus:4000} + - --enable-beacon-rest-api + - --beacon-rest-api-provider + - ${CL_NODE:-http://consensus:5052} - --wallet-password-file=/var/lib/prysm/password.txt - --${NETWORK} diff --git a/prysm.yml b/prysm.yml index 314a55b5..cde37e1c 100644 --- a/prysm.yml +++ b/prysm.yml @@ -89,7 +89,6 @@ services: - "8008" - --suggested-fee-recipient - ${FEE_RECIPIENT} - - 
--enable-debug-rpc-endpoints labels: - metrics.scrape=true - metrics.path=/metrics @@ -116,6 +115,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: @@ -139,7 +139,6 @@ services: - 0.0.0.0 - --monitoring-port - "8009" - - --web - --grpc-gateway-host - 0.0.0.0 - --grpc-gateway-port @@ -149,8 +148,9 @@ services: - consensus:5052 - --suggested-fee-recipient - ${FEE_RECIPIENT} - - --wallet-password-file - - /var/lib/prysm/password.txt + - --enable-beacon-rest-api + - --beacon-rest-api-provider + - ${CL_NODE:-http://consensus:5052} depends_on: - consensus labels: @@ -194,7 +194,9 @@ services: - validator - exit - --wallet-dir=/var/lib/prysm/ - - --beacon-rpc-provider=consensus:4000 + - --enable-beacon-rest-api + - --beacon-rest-api-provider + - ${CL_NODE:-http://consensus:5052} - --wallet-password-file=/var/lib/prysm/password.txt - --${NETWORK} depends_on: diff --git a/prysm/Dockerfile.source b/prysm/Dockerfile.source index 41c05663..11ce138b 100644 --- a/prysm/Dockerfile.source +++ b/prysm/Dockerfile.source @@ -1,5 +1,5 @@ # Build Prysm in a stock Go build container -FROM golang:1.22-bookworm AS builder +FROM golang:1.23-bookworm AS builder # Here only to avoid build-time errors ARG DOCKER_TAG diff --git a/prysm/docker-entrypoint-vc.sh b/prysm/docker-entrypoint-vc.sh index 5706c6a5..bdf93e6f 100755 --- a/prysm/docker-entrypoint-vc.sh +++ b/prysm/docker-entrypoint-vc.sh @@ -47,17 +47,30 @@ fi # Web3signer URL if [ "${WEB3SIGNER}" = "true" ]; then - __w3s_url="--validators-external-signer-url http://web3signer:9000 --validators-external-signer-public-keys http://web3signer:9000/api/v1/eth2/publicKeys" + __w3s_url="--validators-external-signer-url http://web3signer:9000 \ + --validators-external-signer-public-keys http://web3signer:9000/api/v1/eth2/publicKeys \ + --validators-external-signer-key-file=/var/lib/prysm/w3s-keys.txt" + + if [ ! -f /var/lib/prysm/w3s-keys.txt ]; then + touch /var/lib/prysm/w3s-keys.txt + fi +else + __w3s_url="--web --wallet-password-file /var/lib/prysm/password.txt" +fi + +# Distributed attestation aggregation +if [ "${ENABLE_DIST_ATTESTATION_AGGR}" = "true" ]; then + __att_aggr="--distributed" else - __w3s_url="" + __att_aggr="" fi if [ "${DEFAULT_GRAFFITI}" = "true" ]; then # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} ${__w3s_url} ${__mev_boost} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} ${__w3s_url} ${__mev_boost} ${__doppel} ${__att_aggr} ${VC_EXTRAS} else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__w3s_url} ${__mev_boost} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__w3s_url} ${__mev_boost} ${__doppel} ${__att_aggr} ${VC_EXTRAS} fi diff --git a/reth/docker-entrypoint.sh b/reth/docker-entrypoint.sh index a1cea933..f45c640c 100755 --- a/reth/docker-entrypoint.sh +++ b/reth/docker-entrypoint.sh @@ -74,17 +74,39 @@ case ${LOG_LEVEL} in ;; esac +__static="" +if [ -n "${STATIC_DIR}" ] && [ ! "${STATIC_DIR}" = ".nada" ]; then + echo "Using separate static files directory at ${STATIC_DIR}." + __static="--datadir.static-files /var/lib/static" +fi + if [ "${ARCHIVE_NODE}" = "true" ]; then echo "Reth archive node without pruning" __prune="" else __prune="--full" -fi + if [ ! 
-f "/var/lib/reth/reth.toml" ]; then # Configure ssv, rocketpool, stakewise contracts +# Word splitting is desired for the command line parameters +# shellcheck disable=SC2086 + reth init ${__network} --datadir /var/lib/reth ${__static} + cat <> /var/lib/reth/reth.toml -__static="" -if [ -n "${STATIC_DIR}" ] && [ ! "${STATIC_DIR}" = ".nada" ]; then - echo "Using separate static files directory at ${STATIC_DIR}." - __static="--datadir.static-files /var/lib/static" +[prune] +block_interval = 5 + +[prune.segments] +sender_recovery = "full" + +[prune.segments.receipts] +before = 0 + +[prune.segments.account_history] +distance = 10064 + +[prune.segments.storage_history] +distance = 10064 +EOF + fi fi if [ -f /var/lib/reth/prune-marker ]; then diff --git a/ssv-dkg.yml b/ssv-dkg.yml index bf3d5c5b..19b745f8 100644 --- a/ssv-dkg.yml +++ b/ssv-dkg.yml @@ -18,7 +18,7 @@ services: ssv-dkg: restart: "unless-stopped" - image: bloxstaking/ssv-dkg:${SSV_DKG_TAG:-latest} + image: ${SSV_DKG_REPO:-ssvlabs/ssv-dkg}:${SSV_DKG_TAG:-latest} volumes: - ./ssv-config:/config - ssv-dkg-tls:/ssl diff --git a/ssv.yml b/ssv.yml index 0df77f1c..cbf79e6a 100644 --- a/ssv.yml +++ b/ssv.yml @@ -9,7 +9,7 @@ x-logging: &logging services: ssv-node: restart: "unless-stopped" - image: bloxstaking/ssv-node:${SSV_NODE_TAG:-latest} + image: ${SSV_NODE_REPO:-ssvlabs/ssv-node}:${SSV_NODE_TAG:-latest} user: 12000:12000 volumes: - ./ssv-config:/config diff --git a/staking-deposit-cli/docker-entrypoint.sh b/staking-deposit-cli/docker-entrypoint.sh index 8ce9fb34..731e1984 100755 --- a/staking-deposit-cli/docker-entrypoint.sh +++ b/staking-deposit-cli/docker-entrypoint.sh @@ -8,6 +8,7 @@ set -Eeuo pipefail ARGS=() foundu=0 foundf=0 +foundnonint=0 uid=1000 folder="validator_keys" for var in "$@"; do @@ -19,6 +20,10 @@ for var in "$@"; do foundf=1 continue fi + if [ "$var" = '--non_interactive' ]; then + foundnonint=1 + continue + fi if [ "$foundu" = '1' ]; then foundu=0 if ! 
[[ $var =~ ^[0-9]+$ ]] ; then @@ -36,6 +41,16 @@ for var in "$@"; do ARGS+=("$var") done +for i in "${!ARGS[@]}"; do + if [ "${ARGS[$i]}" = '/app/staking_deposit/deposit.py' ]; then + if [ "$foundnonint" = '1' ]; then + # the flag should be before the command + ARGS=("${ARGS[@]:0:$i+1}" "--non_interactive" "${ARGS[@]:$i+1}") + fi + break + fi +done + su-exec depcli "${ARGS[@]}" if [[ "$*" =~ "generate-bls-to-execution-change" ]]; then diff --git a/teku-allin1.yml b/teku-allin1.yml index 3db9fd66..fa59e2c9 100644 --- a/teku-allin1.yml +++ b/teku-allin1.yml @@ -46,9 +46,14 @@ services: - WEB3SIGNER=${WEB3SIGNER:-false} - EMBEDDED_VC=true - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} + - IPV6=${IPV6:-false} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/teku-cl-only.yml b/teku-cl-only.yml index f62386d9..2c907aa4 100644 --- a/teku-cl-only.yml +++ b/teku-cl-only.yml @@ -46,10 +46,12 @@ services: - WEB3SIGNER=false - NETWORK=${NETWORK} - IPV6=${IPV6:-false} - - CL_P2P_PORT=${CL_P2P_PORT:-9000} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/teku-vc-only.yml b/teku-vc-only.yml index 50f19310..ef8c3c3b 100644 --- a/teku-vc-only.yml +++ b/teku-vc-only.yml @@ -35,6 +35,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: diff --git a/teku.yml b/teku.yml index 3682f501..01dee369 100644 --- a/teku.yml +++ b/teku.yml @@ -46,10 +46,12 @@ services: - EMBEDDED_VC=false - NETWORK=${NETWORK} - IPV6=${IPV6:-false} - - CL_P2P_PORT=${CL_P2P_PORT:-9000} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: @@ -105,6 +107,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: diff --git a/teku/Dockerfile.source b/teku/Dockerfile.source index 073105b3..23de739e 100644 --- a/teku/Dockerfile.source +++ b/teku/Dockerfile.source @@ -1,5 +1,5 @@ # Build Teku in a stock Ubuntu container -FROM eclipse-temurin:21-jdk-jammy AS builder +FROM eclipse-temurin:21-jdk-noble AS builder # This is here to avoid build-time complaints ARG DOCKER_TAG @@ -21,7 +21,7 @@ RUN bash -c "git clone ${SRC_REPO} teku \ && ./gradlew installDist" # Pull all binaries into a second stage deploy Ubuntu container -FROM eclipse-temurin:21-jre-jammy +FROM eclipse-temurin:21-jre-noble ARG USER=teku ARG UID=10002 @@ -30,6 +30,7 @@ RUN apt-get update && 
DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get install ca-certificates \ tzdata \ git \ + adduser \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/teku/docker-entrypoint-vc.sh b/teku/docker-entrypoint-vc.sh index 79b4c8d3..347ca797 100755 --- a/teku/docker-entrypoint-vc.sh +++ b/teku/docker-entrypoint-vc.sh @@ -79,12 +79,19 @@ else __w3s_url="" fi +# Distributed attestation aggregation +if [ "${ENABLE_DIST_ATTESTATION_AGGR}" = "true" ]; then + __att_aggr="--Xobol-dvt-integration-enabled=true" +else + __att_aggr="" +fi + if [ "${DEFAULT_GRAFFITI}" = "true" ]; then # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} ${__w3s_url} ${__mev_boost} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} ${__w3s_url} ${__mev_boost} ${__doppel} ${__att_aggr} ${VC_EXTRAS} else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} "--validators-graffiti=${GRAFFITI}" ${__w3s_url} ${__mev_boost} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} "--validators-graffiti=${GRAFFITI}" ${__w3s_url} ${__mev_boost} ${__doppel} ${__att_aggr} ${VC_EXTRAS} fi diff --git a/teku/docker-entrypoint.sh b/teku/docker-entrypoint.sh index 3c0488bf..68570c63 100755 --- a/teku/docker-entrypoint.sh +++ b/teku/docker-entrypoint.sh @@ -133,7 +133,25 @@ fi if [ "${IPV6}" = "true" ]; then echo "Configuring Teku to listen on IPv6 ports" - __ipv6="--p2p-interface 0.0.0.0,:: --p2p-port-ipv6 ${CL_P2P_PORT:-9000}" + __ipv6="--p2p-interface 0.0.0.0,:: --p2p-port-ipv6 ${CL_IPV6_P2P_PORT:-9090}" +# ENR discovery on v6 is not yet working, likely too few peers. Manual for now + __ipv4_pattern="^([0-9]{1,3}\.){3}[0-9]{1,3}$" + __ipv6_pattern="^[0-9A-Fa-f]{1,4}:" # Sufficient to check the start + set +e + __public_v4=$(curl -s -4 ifconfig.me) + __public_v6=$(curl -s -6 ifconfig.me) + set -e + __valid_v4=0 + if [[ "$__public_v4" =~ $__ipv4_pattern ]]; then + __valid_v4=1 + fi + if [[ "$__public_v6" =~ $__ipv6_pattern ]]; then + if [ "${__valid_v4}" -eq 1 ]; then + __ipv6+=" --p2p-advertised-ips ${__public_v4},${__public_v6}" + else + __ipv6+=" --p2p-advertised-ip ${__public_v6}" + fi + fi else __ipv6="" fi diff --git a/traefik-aws.yml b/traefik-aws.yml index 39cf51b5..4154491a 100644 --- a/traefik-aws.yml +++ b/traefik-aws.yml @@ -56,3 +56,7 @@ services: volumes: certs: + +networks: + default: + enable_ipv6: ${IPV6:-false} diff --git a/traefik-cf.yml b/traefik-cf.yml index e4066e56..29914e27 100644 --- a/traefik-cf.yml +++ b/traefik-cf.yml @@ -53,7 +53,7 @@ services: restart: "unless-stopped" environment: - LOG_LEVEL=${LOG_LEVEL:-info} - - 'CONFIG={"settings": [{"provider": "cloudflare", "zone_identifier": "${CF_ZONE_ID}", "domain": "${DOMAIN}", "host": "${DDNS_SUBDOMAIN}", "ttl": 1, "token": "${CF_DNS_API_TOKEN}", "proxied": ${DDNS_PROXY}, "ip_version": "ipv4"}]}' + - 'CONFIG={"settings": [{"provider": "cloudflare", "zone_identifier": "${CF_ZONE_ID}", "domain": "${DDNS_SUBDOMAIN}.${DOMAIN}", "ttl": 1, "token": "${CF_DNS_API_TOKEN}", "proxied": ${DDNS_PROXY}, "ip_version": "ipv4"}]}' volumes: - /etc/localtime:/etc/localtime:ro <<: *logging @@ -69,3 +69,7 @@ services: volumes: certs: + +networks: + default: + enable_ipv6: ${IPV6:-false} diff --git a/vc-utils/keymanager.sh b/vc-utils/keymanager.sh index d8a62482..3243c2f8 100755 --- a/vc-utils/keymanager.sh +++ b/vc-utils/keymanager.sh @@ -485,6 +485,40 @@ validator-list() { fi } +validator-count() { + __api_path=eth/v1/keystores + if [ "${WEB3SIGNER}" = "true" 
]; then + __token=NIL + __vc_api_container=${__api_container} + __api_container=web3signer + __vc_api_port=${__api_port} + __api_port=9000 + __vc_api_tls=${__api_tls} + __api_tls=false + else + get-token + fi + __validator-list-call + key_count=$(echo "$__result" | jq -r '.data | length') + echo "Validator keys loaded into ${__service}: $key_count" + + if [ "${WEB3SIGNER}" = "true" ]; then + get-token + __api_path=eth/v1/remotekeys + __api_container=${__vc_api_container} + __service=${__vc_service} + __api_port=${__vc_api_port} + __api_tls=${__vc_api_tls} + __validator-list-call + remote_key_count=$(echo "$__result" | jq -r '.data | length') + echo "Remote Validator keys registered with ${__service}: $remote_key_count" + if [ "${key_count}" -ne "${remote_key_count}" ]; then + echo "WARNING: The number of keys loaded into Web3signer and registered with the validator client differ." + echo "Please run \"./ethd keys register\"" + fi + fi +} + validator-delete() { if [ -z "${__pubkey}" ]; then echo "Please specify a validator public key to delete, or \"all\"" @@ -764,6 +798,14 @@ and secrets directories into .eth/validator_keys instead." fi if [ "$__eth2_val_tools" -eq 0 ] && [ "$__justone" -eq 0 ]; then while true; do + __passfile=${__keyfile/.json/.txt} + if [ -f "$__passfile" ]; then + echo "Password file is found: $__passfile" + __password=$(< "$__passfile") + break + else + echo "Password file $__passfile not found." + fi read -srp "Please enter the password for your validator key stored in $__keyfile with public key $__pubkey: " __password echo read -srp "Please re-enter the password: " __password2 @@ -886,11 +928,7 @@ and secrets directories into .eth/validator_keys instead." __api_port=${__vc_api_port} __api_tls=${__vc_api_tls} - if [ -z "${PRYSM:+x}" ]; then - jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. | .remote_keys += [{"pubkey": $pubkey_value, "url": $url_value}]' <<< '{}' >/tmp/apidata.txt - else - jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. | .remote_keys += [{"pubkey": $pubkey_value}]' <<< '{}' >/tmp/apidata.txt - fi + jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. | .remote_keys += [{"pubkey": $pubkey_value, "url": $url_value}]' <<< '{}' >/tmp/apidata.txt get-token __api_data=@/tmp/apidata.txt @@ -991,11 +1029,7 @@ validator-register() { __w3s_pubkeys="$(echo "$__result" | jq -r '.data[].validating_pubkey')" while IFS= read -r __pubkey; do - if [ -z "${PRYSM:+x}" ]; then - jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. | .remote_keys += [{"pubkey": $pubkey_value, "url": $url_value}]' <<< '{}' >/tmp/apidata.txt - else - jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. | .remote_keys += [{"pubkey": $pubkey_value}]' <<< '{}' >/tmp/apidata.txt - fi + jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. 
| .remote_keys += [{"pubkey": $pubkey_value, "url": $url_value}]' <<< '{}' >/tmp/apidata.txt __api_data=@/tmp/apidata.txt __api_path=eth/v1/remotekeys @@ -1064,6 +1098,8 @@ usage() { echo "Call keymanager with an ACTION, one of:" echo " list" echo " Lists the public keys of all validators currently loaded into your validator client" + echo " count" + echo " Counts the number of keys currently loaded into your validator client" echo " import" echo " Import all keystore*.json in .eth/validator_keys while loading slashing protection data" echo " in slashing_protection*.json files that match the public key(s) of the imported validator(s)" @@ -1214,6 +1250,9 @@ case "$3" in register) validator-register ;; + count) + validator-count + ;; get-recipient) __pubkey=$4 recipient-get