diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 769b889de4d..a80470cf167 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -54,6 +54,20 @@ jobs: done echo "skip_ci=$SKIP_CI" >> $GITHUB_OUTPUT + lockbud: + name: lockbud + runs-on: ubuntu-latest + container: + image: sigmaprime/lockbud:latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install dependencies + run: apt update && apt install -y cmake + - name: Check for deadlocks with lockbud + run: | + cargo lockbud -k deadlock -b -l tokio_util + target-branch-check: name: target-branch-check runs-on: ubuntu-latest @@ -173,8 +187,19 @@ jobs: channel: stable cache-target: release bins: cargo-nextest + - name: Create CI logger dir + run: mkdir ${{ runner.temp }}/network_test_logs - name: Run network tests for all known forks run: make test-network + env: + TEST_FEATURES: portable,ci_logger + CI_LOGGER_DIR: ${{ runner.temp }}/network_test_logs + - name: Upload logs + uses: actions/upload-artifact@v4 + with: + name: network_test_logs + path: ${{ runner.temp }}/network_test_logs + slasher-tests: name: slasher-tests needs: [check-labels] @@ -395,7 +420,7 @@ jobs: channel: stable cache-target: release - name: Run Makefile to trigger the bash script - run: make cli + run: make cli-local # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether # a PR is safe to merge. New jobs should be added here. test-suite-success: @@ -422,6 +447,7 @@ jobs: 'cargo-udeps', 'compile-with-beta-compiler', 'cli-check', + 'lockbud', ] steps: - uses: actions/checkout@v4 diff --git a/Cargo.lock b/Cargo.lock index 35691ceada9..ca96f085ecd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -155,9 +155,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-consensus" -version = "0.3.2" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a70d19a83dfee0cd4b16d28d2fc1c822a9a55935c672259dd8165e342c4147" +checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -187,9 +187,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.3.2" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ecd43a1ce87109f4d64efeff2b6fd0d7ff06afe93c4ffeb4e23bbb34d77ce84" +checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -204,9 +204,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb865df835f851b367ae439d6c82b117ded971628c8888b24fed411a290e38a" +checksum = "411aff151f2a73124ee473708e82ed51b2535f68928b6a1caa8bc1246ae6f7cd" dependencies = [ "alloy-rlp", "arbitrary", @@ -218,7 +218,7 @@ dependencies = [ "getrandom", "hex-literal", "itoa", - "k256 0.13.3", + "k256 0.13.4", "keccak-asm", "proptest", "proptest-derive", @@ -322,9 +322,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.87" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arbitrary" @@ -482,9 +482,9 @@ checksum = 
"7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -561,7 +561,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.36", + "rustix 0.38.37", "slab", "tracing", "windows-sys 0.59.0", @@ -654,9 +654,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "axum" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" dependencies = [ "async-trait", "axum-core", @@ -680,7 +680,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -688,9 +688,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" dependencies = [ "async-trait", "bytes", @@ -701,7 +701,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", "tracing", @@ -788,12 +788,12 @@ dependencies = [ "int_to_bytes", "itertools 0.10.5", "kzg", - "lighthouse_metrics", "lighthouse_version", "logging", "lru", "maplit", "merkle_proof", + "metrics", "oneshot_broadcast", "operation_pool", "parking_lot 0.12.3", @@ -864,9 +864,9 @@ dependencies = [ "fnv", "futures", "itertools 0.10.5", - "lighthouse_metrics", "lighthouse_network", "logging", + "metrics", "num_cpus", "parking_lot 0.12.3", "serde", @@ -1119,9 +1119,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] @@ -1202,9 +1202,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.18" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -1328,9 +1328,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.17" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" +checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ "clap_builder", "clap_derive", @@ -1338,9 +1338,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.17" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" +checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = [ "anstream", "anstyle", @@ -1351,9 +1351,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -1402,13 +1402,15 @@ dependencies = [ "genesis", "http_api", "http_metrics", - "lighthouse_metrics", + "kzg", "lighthouse_network", + "metrics", "monitoring_api", "network", "operation_pool", "sensitive_url", "serde", + "serde_json", "serde_yaml", "slasher", "slasher_service", @@ -2380,7 +2382,7 @@ dependencies = [ "bytes", "ed25519-dalek", "hex", - "k256 0.13.3", + "k256 0.13.4", "log", "rand", "serde", @@ -2490,9 +2492,9 @@ dependencies = [ "ethereum_ssz_derive", "execution_layer", "futures", - "lighthouse_metrics", "logging", "merkle_proof", + "metrics", "parking_lot 0.12.3", "sensitive_url", "serde", @@ -2528,6 +2530,7 @@ version = "0.1.0" dependencies = [ "account_utils", "bytes", + "derivative", "eth2_keystore", "ethereum_serde_utils", "ethereum_ssz", @@ -2616,6 +2619,7 @@ dependencies = [ "discv5", "eth2_config", "ethereum_ssz", + "kzg", "logging", "pretty_reqwest_error", "reqwest", @@ -2988,10 +2992,10 @@ dependencies = [ "jsonwebtoken", "keccak-hash", "kzg", - "lighthouse_metrics", "lighthouse_version", "logging", "lru", + "metrics", "parking_lot 0.12.3", "pretty_reqwest_error", "rand", @@ -2999,6 +3003,7 @@ dependencies = [ "sensitive_url", "serde", "serde_json", + "sha2 0.9.9", "slog", "slot_clock", "ssz_types", @@ -3188,7 +3193,7 @@ dependencies = [ "beacon_chain", "ethereum_ssz", "ethereum_ssz_derive", - "lighthouse_metrics", + "metrics", "proto_array", "slog", "state_processing", @@ -3315,7 +3320,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", ] @@ -3861,11 +3866,11 @@ dependencies = [ "futures", "genesis", "hex", - "lighthouse_metrics", "lighthouse_network", "lighthouse_version", "logging", "lru", + "metrics", "network", "operation_pool", "parking_lot 0.12.3", @@ -3895,11 +3900,11 @@ name = "http_metrics" version = "0.1.0" dependencies = [ "beacon_chain", - "lighthouse_metrics", "lighthouse_network", "lighthouse_version", "logging", "malloc_utils", + "metrics", "reqwest", "serde", "slog", @@ -4012,13 +4017,15 @@ dependencies = [ "hyper 1.4.1", "pin-project-lite", "tokio", + "tower 0.4.13", + "tower-service", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -4359,9 +4366,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa 
0.16.9", @@ -4382,9 +4389,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422fbc7ff2f2f5bdffeb07718e5a5324dca72b0c9293d50df4026652385e3314" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -4408,7 +4415,6 @@ dependencies = [ "c-kzg", "criterion", "derivative", - "eth2_network_config", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", @@ -4816,7 +4822,7 @@ dependencies = [ "quinn", "rand", "ring 0.17.8", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", @@ -4888,7 +4894,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -5026,11 +5032,11 @@ dependencies = [ "eth2_network_config", "ethereum_hashing", "futures", - "lighthouse_metrics", "lighthouse_network", "lighthouse_version", "logging", "malloc_utils", + "metrics", "sensitive_url", "serde", "serde_json", @@ -5048,13 +5054,6 @@ dependencies = [ "validator_manager", ] -[[package]] -name = "lighthouse_metrics" -version = "0.2.0" -dependencies = [ - "prometheus", -] - [[package]] name = "lighthouse_network" version = "0.2.0" @@ -5078,11 +5077,11 @@ dependencies = [ "itertools 0.10.5", "libp2p", "libp2p-mplex", - "lighthouse_metrics", "lighthouse_version", "logging", "lru", "lru_cache", + "metrics", "parking_lot 0.12.3", "prometheus-client", "quickcheck", @@ -5188,7 +5187,7 @@ name = "logging" version = "0.2.0" dependencies = [ "chrono", - "lighthouse_metrics", + "metrics", "parking_lot 0.12.3", "serde", "serde_json", @@ -5244,7 +5243,7 @@ name = "malloc_utils" version = "0.1.0" dependencies = [ "libc", - "lighthouse_metrics", + "metrics", "parking_lot 0.12.3", "tikv-jemalloc-ctl", "tikv-jemallocator", @@ -5360,6 +5359,13 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "metrics" +version = "0.2.0" +dependencies = [ + "prometheus", +] + [[package]] name = "migrations_internals" version = "2.2.0" @@ -5458,8 +5464,8 @@ name = "monitoring_api" version = "0.1.0" dependencies = [ "eth2", - "lighthouse_metrics", "lighthouse_version", + "metrics", "regex", "reqwest", "sensitive_url", @@ -5624,10 +5630,12 @@ dependencies = [ "async-channel", "beacon_chain", "beacon_processor", + "bls", "delay_map", "derivative", "error-chain", "eth2", + "eth2_network_config", "ethereum_ssz", "execution_layer", "fnv", @@ -5637,14 +5645,16 @@ dependencies = [ "hex", "igd-next", "itertools 0.10.5", - "lighthouse_metrics", + "kzg", "lighthouse_network", "logging", "lru_cache", "matches", + "metrics", "operation_pool", "parking_lot 0.12.3", "rand", + "serde_json", "slog", "slog-async", "slog-term", @@ -5940,8 +5950,8 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.10.5", - "lighthouse_metrics", "maplit", + "metrics", "parking_lot 0.12.3", "rand", "rayon", @@ -6080,7 +6090,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.3", + "redox_syscall 0.5.4", "smallvec", "windows-targets 0.52.6", ] @@ -6150,9 +6160,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c73c26c01b8c87956cea613c907c9d6ecffd8d18a2a5908e5de0adfaa185cea" +checksum = 
"fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ "memchr", "thiserror", @@ -6289,7 +6299,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.36", + "rustix 0.38.37", "tracing", "windows-sys 0.59.0", ] @@ -6337,9 +6347,9 @@ dependencies = [ [[package]] name = "postgres-types" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02048d9e032fb3cc3413bbf7b83a15d84a5d419778e2628751896d856498eee9" +checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f" dependencies = [ "bytes", "fallible-iterator", @@ -6363,9 +6373,9 @@ dependencies = [ [[package]] name = "pq-sys" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92c30dd81695321846d4dfe348da67b1752ebb61cd1549d203a7b57e323c435" +checksum = "f6cc05d7ea95200187117196eee9edd0644424911821aeb28a18ce60ea0b8793" dependencies = [ "vcpkg", ] @@ -6430,7 +6440,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -6625,7 +6635,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", @@ -6642,7 +6652,7 @@ dependencies = [ "rand", "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "slab", "thiserror", "tinyvec", @@ -6777,9 +6787,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.3" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4760ad04a88ef77075ba86ba9ea79b919e6bab29c1764c5747237cd6eaedcaa" +checksum = "074373f3e7e5d27d8741d19512232adb47be8622d3daef3a45bcae72050c3d2a" dependencies = [ "libc", ] @@ -6795,9 +6805,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -7151,9 +7161,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.36" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -7190,9 +7200,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "once_cell", "ring 0.17.8", @@ -7424,9 +7434,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -7651,9 +7661,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = 
"0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d79b758b7cb2085612b11a235055e485605a5103faccdd633f35bd7aee69dd" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ "cc", "cfg-if", @@ -7724,6 +7734,7 @@ dependencies = [ "eth2_network_config", "execution_layer", "futures", + "kzg", "node_test_rig", "parking_lot 0.12.3", "rayon", @@ -7760,12 +7771,12 @@ dependencies = [ "filesystem", "flate2", "libmdbx", - "lighthouse_metrics", "lmdb-rkv", "lmdb-rkv-sys", "logging", "lru", "maplit", + "metrics", "parking_lot 0.12.3", "rand", "rayon", @@ -7921,7 +7932,7 @@ dependencies = [ name = "slot_clock" version = "0.2.0" dependencies = [ - "lighthouse_metrics", + "metrics", "parking_lot 0.12.3", "types", ] @@ -8049,8 +8060,8 @@ dependencies = [ "int_to_bytes", "integer-sqrt", "itertools 0.10.5", - "lighthouse_metrics", "merkle_proof", + "metrics", "rand", "rayon", "safe_arith", @@ -8090,8 +8101,8 @@ dependencies = [ "ethereum_ssz_derive", "itertools 0.10.5", "leveldb", - "lighthouse_metrics", "lru", + "metrics", "parking_lot 0.12.3", "redb", "safe_arith", @@ -8302,11 +8313,12 @@ version = "0.1.0" dependencies = [ "async-channel", "futures", - "lighthouse_metrics", "logging", + "metrics", "slog", "sloggers", "tokio", + "tracing", ] [[package]] @@ -8318,7 +8330,7 @@ dependencies = [ "cfg-if", "fastrand", "once_cell", - "rustix 0.38.36", + "rustix 0.38.37", "windows-sys 0.59.0", ] @@ -8348,7 +8360,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.36", + "rustix 0.38.37", "windows-sys 0.48.0", ] @@ -8387,18 +8399,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", @@ -8600,9 +8612,9 @@ dependencies = [ [[package]] name = "tokio-postgres" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03adcf0147e203b6032c0b2d30be1415ba03bc348901f3ff1cc0df6a733e60c3" +checksum = "3b5d3742945bc7d7f210693b0c58ae542c6fd47b17adbbda0885f3dcb34a6bdb" dependencies = [ "async-trait", "byteorder", @@ -8690,7 +8702,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -8715,9 +8727,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap 2.5.0", "serde", @@ -8739,6 +8751,21 @@ dependencies = [ "tokio", "tower-layer", "tower-service", +] + +[[package]] +name = "tower" +version = "0.5.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tower-layer", + "tower-service", "tracing", ] @@ -9012,15 +9039,15 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -9033,9 +9060,9 @@ checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" [[package]] name = "unicode-xid" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "universal-hash" @@ -9140,11 +9167,11 @@ dependencies = [ "hyper 1.4.1", "itertools 0.10.5", "libsecp256k1", - "lighthouse_metrics", "lighthouse_version", "lockfile", "logging", "malloc_utils", + "metrics", "monitoring_api", "parking_lot 0.12.3", "rand", @@ -9308,7 +9335,7 @@ dependencies = [ "bytes", "eth2", "headers", - "lighthouse_metrics", + "metrics", "safe_arith", "serde", "serde_array_query", @@ -9518,7 +9545,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.5.3", + "redox_syscall 0.5.4", "wasite", "web-sys", ] diff --git a/Cargo.toml b/Cargo.toml index 125231ad20e..7094ff60776 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,7 @@ members = [ "common/eth2_interop_keypairs", "common/eth2_network_config", "common/eth2_wallet_manager", - "common/lighthouse_metrics", + "common/metrics", "common/lighthouse_version", "common/lockfile", "common/logging", @@ -95,7 +95,7 @@ resolver = "2" edition = "2021" [workspace.dependencies] -alloy-primitives = "0.8" +alloy-primitives = { version = "0.8", features = ["rlp", "getrandom"] } alloy-rlp = "0.3.4" alloy-consensus = "0.3.0" anyhow = "1" @@ -141,6 +141,7 @@ milhouse = "0.3" num_cpus = "1" parking_lot = "0.12" paste = "1" +prometheus = "0.13" quickcheck = "1" quickcheck_macros = "1" quote = "1" @@ -213,7 +214,7 @@ gossipsub = { path = "beacon_node/lighthouse_network/gossipsub/" } http_api = { path = "beacon_node/http_api" } int_to_bytes = { path = "consensus/int_to_bytes" } kzg = { path = "crypto/kzg" } -lighthouse_metrics = { path = "common/lighthouse_metrics" } +metrics = { path = "common/metrics" } lighthouse_network = { path = "beacon_node/lighthouse_network" } lighthouse_version = { path = "common/lighthouse_version" } lockfile = { path = "common/lockfile" } diff --git a/Makefile b/Makefile index 671eacbe2d3..0e1600a1959 100644 --- a/Makefile +++ b/Makefile @@ -183,7 +183,7 @@ test-exec-engine: # test vectors. 
test: test-release -# Updates the CLI help text pages in the Lighthouse book, building with Docker. +# Updates the CLI help text pages in the Lighthouse book, building with Docker (primarily for Windows users). cli: docker run --rm --user=root \ -v ${PWD}:/home/runner/actions-runner/lighthouse sigmaprime/github-runner \ @@ -204,7 +204,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. lint: - cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ + cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ -D clippy::fn_to_numeric_cast_any \ -D clippy::manual_let_else \ -D clippy::large_stack_frames \ diff --git a/README.md b/README.md index 11a87b81fef..4b22087bcdc 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ as the canonical staking deposit contract address. The [Lighthouse Book](https://lighthouse-book.sigmaprime.io) contains information for users and developers. -The Lighthouse team maintains a blog at [lighthouse-blog.sigmaprime.io][blog] which contains periodic +The Lighthouse team maintains a blog at [https://blog.sigmaprime.io/tag/lighthouse][blog] which contains periodic progress updates, roadmap insights and interesting findings. ## Branches diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 0dc941df90c..b0fa0131808 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -42,7 +42,7 @@ hex = { workspace = true } int_to_bytes = { workspace = true } itertools = { workspace = true } kzg = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } lighthouse_version = { workspace = true } logging = { workspace = true } lru = { workspace = true } diff --git a/beacon_node/beacon_chain/benches/benches.rs b/beacon_node/beacon_chain/benches/benches.rs index 4a29be90251..b2f17062dce 100644 --- a/beacon_node/beacon_chain/benches/benches.rs +++ b/beacon_node/beacon_chain/benches/benches.rs @@ -1,11 +1,11 @@ use std::sync::Arc; use beacon_chain::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; +use beacon_chain::test_utils::get_kzg; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use bls::Signature; -use eth2_network_config::TRUSTED_SETUP_BYTES; -use kzg::{Kzg, KzgCommitment, TrustedSetup}; +use kzg::KzgCommitment; use types::{ beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, @@ -35,11 +35,7 @@ fn all_benches(c: &mut Criterion) { type E = MainnetEthSpec; let spec = Arc::new(E::default_spec()); - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) - .map_err(|e| format!("Unable to read trusted setup file: {}", e)) - .expect("should have trusted setup"); - let kzg = Arc::new(Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg")); - + let kzg = get_kzg(&spec); for blob_count in [1, 2, 3, 6] { let kzg = kzg.clone(); let (signed_block, blob_sidecars) = create_test_block_and_blobs::(blob_count, &spec); diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 491271d6a9e..9ee0b01df36 100644 --- 
a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -1144,13 +1144,14 @@ pub fn verify_propagation_slot_range( let current_fork = spec.fork_name_at_slot::(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?); - let earliest_permissible_slot = if !current_fork.deneb_enabled() { - one_epoch_prior - // EIP-7045 - } else { + + let earliest_permissible_slot = if current_fork.deneb_enabled() { + // EIP-7045 one_epoch_prior .epoch(E::slots_per_epoch()) .start_slot(E::slots_per_epoch()) + } else { + one_epoch_prior }; if attestation_slot < earliest_permissible_slot { diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index ace5f0be74a..b76dba88fd0 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -1,5 +1,5 @@ use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BlockProcessStatus}; -use execution_layer::{ExecutionLayer, ExecutionPayloadBody}; +use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; use slog::{crit, debug, error, Logger}; use std::collections::HashMap; use std::sync::Arc; @@ -57,7 +57,7 @@ struct BodiesByRange { struct BlockParts { blinded_block: Box>, header: Box>, - body: Option>>, + body: Option>>, } impl BlockParts { @@ -711,6 +711,7 @@ mod tests { use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType}; use execution_layer::test_utils::Block; + use std::sync::Arc; use std::sync::LazyLock; use tokio::sync::mpsc; use types::{ @@ -725,7 +726,7 @@ mod tests { fn get_harness( validator_count: usize, - spec: ChainSpec, + spec: Arc, ) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MinimalEthSpec) .spec(spec) @@ -756,6 +757,7 @@ mod tests { spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); spec.electra_fork_epoch = Some(Epoch::new(electra_fork_epoch as u64)); + let spec = Arc::new(spec); let harness = get_harness(VALIDATOR_COUNT, spec.clone()); // go to bellatrix fork diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9b9614ef891..5b91286baae 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -22,7 +22,7 @@ pub use crate::canonical_head::CanonicalHead; use crate::chain_config::ChainConfig; use crate::data_availability_checker::{ Availability, AvailabilityCheckError, AvailableBlock, DataAvailabilityChecker, - DataColumnsToPublish, + DataColumnReconstructionResult, }; use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use crate::early_attester_cache::EarlyAttesterCache; @@ -371,7 +371,7 @@ type ReqRespPreImportCache = HashMap>>; /// Represents the "Beacon Chain" component of Ethereum 2.0. Allows import of blocks and block /// operations and chooses a canonical head. pub struct BeaconChain { - pub spec: ChainSpec, + pub spec: Arc, /// Configuration for `BeaconChain` runtime behaviour. pub config: ChainConfig, /// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB. @@ -497,7 +497,7 @@ pub struct BeaconChain { /// they are collected and combined. pub data_availability_checker: Arc>, /// The KZG trusted setup used by this chain. 
- pub kzg: Option>, + pub kzg: Arc, } pub enum BeaconBlockResponseWrapper { @@ -2620,11 +2620,7 @@ impl BeaconChain { /// Check if the current slot is greater than or equal to the Capella fork epoch. pub fn current_slot_is_post_capella(&self) -> Result { let current_fork = self.spec.fork_name_at_slot::(self.slot()?); - if let ForkName::Base | ForkName::Altair | ForkName::Bellatrix = current_fork { - Ok(false) - } else { - Ok(true) - } + Ok(current_fork.capella_enabled()) } /// Import a BLS to execution change to the op pool. @@ -2741,7 +2737,10 @@ impl BeaconChain { // If the block is relevant, add it to the filtered chain segment. Ok(_) => filtered_chain_segment.push((block_root, block)), // If the block is already known, simply ignore this block. - Err(BlockError::BlockIsAlreadyKnown(_)) => continue, + // + // Note that `check_block_relevancy` is incapable of returning + // `DuplicateImportStatusUnknown` so we don't need to handle that case here. + Err(BlockError::DuplicateFullyImported(_)) => continue, // If the block is the genesis block, simply ignore this block. Err(BlockError::GenesisBlock) => continue, // If the block is for a finalized slot, simply ignore this block. @@ -2887,7 +2886,7 @@ impl BeaconChain { } } } - Err(BlockError::BlockIsAlreadyKnown(block_root)) => { + Err(BlockError::DuplicateFullyImported(block_root)) => { debug!(self.log, "Ignoring already known blocks while processing chain segment"; "block_root" => ?block_root); @@ -2978,6 +2977,7 @@ impl BeaconChain { pub async fn process_gossip_blob( self: &Arc, blob: GossipVerifiedBlob, + publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { let block_root = blob.block_root(); @@ -2988,7 +2988,7 @@ impl BeaconChain { .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(blob.block_root())); + return Err(BlockError::DuplicateFullyImported(blob.block_root())); } // No need to process and import blobs beyond the PeerDAS epoch. @@ -3004,7 +3004,9 @@ impl BeaconChain { } } - let r = self.check_gossip_blob_availability_and_import(blob).await; + let r = self + .check_gossip_blob_availability_and_import(blob, publish_fn) + .await; self.remove_notified(&block_root, r) } @@ -3013,13 +3015,8 @@ impl BeaconChain { pub async fn process_gossip_data_columns( self: &Arc, data_columns: Vec>, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + publish_fn: impl FnOnce() -> Result<(), BlockError>, + ) -> Result { let Ok((slot, block_root)) = data_columns .iter() .map(|c| (c.slot(), c.block_root())) @@ -3038,13 +3035,18 @@ .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(block_root)); + return Err(BlockError::DuplicateFullyImported(block_root)); } let r = self - .check_gossip_data_columns_availability_and_import(slot, block_root, data_columns) + .check_gossip_data_columns_availability_and_import( + slot, + block_root, + data_columns, + publish_fn, + ) .await; - self.remove_notified_custody_columns(&block_root, r) + self.remove_notified(&block_root, r) } /// Cache the blobs in the processing cache, process it, then evict it from the cache if it was @@ -3062,7 +3064,7 @@ impl BeaconChain { .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(block_root)); + return Err(BlockError::DuplicateFullyImported(block_root)); } // Reject RPC blobs referencing unknown parents. 
Otherwise we allow potentially invalid data @@ -3103,13 +3105,7 @@ pub async fn process_rpc_custody_columns( self: &Arc, custody_columns: DataColumnSidecarList, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { let Ok((slot, block_root)) = custody_columns .iter() .map(|c| (c.slot(), c.block_root())) @@ -3128,7 +3124,7 @@ .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(block_root)); + return Err(BlockError::DuplicateFullyImported(block_root)); } // Reject RPC columns referencing unknown parents. Otherwise we allow potentially invalid data @@ -3147,7 +3143,67 @@ let r = self .check_rpc_custody_columns_availability_and_import(slot, block_root, custody_columns) .await; - self.remove_notified_custody_columns(&block_root, r) + self.remove_notified(&block_root, r) + } + + pub async fn reconstruct_data_columns( + self: &Arc, + block_root: Hash256, + ) -> Result< + Option<( + AvailabilityProcessingStatus, + DataColumnSidecarList, + )>, + BlockError, + > { + // As of now we only reconstruct data columns on supernodes, so if the block is already + // available on a supernode, there's no need to reconstruct as the node must already have + // all columns. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Ok(None); + } + + let data_availability_checker = self.data_availability_checker.clone(); + + let result = self + .task_executor + .spawn_blocking_handle( + move || data_availability_checker.reconstruct_data_columns(&block_root), + "reconstruct_data_columns", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::TokioJoin)??; + + match result { + DataColumnReconstructionResult::Success((availability, data_columns_to_publish)) => { + let Some(slot) = data_columns_to_publish.first().map(|d| d.slot()) else { + // This should be unreachable because an empty result would return `RecoveredColumnsNotImported` instead of success. + return Ok(None); + }; + + let r = self + .process_availability(slot, availability, || Ok(())) + .await; + self.remove_notified(&block_root, r) + .map(|availability_processing_status| { + Some((availability_processing_status, data_columns_to_publish)) + }) + } + DataColumnReconstructionResult::NotStarted(reason) + | DataColumnReconstructionResult::RecoveredColumnsNotImported(reason) => { + // We use a metric here because logging this would be *very* noisy. + metrics::inc_counter_vec( + &metrics::KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL, + &[reason], + ); + Ok(None) + } + } } /// Remove any block components from the *processing cache* if we no longer require them. If the @@ -3165,23 +3221,6 @@ r } - /// Remove any block components from the *processing cache* if we no longer require them. If the - /// block was imported full or erred, we no longer require them. - fn remove_notified_custody_columns
<P>
( - &self, - block_root: &Hash256, - r: Result<(AvailabilityProcessingStatus, P), BlockError>, - ) -> Result<(AvailabilityProcessingStatus, P), BlockError> { - let has_missing_components = matches!( - r, - Ok((AvailabilityProcessingStatus::MissingComponents(_, _), _)) - ); - if !has_missing_components { - self.reqresp_pre_import_cache.write().remove(block_root); - } - r - } - /// Wraps `process_block` in logic to cache the block's commitments in the processing cache /// and evict if the block was imported or errored. pub async fn process_block_with_early_caching>( @@ -3226,7 +3265,7 @@ impl BeaconChain { unverified_block: B, notify_execution_layer: NotifyExecutionLayer, block_source: BlockImportSource, - publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, + publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -3408,7 +3447,8 @@ impl BeaconChain { let availability = self .data_availability_checker .put_pending_executed_block(block)?; - self.process_availability(slot, availability).await + self.process_availability(slot, availability, || Ok(())) + .await } /// Checks if the provided blob can make any cached blocks available, and imports immediately @@ -3416,6 +3456,7 @@ impl BeaconChain { async fn check_gossip_blob_availability_and_import( self: &Arc, blob: GossipVerifiedBlob, + publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { let slot = blob.slot(); if let Some(slasher) = self.slasher.as_ref() { @@ -3423,7 +3464,8 @@ impl BeaconChain { } let availability = self.data_availability_checker.put_gossip_blob(blob)?; - self.process_availability(slot, availability).await + self.process_availability(slot, availability, publish_fn) + .await } /// Checks if the provided data column can make any cached blocks available, and imports immediately @@ -3433,26 +3475,22 @@ impl BeaconChain { slot: Slot, block_root: Hash256, data_columns: Vec>, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + publish_fn: impl FnOnce() -> Result<(), BlockError>, + ) -> Result { if let Some(slasher) = self.slasher.as_ref() { for data_colum in &data_columns { slasher.accept_block_header(data_colum.signed_block_header()); } } - let (availability, data_columns_to_publish) = self - .data_availability_checker - .put_gossip_data_columns(slot, block_root, data_columns)?; + let availability = self.data_availability_checker.put_gossip_data_columns( + slot, + block_root, + data_columns, + )?; - self.process_availability(slot, availability) + self.process_availability(slot, availability, publish_fn) .await - .map(|result| (result, data_columns_to_publish)) } /// Checks if the provided blobs can make any cached blocks available, and imports immediately @@ -3491,7 +3529,8 @@ impl BeaconChain { .data_availability_checker .put_rpc_blobs(block_root, epoch, blobs)?; - self.process_availability(slot, availability).await + self.process_availability(slot, availability, || Ok(())) + .await } /// Checks if the provided columns can make any cached blocks available, and imports immediately @@ -3501,13 +3540,7 @@ impl BeaconChain { slot: Slot, block_root: Hash256, custody_columns: DataColumnSidecarList, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { // Need to scope this to ensure the lock is dropped before calling `process_availability` // Even an explicit drop is not enough 
to convince the borrow checker. { @@ -3532,16 +3565,14 @@ impl BeaconChain { // This slot value is purely informative for the consumers of // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. - let (availability, data_columns_to_publish) = - self.data_availability_checker.put_rpc_custody_columns( - block_root, - slot.epoch(T::EthSpec::slots_per_epoch()), - custody_columns, - )?; + let availability = self.data_availability_checker.put_rpc_custody_columns( + block_root, + slot.epoch(T::EthSpec::slots_per_epoch()), + custody_columns, + )?; - self.process_availability(slot, availability) + self.process_availability(slot, availability, || Ok(())) .await - .map(|result| (result, data_columns_to_publish)) } /// Imports a fully available block. Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` @@ -3552,9 +3583,11 @@ impl BeaconChain { self: &Arc, slot: Slot, availability: Availability, + publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { match availability { Availability::Available(block) => { + publish_fn()?; // Block is fully available, import into fork choice self.import_available_block(block).await } @@ -3837,6 +3870,8 @@ impl BeaconChain { } if let Some(data_columns) = data_columns { + // TODO(das): `available_block includes all sampled columns, but we only need to store + // custody columns. To be clarified in spec. if !data_columns.is_empty() { debug!( self.log, "Writing data_columns to store"; @@ -5537,10 +5572,15 @@ impl BeaconChain { ) } BeaconState::Deneb(_) => { - let (payload, kzg_commitments, maybe_blobs_and_proofs, execution_payload_value) = - block_contents - .ok_or(BlockProductionError::MissingExecutionPayload)? - .deconstruct(); + let ( + payload, + kzg_commitments, + maybe_blobs_and_proofs, + _maybe_requests, + execution_payload_value, + ) = block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? + .deconstruct(); ( BeaconBlock::Deneb(BeaconBlockDeneb { @@ -5575,10 +5615,15 @@ impl BeaconChain { ) } BeaconState::Electra(_) => { - let (payload, kzg_commitments, maybe_blobs_and_proofs, execution_payload_value) = - block_contents - .ok_or(BlockProductionError::MissingExecutionPayload)? - .deconstruct(); + let ( + payload, + kzg_commitments, + maybe_blobs_and_proofs, + maybe_requests, + execution_payload_value, + ) = block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? 
+ .deconstruct(); ( BeaconBlock::Electra(BeaconBlockElectra { @@ -5603,6 +5648,8 @@ impl BeaconChain { bls_to_execution_changes: bls_to_execution_changes.into(), blob_kzg_commitments: kzg_commitments .ok_or(BlockProductionError::InvalidPayloadFork)?, + execution_requests: maybe_requests + .ok_or(BlockProductionError::MissingExecutionRequests)?, }, }), maybe_blobs_and_proofs, @@ -5683,10 +5730,8 @@ impl BeaconChain { let kzg_proofs = Vec::from(proofs); - let kzg = self - .kzg - .as_ref() - .ok_or(BlockProductionError::TrustedSetupNotInitialized)?; + let kzg = self.kzg.as_ref(); + kzg_utils::validate_blobs::( kzg, expected_kzg_commitments, @@ -5929,26 +5974,23 @@ impl BeaconChain { payload_attributes } else { let prepare_slot_fork = self.spec.fork_name_at_slot::(prepare_slot); - let withdrawals = match prepare_slot_fork { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix => None, - ForkName::Capella | ForkName::Deneb | ForkName::Electra => { - let chain = self.clone(); - self.spawn_blocking_handle( - move || { - chain.get_expected_withdrawals(&forkchoice_update_params, prepare_slot) - }, - "prepare_beacon_proposer_withdrawals", - ) - .await? - .map(Some)? - } + + let withdrawals = if prepare_slot_fork.capella_enabled() { + let chain = self.clone(); + self.spawn_blocking_handle( + move || chain.get_expected_withdrawals(&forkchoice_update_params, prepare_slot), + "prepare_beacon_proposer_withdrawals", + ) + .await? + .map(Some)? + } else { + None }; - let parent_beacon_block_root = match prepare_slot_fork { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => None, - ForkName::Deneb | ForkName::Electra => { - Some(pre_payload_attributes.parent_beacon_block_root) - } + let parent_beacon_block_root = if prepare_slot_fork.deneb_enabled() { + Some(pre_payload_attributes.parent_beacon_block_root) + } else { + None }; let payload_attributes = PayloadAttributes::new( @@ -6094,27 +6136,27 @@ impl BeaconChain { // `execution_engine_forkchoice_lock` apart from the one here. let forkchoice_lock = execution_layer.execution_engine_forkchoice_lock().await; - let (head_block_root, head_hash, justified_hash, finalized_hash) = if let Some(head_hash) = - params.head_hash - { - ( - params.head_root, - head_hash, - params - .justified_hash - .unwrap_or_else(ExecutionBlockHash::zero), - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // The head block does not have an execution block hash. We must check to see if we - // happen to be the proposer of the transition block, in which case we still need to - // send forkchoice_updated. - match self.spec.fork_name_at_slot::(next_slot) { - // We are pre-bellatrix; no need to update the EL. - ForkName::Base | ForkName::Altair => return Ok(()), - _ => { + let (head_block_root, head_hash, justified_hash, finalized_hash) = + if let Some(head_hash) = params.head_hash { + ( + params.head_root, + head_hash, + params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) + } else { + // The head block does not have an execution block hash. We must check to see if we + // happen to be the proposer of the transition block, in which case we still need to + // send forkchoice_updated. 
+ if self + .spec + .fork_name_at_slot::(next_slot) + .bellatrix_enabled() + { // We are post-bellatrix if let Some(payload_attributes) = execution_layer .payload_attributes(next_slot, params.head_root) @@ -6148,9 +6190,10 @@ impl BeaconChain { // We are not a proposer, no need to update the EL. return Ok(()); } + } else { + return Ok(()); } - } - }; + }; let forkchoice_updated_response = execution_layer .notify_forkchoice_updated( @@ -6993,7 +7036,6 @@ impl BeaconChain { .finalized_checkpoint() .epoch .sync_committee_period(&self.spec)?; - self.light_client_server_cache.get_light_client_bootstrap( &self.store, block_root, diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index e4646d62882..743748a76d9 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -115,13 +115,6 @@ pub enum GossipBlobError { index: u64, }, - /// `Kzg` struct hasn't been initialized. This is an internal error. - /// - /// ## Peer scoring - /// - /// The peer isn't faulty, This is an internal error. - KzgNotInitialized, - /// The kzg verification failed. /// /// ## Peer scoring @@ -559,11 +552,9 @@ pub fn validate_blob_sidecar_for_gossip( } // Kzg verification for gossip blob sidecar - let kzg = chain - .kzg - .as_ref() - .ok_or(GossipBlobError::KzgNotInitialized)?; - let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar, kzg, seen_timestamp) + let kzg = chain.kzg.as_ref(); + + let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar.clone(), kzg, seen_timestamp) .map_err(GossipBlobError::KzgError)?; let blob_sidecar = &kzg_verified_blob.blob; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 55547aaa18c..527462ab64c 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -49,18 +49,14 @@ #![allow(clippy::result_large_err)] use crate::beacon_snapshot::PreProcessingSnapshot; -use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob, GossipVerifiedBlobList}; -use crate::block_verification_types::{ - AsBlock, BlockContentsError, BlockImportData, GossipVerifiedBlockContents, RpcBlock, -}; +use crate::blob_verification::GossipBlobError; +use crate::block_verification_types::{AsBlock, BlockImportData, RpcBlock}; use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock}; -use crate::data_column_verification::{ - GossipDataColumnError, GossipVerifiedDataColumn, GossipVerifiedDataColumnList, -}; +use crate::data_column_verification::GossipDataColumnError; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ - is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, - AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, + validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, + NotifyExecutionLayer, PayloadNotifier, }; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_block_producers::SeenBlock; @@ -71,18 +67,17 @@ use crate::{ metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; -use eth2::types::{BlockGossip, EventKind, PublishBlockRequest}; +use eth2::types::{BlockGossip, EventKind}; use execution_layer::PayloadStatus; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; -use lighthouse_metrics::TryExt; +use metrics::TryExt; use 
parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; -use slog::{debug, error, warn, Logger}; +use slog::{debug, error, Logger}; use slot_clock::SlotClock; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block}; use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, @@ -98,14 +93,12 @@ use std::io::Write; use std::sync::Arc; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; -use types::data_column_sidecar::DataColumnSidecarError; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSubnetId, Epoch, - EthSpec, ExecutionBlockHash, FullPayload, Hash256, InconsistentFork, KzgProofs, PublicKey, - PublicKeyBytes, RelativeEpoch, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, - Slot, + data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, + BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, + Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, + SignedBeaconBlockHeader, Slot, }; -use types::{BlobSidecar, ExecPayload}; pub const POS_PANDA_BANNER: &str = r#" ,,, ,,, ,,, ,,, @@ -187,12 +180,18 @@ pub enum BlockError { /// It's unclear if this block is valid, but it conflicts with finality and shouldn't be /// imported. NotFinalizedDescendant { block_parent_root: Hash256 }, - /// Block is already known, no need to re-import. + /// Block is already known and valid, no need to re-import. /// /// ## Peer scoring /// /// The block is valid and we have already imported a block with this hash. - BlockIsAlreadyKnown(Hash256), + DuplicateFullyImported(Hash256), + /// Block has already been seen on gossip but has not necessarily finished being imported. + /// + /// ## Peer scoring + /// + /// The block could be valid, or invalid. We don't know. + DuplicateImportStatusUnknown(Hash256), /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. 
/// /// ## Peer scoring @@ -704,123 +703,57 @@ pub struct ExecutionPendingBlock { pub payload_verification_handle: PayloadVerificationHandle, } -pub trait IntoGossipVerifiedBlockContents: Sized { +pub trait IntoGossipVerifiedBlock: Sized { fn into_gossip_verified_block( self, chain: &BeaconChain, - ) -> Result, BlockContentsError>; - fn inner_block(&self) -> &SignedBeaconBlock; + ) -> Result, BlockError>; + fn inner_block(&self) -> Arc>; } -impl IntoGossipVerifiedBlockContents for GossipVerifiedBlockContents { +impl IntoGossipVerifiedBlock for GossipVerifiedBlock { fn into_gossip_verified_block( self, _chain: &BeaconChain, - ) -> Result, BlockContentsError> { + ) -> Result, BlockError> { Ok(self) } - fn inner_block(&self) -> &SignedBeaconBlock { - self.0.block.as_block() + fn inner_block(&self) -> Arc> { + self.block_cloned() } } -impl IntoGossipVerifiedBlockContents for PublishBlockRequest { +impl IntoGossipVerifiedBlock for Arc> { fn into_gossip_verified_block( self, chain: &BeaconChain, - ) -> Result, BlockContentsError> { - let (block, blobs) = self.deconstruct(); - let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); - - let (gossip_verified_blobs, gossip_verified_data_columns) = if peer_das_enabled { - let gossip_verified_data_columns = - build_gossip_verified_data_columns(chain, &block, blobs.map(|(_, blobs)| blobs))?; - (None, gossip_verified_data_columns) - } else { - let gossip_verified_blobs = build_gossip_verified_blobs(chain, &block, blobs)?; - (gossip_verified_blobs, None) - }; - - let gossip_verified_block = GossipVerifiedBlock::new(block, chain)?; - - Ok(( - gossip_verified_block, - gossip_verified_blobs, - gossip_verified_data_columns, - )) + ) -> Result, BlockError> { + GossipVerifiedBlock::new(self, chain) } - fn inner_block(&self) -> &SignedBeaconBlock { - self.signed_block() + fn inner_block(&self) -> Arc> { + self.clone() } } -#[allow(clippy::type_complexity)] -fn build_gossip_verified_blobs( - chain: &BeaconChain, - block: &Arc>>, - blobs: Option<(KzgProofs, BlobsList)>, -) -> Result>, BlockContentsError> { - blobs - .map(|(kzg_proofs, blobs)| { - let mut gossip_verified_blobs = vec![]; - for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { - let _timer = - metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION); - let blob = BlobSidecar::new(i, blob, block, *kzg_proof) - .map_err(BlockContentsError::BlobSidecarError)?; - drop(_timer); - let gossip_verified_blob = - GossipVerifiedBlob::new(Arc::new(blob), i as u64, chain)?; - gossip_verified_blobs.push(gossip_verified_blob); - } - let gossip_verified_blobs = VariableList::from(gossip_verified_blobs); - Ok::<_, BlockContentsError>(gossip_verified_blobs) - }) - .transpose() -} - -fn build_gossip_verified_data_columns( +pub fn build_blob_data_column_sidecars( chain: &BeaconChain, block: &SignedBeaconBlock>, - blobs: Option>, -) -> Result>, BlockContentsError> { - blobs - // Only attempt to build data columns if blobs is non empty to avoid skewing the metrics. 
- .filter(|b| !b.is_empty()) - .map(|blobs| { - // NOTE: we expect KZG to be initialized if the blobs are present - let kzg = chain - .kzg - .as_ref() - .ok_or(BlockContentsError::DataColumnError( - GossipDataColumnError::KzgNotInitialized, - ))?; - - let mut timer = metrics::start_timer_vec( - &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, - &[&blobs.len().to_string()], - ); - let sidecars = blobs_to_data_column_sidecars(&blobs, block, kzg, &chain.spec) - .discard_timer_on_break(&mut timer)?; - drop(timer); - let mut gossip_verified_data_columns = vec![]; - for sidecar in sidecars { - let subnet = DataColumnSubnetId::from_column_index::( - sidecar.index as usize, - &chain.spec, - ); - let column = GossipVerifiedDataColumn::new(sidecar, subnet.into(), chain)?; - gossip_verified_data_columns.push(column); - } - let gossip_verified_data_columns = RuntimeVariableList::new( - gossip_verified_data_columns, - chain.spec.number_of_columns, - ) - .map_err(DataColumnSidecarError::SszError)?; - Ok::<_, BlockContentsError>(gossip_verified_data_columns) - }) - .transpose() + blobs: BlobsList, +) -> Result, DataColumnSidecarError> { + // Only attempt to build data columns if blobs is non empty to avoid skewing the metrics. + if blobs.is_empty() { + return Ok(vec![]); + } + + let mut timer = metrics::start_timer_vec( + &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, + &[&blobs.len().to_string()], + ); + let sidecars = blobs_to_data_column_sidecars(&blobs, block, &chain.kzg, &chain.spec) + .discard_timer_on_break(&mut timer)?; + drop(timer); + Ok(sidecars) } /// Implemented on types that can be converted into a `ExecutionPendingBlock`. @@ -920,7 +853,7 @@ impl GossipVerifiedBlock { // already know this block. let fork_choice_read_lock = chain.canonical_head.fork_choice_read_lock(); if fork_choice_read_lock.contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(block_root)); + return Err(BlockError::DuplicateFullyImported(block_root)); } // Do not process a block that doesn't descend from the finalized root. @@ -1054,7 +987,9 @@ impl GossipVerifiedBlock { SeenBlock::Slashable => { return Err(BlockError::Slashable); } - SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown(block_root)), + SeenBlock::Duplicate => { + return Err(BlockError::DuplicateImportStatusUnknown(block_root)) + } SeenBlock::UniqueNonSlashable => {} }; @@ -1453,28 +1388,6 @@ impl ExecutionPendingBlock { } let payload_verification_status = payload_notifier.notify_new_payload().await?; - // If the payload did not validate or invalidate the block, check to see if this block is - // valid for optimistic import. - if payload_verification_status.is_optimistic() { - let block_hash_opt = block - .message() - .body() - .execution_payload() - .map(|full_payload| full_payload.block_hash()); - - // Ensure the block is a candidate for optimistic import. - if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? 
- { - warn!( - chain.log, - "Rejecting optimistic block"; - "block_hash" => ?block_hash_opt, - "msg" => "the execution engine is not synced" - ); - return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); - } - } - Ok(PayloadVerificationOutcome { payload_verification_status, is_valid_merge_transition_block, @@ -1902,7 +1815,7 @@ pub fn check_block_relevancy( .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(block_root)); + return Err(BlockError::DuplicateFullyImported(block_root)); } Ok(block_root) diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 707dfa56d84..420c83081c7 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -1,19 +1,14 @@ -use crate::blob_verification::{GossipBlobError, GossipVerifiedBlobList}; -use crate::block_verification::BlockError; use crate::data_availability_checker::AvailabilityCheckError; pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; -use crate::data_column_verification::{ - CustodyDataColumn, CustodyDataColumnList, GossipDataColumnError, GossipVerifiedDataColumnList, -}; +use crate::data_column_verification::{CustodyDataColumn, CustodyDataColumnList}; use crate::eth1_finalization_cache::Eth1FinalizationData; -use crate::{get_block_root, GossipVerifiedBlock, PayloadVerificationOutcome}; +use crate::{get_block_root, PayloadVerificationOutcome}; use derivative::Derivative; use ssz_types::VariableList; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; use std::sync::Arc; -use types::blob_sidecar::{self, BlobIdentifier, FixedBlobSidecarList}; -use types::data_column_sidecar::{self}; +use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; use types::{ BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, @@ -390,67 +385,6 @@ impl BlockImportData { } } -pub type GossipVerifiedBlockContents = ( - GossipVerifiedBlock, - Option>, - Option>, -); - -#[derive(Debug)] -pub enum BlockContentsError { - BlockError(BlockError), - BlobError(GossipBlobError), - BlobSidecarError(blob_sidecar::BlobSidecarError), - DataColumnError(GossipDataColumnError), - DataColumnSidecarError(data_column_sidecar::DataColumnSidecarError), -} - -impl From for BlockContentsError { - fn from(value: BlockError) -> Self { - Self::BlockError(value) - } -} - -impl From for BlockContentsError { - fn from(value: GossipBlobError) -> Self { - Self::BlobError(value) - } -} - -impl From for BlockContentsError { - fn from(value: GossipDataColumnError) -> Self { - Self::DataColumnError(value) - } -} - -impl From for BlockContentsError { - fn from(value: data_column_sidecar::DataColumnSidecarError) -> Self { - Self::DataColumnSidecarError(value) - } -} - -impl std::fmt::Display for BlockContentsError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - BlockContentsError::BlockError(err) => { - write!(f, "BlockError({})", err) - } - BlockContentsError::BlobError(err) => { - write!(f, "BlobError({})", err) - } - BlockContentsError::BlobSidecarError(err) => { - write!(f, "BlobSidecarError({:?})", err) - } - BlockContentsError::DataColumnError(err) => { - write!(f, "DataColumnError({:?})", err) - } - BlockContentsError::DataColumnSidecarError(err) => { - write!(f, 
"DataColumnSidecarError({:?})", err) - } - } - } -} - /// Trait for common block operations. pub trait AsBlock { fn slot(&self) -> Slot; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index d38530b9049..5f1e94fc8c6 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -93,7 +93,7 @@ pub struct BeaconChainBuilder { light_client_server_tx: Option>>, head_tracker: Option, validator_pubkey_cache: Option>, - spec: ChainSpec, + spec: Arc, chain_config: ChainConfig, log: Option, beacon_graffiti: GraffitiOrigin, @@ -101,7 +101,7 @@ pub struct BeaconChainBuilder { // Pending I/O batch that is constructed during building and should be executed atomically // alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called. pending_io_batch: Vec, - kzg: Option>, + kzg: Arc, task_executor: Option, validator_monitor_config: Option, import_all_data_columns: bool, @@ -120,7 +120,7 @@ where /// /// The `_eth_spec_instance` parameter is only supplied to make concrete the `E` trait. /// This should generally be either the `MinimalEthSpec` or `MainnetEthSpec` types. - pub fn new(_eth_spec_instance: E) -> Self { + pub fn new(_eth_spec_instance: E, kzg: Arc) -> Self { Self { store: None, store_migrator_config: None, @@ -137,13 +137,13 @@ where light_client_server_tx: None, head_tracker: None, validator_pubkey_cache: None, - spec: E::default_spec(), + spec: Arc::new(E::default_spec()), chain_config: ChainConfig::default(), log: None, beacon_graffiti: GraffitiOrigin::default(), slasher: None, pending_io_batch: vec![], - kzg: None, + kzg, task_executor: None, validator_monitor_config: None, import_all_data_columns: false, @@ -154,7 +154,7 @@ where /// /// This method should generally be called immediately after `Self::new` to ensure components /// are started with a consistent spec. - pub fn custom_spec(mut self, spec: ChainSpec) -> Self { + pub fn custom_spec(mut self, spec: Arc) -> Self { self.spec = spec; self } @@ -694,11 +694,6 @@ where self } - pub fn kzg(mut self, kzg: Option>) -> Self { - self.kzg = kzg; - self - } - /// Consumes `self`, returning a `BeaconChain` if all required parameters have been supplied. /// /// An error will be returned at runtime if all required parameters have not been configured. 
@@ -989,6 +984,7 @@ where store, self.import_all_data_columns, self.spec, + log.new(o!("service" => "data_availability_checker")), ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), @@ -1157,7 +1153,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String { #[cfg(test)] mod test { use super::*; - use crate::test_utils::EphemeralHarnessType; + use crate::test_utils::{get_kzg, EphemeralHarnessType}; use ethereum_hashing::hash; use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, @@ -1188,8 +1184,12 @@ mod test { MinimalEthSpec, MemoryStore, MemoryStore, - > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone()) - .unwrap(); + > = HotColdDB::open_ephemeral( + StoreConfig::default(), + ChainSpec::minimal().into(), + log.clone(), + ) + .unwrap(); let spec = MinimalEthSpec::default_spec(); let genesis_state = interop_genesis_state( @@ -1204,7 +1204,9 @@ mod test { let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let runtime = TestRuntime::default(); - let chain = Builder::new(MinimalEthSpec) + let kzg = get_kzg(&spec); + + let chain = Builder::new(MinimalEthSpec, kzg) .logger(log.clone()) .store(Arc::new(store)) .task_executor(runtime.task_executor.clone()) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 470cee713fa..047764d705c 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -2,10 +2,12 @@ use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, Kzg use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock, }; -use crate::data_availability_checker::overflow_lru_cache::DataAvailabilityCheckerInner; -use crate::{BeaconChain, BeaconChainTypes, BeaconStore}; +use crate::data_availability_checker::overflow_lru_cache::{ + DataAvailabilityCheckerInner, ReconstructColumnsDecision, +}; +use crate::{metrics, BeaconChain, BeaconChainTypes, BeaconStore}; use kzg::Kzg; -use slog::{debug, error}; +use slog::{debug, error, Logger}; use slot_clock::SlotClock; use std::fmt; use std::fmt::Debug; @@ -24,14 +26,15 @@ mod overflow_lru_cache; mod state_lru_cache; use crate::data_column_verification::{ - verify_kzg_for_data_column_list, CustodyDataColumn, GossipVerifiedDataColumn, - KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, + verify_kzg_for_data_column, verify_kzg_for_data_column_list, CustodyDataColumn, + GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, +}; +use crate::metrics::{ + KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS, KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES, }; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; use types::non_zero_usize::new_non_zero_usize; -pub use self::overflow_lru_cache::DataColumnsToPublish; - /// The LRU Cache stores `PendingComponents` which can store up to /// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So /// the maximum size of a `PendingComponents` is ~ 0.787536 MB. 
Setting this @@ -69,8 +72,18 @@ pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get(); pub struct DataAvailabilityChecker { availability_cache: Arc>, slot_clock: T::SlotClock, - kzg: Option>, + kzg: Arc, spec: Arc, + log: Logger, +} + +pub type AvailabilityAndReconstructedColumns = (Availability, DataColumnSidecarList); + +#[derive(Debug)] +pub enum DataColumnReconstructionResult { + Success(AvailabilityAndReconstructedColumns), + NotStarted(&'static str), + RecoveredColumnsNotImported(&'static str), } /// This type is returned after adding a block / blob to the `DataAvailabilityChecker`. @@ -97,25 +110,27 @@ impl Debug for Availability { impl DataAvailabilityChecker { pub fn new( slot_clock: T::SlotClock, - kzg: Option>, + kzg: Arc, store: BeaconStore, import_all_data_columns: bool, - spec: ChainSpec, + spec: Arc, + log: Logger, ) -> Result { - let spec = Arc::new(spec); let custody_subnet_count = if import_all_data_columns { spec.data_column_sidecar_subnet_count as usize } else { spec.custody_requirement as usize }; - let custody_column_count = - custody_subnet_count.saturating_mul(spec.data_columns_per_subnet()); + let subnet_sampling_size = + std::cmp::max(custody_subnet_count, spec.samples_per_slot as usize); + let sampling_column_count = + subnet_sampling_size.saturating_mul(spec.data_columns_per_subnet()); let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY, store, - custody_column_count, + sampling_column_count, spec.clone(), )?; Ok(Self { @@ -123,13 +138,12 @@ impl DataAvailabilityChecker { slot_clock, kzg, spec, + log, }) } - pub fn get_custody_columns_count(&self) -> usize { - self.availability_cache - .custody_subnet_count() - .saturating_mul(self.spec.data_columns_per_subnet()) + pub fn get_sampling_column_count(&self) -> usize { + self.availability_cache.sampling_column_count() } /// Checks if the block root is currently in the availability cache awaiting import because @@ -142,9 +156,9 @@ impl DataAvailabilityChecker { .get_execution_valid_block(block_root) } - /// Return the set of imported blob indexes for `block_root`. Returns None if there is no block + /// Return the set of cached blob indexes for `block_root`. Returns None if there is no block /// component for `block_root`. - pub fn imported_blob_indexes(&self, block_root: &Hash256) -> Option> { + pub fn cached_blob_indexes(&self, block_root: &Hash256) -> Option> { self.availability_cache .peek_pending_components(block_root, |components| { components.map(|components| { @@ -157,9 +171,9 @@ impl DataAvailabilityChecker { }) } - /// Return the set of imported custody column indexes for `block_root`. Returns None if there is + /// Return the set of cached custody column indexes for `block_root`. Returns None if there is /// no block component for `block_root`. 
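The new constructor above derives the node's sampling width: the subnet sampling size is the larger of the custody subnet count and `samples_per_slot`, then multiplied by columns per subnet. A self-contained sketch of that arithmetic; all parameter values below are illustrative placeholders, not the real `ChainSpec` constants:

```rust
// Illustrative PeerDAS-style parameters; the real values come from `ChainSpec`.
fn main() {
    let number_of_columns: usize = 128;
    let data_column_sidecar_subnet_count: usize = 32;
    let custody_requirement: usize = 4;
    let samples_per_slot: usize = 8;
    let data_columns_per_subnet = number_of_columns / data_column_sidecar_subnet_count;

    for import_all_data_columns in [false, true] {
        // Supernodes custody every subnet; other nodes only the minimum requirement.
        let custody_subnet_count = if import_all_data_columns {
            data_column_sidecar_subnet_count
        } else {
            custody_requirement
        };
        // Sampling may require more subnets than custody alone.
        let subnet_sampling_size = custody_subnet_count.max(samples_per_slot);
        let sampling_column_count =
            subnet_sampling_size.saturating_mul(data_columns_per_subnet);
        println!(
            "supernode={import_all_data_columns}: custody_subnets={custody_subnet_count}, \
             sampling_columns={sampling_column_count}"
        );
    }
}
```

With these placeholder numbers a regular node samples 32 columns while a supernode samples all 128, which is exactly the condition the reconstruction logic later in this diff checks for.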
- pub fn imported_custody_column_indexes(&self, block_root: &Hash256) -> Option> { + pub fn cached_data_column_indexes(&self, block_root: &Hash256) -> Option> { self.availability_cache .peek_pending_components(block_root, |components| { components.map(|components| components.get_cached_data_columns_indices()) @@ -190,21 +204,23 @@ impl DataAvailabilityChecker { epoch: Epoch, blobs: FixedBlobSidecarList, ) -> Result, AvailabilityCheckError> { - let Some(kzg) = self.kzg.as_ref() else { - return Err(AvailabilityCheckError::KzgNotInitialized); - }; - let seen_timestamp = self .slot_clock .now_duration() .ok_or(AvailabilityCheckError::SlotClockError)?; - let verified_blobs = - KzgVerifiedBlobList::new(Vec::from(blobs).into_iter().flatten(), kzg, seen_timestamp) - .map_err(AvailabilityCheckError::Kzg)?; + // Note: currently not reporting which specific blob is invalid because we fetch all blobs + // from the same peer for both lookup and range sync. + + let verified_blobs = KzgVerifiedBlobList::new( + Vec::from(blobs).into_iter().flatten(), + &self.kzg, + seen_timestamp, + ) + .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache - .put_kzg_verified_blobs(block_root, epoch, verified_blobs) + .put_kzg_verified_blobs(block_root, epoch, verified_blobs, &self.log) } /// Put a list of custody columns received via RPC into the availability cache. This performs KZG @@ -215,28 +231,26 @@ impl DataAvailabilityChecker { block_root: Hash256, epoch: Epoch, custody_columns: DataColumnSidecarList, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { - let Some(kzg) = self.kzg.as_ref() else { - return Err(AvailabilityCheckError::KzgNotInitialized); - }; - + ) -> Result, AvailabilityCheckError> { // TODO(das): report which column is invalid for proper peer scoring - // TODO(das): batch KZG verification here + // TODO(das): batch KZG verification here, but fallback into checking each column + // individually to report which column(s) are invalid. 
let verified_custody_columns = custody_columns .into_iter() .map(|column| { + let index = column.index; Ok(KzgVerifiedCustodyDataColumn::from_asserted_custody( - KzgVerifiedDataColumn::new(column, kzg).map_err(AvailabilityCheckError::Kzg)?, + KzgVerifiedDataColumn::new(column, &self.kzg) + .map_err(|e| AvailabilityCheckError::InvalidColumn(index, e))?, )) }) .collect::, AvailabilityCheckError>>()?; self.availability_cache.put_kzg_verified_data_columns( - kzg, block_root, epoch, verified_custody_columns, + &self.log, ) } @@ -253,6 +267,7 @@ impl DataAvailabilityChecker { gossip_blob.block_root(), gossip_blob.epoch(), vec![gossip_blob.into_inner()], + &self.log, ) } @@ -267,11 +282,7 @@ impl DataAvailabilityChecker { slot: Slot, block_root: Hash256, gossip_data_columns: Vec>, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { - let Some(kzg) = self.kzg.as_ref() else { - return Err(AvailabilityCheckError::KzgNotInitialized); - }; + ) -> Result, AvailabilityCheckError> { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); let custody_columns = gossip_data_columns @@ -280,10 +291,10 @@ impl DataAvailabilityChecker { .collect::>(); self.availability_cache.put_kzg_verified_data_columns( - kzg, block_root, epoch, custody_columns, + &self.log, ) } @@ -294,7 +305,7 @@ impl DataAvailabilityChecker { executed_block: AvailabilityPendingExecutedBlock, ) -> Result, AvailabilityCheckError> { self.availability_cache - .put_pending_executed_block(executed_block) + .put_pending_executed_block(executed_block, &self.log) } pub fn remove_pending_components(&self, block_root: Hash256) { @@ -314,12 +325,8 @@ impl DataAvailabilityChecker { let (block_root, block, blobs, data_columns) = block.deconstruct(); if self.blobs_required_for_block(&block) { return if let Some(blob_list) = blobs.as_ref() { - let kzg = self - .kzg - .as_ref() - .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - verify_kzg_for_blob_list(blob_list.iter(), kzg) - .map_err(AvailabilityCheckError::Kzg)?; + verify_kzg_for_blob_list(blob_list.iter(), &self.kzg) + .map_err(AvailabilityCheckError::InvalidBlobs)?; Ok(MaybeAvailableBlock::Available(AvailableBlock { block_root, block, @@ -334,17 +341,12 @@ impl DataAvailabilityChecker { } if self.data_columns_required_for_block(&block) { return if let Some(data_column_list) = data_columns.as_ref() { - let kzg = self - .kzg - .as_ref() - .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - verify_kzg_for_data_column_list( + verify_kzg_for_data_column_list_with_scoring( data_column_list .iter() .map(|custody_column| custody_column.as_data_column()), - kzg, - ) - .map_err(AvailabilityCheckError::Kzg)?; + &self.kzg, + )?; Ok(MaybeAvailableBlock::Available(AvailableBlock { block_root, block, @@ -395,11 +397,8 @@ impl DataAvailabilityChecker { // verify kzg for all blobs at once if !all_blobs.is_empty() { - let kzg = self - .kzg - .as_ref() - .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - verify_kzg_for_blob_list(all_blobs.iter(), kzg)?; + verify_kzg_for_blob_list(all_blobs.iter(), &self.kzg) + .map_err(AvailabilityCheckError::InvalidBlobs)?; } let all_data_columns = blocks @@ -415,11 +414,8 @@ impl DataAvailabilityChecker { // verify kzg for all data columns at once if !all_data_columns.is_empty() { - let kzg = self - .kzg - .as_ref() - .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - verify_kzg_for_data_column_list(all_data_columns.iter(), kzg)?; + // TODO: Need to also attribute which specific block is faulty + 
verify_kzg_for_data_column_list_with_scoring(all_data_columns.iter(), &self.kzg)?; } for block in blocks { @@ -529,6 +525,92 @@ impl DataAvailabilityChecker { block_cache_size: self.availability_cache.block_cache_size(), } } + + pub fn reconstruct_data_columns( + &self, + block_root: &Hash256, + ) -> Result, AvailabilityCheckError> { + let pending_components = match self + .availability_cache + .check_and_set_reconstruction_started(block_root) + { + ReconstructColumnsDecision::Yes(pending_components) => pending_components, + ReconstructColumnsDecision::No(reason) => { + return Ok(DataColumnReconstructionResult::NotStarted(reason)); + } + }; + + metrics::inc_counter(&KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS); + let timer = metrics::start_timer(&metrics::DATA_AVAILABILITY_RECONSTRUCTION_TIME); + + let all_data_columns = KzgVerifiedCustodyDataColumn::reconstruct_columns( + &self.kzg, + &pending_components.verified_data_columns, + &self.spec, + ) + .map_err(|e| { + error!( + self.log, + "Error reconstructing data columns"; + "block_root" => ?block_root, + "error" => ?e + ); + self.availability_cache + .handle_reconstruction_failure(block_root); + metrics::inc_counter(&KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES); + AvailabilityCheckError::ReconstructColumnsError(e) + })?; + + // Check indices from cache again to make sure we don't publish components we've already received. + let Some(existing_column_indices) = self.cached_data_column_indexes(block_root) else { + return Ok(DataColumnReconstructionResult::RecoveredColumnsNotImported( + "block already imported", + )); + }; + + let data_columns_to_publish = all_data_columns + .into_iter() + .filter(|d| !existing_column_indices.contains(&d.index())) + .collect::>(); + + let Some(slot) = data_columns_to_publish + .first() + .map(|d| d.as_data_column().slot()) + else { + return Ok(DataColumnReconstructionResult::RecoveredColumnsNotImported( + "No new columns to import and publish", + )); + }; + + metrics::stop_timer(timer); + metrics::inc_counter_by( + &metrics::DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS, + data_columns_to_publish.len() as u64, + ); + + debug!(self.log, "Reconstructed columns"; + "count" => data_columns_to_publish.len(), + "block_root" => ?block_root, + "slot" => slot, + ); + + self.availability_cache + .put_kzg_verified_data_columns( + *block_root, + slot.epoch(T::EthSpec::slots_per_epoch()), + data_columns_to_publish.clone(), + &self.log, + ) + .map(|availability| { + DataColumnReconstructionResult::Success(( + availability, + data_columns_to_publish + .into_iter() + .map(|d| d.clone_arc()) + .collect::>(), + )) + }) + } } /// Helper struct to group data availability checker metrics. @@ -621,6 +703,32 @@ async fn availability_cache_maintenance_service( } } +fn verify_kzg_for_data_column_list_with_scoring<'a, E: EthSpec, I>( + data_column_iter: I, + kzg: &'a Kzg, +) -> Result<(), AvailabilityCheckError> +where + I: Iterator>> + Clone, +{ + let Err(batch_err) = verify_kzg_for_data_column_list(data_column_iter.clone(), kzg) else { + return Ok(()); + }; + + let data_columns = data_column_iter.collect::>(); + // Find which column is invalid. If len is 1 or 0 continue to default case below. + // If len > 1 at least one column MUST fail. 
+ if data_columns.len() > 1 { + for data_column in data_columns { + if let Err(e) = verify_kzg_for_data_column(data_column.clone(), kzg) { + return Err(AvailabilityCheckError::InvalidColumn(data_column.index, e)); + } + } + } + + // len 0 should never happen + Err(AvailabilityCheckError::InvalidColumn(0, batch_err)) +} + /// A fully available block that is ready to be imported into fork choice. #[derive(Clone, Debug, PartialEq)] pub struct AvailableBlock { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index 79793d6dc29..dbfa00e6e22 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -1,11 +1,11 @@ use kzg::{Error as KzgError, KzgCommitment}; -use types::{BeaconStateError, Hash256}; +use types::{BeaconStateError, ColumnIndex, Hash256}; #[derive(Debug)] pub enum Error { - Kzg(KzgError), - KzgNotInitialized, - KzgVerificationFailed, + InvalidBlobs(KzgError), + InvalidColumn(ColumnIndex, KzgError), + ReconstructColumnsError(KzgError), KzgCommitmentMismatch { blob_commitment: KzgCommitment, block_commitment: KzgCommitment, @@ -36,8 +36,7 @@ pub enum ErrorCategory { impl Error { pub fn category(&self) -> ErrorCategory { match self { - Error::KzgNotInitialized - | Error::SszTypes(_) + Error::SszTypes(_) | Error::MissingBlobs | Error::MissingCustodyColumns | Error::StoreError(_) @@ -48,11 +47,12 @@ impl Error { | Error::UnableToDetermineImportRequirement | Error::RebuildingStateCaches(_) | Error::SlotClockError => ErrorCategory::Internal, - Error::Kzg(_) + Error::InvalidBlobs { .. } + | Error::InvalidColumn { .. } + | Error::ReconstructColumnsError { .. } | Error::BlobIndexInvalid(_) | Error::DataColumnIndexInvalid(_) - | Error::KzgCommitmentMismatch { .. } - | Error::KzgVerificationFailed => ErrorCategory::Malicious, + | Error::KzgCommitmentMismatch { .. } => ErrorCategory::Malicious, } } } @@ -80,9 +80,3 @@ impl From for Error { Self::BlockReplayError(value) } } - -impl From for Error { - fn from(value: KzgError) -> Self { - Self::Kzg(value) - } -} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index d30fdd6a47b..54b7f9dc00c 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -6,23 +6,19 @@ use crate::block_verification_types::{ }; use crate::data_availability_checker::{Availability, AvailabilityCheckError}; use crate::data_column_verification::KzgVerifiedCustodyDataColumn; -use crate::metrics; use crate::BeaconChainTypes; -use kzg::Kzg; use lru::LruCache; use parking_lot::RwLock; +use slog::{debug, Logger}; use ssz_types::{FixedVector, VariableList}; -use std::collections::HashSet; use std::num::NonZeroUsize; use std::sync::Arc; use types::blob_sidecar::BlobIdentifier; use types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, + Hash256, SignedBeaconBlock, }; -pub type DataColumnsToPublish = Option>; - /// This represents the components of a partially available block /// /// The blobs are all gossip and kzg verified. 
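The `verify_kzg_for_data_column_list_with_scoring` function that closes above verifies the whole batch first and only re-verifies column by column when the batch fails, so the common case stays cheap while a failure still names an index for peer scoring. A runnable sketch of that fallback pattern; `Column` and `verify_one` are stand-ins for the real sidecar type and KZG calls:

```rust
// Stand-ins for the real data column sidecar and KZG verification.
#[derive(Debug)]
struct Column {
    index: u64,
    valid: bool,
}

#[derive(Debug)]
enum Error {
    InvalidColumn(u64),
}

fn verify_one(c: &Column) -> Result<(), ()> {
    if c.valid { Ok(()) } else { Err(()) }
}

// The batch path is the common case and amortises verification cost.
fn verify_batch(cols: &[Column]) -> Result<(), ()> {
    cols.iter().try_for_each(verify_one)
}

fn verify_with_scoring(cols: &[Column]) -> Result<(), Error> {
    if verify_batch(cols).is_ok() {
        return Ok(());
    }
    // Batch failed: with more than one column, re-check individually so the
    // error attributes the offending index for peer scoring.
    if cols.len() > 1 {
        for c in cols {
            if verify_one(c).is_err() {
                return Err(Error::InvalidColumn(c.index));
            }
        }
    }
    // Single-column (or empty) batches: the batch error already pinpoints it.
    Err(Error::InvalidColumn(cols.first().map_or(0, |c| c.index)))
}

fn main() {
    let cols = vec![
        Column { index: 0, valid: true },
        Column { index: 7, valid: false },
    ];
    println!("{:?}", verify_with_scoring(&cols)); // Err(InvalidColumn(7))
}
```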
@@ -40,7 +36,7 @@ pub struct PendingComponents { pub enum BlockImportRequirement { AllBlobs, - CustodyColumns(usize), + ColumnSampling(usize), } impl PendingComponents { @@ -95,7 +91,7 @@ impl PendingComponents { /// block. /// /// This corresponds to the number of commitments that are present in a block. - pub fn num_expected_blobs(&self) -> Option { + pub fn block_kzg_commitments_count(&self) -> Option { self.get_cached_block() .as_ref() .map(|b| b.get_commitments().len()) @@ -203,21 +199,61 @@ impl PendingComponents { /// /// Returns `true` if both the block exists and the number of received blobs / custody columns /// matches the number of expected blobs / custody columns. - pub fn is_available(&self, block_import_requirement: &BlockImportRequirement) -> bool { + pub fn is_available( + &self, + block_import_requirement: &BlockImportRequirement, + log: &Logger, + ) -> bool { + let block_kzg_commitments_count_opt = self.block_kzg_commitments_count(); + match block_import_requirement { - BlockImportRequirement::AllBlobs => self - .num_expected_blobs() - .map_or(false, |num_expected_blobs| { - num_expected_blobs == self.num_received_blobs() - }), - BlockImportRequirement::CustodyColumns(num_expected_columns) => { - let num_received_data_columns = self.num_received_data_columns(); + BlockImportRequirement::AllBlobs => { + let received_blobs = self.num_received_blobs(); + let expected_blobs_msg = block_kzg_commitments_count_opt + .as_ref() + .map(|num| num.to_string()) + .unwrap_or("unknown".to_string()); + + debug!(log, + "Component(s) added to data availability checker"; + "block_root" => ?self.block_root, + "received_block" => block_kzg_commitments_count_opt.is_some(), + "received_blobs" => received_blobs, + "expected_blobs" => expected_blobs_msg, + ); + + block_kzg_commitments_count_opt.map_or(false, |num_expected_blobs| { + num_expected_blobs == received_blobs + }) + } + BlockImportRequirement::ColumnSampling(num_expected_columns) => { // No data columns when there are 0 blobs - self.num_expected_blobs() - .map_or(false, |num_expected_blobs| { - num_expected_blobs == 0 - || *num_expected_columns == num_received_data_columns - }) + let expected_columns_opt = block_kzg_commitments_count_opt.map(|blob_count| { + if blob_count > 0 { + *num_expected_columns + } else { + 0 + } + }); + + let expected_columns_msg = expected_columns_opt + .as_ref() + .map(|num| num.to_string()) + .unwrap_or("unknown".to_string()); + + let num_received_columns = self.num_received_data_columns(); + + debug!(log, + "Component(s) added to data availability checker"; + "block_root" => ?self.block_root, + "received_block" => block_kzg_commitments_count_opt.is_some(), + "received_columns" => num_received_columns, + "expected_columns" => expected_columns_msg, + ); + + expected_columns_opt.map_or(false, |num_expected_columns| { + num_expected_columns == num_received_columns + }) } } } @@ -281,7 +317,7 @@ impl PendingComponents { }; (Some(VariableList::new(verified_blobs)?), None) } - BlockImportRequirement::CustodyColumns(_) => { + BlockImportRequirement::ColumnSampling(_) => { let verified_data_columns = verified_data_columns .into_iter() .map(|d| d.into_inner()) @@ -310,10 +346,6 @@ impl PendingComponents { ))) } - pub fn reconstruction_started(&mut self) { - self.reconstruction_started = true; - } - /// Returns the epoch of the block if it is cached, otherwise returns the epoch of the first blob. 
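The reworked `is_available` in this hunk compares received components against what the block's KZG commitments imply, treating a zero-blob block as needing zero columns. A standalone sketch of the predicate (debug logging omitted; the types below are stand-ins for the real `PendingComponents` and `BlockImportRequirement`):

```rust
// Stand-in for `BlockImportRequirement`.
enum Requirement {
    AllBlobs,
    ColumnSampling(usize),
}

// Stand-in for the relevant parts of `PendingComponents`.
struct Pending {
    // `None` until the block itself arrives; `Some(n)` = n KZG commitments.
    block_commitment_count: Option<usize>,
    received_blobs: usize,
    received_columns: usize,
}

fn is_available(p: &Pending, req: &Requirement) -> bool {
    match req {
        Requirement::AllBlobs => p
            .block_commitment_count
            .map_or(false, |expected| expected == p.received_blobs),
        Requirement::ColumnSampling(sampling_columns) => p
            .block_commitment_count
            // A block with zero blob commitments needs zero columns.
            .map(|blobs| if blobs > 0 { *sampling_columns } else { 0 })
            .map_or(false, |expected| expected == p.received_columns),
    }
}

fn main() {
    let p = Pending {
        block_commitment_count: Some(3),
        received_blobs: 0,
        received_columns: 8,
    };
    assert!(is_available(&p, &Requirement::ColumnSampling(8)));
    assert!(!is_available(&p, &Requirement::AllBlobs));
}
```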
pub fn epoch(&self) -> Option { self.executed_block @@ -352,28 +384,37 @@ pub struct DataAvailabilityCheckerInner { /// This cache holds a limited number of states in memory and reconstructs them /// from disk when necessary. This is necessary until we merge tree-states state_cache: StateLRUCache, - /// The number of data columns the node is custodying. - custody_column_count: usize, + /// The number of data columns the node is sampling via subnet sampling. + sampling_column_count: usize, spec: Arc, } +// This enum is only used internally within the crate in the reconstruction function to improve +// readability, so it's OK to not box the variant value, and it shouldn't impact memory much with +// the current usage, as it's deconstructed immediately. +#[allow(clippy::large_enum_variant)] +pub(crate) enum ReconstructColumnsDecision { + Yes(PendingComponents), + No(&'static str), +} + impl DataAvailabilityCheckerInner { pub fn new( capacity: NonZeroUsize, beacon_store: BeaconStore, - custody_column_count: usize, + sampling_column_count: usize, spec: Arc, ) -> Result { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), state_cache: StateLRUCache::new(beacon_store, spec.clone()), - custody_column_count, + sampling_column_count, spec, }) } - pub fn custody_subnet_count(&self) -> usize { - self.custody_column_count + pub fn sampling_column_count(&self) -> usize { + self.sampling_column_count } /// Returns true if the block root is known, without altering the LRU ordering @@ -439,41 +480,20 @@ impl DataAvailabilityCheckerInner { ) -> Result { let peer_das_enabled = self.spec.is_peer_das_enabled_for_epoch(epoch); if peer_das_enabled { - Ok(BlockImportRequirement::CustodyColumns( - self.custody_column_count, + Ok(BlockImportRequirement::ColumnSampling( + self.sampling_column_count, )) } else { Ok(BlockImportRequirement::AllBlobs) } } - /// Potentially trigger reconstruction if: - /// - Our custody requirement is all columns - /// - We >= 50% of columns, but not all columns - fn should_reconstruct( - &self, - block_import_requirement: &BlockImportRequirement, - pending_components: &PendingComponents, - ) -> bool { - let BlockImportRequirement::CustodyColumns(num_expected_columns) = block_import_requirement - else { - return false; - }; - - let num_of_columns = self.spec.number_of_columns; - let has_missing_columns = pending_components.verified_data_columns.len() < num_of_columns; - - has_missing_columns - && !pending_components.reconstruction_started - && *num_expected_columns == num_of_columns - && pending_components.verified_data_columns.len() >= num_of_columns / 2 - } - pub fn put_kzg_verified_blobs>>( &self, block_root: Hash256, epoch: Epoch, kzg_verified_blobs: I, + log: &Logger, ) -> Result, AvailabilityCheckError> { let mut fixed_blobs = FixedVector::default(); @@ -495,7 +515,7 @@ impl DataAvailabilityCheckerInner { pending_components.merge_blobs(fixed_blobs); let block_import_requirement = self.block_import_requirement(epoch)?; - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); @@ -513,12 +533,11 @@ impl DataAvailabilityCheckerInner { I: IntoIterator>, >( &self, - kzg: &Kzg, block_root: Hash256, epoch: Epoch, kzg_verified_data_columns: I, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { + log: &Logger, + ) -> Result, AvailabilityCheckError> { let mut 
write_lock = self.critical.write(); // Grab existing entry or create a new entry. @@ -532,64 +551,67 @@ impl DataAvailabilityCheckerInner { let block_import_requirement = self.block_import_requirement(epoch)?; - // Potentially trigger reconstruction if: - // - Our custody requirement is all columns - // - We >= 50% of columns - let data_columns_to_publish = - if self.should_reconstruct(&block_import_requirement, &pending_components) { - pending_components.reconstruction_started(); - - let timer = metrics::start_timer(&metrics::DATA_AVAILABILITY_RECONSTRUCTION_TIME); - - let existing_column_indices = pending_components - .verified_data_columns - .iter() - .map(|d| d.index()) - .collect::>(); - - // Will only return an error if: - // - < 50% of columns - // - There are duplicates - let all_data_columns = KzgVerifiedCustodyDataColumn::reconstruct_columns( - kzg, - pending_components.verified_data_columns.as_slice(), - &self.spec, - )?; - - let data_columns_to_publish = all_data_columns - .iter() - .filter(|d| !existing_column_indices.contains(&d.index())) - .map(|d| d.clone_arc()) - .collect::>(); - - pending_components.verified_data_columns = all_data_columns; - - metrics::stop_timer(timer); - metrics::inc_counter_by( - &metrics::DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS, - data_columns_to_publish.len() as u64, - ); - - Some(data_columns_to_publish) - } else { - None - }; - - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components - .make_available(block_import_requirement, &self.spec, |diet_block| { - self.state_cache.recover_pending_executed_block(diet_block) - }) - .map(|availability| (availability, data_columns_to_publish)) + pending_components.make_available(block_import_requirement, &self.spec, |diet_block| { + self.state_cache.recover_pending_executed_block(diet_block) + }) } else { write_lock.put(block_root, pending_components); - Ok(( - Availability::MissingComponents(block_root), - data_columns_to_publish, - )) + Ok(Availability::MissingComponents(block_root)) + } + } + + /// Check whether data column reconstruction should be attempted. + /// + /// Potentially trigger reconstruction if: + /// - Our custody requirement is all columns (supernode), and we haven't got all columns + /// - We have >= 50% of columns, but not all columns + /// - Reconstruction hasn't been started for the block + /// + /// If reconstruction is required, returns `PendingComponents` which contains the + /// components to be used as inputs to reconstruction, otherwise returns a `reason`. + pub fn check_and_set_reconstruction_started( + &self, + block_root: &Hash256, + ) -> ReconstructColumnsDecision { + let mut write_lock = self.critical.write(); + let Some(pending_components) = write_lock.get_mut(block_root) else { + // Block may have been imported as it does not exist in availability cache. + return ReconstructColumnsDecision::No("block already imported"); + }; + + // If we're sampling all columns, it means we must be custodying all columns. 
+ let custody_column_count = self.sampling_column_count(); + let total_column_count = self.spec.number_of_columns; + let received_column_count = pending_components.verified_data_columns.len(); + + if pending_components.reconstruction_started { + return ReconstructColumnsDecision::No("already started"); + } + if custody_column_count != total_column_count { + return ReconstructColumnsDecision::No("not required for full node"); + } + if received_column_count == self.spec.number_of_columns { + return ReconstructColumnsDecision::No("all columns received"); + } + if received_column_count < total_column_count / 2 { + return ReconstructColumnsDecision::No("not enough columns"); + } + + pending_components.reconstruction_started = true; + ReconstructColumnsDecision::Yes(pending_components.clone()) + } + + /// This could mean some invalid data columns made it through to the `DataAvailabilityChecker`. + /// In this case, we remove all data columns in `PendingComponents`, reset reconstruction + /// status so that we can attempt to retrieve columns from peers again. + pub fn handle_reconstruction_failure(&self, block_root: &Hash256) { + if let Some(pending_components_mut) = self.critical.write().get_mut(block_root) { + pending_components_mut.verified_data_columns = vec![]; + pending_components_mut.reconstruction_started = false; } } @@ -598,6 +620,7 @@ impl DataAvailabilityCheckerInner { pub fn put_pending_executed_block( &self, executed_block: AvailabilityPendingExecutedBlock, + log: &Logger, ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); let block_root = executed_block.import_data.block_root; @@ -619,7 +642,7 @@ impl DataAvailabilityCheckerInner { // Check if we have all components and entire set is consistent. let block_import_requirement = self.block_import_requirement(epoch)?; - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); @@ -702,7 +725,7 @@ mod test { fn get_store_with_spec( db_path: &TempDir, - spec: ChainSpec, + spec: Arc, log: Logger, ) -> Arc, BeaconNodeBackend>> { let hot_path = db_path.path().join("hot_db"); @@ -739,6 +762,7 @@ mod test { spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); spec.capella_fork_epoch = Some(capella_fork_epoch); spec.deneb_fork_epoch = Some(deneb_fork_epoch); + let spec = Arc::new(spec); let chain_store = get_store_with_spec::(db_path, spec.clone(), log.clone()); let validators_keypairs = @@ -886,7 +910,7 @@ mod test { let log = test_logger(); let chain_db_path = tempdir().expect("should get temp dir"); let harness = get_deneb_chain(log.clone(), &chain_db_path).await; - let spec = Arc::new(harness.spec.clone()); + let spec = harness.spec.clone(); let test_store = harness.chain.store.clone(); let capacity_non_zero = new_non_zero_usize(capacity); let cache = Arc::new( @@ -920,7 +944,7 @@ mod test { ); assert!(cache.critical.read().is_empty(), "cache should be empty"); let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); if blobs_expected == 0 { assert!( @@ -959,7 +983,7 @@ mod test { for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone()) + 
.put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); if blob_index == blobs_expected - 1 { assert!(matches!(availability, Availability::Available(_))); @@ -986,7 +1010,7 @@ mod test { for gossip_blob in blobs { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone()) + .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); assert_eq!( availability, @@ -996,7 +1020,7 @@ mod test { assert_eq!(cache.critical.read().len(), 1); } let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); assert!( matches!(availability, Availability::Available(_)), @@ -1064,7 +1088,7 @@ mod test { // put the block in the cache let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); // grab the diet block from the cache for later testing diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index f4a5feaee2a..a4e83b27514 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -52,12 +52,6 @@ pub enum GossipDataColumnError { data_column_slot: Slot, parent_slot: Slot, }, - /// `Kzg` struct hasn't been initialized. This is an internal error. - /// - /// ## Peer scoring - /// - /// The peer isn't faulty, This is an internal error. - KzgNotInitialized, /// The kzg verification failed. /// /// ## Peer scoring @@ -133,6 +127,25 @@ pub enum GossipDataColumnError { slot: Slot, index: ColumnIndex, }, + /// Data column index must be between 0 and `NUMBER_OF_COLUMNS` (exclusive). + /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + InvalidColumnIndex(u64), + /// Data column not expected for a block with empty kzg commitments. + /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + UnexpectedDataColumn, + /// The data column length must be equal to the number of commitments/proofs, otherwise the + /// sidecar is invalid. 
+ /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + InconsistentCommitmentsOrProofLength, } impl From for GossipDataColumnError { @@ -300,10 +313,7 @@ impl KzgVerifiedCustodyDataColumn { kzg: &Kzg, partial_set_of_columns: &[Self], spec: &ChainSpec, - ) -> Result, KzgError> { - // Will only return an error if: - // - < 50% of columns - // - There are duplicates + ) -> Result>, KzgError> { let all_data_columns = reconstruct_data_columns( kzg, &partial_set_of_columns @@ -315,10 +325,8 @@ impl KzgVerifiedCustodyDataColumn { Ok(all_data_columns .into_iter() - .map(|d| { - KzgVerifiedCustodyDataColumn::from_asserted_custody(KzgVerifiedDataColumn { - data: d, - }) + .map(|data| { + KzgVerifiedCustodyDataColumn::from_asserted_custody(KzgVerifiedDataColumn { data }) }) .collect::>()) } @@ -373,7 +381,7 @@ pub fn validate_data_column_sidecar_for_gossip( chain: &BeaconChain, ) -> Result, GossipDataColumnError> { let column_slot = data_column.slot(); - + verify_data_column_sidecar(&data_column, &chain.spec)?; verify_index_matches_subnet(&data_column, subnet, &chain.spec)?; verify_sidecar_not_from_future_slot(chain, column_slot)?; verify_slot_greater_than_latest_finalized_slot(chain, column_slot)?; @@ -382,12 +390,8 @@ pub fn validate_data_column_sidecar_for_gossip( let parent_block = verify_parent_block_and_finalized_descendant(data_column.clone(), chain)?; verify_slot_higher_than_parent(&parent_block, column_slot)?; verify_proposer_and_signature(&data_column, &parent_block, chain)?; - - let kzg = chain - .kzg - .clone() - .ok_or(GossipDataColumnError::KzgNotInitialized)?; - let kzg_verified_data_column = verify_kzg_for_data_column(data_column.clone(), &kzg) + let kzg = &chain.kzg; + let kzg_verified_data_column = verify_kzg_for_data_column(data_column.clone(), kzg) .map_err(GossipDataColumnError::InvalidKzgProof)?; chain @@ -406,6 +410,26 @@ pub fn validate_data_column_sidecar_for_gossip( }) } +/// Verify if the data column sidecar is valid. 
+fn verify_data_column_sidecar( + data_column: &DataColumnSidecar, + spec: &ChainSpec, +) -> Result<(), GossipDataColumnError> { + if data_column.index >= spec.number_of_columns as u64 { + return Err(GossipDataColumnError::InvalidColumnIndex(data_column.index)); + } + if data_column.kzg_commitments.is_empty() { + return Err(GossipDataColumnError::UnexpectedDataColumn); + } + if data_column.column.len() != data_column.kzg_commitments.len() + || data_column.column.len() != data_column.kzg_proofs.len() + { + return Err(GossipDataColumnError::InconsistentCommitmentsOrProofLength); + } + + Ok(()) +} + // Verify that this is the first column sidecar received for the tuple: // (block_header.slot, block_header.proposer_index, column_sidecar.index) fn verify_is_first_sidecar( @@ -623,3 +647,55 @@ fn verify_sidecar_not_from_future_slot( } Ok(()) } + +#[cfg(test)] +mod test { + use crate::data_column_verification::{ + validate_data_column_sidecar_for_gossip, GossipDataColumnError, + }; + use crate::test_utils::BeaconChainHarness; + use types::{DataColumnSidecar, EthSpec, ForkName, MainnetEthSpec}; + + type E = MainnetEthSpec; + + #[tokio::test] + async fn empty_data_column_sidecars_fails_validation() { + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + harness.advance_slot(); + + let slot = harness.get_current_slot(); + let state = harness.get_current_state(); + let ((block, _blobs_opt), _state) = harness + .make_block_with_modifier(state, slot, |block| { + *block.body_mut().blob_kzg_commitments_mut().unwrap() = vec![].into(); + }) + .await; + + let index = 0; + let column_sidecar = DataColumnSidecar:: { + index, + column: vec![].into(), + kzg_commitments: vec![].into(), + kzg_proofs: vec![].into(), + signed_block_header: block.signed_block_header(), + kzg_commitments_inclusion_proof: block + .message() + .body() + .kzg_commitments_merkle_proof() + .unwrap(), + }; + + let result = + validate_data_column_sidecar_for_gossip(column_sidecar.into(), index, &harness.chain); + assert!(matches!( + result.err(), + Some(GossipDataColumnError::UnexpectedDataColumn) + )); + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 994ac79af7e..a26d7553163 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -291,10 +291,10 @@ pub enum BlockProductionError { TokioJoin(JoinError), BeaconChain(BeaconChainError), InvalidPayloadFork, - TrustedSetupNotInitialized, InvalidBlockVariant(String), KzgError(kzg::Error), FailedToBuildBlobSidecars(String), + MissingExecutionRequests, } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 2252d5b9c92..276262085eb 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -10,6 +10,7 @@ use state_processing::per_block_processing::get_new_eth1_data; use std::cmp::Ordering; use std::collections::HashMap; use std::marker::PhantomData; +use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use store::{DBColumn, Error as StoreError, StoreItem}; use task_executor::TaskExecutor; @@ -284,7 +285,7 @@ where ssz_container: &SszEth1, config: Eth1Config, log: &Logger, - spec: ChainSpec, + spec: Arc, ) -> Result { let backend = 
Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, log.clone(), spec)?; @@ -355,7 +356,7 @@ pub trait Eth1ChainBackend: Sized + Send + Sync { bytes: &[u8], config: Eth1Config, log: Logger, - spec: ChainSpec, + spec: Arc, ) -> Result; } @@ -413,7 +414,7 @@ impl Eth1ChainBackend for DummyEth1ChainBackend { _bytes: &[u8], _config: Eth1Config, _log: Logger, - _spec: ChainSpec, + _spec: Arc, ) -> Result { Ok(Self(PhantomData)) } @@ -441,7 +442,7 @@ impl CachingEth1Backend { /// Instantiates `self` with empty caches. /// /// Does not connect to the eth1 node or start any tasks to keep the cache updated. - pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Result { + pub fn new(config: Eth1Config, log: Logger, spec: Arc) -> Result { Ok(Self { core: HttpService::new(config, log.clone(), spec) .map_err(|e| format!("Failed to create eth1 http service: {:?}", e))?, @@ -596,7 +597,7 @@ impl Eth1ChainBackend for CachingEth1Backend { bytes: &[u8], config: Eth1Config, log: Logger, - spec: ChainSpec, + spec: Arc, ) -> Result { let inner = HttpService::from_bytes(bytes, config, log.clone(), spec)?; Ok(Self { @@ -752,7 +753,8 @@ mod test { let log = test_logger(); Eth1Chain::new( - CachingEth1Backend::new(eth1_config, log, MainnetEthSpec::default_spec()).unwrap(), + CachingEth1Backend::new(eth1_config, log, Arc::new(MainnetEthSpec::default_spec())) + .unwrap(), ) } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index b9b98bfbc00..f2420eea0d2 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -277,9 +277,7 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( } .into()), None => { - if allow_optimistic_import == AllowOptimisticImport::Yes - && is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? - { + if allow_optimistic_import == AllowOptimisticImport::Yes { debug!( chain.log, "Optimistically importing merge transition block"; @@ -297,36 +295,6 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( } } -/// Check to see if a block with the given parameters is valid to be imported optimistically. -pub async fn is_optimistic_candidate_block( - chain: &Arc>, - block_slot: Slot, - block_parent_root: Hash256, -) -> Result { - let current_slot = chain.slot()?; - let inner_chain = chain.clone(); - - // Use a blocking task to check if the block is an optimistic candidate. Interacting - // with the `fork_choice` lock in an async task can block the core executor. - chain - .spawn_blocking_handle( - move || { - inner_chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_candidate_block( - current_slot, - block_slot, - &block_parent_root, - &inner_chain.spec, - ) - }, - "validate_merge_block_optimistic_candidate", - ) - .await? 
- .map_err(BeaconChainError::from) -} - /// Validate the gossip block's execution_payload according to the checks described here: /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block pub fn validate_execution_payload_for_gossip( diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index 42a1aa1a0b4..4373164d62f 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -242,6 +242,7 @@ mod tests { use execution_layer::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_ENGINE_CAPABILITIES}; use execution_layer::EngineCapabilities; use slog::info; + use std::sync::Arc; use std::sync::LazyLock; use std::time::Duration; use types::{ChainSpec, Graffiti, Keypair, MinimalEthSpec, GRAFFITI_BYTES_LEN}; @@ -253,7 +254,7 @@ mod tests { fn get_harness( validator_count: usize, - spec: ChainSpec, + spec: Arc, chain_config: Option, ) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MinimalEthSpec) @@ -272,7 +273,7 @@ mod tests { #[tokio::test] async fn check_graffiti_without_el_version_support() { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let harness = get_harness(VALIDATOR_COUNT, spec, None); // modify execution engine so it doesn't support engine_getClientVersionV1 method let mock_execution_layer = harness.mock_execution_layer.as_ref().unwrap(); @@ -313,7 +314,7 @@ mod tests { #[tokio::test] async fn check_graffiti_with_el_version_support() { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let harness = get_harness(VALIDATOR_COUNT, spec, None); let found_graffiti_bytes = harness.chain.graffiti_calculator.get_graffiti(None).await.0; @@ -355,7 +356,7 @@ mod tests { #[tokio::test] async fn check_graffiti_with_validator_specified_value() { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let harness = get_harness(VALIDATOR_COUNT, spec, None); let graffiti_str = "nice graffiti bro"; diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 1372211b175..a23b6ddc1e5 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -94,7 +94,9 @@ impl BeaconChain { // Blobs are stored per block, and data columns are each stored individually let n_blob_ops_per_block = if self.spec.is_peer_das_scheduled() { - self.data_availability_checker.get_custody_columns_count() + // TODO(das): `available_block` includes all sampled columns, but we only need to store + // custody columns. To be clarified in spec PR. 
+ self.data_availability_checker.get_sampling_column_count() } else { 1 }; diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index c2355e6f4f2..91c1098f81f 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -290,8 +290,7 @@ pub fn reconstruct_data_columns( mod test { use crate::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; use bls::Signature; - use eth2_network_config::TRUSTED_SETUP_BYTES; - use kzg::{Kzg, KzgCommitment, TrustedSetup}; + use kzg::{trusted_setup::get_trusted_setup, Kzg, KzgCommitment, TrustedSetup}; use types::{ beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, @@ -377,7 +376,7 @@ mod test { } fn get_kzg() -> Kzg { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) .expect("should have trusted setup"); Kzg::new_from_trusted_setup_das_enabled(trusted_setup).expect("should create kzg") diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 7bfb5b08beb..b89c00e0af4 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -75,9 +75,9 @@ pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{ - get_block_root, BlockError, ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock, - IntoExecutionPendingBlock, IntoGossipVerifiedBlockContents, PayloadVerificationOutcome, - PayloadVerificationStatus, + build_blob_data_column_sidecars, get_block_root, BlockError, ExecutionPayloadError, + ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, IntoGossipVerifiedBlock, + PayloadVerificationOutcome, PayloadVerificationStatus, }; pub use block_verification_types::AvailabilityPendingExecutedBlock; pub use block_verification_types::ExecutedBlock; diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index 87d0564177d..fe7afb78e0c 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,25 +1,19 @@ use crate::errors::BeaconChainError; use crate::{metrics, BeaconChainTypes, BeaconStore}; -use eth2::types::light_client_update::CurrentSyncCommitteeProofLen; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; use slog::{debug, error, Logger}; use ssz::Decode; -use ssz_types::FixedVector; use std::num::NonZeroUsize; use std::sync::Arc; use store::DBColumn; use store::KeyValueStore; use tree_hash::TreeHash; -use types::light_client_update::{ - FinalizedRootProofLen, NextSyncCommitteeProofLen, CURRENT_SYNC_COMMITTEE_INDEX, - FINALIZED_ROOT_INDEX, NEXT_SYNC_COMMITTEE_INDEX, -}; use types::non_zero_usize::new_non_zero_usize; use types::{ BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, EthSpec, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, - LightClientUpdate, Slot, SyncAggregate, SyncCommittee, + LightClientUpdate, MerkleProof, Slot, SyncAggregate, SyncCommittee, }; /// A prev block cache miss 
requires to re-generate the state of the post-parent block. Items in the @@ -69,17 +63,14 @@ impl LightClientServerCache { block_post_state: &mut BeaconState, ) -> Result<(), BeaconChainError> { let _timer = metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES); - + let fork_name = spec.fork_name_at_slot::(block.slot()); // Only post-altair - if spec.fork_name_at_slot::(block.slot()) == ForkName::Base { - return Ok(()); + if fork_name.altair_enabled() { + // Persist in memory cache for a descendent block + let cached_data = LightClientCachedData::from_state(block_post_state)?; + self.prev_block_cache.lock().put(block_root, cached_data); } - // Persist in memory cache for a descendent block - - let cached_data = LightClientCachedData::from_state(block_post_state)?; - self.prev_block_cache.lock().put(block_root, cached_data); - Ok(()) } @@ -430,16 +421,12 @@ impl Default for LightClientServerCache { } } -type FinalityBranch = FixedVector; -type NextSyncCommitteeBranch = FixedVector; -type CurrentSyncCommitteeBranch = FixedVector; - #[derive(Clone)] struct LightClientCachedData { finalized_checkpoint: Checkpoint, - finality_branch: FinalityBranch, - next_sync_committee_branch: NextSyncCommitteeBranch, - current_sync_committee_branch: CurrentSyncCommitteeBranch, + finality_branch: MerkleProof, + next_sync_committee_branch: MerkleProof, + current_sync_committee_branch: MerkleProof, next_sync_committee: Arc>, current_sync_committee: Arc>, finalized_block_root: Hash256, @@ -447,17 +434,18 @@ struct LightClientCachedData { impl LightClientCachedData { fn from_state(state: &mut BeaconState) -> Result { + let (finality_branch, next_sync_committee_branch, current_sync_committee_branch) = ( + state.compute_finalized_root_proof()?, + state.compute_current_sync_committee_proof()?, + state.compute_next_sync_committee_proof()?, + ); Ok(Self { finalized_checkpoint: state.finalized_checkpoint(), - finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), + finality_branch, next_sync_committee: state.next_sync_committee()?.clone(), current_sync_committee: state.current_sync_committee()?.clone(), - next_sync_committee_branch: state - .compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)? - .into(), - current_sync_committee_branch: state - .compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)? 
- .into(), + next_sync_committee_branch, + current_sync_committee_branch, finalized_block_root: state.finalized_checkpoint().root, }) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index f15b46fc4bf..f73775d678f 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -2,7 +2,7 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use bls::FixedBytesExtended; -pub use lighthouse_metrics::*; +pub use metrics::*; use slot_clock::SlotClock; use std::sync::LazyLock; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; @@ -1887,6 +1887,31 @@ pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> ) }); +pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "kzg_data_column_reconstruction_attempts", + "Count of times data column reconstruction has been attempted", + ) + }); + +pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "kzg_data_column_reconstruction_failures", + "Count of times data column reconstruction has failed", + ) + }); + +pub static KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "kzg_data_column_reconstruction_incomplete_total", + "Count of times data column reconstruction attempts did not result in an import", + &["reason"], + ) + }); + /* * light_client server metrics */ diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index 601241dd8ad..9b59a8f85b1 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -6,6 +6,7 @@ use crate::observed_block_producers::ProposalKey; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; +use std::sync::Arc; use types::{BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, Slot}; #[derive(Debug, PartialEq)] @@ -74,13 +75,13 @@ pub struct ObservedDataSidecars { finalized_slot: Slot, /// Stores all received data indices for a given `(ValidatorIndex, Slot)` tuple. items: HashMap>, - spec: ChainSpec, + spec: Arc, _phantom: PhantomData, } impl ObservedDataSidecars { /// Instantiates `Self` with `finalized_slot == 0`. 
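The metrics hunk above introduces three counters around data-column reconstruction: total attempts, hard failures, and attempts that completed without triggering an import, the last broken down by a `reason` label. A minimal sketch of the same pattern, written against the `prometheus` crate that the `metrics` wrapper builds on (the helper function and call site are hypothetical):

```rust
use prometheus::{register_int_counter, register_int_counter_vec, IntCounter, IntCounterVec};
use std::sync::LazyLock;

static ATTEMPTS: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kzg_data_column_reconstruction_attempts",
        "Count of times data column reconstruction has been attempted"
    )
    .unwrap()
});

static INCOMPLETE: LazyLock<IntCounterVec> = LazyLock::new(|| {
    register_int_counter_vec!(
        "kzg_data_column_reconstruction_incomplete_total",
        "Count of times data column reconstruction attempts did not result in an import",
        &["reason"]
    )
    .unwrap()
});

// Hypothetical call site: bump the attempt counter unconditionally, and record
// a labelled "incomplete" sample when the attempt produced no import.
fn record_reconstruction_outcome(imported: bool, reason: &str) {
    ATTEMPTS.inc();
    if !imported {
        // The "reason" label keeps one time series per distinct cause.
        INCOMPLETE.with_label_values(&[reason]).inc();
    }
}
```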
- pub fn new(spec: ChainSpec) -> Self { + pub fn new(spec: Arc) -> Self { Self { finalized_slot: Slot::new(0), items: HashMap::new(), @@ -167,7 +168,7 @@ mod tests { #[test] fn pruning() { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let mut cache = ObservedDataSidecars::>::new(spec); assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); @@ -306,7 +307,7 @@ mod tests { #[test] fn simple_observations() { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let mut cache = ObservedDataSidecars::>::new(spec); // Slot 0, index 0 diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index f966fcb313b..5594881bf3b 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -18,7 +18,6 @@ use crate::{ }; use bls::get_withdrawal_credentials; use eth2::types::SignedBlockContentsTuple; -use eth2_network_config::TRUSTED_SETUP_BYTES; use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ auth::JwtKey, @@ -31,6 +30,7 @@ use execution_layer::{ use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; +use kzg::trusted_setup::get_trusted_setup; use kzg::{Kzg, TrustedSetup}; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; @@ -43,13 +43,15 @@ use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slog::{o, Drain, Logger}; use slog_async::Async; -use slog_term::{FullFormat, TermDecorator}; +use slog_term::{FullFormat, PlainSyncDecorator, TermDecorator}; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; +use std::fs::{File, OpenOptions}; +use std::io::BufWriter; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, LazyLock}; @@ -69,6 +71,8 @@ use types::{typenum::U4294967296, *}; pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; // Environment variable to read if `fork_from_env` feature is enabled. pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; +// Environment variable to read if `ci_logger` feature is enabled. +pub const CI_LOGGER_DIR_ENV_VAR: &str = "CI_LOGGER_DIR"; // Default target aggregators to set during testing, this ensures an aggregator at each slot. // @@ -76,22 +80,40 @@ pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // a different value. 
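A recurring change in this diff is `ChainSpec` moving behind `Arc` (here in `ObservedDataSidecars::new`, and below in the test harness builder, `HotColdDB`, and the eth1 `Inner`). The point is that cloning the handle no longer deep-copies the large spec struct; a minimal sketch with stand-in types:

```rust
use std::sync::Arc;

// Stand-in for a large, read-only configuration struct.
struct Spec {
    seconds_per_slot: u64,
    // ... many more fields in the real ChainSpec ...
}

struct Cache {
    spec: Arc<Spec>,
}

fn main() {
    let spec = Arc::new(Spec { seconds_per_slot: 12 });
    // Each clone bumps a reference count; the Spec itself is never copied.
    let a = Cache { spec: spec.clone() };
    let b = Cache { spec: spec.clone() };
    assert_eq!(Arc::strong_count(&spec), 3);
    assert_eq!(a.spec.seconds_per_slot, b.spec.seconds_per_slot);
}
```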
pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::MAX; -pub static KZG: LazyLock> = LazyLock::new(|| { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) +static KZG: LazyLock> = LazyLock::new(|| { + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) .expect("should have trusted setup"); let kzg = Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg"); Arc::new(kzg) }); -pub static KZG_PEERDAS: LazyLock> = LazyLock::new(|| { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) +static KZG_PEERDAS: LazyLock> = LazyLock::new(|| { + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) .expect("should have trusted setup"); let kzg = Kzg::new_from_trusted_setup_das_enabled(trusted_setup).expect("should create kzg"); Arc::new(kzg) }); +static KZG_NO_PRECOMP: LazyLock> = LazyLock::new(|| { + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + let kzg = Kzg::new_from_trusted_setup_no_precomp(trusted_setup).expect("should create kzg"); + Arc::new(kzg) +}); + +pub fn get_kzg(spec: &ChainSpec) -> Arc { + if spec.eip7594_fork_epoch.is_some() { + KZG_PEERDAS.clone() + } else if spec.deneb_fork_epoch.is_some() { + KZG.clone() + } else { + KZG_NO_PRECOMP.clone() + } +} + pub type BaseHarnessType = Witness, E, THotStore, TColdStore>; @@ -189,7 +211,7 @@ pub fn test_spec() -> ChainSpec { pub struct Builder { eth_spec_instance: T::EthSpec, - spec: Option, + spec: Option>, validator_keypairs: Option>, withdrawal_keypairs: Vec>, chain_config: Option, @@ -384,12 +406,12 @@ where self.spec_or_default(None) } - pub fn spec(self, spec: ChainSpec) -> Self { + pub fn spec(self, spec: Arc) -> Self { self.spec_or_default(Some(spec)) } - pub fn spec_or_default(mut self, spec: Option) -> Self { - self.spec = Some(spec.unwrap_or_else(test_spec::)); + pub fn spec_or_default(mut self, spec: Option>) -> Self { + self.spec = Some(spec.unwrap_or_else(|| Arc::new(test_spec::()))); self } @@ -529,12 +551,13 @@ where let validator_keypairs = self .validator_keypairs .expect("cannot build without validator keypairs"); - let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + + let kzg = get_kzg(&spec); let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); let chain_config = self.chain_config.unwrap_or_default(); - let mut builder = BeaconChainBuilder::new(self.eth_spec_instance) + let mut builder = BeaconChainBuilder::new(self.eth_spec_instance, kzg.clone()) .logger(log.clone()) .custom_spec(spec.clone()) .store(self.store.expect("cannot build without store")) @@ -553,8 +576,7 @@ where log.clone(), 5, ))) - .validator_monitor_config(validator_monitor_config) - .kzg(kzg); + .validator_monitor_config(validator_monitor_config); builder = if let Some(mutator) = self.initial_mutator { mutator(builder) @@ -609,7 +631,7 @@ pub fn mock_execution_layer_from_parts( HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); - let kzg_opt = spec.deneb_fork_epoch.map(|_| KZG.clone()); + let kzg = get_kzg(spec); MockExecutionLayer::new( task_executor, @@ -619,7 +641,7 @@ pub fn mock_execution_layer_from_parts( prague_time, 
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec.clone(), - kzg_opt, + Some(kzg), ) } @@ -637,7 +659,7 @@ pub struct BeaconChainHarness { pub withdrawal_keypairs: Vec>, pub chain: Arc>, - pub spec: ChainSpec, + pub spec: Arc, pub shutdown_receiver: Arc>>, pub runtime: TestRuntime, @@ -867,7 +889,7 @@ where let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); // If we produce two blocks for the same slot, they hash up to the same value and - // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce + // BeaconChain errors out with `DuplicateFullyImported`. Vary the graffiti so that we produce // different blocks each time. let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); @@ -929,7 +951,7 @@ where let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); // If we produce two blocks for the same slot, they hash up to the same value and - // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce + // BeaconChain errors out with `DuplicateFullyImported`. Vary the graffiti so that we produce // different blocks each time. let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); @@ -2739,15 +2761,55 @@ pub struct MakeAttestationOptions { pub fork: Fork, } -pub fn build_log(level: slog::Level, enabled: bool) -> Logger { - let decorator = TermDecorator::new().build(); - let drain = FullFormat::new(decorator).build().fuse(); - let drain = Async::new(drain).build().fuse(); +pub enum LoggerType { + Test, + // The logs are output to files for each test. + CI, + // No logs will be printed. + Null, +} - if enabled { - Logger::root(drain.filter_level(level).fuse(), o!()) - } else { - Logger::root(drain.filter(|_| false).fuse(), o!()) +fn ci_decorator() -> PlainSyncDecorator> { + let log_dir = std::env::var(CI_LOGGER_DIR_ENV_VAR).unwrap_or_else(|e| { + panic!("{CI_LOGGER_DIR_ENV_VAR} env var must be defined when using ci_logger: {e:?}"); + }); + let fork_name = std::env::var(FORK_NAME_ENV_VAR) + .map(|s| format!("{s}_")) + .unwrap_or_default(); + // The current test name can be got via the thread name. + let test_name = std::thread::current() + .name() + .unwrap() + .to_string() + // Colons are not allowed in files that are uploaded to GitHub Artifacts. 
+ .replace("::", "_"); + let log_path = format!("/{log_dir}/{fork_name}{test_name}.log"); + let file = OpenOptions::new() + .create(true) + .append(true) + .open(log_path) + .unwrap(); + let file = BufWriter::new(file); + PlainSyncDecorator::new(file) +} + +pub fn build_log(level: slog::Level, logger_type: LoggerType) -> Logger { + match logger_type { + LoggerType::Test => { + let drain = FullFormat::new(TermDecorator::new().build()).build().fuse(); + let drain = Async::new(drain).chan_size(10_000).build().fuse(); + Logger::root(drain.filter_level(level).fuse(), o!()) + } + LoggerType::CI => { + let drain = FullFormat::new(ci_decorator()).build().fuse(); + let drain = Async::new(drain).chan_size(10_000).build().fuse(); + Logger::root(drain.filter_level(level).fuse(), o!()) + } + LoggerType::Null => { + let drain = FullFormat::new(TermDecorator::new().build()).build().fuse(); + let drain = Async::new(drain).build().fuse(); + Logger::root(drain.filter(|_| false).fuse(), o!()) + } } } @@ -2849,9 +2911,10 @@ pub fn generate_rand_block_and_data_columns( SignedBeaconBlock>, Vec>>, ) { + let kzg = get_kzg(spec); let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng); let blob: BlobsList = blobs.into_iter().map(|b| b.blob).collect::>().into(); - let data_columns = blobs_to_data_column_sidecars(&blob, &block, &KZG_PEERDAS, spec).unwrap(); + let data_columns = blobs_to_data_column_sidecars(&blob, &block, &kzg, spec).unwrap(); (block, data_columns) } diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 917c20bfa5a..877c297a3b7 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -232,7 +232,8 @@ mod test { fn get_store() -> BeaconStore { Arc::new( - HotColdDB::open_ephemeral(<_>::default(), E::default_spec(), test_logger()).unwrap(), + HotColdDB::open_ephemeral(<_>::default(), Arc::new(E::default_spec()), test_logger()) + .unwrap(), ) } diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index e1f2cbb284f..0b121356b9d 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -86,7 +86,7 @@ async fn produces_attestations_from_attestation_simulator_service() { let expected_miss_metrics_count = 0; let expected_hit_metrics_count = num_blocks_produced - UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64; - lighthouse_metrics::gather().iter().for_each(|mf| { + metrics::gather().iter().for_each(|mf| { if hit_prometheus_metrics.contains(&mf.get_name()) { assert_eq!( mf.get_metric()[0].get_counter().get_value() as u64, diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 335884d57a9..e168cbb6f4d 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -18,7 +18,7 @@ use ssz_types::BitVector; use state_processing::{ per_block_processing::errors::AttestationValidationError, per_slot_processing, }; -use std::sync::LazyLock; +use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; use types::{ signed_aggregate_and_proof::SignedAggregateAndProofRefMut, @@ -47,6 +47,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness BeaconChainHarness (BeaconChainHarness>, ChainSpec) { +) -> (BeaconChainHarness>, Arc) { let mut 
spec = E::default_spec(); spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH as u64)); + let spec = Arc::new(spec); let validator_keypairs = KEYPAIRS[0..validator_count].to_vec(); let genesis_state = interop_genesis_state( @@ -357,22 +359,24 @@ impl GossipTester { } pub fn earliest_valid_attestation_slot(&self) -> Slot { - let offset = match self.harness.spec.fork_name_at_epoch(self.epoch()) { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - // Subtract an additional slot since the harness will be exactly on the start of the - // slot and the propagation tolerance will allow an extra slot. - E::slots_per_epoch() + 1 - } + let offset = if self + .harness + .spec + .fork_name_at_epoch(self.epoch()) + .deneb_enabled() + { // EIP-7045 - ForkName::Deneb | ForkName::Electra => { - let epoch_slot_offset = (self.slot() % E::slots_per_epoch()).as_u64(); - if epoch_slot_offset != 0 { - E::slots_per_epoch() + epoch_slot_offset - } else { - // Here the propagation tolerance will cause the cutoff to be an entire epoch earlier - 2 * E::slots_per_epoch() - } + let epoch_slot_offset = (self.slot() % E::slots_per_epoch()).as_u64(); + if epoch_slot_offset != 0 { + E::slots_per_epoch() + epoch_slot_offset + } else { + // Here the propagation tolerance will cause the cutoff to be an entire epoch earlier + 2 * E::slots_per_epoch() } + } else { + // Subtract an additional slot since the harness will be exactly on the start of the + // slot and the propagation tolerance will allow an extra slot. + E::slots_per_epoch() + 1 }; self.slot() diff --git a/beacon_node/beacon_chain/tests/bellatrix.rs b/beacon_node/beacon_chain/tests/bellatrix.rs index 027082c11c9..5bd3452623a 100644 --- a/beacon_node/beacon_chain/tests/bellatrix.rs +++ b/beacon_node/beacon_chain/tests/bellatrix.rs @@ -49,7 +49,7 @@ async fn merge_with_terminal_block_hash_override() { spec.terminal_block_hash = genesis_pow_block_hash; let harness = BeaconChainHarness::builder(E::default()) - .spec(spec) + .spec(spec.into()) .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() @@ -106,7 +106,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { let mut execution_payloads = vec![]; let harness = BeaconChainHarness::builder(E::default()) - .spec(spec) + .spec(spec.into()) .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index faa4d74a182..d239f5089aa 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -976,7 +976,7 @@ async fn block_gossip_verification() { harness .chain - .process_gossip_blob(gossip_verified) + .process_gossip_blob(gossip_verified, || Ok(())) .await .expect("should import valid gossip verified blob"); } @@ -1173,7 +1173,7 @@ async fn block_gossip_verification() { assert!( matches!( unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), - BlockError::BlockIsAlreadyKnown(_), + BlockError::DuplicateImportStatusUnknown(_), ), "should register any valid signature against the proposer, even if the block failed later verification" ); @@ -1201,7 +1201,7 @@ async fn block_gossip_verification() { .verify_block_for_gossip(block.clone()) .await .expect_err("should error when 
processing known block"), - BlockError::BlockIsAlreadyKnown(_) + BlockError::DuplicateImportStatusUnknown(_) ), "the second proposal by this validator should be rejected" ); @@ -1247,7 +1247,7 @@ async fn verify_block_for_gossip_slashing_detection() { .unwrap(); harness .chain - .process_gossip_blob(verified_blob) + .process_gossip_blob(verified_blob, || Ok(())) .await .unwrap(); } @@ -1354,7 +1354,7 @@ async fn add_base_block_to_altair_chain() { spec.altair_fork_epoch = Some(Epoch::new(1)); let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec) + .spec(spec.into()) .keypairs(KEYPAIRS[..].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() @@ -1489,7 +1489,7 @@ async fn add_altair_block_to_base_chain() { spec.altair_fork_epoch = None; let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec) + .spec(spec.into()) .keypairs(KEYPAIRS[..].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() @@ -1622,7 +1622,7 @@ async fn import_duplicate_block_unrealized_justification() { let spec = MainnetEthSpec::default_spec(); let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec) + .spec(spec.into()) .keypairs(KEYPAIRS[..].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index c8fd2637f0c..ac97a95721d 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -39,7 +39,7 @@ async fn base_altair_bellatrix_capella() { spec.capella_fork_epoch = Some(capella_fork_epoch); let harness = BeaconChainHarness::builder(E::default()) - .spec(spec) + .spec(spec.into()) .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index 1261e2d53ea..31e69f0524e 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -12,7 +12,7 @@ type E = MinimalEthSpec; /// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API. #[tokio::test] async fn blob_sidecar_event_on_process_gossip_blob() { - let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + let spec = Arc::new(ForkName::Deneb.make_genesis_spec(E::default_spec())); let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .deterministic_keypairs(8) @@ -25,7 +25,7 @@ async fn blob_sidecar_event_on_process_gossip_blob() { let mut blob_event_receiver = event_handler.subscribe_blob_sidecar(); // build and process a gossip verified blob - let kzg = harness.chain.kzg.as_ref().unwrap(); + let kzg = harness.chain.kzg.as_ref(); let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); let sidecar = BlobSidecar::random_valid(&mut rng, kzg) .map(Arc::new) @@ -35,7 +35,7 @@ async fn blob_sidecar_event_on_process_gossip_blob() { let _ = harness .chain - .process_gossip_blob(gossip_verified_blob) + .process_gossip_blob(gossip_verified_blob, || Ok(())) .await .unwrap(); @@ -46,7 +46,7 @@ async fn blob_sidecar_event_on_process_gossip_blob() { /// Verifies that a blob event is emitted when blobs are received via RPC. 
#[tokio::test] async fn blob_sidecar_event_on_process_rpc_blobs() { - let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + let spec = Arc::new(ForkName::Deneb.make_genesis_spec(E::default_spec())); let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .deterministic_keypairs(8) @@ -59,7 +59,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { let mut blob_event_receiver = event_handler.subscribe_blob_sidecar(); // build and process multiple rpc blobs - let kzg = harness.chain.kzg.as_ref().unwrap(); + let kzg = harness.chain.kzg.as_ref(); let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); let mut blob_1 = BlobSidecar::random_valid(&mut rng, kzg).unwrap(); diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index 432c5506705..44fb298d6c6 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -30,7 +30,7 @@ type TestHarness = BeaconChainHarness>; type HotColdDB = store::HotColdDB, BeaconNodeBackend>; fn get_store(db_path: &TempDir) -> Arc { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); let blobs_path = db_path.path().join("blobs_db"); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index b455c3bace4..1325875a275 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,20 +1,14 @@ #![cfg(not(debug_assertions))] -use beacon_chain::otb_verification_service::{ - load_optimistic_transition_blocks, validate_optimistic_transition_blocks, - OptimisticTransitionBlock, -}; use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer, OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1}, - test_utils::ExecutionBlockGenerator, ExecutionLayer, ForkchoiceState, PayloadAttributes, }; use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; @@ -57,7 +51,7 @@ impl InvalidPayloadRig { spec.bellatrix_fork_epoch = Some(Epoch::new(0)); let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec) + .spec(spec.into()) .chain_config(ChainConfig { reconstruct_historic_states: true, ..ChainConfig::default() @@ -1270,552 +1264,6 @@ async fn attesting_to_optimistic_head() { get_aggregated_by_slot_and_root().unwrap(); } -/// A helper struct to build out a chain of some configurable length which undergoes the merge -/// transition. 
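The many `.spec(spec.into())` call sites in these test hunks compile because the standard library provides `impl From<T> for Arc<T>`: an owned `ChainSpec` converts into the now-required `Arc<ChainSpec>` at the call boundary with a single allocation and no extra clone. A tiny illustration with a stand-in spec type:

```rust
use std::sync::Arc;

#[derive(Default)]
struct ChainSpec {
    altair_fork_epoch: Option<u64>,
}

fn takes_spec(spec: Arc<ChainSpec>) -> Option<u64> {
    spec.altair_fork_epoch
}

fn main() {
    let mut spec = ChainSpec::default();
    spec.altair_fork_epoch = Some(1);
    // `into()` resolves to `Arc::from(spec)` via the parameter's target type.
    assert_eq!(takes_spec(spec.into()), Some(1));
}
```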
-struct OptimisticTransitionSetup { - blocks: Vec>>, - execution_block_generator: ExecutionBlockGenerator, -} - -impl OptimisticTransitionSetup { - async fn new(num_blocks: usize, ttd: u64) -> Self { - let mut spec = E::default_spec(); - spec.terminal_total_difficulty = Uint256::from(ttd); - let mut rig = InvalidPayloadRig::new_with_spec(spec).enable_attestations(); - rig.move_to_terminal_block(); - - let mut blocks = Vec::with_capacity(num_blocks); - for _ in 0..num_blocks { - let root = rig.import_block(Payload::Valid).await; - let block = rig.harness.chain.get_block(&root).await.unwrap().unwrap(); - blocks.push(Arc::new(block)); - } - - let execution_block_generator = rig - .harness - .mock_execution_layer - .as_ref() - .unwrap() - .server - .execution_block_generator() - .clone(); - - Self { - blocks, - execution_block_generator, - } - } -} - -/// Build a chain which has optimistically imported a transition block. -/// -/// The initial chain will be built with respect to `block_ttd`, whilst the `rig` which imports the -/// chain will operate with respect to `rig_ttd`. This allows for testing mismatched TTDs. -async fn build_optimistic_chain( - block_ttd: u64, - rig_ttd: u64, - num_blocks: usize, -) -> InvalidPayloadRig { - let OptimisticTransitionSetup { - blocks, - execution_block_generator, - } = OptimisticTransitionSetup::new(num_blocks, block_ttd).await; - // Build a brand-new testing harness. We will apply the blocks from the previous harness to - // this one. - let mut spec = E::default_spec(); - spec.terminal_total_difficulty = Uint256::from(rig_ttd); - let rig = InvalidPayloadRig::new_with_spec(spec); - - let spec = &rig.harness.chain.spec; - let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); - - // Ensure all the execution blocks from the first rig are available in the second rig. - *mock_execution_layer.server.execution_block_generator() = execution_block_generator; - - // Make the execution layer respond `SYNCING` to all `newPayload` requests. - mock_execution_layer - .server - .all_payloads_syncing_on_new_payload(true); - // Make the execution layer respond `SYNCING` to all `forkchoiceUpdated` requests. - mock_execution_layer - .server - .all_payloads_syncing_on_forkchoice_updated(); - // Make the execution layer respond `None` to all `getBlockByHash` requests. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_none(); - - let current_slot = std::cmp::max( - blocks[0].slot() + spec.safe_slots_to_import_optimistically, - num_blocks.into(), - ); - rig.harness.set_current_slot(current_slot); - - for block in blocks { - rig.harness - .chain - .process_block( - block.canonical_root(), - block, - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ) - .await - .unwrap(); - } - - rig.harness.chain.recompute_head_at_current_slot().await; - - // Make the execution layer respond normally to `getBlockByHash` requests. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - // Perform some sanity checks to ensure that the transition happened exactly where we expected. 
- let pre_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(0), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let pre_transition_block = rig - .harness - .chain - .get_block(&pre_transition_block_root) - .await - .unwrap() - .unwrap(); - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - assert_eq!( - pre_transition_block_root, - post_transition_block.parent_root(), - "the blocks form a single chain" - ); - assert!( - pre_transition_block - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "the block *has not* undergone the merge transition" - ); - assert!( - !post_transition_block - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "the block *has* undergone the merge transition" - ); - - // Assert that the transition block was optimistically imported. - // - // Note: we're using the "fallback" check for optimistic status, so if the block was - // pre-finality then we'll just use the optimistic status of the finalized block. - assert!( - rig.harness - .chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_or_invalid_block(&post_transition_block_root) - .unwrap(), - "the transition block should be imported optimistically" - ); - - // Get the mock execution layer to respond to `getBlockByHash` requests normally again. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - rig -} - -#[tokio::test] -async fn optimistic_transition_block_valid_unfinalized() { - let ttd = 42; - let num_blocks = 16_usize; - let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - valid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should validate fine"); - // now that the transition block has been validated, it should have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert!( - otbs.is_empty(), - "The valid optimistic transition block should have been removed from the database", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_valid_finalized() { - let ttd = 42; - let num_blocks = 130_usize; - let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), 
WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - > post_transition_block.slot(), - "the transition block should be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - valid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should validate fine"); - // now that the transition block has been validated, it should have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert!( - otbs.is_empty(), - "The valid optimistic transition block should have been removed from the database", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_unfinalized() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 22_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It shouldn't be known as invalid yet - assert!(!rig - .execution_status(post_transition_block_root) - .is_invalid()); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. 
- assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It should be marked invalid now - assert!(rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 22_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It shouldn't be known as invalid yet - assert!(!rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // Make the execution layer respond `None` to all `getBlockByHash` requests to simulate a - // syncing EE. - let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_none(); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - - // It should still be marked as optimistic. - assert!(rig - .execution_status(post_transition_block_root) - .is_strictly_optimistic()); - - // the optimistic merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The optimistic merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // Allow the EL to respond to `getBlockByHash`, as if it has finished syncing. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. 
- assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It should be marked invalid now - assert!(rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_finalized() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 130_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - > post_transition_block.slot(), - "the transition block should be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered yet. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should invalidate merge transition block and shutdown the client"); - - // The beacon chain should have triggered a shutdown. - assert_eq!( - rig.harness.shutdown_reasons(), - vec![ShutdownReason::Failure( - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON - )] - ); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - /// Helper for running tests where we generate a chain with an invalid head and then a /// `fork_block` to recover it. 
struct InvalidHeadSetup { diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 323f4f38eb2..be7045c54a9 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -32,7 +32,7 @@ fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { }; let harness = BeaconChainHarness::builder(E::default()) - .spec(spec) + .spec(Arc::new(spec)) .keypairs(KEYPAIRS.to_vec()) .fresh_ephemeral_store() .chain_config(chain_config) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 7b524b5a3f9..facd2e3546e 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -7,8 +7,8 @@ use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; use beacon_chain::test_utils::SyncCommitteeStrategy; use beacon_chain::test_utils::{ - mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, - BlockStrategy, DiskHarnessType, KZG, + get_kzg, mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, + BlockStrategy, DiskHarnessType, }; use beacon_chain::{ data_availability_checker::MaybeAvailableBlock, historical_blocks::HistoricalBlockError, @@ -69,7 +69,7 @@ fn get_store_generic( &blobs_path, |_, _, _| Ok(()), config, - spec, + spec.into(), log, ) .expect("disk store should initialize") @@ -164,7 +164,7 @@ async fn light_client_bootstrap_test() { .unwrap() .unwrap(); - let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + let kzg = get_kzg(&spec); let mock = mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); @@ -180,9 +180,9 @@ async fn light_client_bootstrap_test() { let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) .store(store.clone()) - .custom_spec(test_spec::()) + .custom_spec(test_spec::().into()) .task_executor(harness.chain.task_executor.clone()) .logger(log.clone()) .weak_subjectivity_state( @@ -203,17 +203,9 @@ async fn light_client_bootstrap_test() { 1, ))) .execution_layer(Some(mock.el)) - .kzg(kzg) .build() .expect("should build"); - let current_state = harness.get_current_state(); - - if ForkName::Electra == current_state.fork_name_unchecked() { - // TODO(electra) fix beacon state `compute_merkle_proof` - return; - } - let finalized_checkpoint = beacon_chain .canonical_head .cached_head() @@ -299,7 +291,7 @@ async fn light_client_updates_test() { .unwrap() .unwrap(); - let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + let kzg = get_kzg(&spec); let mock = mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); @@ -324,9 +316,9 @@ async fn light_client_updates_test() { let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) .store(store.clone()) - .custom_spec(test_spec::()) + .custom_spec(test_spec::().into()) .task_executor(harness.chain.task_executor.clone()) .logger(log.clone()) .weak_subjectivity_state( @@ -347,7 +339,6 @@ async fn light_client_updates_test() { 1, ))) .execution_layer(Some(mock.el)) - .kzg(kzg) .build() .expect("should build"); @@ -355,11 +346,6 @@ async fn light_client_updates_test() { let current_state = 
harness.get_current_state(); - if ForkName::Electra == current_state.fork_name_unchecked() { - // TODO(electra) fix beacon state `compute_merkle_proof` - return; - } - // calculate the sync period from the previous slot let sync_period = (current_state.slot() - Slot::new(1)) .epoch(E::slots_per_epoch()) @@ -2519,7 +2505,7 @@ async fn pruning_test( } #[tokio::test] -async fn garbage_collect_temp_states_from_failed_block() { +async fn garbage_collect_temp_states_from_failed_block_on_startup() { let db_path = tempdir().unwrap(); // Wrap these functions to ensure the variables are dropped before we try to open another @@ -2576,6 +2562,61 @@ async fn garbage_collect_temp_states_from_failed_block() { assert_eq!(store.iter_temporary_state_roots().unwrap().count(), 0); } +#[tokio::test] +async fn garbage_collect_temp_states_from_failed_block_on_finalization() { + let db_path = tempdir().unwrap(); + + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let slots_per_epoch = E::slots_per_epoch(); + + let genesis_state = harness.get_current_state(); + let block_slot = Slot::new(2 * slots_per_epoch); + let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await; + + let (mut block, _) = (*signed_block).clone().deconstruct(); + + // Mutate the block to make it invalid, and re-sign it. + *block.state_root_mut() = Hash256::repeat_byte(0xff); + let proposer_index = block.proposer_index() as usize; + let block = Arc::new(block.sign( + &harness.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &harness.spec, + )); + + // The block should be rejected, but should store a bunch of temporary states. + harness.set_current_slot(block_slot); + harness + .process_block_result((block, None)) + .await + .unwrap_err(); + + assert_eq!( + store.iter_temporary_state_roots().count(), + block_slot.as_usize() - 1 + ); + + // Finalize the chain without the block, which should result in pruning of all temporary states. + let blocks_required_to_finalize = 3 * slots_per_epoch; + harness.advance_slot(); + harness + .extend_chain( + blocks_required_to_finalize as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Check that the finalization migration ran. + assert_ne!(store.get_split_slot(), 0); + + // Check that temporary states have been pruned. 
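The new `garbage_collect_temp_states_from_failed_block_on_finalization` test above pins down the second pruning path: temporary states left behind by a failed import are swept when the finalization migration advances the split. A toy model of that behaviour (all names hypothetical; the real store keys states by root in a database rather than an in-memory map):

```rust
use std::collections::HashMap;

struct Store {
    split_slot: u64,
    // root -> slot of each temporary (not-yet-canonical) state.
    temporary_states: HashMap<[u8; 32], u64>,
}

impl Store {
    // Finalization migration: advance the split and drop temp states behind it.
    fn migrate_to(&mut self, new_split: u64) {
        self.split_slot = new_split;
        self.temporary_states.retain(|_root, slot| *slot >= new_split);
    }
}

fn main() {
    let mut store = Store {
        split_slot: 0,
        temporary_states: HashMap::new(),
    };
    // Leftovers from a block that failed verification after state advances.
    store.temporary_states.insert([0xaa; 32], 5);
    store.temporary_states.insert([0xbb; 32], 6);
    store.migrate_to(32);
    assert!(store.temporary_states.is_empty());
}
```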
+ assert_eq!(store.iter_temporary_state_roots().count(), 0); +} + #[tokio::test] async fn weak_subjectivity_sync_easy() { let num_initial_slots = E::slots_per_epoch() * 11; @@ -2683,7 +2724,8 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { let store = get_store(&temp2); let spec = test_spec::(); let seconds_per_slot = spec.seconds_per_slot; - let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + + let kzg = get_kzg(&spec); let mock = mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); @@ -2697,9 +2739,9 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { ); slot_clock.set_slot(harness.get_current_slot().as_u64()); - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) .store(store.clone()) - .custom_spec(test_spec::()) + .custom_spec(test_spec::().into()) .task_executor(harness.chain.task_executor.clone()) .logger(log.clone()) .weak_subjectivity_state( @@ -2720,7 +2762,6 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { 1, ))) .execution_layer(Some(mock.el)) - .kzg(kzg) .build() .expect("should build"); @@ -3167,7 +3208,7 @@ async fn revert_minority_fork_on_resume() { let db_path1 = tempdir().unwrap(); let store1 = get_store_generic(&db_path1, StoreConfig::default(), spec1.clone()); let harness1 = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec1) + .spec(spec1.clone().into()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store1) .mock_execution_layer() @@ -3177,7 +3218,7 @@ async fn revert_minority_fork_on_resume() { let db_path2 = tempdir().unwrap(); let store2 = get_store_generic(&db_path2, StoreConfig::default(), spec2.clone()); let harness2 = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec2.clone()) + .spec(spec2.clone().into()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store2) .mock_execution_layer() @@ -3273,7 +3314,7 @@ async fn revert_minority_fork_on_resume() { let resume_store = get_store_generic(&db_path1, StoreConfig::default(), spec2.clone()); let resumed_harness = TestHarness::builder(MinimalEthSpec) - .spec(spec2) + .spec(spec2.clone().into()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .resumed_disk_store(resume_store) .override_store_mutator(Box::new(move |mut builder| { diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index f8da2e8da1c..d1b3139d42c 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -30,7 +30,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness= 1 that +/// seems reasonable. 
+const MIN_QUEUE_LEN: usize = 128; + /// Maximum number of queued items that will be stored before dropping them pub struct BeaconProcessorQueueLengths { aggregate_queue: usize, @@ -130,6 +135,7 @@ pub struct BeaconProcessorQueueLengths { lc_bootstrap_queue: usize, lc_optimistic_update_queue: usize, lc_finality_update_queue: usize, + lc_update_range_queue: usize, api_request_p0_queue: usize, api_request_p1_queue: usize, } @@ -155,9 +161,15 @@ impl BeaconProcessorQueueLengths { aggregate_queue: 4096, unknown_block_aggregate_queue: 1024, // Capacity for a full slot's worth of attestations if subscribed to all subnets - attestation_queue: active_validator_count / slots_per_epoch, + attestation_queue: std::cmp::max( + active_validator_count / slots_per_epoch, + MIN_QUEUE_LEN, + ), // Capacity for a full slot's worth of attestations if subscribed to all subnets - unknown_block_attestation_queue: active_validator_count / slots_per_epoch, + unknown_block_attestation_queue: std::cmp::max( + active_validator_count / slots_per_epoch, + MIN_QUEUE_LEN, + ), sync_message_queue: 2048, sync_contribution_queue: 1024, gossip_voluntary_exit_queue: 4096, @@ -191,6 +203,7 @@ impl BeaconProcessorQueueLengths { lc_bootstrap_queue: 1024, lc_optimistic_update_queue: 512, lc_finality_update_queue: 512, + lc_update_range_queue: 512, api_request_p0_queue: 1024, api_request_p1_queue: 1024, }) @@ -611,6 +624,7 @@ pub enum Work { LightClientBootstrapRequest(BlockingFn), LightClientOptimisticUpdateRequest(BlockingFn), LightClientFinalityUpdateRequest(BlockingFn), + LightClientUpdatesByRangeRequest(BlockingFn), ApiRequestP0(BlockingOrAsync), ApiRequestP1(BlockingOrAsync), } @@ -662,6 +676,7 @@ pub enum WorkType { LightClientBootstrapRequest, LightClientOptimisticUpdateRequest, LightClientFinalityUpdateRequest, + LightClientUpdatesByRangeRequest, ApiRequestP0, ApiRequestP1, } @@ -712,6 +727,7 @@ impl Work { WorkType::LightClientOptimisticUpdateRequest } Work::LightClientFinalityUpdateRequest(_) => WorkType::LightClientFinalityUpdateRequest, + Work::LightClientUpdatesByRangeRequest(_) => WorkType::LightClientUpdatesByRangeRequest, Work::UnknownBlockAttestation { .. } => WorkType::UnknownBlockAttestation, Work::UnknownBlockAggregate { .. } => WorkType::UnknownBlockAggregate, Work::UnknownBlockSamplingRequest { .. } => WorkType::UnknownBlockSamplingRequest, @@ -891,6 +907,7 @@ impl BeaconProcessor { let mut lc_optimistic_update_queue = FifoQueue::new(queue_lengths.lc_optimistic_update_queue); let mut lc_finality_update_queue = FifoQueue::new(queue_lengths.lc_finality_update_queue); + let mut lc_update_range_queue = FifoQueue::new(queue_lengths.lc_update_range_queue); let mut api_request_p0_queue = FifoQueue::new(queue_lengths.api_request_p0_queue); let mut api_request_p1_queue = FifoQueue::new(queue_lengths.api_request_p1_queue); @@ -1368,6 +1385,9 @@ impl BeaconProcessor { Work::LightClientFinalityUpdateRequest { .. } => { lc_finality_update_queue.push(work, work_id, &self.log) } + Work::LightClientUpdatesByRangeRequest { .. } => { + lc_update_range_queue.push(work, work_id, &self.log) + } Work::UnknownBlockAttestation { .. 
} => { unknown_block_attestation_queue.push(work) } @@ -1459,6 +1479,7 @@ impl BeaconProcessor { WorkType::LightClientFinalityUpdateRequest => { lc_finality_update_queue.len() } + WorkType::LightClientUpdatesByRangeRequest => lc_update_range_queue.len(), WorkType::ApiRequestP0 => api_request_p0_queue.len(), WorkType::ApiRequestP1 => api_request_p1_queue.len(), }; @@ -1611,7 +1632,8 @@ impl BeaconProcessor { | Work::GossipBlsToExecutionChange(process_fn) | Work::LightClientBootstrapRequest(process_fn) | Work::LightClientOptimisticUpdateRequest(process_fn) - | Work::LightClientFinalityUpdateRequest(process_fn) => { + | Work::LightClientFinalityUpdateRequest(process_fn) + | Work::LightClientUpdatesByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } }; @@ -1686,3 +1708,21 @@ impl Drop for SendOnDrop { } } } + +#[cfg(test)] +mod tests { + use super::*; + use types::{BeaconState, ChainSpec, Eth1Data, ForkName, MainnetEthSpec}; + + #[test] + fn min_queue_len() { + // State with no validators. + let spec = ForkName::latest().make_genesis_spec(ChainSpec::mainnet()); + let genesis_time = 0; + let state = BeaconState::::new(genesis_time, Eth1Data::default(), &spec); + assert_eq!(state.validators().len(), 0); + let queue_lengths = BeaconProcessorQueueLengths::from_state(&state, &spec).unwrap(); + assert_eq!(queue_lengths.attestation_queue, MIN_QUEUE_LEN); + assert_eq!(queue_lengths.unknown_block_attestation_queue, MIN_QUEUE_LEN); + } +} diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index 0a7bdba18d1..fc8c712f4e7 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 88ae650e729..21a6e42cc50 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -20,6 +20,7 @@ types = { workspace = true } eth2_config = { workspace = true } slot_clock = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } error-chain = { workspace = true } slog = { workspace = true } tokio = { workspace = true } @@ -27,11 +28,12 @@ futures = { workspace = true } dirs = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } +kzg = { workspace = true } sensitive_url = { workspace = true } genesis = { workspace = true } task_executor = { workspace = true } environment = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } time = "0.3.5" directory = { workspace = true } http_api = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 760c683d07a..688d3a35839 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -77,7 +77,7 @@ pub struct ClientBuilder { #[allow(clippy::type_complexity)] store: Option>>, runtime_context: Option>, - chain_spec: Option, + chain_spec: Option>, beacon_chain_builder: Option>, beacon_chain: Option>>, eth1_service: Option, @@ -138,7 +138,7 @@ where } /// Specifies the `ChainSpec`. 
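The queue-sizing hunks above exist because the attestation queue lengths are derived as `active_validator_count / slots_per_epoch`, which truncates to zero on small testnets and would make those queues drop everything; clamping to `MIN_QUEUE_LEN` (exactly what the new `min_queue_len` unit test asserts) keeps a usable floor. The arithmetic in isolation, with a hypothetical function name:

```rust
const MIN_QUEUE_LEN: usize = 128;

fn attestation_queue_len(active_validator_count: usize, slots_per_epoch: usize) -> usize {
    // Integer division truncates: 0 validators / 32 slots = 0 without the clamp.
    std::cmp::max(active_validator_count / slots_per_epoch, MIN_QUEUE_LEN)
}

fn main() {
    assert_eq!(attestation_queue_len(0, 32), 128); // clamped floor
    assert_eq!(attestation_queue_len(1_000_000, 32), 31_250); // untouched at scale
}
```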
- pub fn chain_spec(mut self, spec: ChainSpec) -> Self { + pub fn chain_spec(mut self, spec: Arc) -> Self { self.chain_spec = Some(spec); self } @@ -196,7 +196,17 @@ where None }; - let builder = BeaconChainBuilder::new(eth_spec_instance) + let kzg_err_msg = |e| format!("Failed to load trusted setup: {:?}", e); + let trusted_setup = config.trusted_setup.clone(); + let kzg = if spec.is_peer_das_scheduled() { + Kzg::new_from_trusted_setup_das_enabled(trusted_setup).map_err(kzg_err_msg)? + } else if spec.deneb_fork_epoch.is_some() { + Kzg::new_from_trusted_setup(trusted_setup).map_err(kzg_err_msg)? + } else { + Kzg::new_from_trusted_setup_no_precomp(trusted_setup).map_err(kzg_err_msg)? + }; + + let builder = BeaconChainBuilder::new(eth_spec_instance, Arc::new(kzg)) .logger(context.log().clone()) .store(store) .task_executor(context.executor.clone()) @@ -595,10 +605,9 @@ where }; let genesis_state = genesis_service - .wait_for_genesis_state( - Duration::from_millis(ETH1_GENESIS_UPDATE_INTERVAL_MILLIS), - context.eth2_config().spec.clone(), - ) + .wait_for_genesis_state(Duration::from_millis( + ETH1_GENESIS_UPDATE_INTERVAL_MILLIS, + )) .await?; let _ = exit_tx.send(()); @@ -624,20 +633,6 @@ where ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?, }; - let beacon_chain_builder = if let Some(trusted_setup) = config.trusted_setup { - let kzg_err_msg = |e| format!("Failed to load trusted setup: {:?}", e); - - let kzg = if spec.is_peer_das_scheduled() { - Kzg::new_from_trusted_setup_das_enabled(trusted_setup).map_err(kzg_err_msg)? - } else { - Kzg::new_from_trusted_setup(trusted_setup).map_err(kzg_err_msg)? - }; - - beacon_chain_builder.kzg(Some(Arc::new(kzg))) - } else { - beacon_chain_builder - }; - if config.sync_eth1_chain { self.eth1_service = eth1_service_option; } @@ -646,7 +641,7 @@ where } /// Starts the networking stack. 
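In the builder hunk above, the KZG context becomes a hard requirement of `BeaconChainBuilder::new`, and the precompute mode is chosen from the fork schedule up front rather than being gated on a configured Deneb epoch. A stubbed sketch of that three-way selection (the real constructors are fallible and consume a parsed `TrustedSetup`; the stubs here are infallible):

```rust
struct TrustedSetup;
struct Kzg;

impl Kzg {
    // Stubs mirroring the constructors named in the diff.
    fn new_from_trusted_setup_das_enabled(_t: TrustedSetup) -> Kzg { Kzg } // PeerDAS: cell proofs
    fn new_from_trusted_setup(_t: TrustedSetup) -> Kzg { Kzg } // Deneb: blob proofs
    fn new_from_trusted_setup_no_precomp(_t: TrustedSetup) -> Kzg { Kzg } // pre-Deneb: skip precompute
}

fn build_kzg(peer_das_scheduled: bool, deneb_scheduled: bool, setup: TrustedSetup) -> Kzg {
    if peer_das_scheduled {
        Kzg::new_from_trusted_setup_das_enabled(setup)
    } else if deneb_scheduled {
        Kzg::new_from_trusted_setup(setup)
    } else {
        Kzg::new_from_trusted_setup_no_precomp(setup)
    }
}

fn main() {
    let _kzg = build_kzg(false, true, TrustedSetup);
}
```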
- pub async fn network(mut self, config: &NetworkConfig) -> Result { + pub async fn network(mut self, config: Arc) -> Result { let beacon_chain = self .beacon_chain .clone() diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 16000374b22..a25216ff3ec 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -4,6 +4,7 @@ use beacon_chain::TrustedSetup; use beacon_processor::BeaconProcessorConfig; use directory::DEFAULT_ROOT_DIR; use environment::LoggerConfig; +use kzg::trusted_setup::get_trusted_setup; use network::NetworkConfig; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; @@ -75,7 +76,7 @@ pub struct Config { pub chain: beacon_chain::ChainConfig, pub eth1: eth1::Config, pub execution_layer: Option, - pub trusted_setup: Option, + pub trusted_setup: TrustedSetup, pub http_api: http_api::Config, pub http_metrics: http_metrics::Config, pub monitoring_api: Option, @@ -89,6 +90,9 @@ pub struct Config { impl Default for Config { fn default() -> Self { + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) + .expect("Unable to read trusted setup file"); + Self { data_dir: PathBuf::from(DEFAULT_ROOT_DIR), db_name: "chain_db".to_string(), @@ -103,7 +107,7 @@ impl Default for Config { sync_eth1_chain: false, eth1: <_>::default(), execution_layer: None, - trusted_setup: None, + trusted_setup, beacon_graffiti: GraffitiOrigin::default(), http_api: <_>::default(), http_metrics: <_>::default(), diff --git a/beacon_node/client/src/metrics.rs b/beacon_node/client/src/metrics.rs index ebc4fe70a71..e5c07baddc2 100644 --- a/beacon_node/client/src/metrics.rs +++ b/beacon_node/client/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static SYNC_SLOTS_PER_SECOND: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 632188014eb..839d296c768 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -436,7 +436,7 @@ async fn capella_readiness_logging( .snapshot .beacon_state .fork_name_unchecked() - >= ForkName::Capella; + .capella_enabled(); let has_execution_layer = beacon_chain.execution_layer.is_some(); @@ -496,7 +496,7 @@ async fn deneb_readiness_logging( .snapshot .beacon_state .fork_name_unchecked() - >= ForkName::Deneb; + .deneb_enabled(); let has_execution_layer = beacon_chain.execution_layer.is_some(); diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 4910cfd2e1b..50400a77e06 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -25,7 +25,7 @@ logging = { workspace = true } superstruct = { workspace = true } tokio = { workspace = true } state_processing = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } task_executor = { workspace = true } eth2 = { workspace = true } sensitive_url = { workspace = true } diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index 7387642bf4c..1f45346256b 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -9,6 +9,7 @@ use parking_lot::RwLock; use ssz::four_byte_option_impl; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use std::sync::Arc; use superstruct::superstruct; use types::{ChainSpec, DepositTreeSnapshot, Eth1Data}; @@ -51,7 +52,7 @@ pub struct Inner { pub to_finalize: RwLock>, pub config: RwLock, pub 
remote_head_block: RwLock>, - pub spec: ChainSpec, + pub spec: Arc, } impl Inner { @@ -71,7 +72,7 @@ impl Inner { } /// Recover `Inner` given byte representation of eth1 deposit and block caches. - pub fn from_bytes(bytes: &[u8], config: Config, spec: ChainSpec) -> Result { + pub fn from_bytes(bytes: &[u8], config: Config, spec: Arc) -> Result { SszEth1Cache::from_ssz_bytes(bytes) .map_err(|e| format!("Ssz decoding error: {:?}", e))? .to_inner(config, spec) @@ -109,7 +110,7 @@ impl SszEth1Cache { } } - pub fn to_inner(&self, config: Config, spec: ChainSpec) -> Result { + pub fn to_inner(&self, config: Config, spec: Arc) -> Result { Ok(Inner { block_cache: RwLock::new(self.block_cache.clone()), deposit_cache: RwLock::new(DepositUpdater { diff --git a/beacon_node/eth1/src/metrics.rs b/beacon_node/eth1/src/metrics.rs index 9a11e7a6920..1df4ba0df9a 100644 --- a/beacon_node/eth1/src/metrics.rs +++ b/beacon_node/eth1/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index e5d60fac49c..71ab98a6a20 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -397,7 +397,7 @@ pub struct Service { impl Service { /// Creates a new service. Does not attempt to connect to the eth1 node. - pub fn new(config: Config, log: Logger, spec: ChainSpec) -> Result { + pub fn new(config: Config, log: Logger, spec: Arc) -> Result { Ok(Self { inner: Arc::new(Inner { block_cache: <_>::default(), @@ -414,6 +414,10 @@ impl Service { }) } + pub fn chain_spec(&self) -> &Arc { + &self.inner.spec + } + pub fn client(&self) -> &HttpJsonRpc { &self.inner.endpoint } @@ -422,7 +426,7 @@ impl Service { pub fn from_deposit_snapshot( config: Config, log: Logger, - spec: ChainSpec, + spec: Arc, deposit_snapshot: &DepositTreeSnapshot, ) -> Result { let deposit_cache = @@ -464,7 +468,7 @@ impl Service { bytes: &[u8], config: Config, log: Logger, - spec: ChainSpec, + spec: Arc, ) -> Result { let inner = Inner::from_bytes(bytes, config, spec)?; Ok(Self { @@ -545,10 +549,11 @@ impl Service { /// Returns the number of deposits with valid signatures that have been observed. pub fn get_valid_signature_count(&self) -> Option { + let highest_safe_block = self.highest_safe_block()?; self.deposits() .read() .cache - .get_valid_signature_count(self.highest_safe_block()?) + .get_valid_signature_count(highest_safe_block) } /// Returns the number of deposits with valid signatures that have been observed, without diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 3ad9b34381a..e442ce48630 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -8,6 +8,7 @@ use logging::test_logger; use merkle_proof::verify_merkle_proof; use sensitive_url::SensitiveUrl; use std::ops::Range; +use std::sync::Arc; use std::time::Duration; use tree_hash::TreeHash; use types::{ @@ -122,8 +123,12 @@ mod eth1_cache { }; let cache_follow_distance = config.cache_follow_distance(); - let service = - Service::new(config, log.clone(), MainnetEthSpec::default_spec()).unwrap(); + let service = Service::new( + config, + log.clone(), + Arc::new(MainnetEthSpec::default_spec()), + ) + .unwrap(); // Create some blocks and then consume them, performing the test `rounds` times. 
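+            // (`rounds` is 2 in the loop below.)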
        for round in 0..2 {
@@ -204,7 +209,7 @@ mod eth1_cache {
                    ..Config::default()
                },
                log,
-                MainnetEthSpec::default_spec(),
+                Arc::new(MainnetEthSpec::default_spec()),
            )
            .unwrap();
@@ -259,7 +264,7 @@ mod eth1_cache {
                    ..Config::default()
                },
                log,
-                MainnetEthSpec::default_spec(),
+                Arc::new(MainnetEthSpec::default_spec()),
            )
            .unwrap();
@@ -310,7 +315,7 @@ mod eth1_cache {
                    ..Config::default()
                },
                log,
-                MainnetEthSpec::default_spec(),
+                Arc::new(MainnetEthSpec::default_spec()),
            )
            .unwrap();
@@ -365,7 +370,7 @@ mod deposit_tree {
                    ..Config::default()
                },
                log,
-                MainnetEthSpec::default_spec(),
+                Arc::new(MainnetEthSpec::default_spec()),
            )
            .unwrap();
@@ -447,7 +452,7 @@ mod deposit_tree {
                    ..Config::default()
                },
                log,
-                MainnetEthSpec::default_spec(),
+                Arc::new(MainnetEthSpec::default_spec()),
            )
            .unwrap();
@@ -694,7 +699,7 @@ mod fast {
        let anvil_client = eth1.json_rpc_client();
        let now = get_block_number(&anvil_client).await;
-        let spec = MainnetEthSpec::default_spec();
+        let spec = Arc::new(MainnetEthSpec::default_spec());
        let service = Service::new(
            Config {
                endpoint: Eth1Endpoint::NoAuth(
@@ -788,8 +793,12 @@ mod persist {
            block_cache_truncation: None,
            ..Config::default()
        };
-        let service =
-            Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec()).unwrap();
+        let service = Service::new(
+            config.clone(),
+            log.clone(),
+            Arc::new(MainnetEthSpec::default_spec()),
+        )
+        .unwrap();
        let n = 10;
        let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
        for deposit in &deposits {
@@ -828,9 +837,13 @@ mod persist {
        // Drop service and recover from bytes
        drop(service);
-        let recovered_service =
-            Service::from_bytes(&eth1_bytes, config, log, MainnetEthSpec::default_spec())
-                .unwrap();
+        let recovered_service = Service::from_bytes(
+            &eth1_bytes,
+            config,
+            log,
+            Arc::new(MainnetEthSpec::default_spec()),
+        )
+        .unwrap();
        assert_eq!(
            recovered_service.block_cache_len(),
            block_count,
diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml
index 93d8086149d..0ef101fae7c 100644
--- a/beacon_node/execution_layer/Cargo.toml
+++ b/beacon_node/execution_layer/Cargo.toml
@@ -35,7 +35,7 @@ slot_clock = { workspace = true }
 tempfile = { workspace = true }
 rand = { workspace = true }
 zeroize = { workspace = true }
-lighthouse_metrics = { workspace = true }
+metrics = { workspace = true }
 ethers-core = { workspace = true }
 builder_client = { path = "../builder_client" }
 fork_choice = { workspace = true }
@@ -52,3 +52,4 @@ alloy-rlp = { workspace = true }
 alloy-consensus = { workspace = true }
 lighthouse_version = { workspace = true }
 fixed_bytes = { workspace = true }
+sha2 = { workspace = true }
diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs
index 8ba8ecfffbc..1c23c8ba665 100644
--- a/beacon_node/execution_layer/src/engine_api.rs
+++ b/beacon_node/execution_layer/src/engine_api.rs
@@ -2,8 +2,7 @@ use crate::engines::ForkchoiceState;
 use crate::http::{
     ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3,
     ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1,
-    ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1,
-    ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2,
+    ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2,
     ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2,
     ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4,
 };
@@
-18,7 +17,6 @@ use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use strum::IntoStaticStr; use superstruct::superstruct; -use types::execution_payload::{ConsolidationRequests, DepositRequests, WithdrawalRequests}; pub use types::{ Address, BeaconBlockRef, ConsolidationRequest, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, @@ -26,7 +24,7 @@ pub use types::{ }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, KzgProofs, + ExecutionPayloadElectra, ExecutionRequests, KzgProofs, }; use types::{Graffiti, GRAFFITI_BYTES_LEN}; @@ -288,6 +286,8 @@ pub struct GetPayloadResponse { pub blobs_bundle: BlobsBundle, #[superstruct(only(Deneb, Electra), partial_getter(copy))] pub should_override_builder: bool, + #[superstruct(only(Electra))] + pub requests: ExecutionRequests, } impl GetPayloadResponse { @@ -321,7 +321,12 @@ impl From> for ExecutionPayload { } impl From> - for (ExecutionPayload, Uint256, Option>) + for ( + ExecutionPayload, + Uint256, + Option>, + Option>, + ) { fn from(response: GetPayloadResponse) -> Self { match response { @@ -329,21 +334,25 @@ impl From> ExecutionPayload::Bellatrix(inner.execution_payload), inner.block_value, None, + None, ), GetPayloadResponse::Capella(inner) => ( ExecutionPayload::Capella(inner.execution_payload), inner.block_value, None, + None, ), GetPayloadResponse::Deneb(inner) => ( ExecutionPayload::Deneb(inner.execution_payload), inner.block_value, Some(inner.blobs_bundle), + None, ), GetPayloadResponse::Electra(inner) => ( ExecutionPayload::Electra(inner.execution_payload), inner.block_value, Some(inner.blobs_bundle), + Some(inner.requests), ), } } @@ -360,106 +369,25 @@ impl GetPayloadResponse { } } -#[superstruct( - variants(V1, V2), - variant_attributes(derive(Clone, Debug),), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") -)] #[derive(Clone, Debug)] -pub struct ExecutionPayloadBody { +pub struct ExecutionPayloadBodyV1 { pub transactions: Transactions, pub withdrawals: Option>, - #[superstruct(only(V2))] - pub deposit_requests: Option>, - #[superstruct(only(V2))] - pub withdrawal_requests: Option>, - #[superstruct(only(V2))] - pub consolidation_requests: Option>, } -impl ExecutionPayloadBody { - #[allow(clippy::type_complexity)] - pub fn deconstruct( - self, - ) -> ( - Transactions, - Option>, - Option>, - Option>, - Option>, - ) { - match self { - ExecutionPayloadBody::V1(body) => { - (body.transactions, body.withdrawals, None, None, None) - } - ExecutionPayloadBody::V2(body) => ( - body.transactions, - body.withdrawals, - body.deposit_requests, - body.withdrawal_requests, - body.consolidation_requests, - ), - } - } +impl ExecutionPayloadBodyV1 { pub fn to_payload( self, header: ExecutionPayloadHeader, ) -> Result, String> { - let header_fork = header.fork_name_unchecked(); - match &self { - Self::V1(_) => { - if header_fork.electra_enabled() { + match header { + ExecutionPayloadHeader::Bellatrix(header) => { + if self.withdrawals.is_some() { return Err(format!( - "block {} is {} but response is ExecutionPayloadBodyV1. 
Does the EL support {}?", - header.block_hash(), - header_fork, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, + "block {} is bellatrix but payload body has withdrawals", + header.block_hash )); } - } - Self::V2(_) => {} - } - - let ( - transactions, - withdrawals, - deposit_requests, - withdrawal_requests, - consolidation_requests, - ) = self.deconstruct(); - if !header_fork.capella_enabled() && withdrawals.is_some() { - return Err(format!( - "block {} is {} but payload body has withdrawals", - header.block_hash(), - header_fork - )); - } - if !header_fork.electra_enabled() { - if deposit_requests.is_some() { - return Err(format!( - "block {} is {} but payload body has deposit_requests", - header.block_hash(), - header_fork - )); - } - if withdrawal_requests.is_some() { - return Err(format!( - "block {} is {} but payload body has withdrawal_requests", - header.block_hash(), - header_fork - )); - } - if consolidation_requests.is_some() { - return Err(format!( - "block {} is {} but payload body has consolidation_requests", - header.block_hash(), - header_fork - )); - } - } - - match header { - ExecutionPayloadHeader::Bellatrix(header) => { Ok(ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { parent_hash: header.parent_hash, fee_recipient: header.fee_recipient, @@ -474,108 +402,90 @@ impl ExecutionPayloadBody { extra_data: header.extra_data, base_fee_per_gas: header.base_fee_per_gas, block_hash: header.block_hash, - transactions, + transactions: self.transactions, })) } ExecutionPayloadHeader::Capella(header) => { - let withdrawals = withdrawals.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawals set to null", - header.block_hash, header_fork - ) - })?; - Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions, - withdrawals, - })) + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + })) + } else { + Err(format!( + "block {} is capella but payload body doesn't have withdrawals", + header.block_hash + )) + } } ExecutionPayloadHeader::Deneb(header) => { - let withdrawals = withdrawals.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawals set to null", - header.block_hash, header_fork - ) - })?; - Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: 
header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions, - withdrawals, - blob_gas_used: header.blob_gas_used, - excess_blob_gas: header.excess_blob_gas, - })) + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + })) + } else { + Err(format!( + "block {} is post capella but payload body doesn't have withdrawals", + header.block_hash + )) + } } ExecutionPayloadHeader::Electra(header) => { - let withdrawals = withdrawals.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawals set to null", - header.block_hash, header_fork - ) - })?; - let deposit_requests = deposit_requests.ok_or_else(|| { - format!( - "block {} is {} but payload body has deposit_requests set to null", - header.block_hash, header_fork - ) - })?; - let withdrawal_requests = withdrawal_requests.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawal_requests set to null", - header.block_hash, header_fork - ) - })?; - let consolidation_requests = consolidation_requests.ok_or_else(|| { - format!( - "block {} is {} but payload body has consolidation_requests set to null", - header.block_hash, header_fork - ) - })?; - Ok(ExecutionPayload::Electra(ExecutionPayloadElectra { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions, - withdrawals, - blob_gas_used: header.blob_gas_used, - excess_blob_gas: header.excess_blob_gas, - deposit_requests, - withdrawal_requests, - consolidation_requests, - })) + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Electra(ExecutionPayloadElectra { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + })) + } else { + Err(format!( + "block {} is post capella but payload body doesn't have withdrawals", + header.block_hash + )) + } } } } @@ -592,8 +502,6 @@ pub struct EngineCapabilities { pub forkchoice_updated_v3: bool, pub get_payload_bodies_by_hash_v1: bool, pub 
get_payload_bodies_by_range_v1: bool, - pub get_payload_bodies_by_hash_v2: bool, - pub get_payload_bodies_by_range_v2: bool, pub get_payload_v1: bool, pub get_payload_v2: bool, pub get_payload_v3: bool, @@ -631,12 +539,6 @@ impl EngineCapabilities { if self.get_payload_bodies_by_range_v1 { response.push(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1); } - if self.get_payload_bodies_by_hash_v2 { - response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2); - } - if self.get_payload_bodies_by_range_v2 { - response.push(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2); - } if self.get_payload_v1 { response.push(ENGINE_GET_PAYLOAD_V1); } @@ -668,6 +570,7 @@ pub enum ClientCode { Lodestar, Nethermind, Nimbus, + TrinExecution, Teku, Prysm, Reth, @@ -686,6 +589,7 @@ impl std::fmt::Display for ClientCode { ClientCode::Lodestar => "LS", ClientCode::Nethermind => "NM", ClientCode::Nimbus => "NB", + ClientCode::TrinExecution => "TE", ClientCode::Teku => "TK", ClientCode::Prysm => "PM", ClientCode::Reth => "RH", @@ -709,6 +613,7 @@ impl TryFrom for ClientCode { "LS" => Ok(Self::Lodestar), "NM" => Ok(Self::Nethermind), "NB" => Ok(Self::Nimbus), + "TE" => Ok(Self::TrinExecution), "TK" => Ok(Self::Teku), "PM" => Ok(Self::Prysm), "RH" => Ok(Self::Reth), diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index c497a4a7254..9c2c43bcf7c 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -50,8 +50,6 @@ pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1"; pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1"; -pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2: &str = "engine_getPayloadBodiesByHashV2"; -pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2: &str = "engine_getPayloadBodiesByRangeV2"; pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; @@ -80,8 +78,6 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, ENGINE_GET_CLIENT_VERSION_V1, ]; @@ -797,6 +793,9 @@ impl HttpJsonRpc { JsonExecutionPayload::V4(new_payload_request_electra.execution_payload.clone().into()), new_payload_request_electra.versioned_hashes, new_payload_request_electra.parent_beacon_block_root, + new_payload_request_electra + .execution_requests_list + .get_execution_requests_list(), ]); let response: JsonPayloadStatusV1 = self @@ -849,7 +848,9 @@ impl HttpJsonRpc { ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V1(response).into()) + JsonGetPayloadResponse::V1(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Capella => { let response: JsonGetPayloadResponseV2 = self @@ -859,7 +860,9 @@ impl HttpJsonRpc { ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V2(response).into()) + JsonGetPayloadResponse::V2(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Base | ForkName::Altair | ForkName::Deneb | ForkName::Electra => Err( Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)), @@ -883,7 +886,9 @@ impl HttpJsonRpc { 
ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V3(response).into()) + JsonGetPayloadResponse::V3(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Base | ForkName::Altair @@ -912,7 +917,9 @@ impl HttpJsonRpc { ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V4(response).into()) + JsonGetPayloadResponse::V4(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Base | ForkName::Altair @@ -991,7 +998,7 @@ impl HttpJsonRpc { pub async fn get_payload_bodies_by_hash_v1( &self, block_hashes: Vec, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let params = json!([block_hashes]); let response: Vec>> = self @@ -1004,27 +1011,7 @@ impl HttpJsonRpc { Ok(response .into_iter() - .map(|opt_json| opt_json.map(|v1| JsonExecutionPayloadBody::V1(v1).into())) - .collect()) - } - - pub async fn get_payload_bodies_by_hash_v2( - &self, - block_hashes: Vec, - ) -> Result>>, Error> { - let params = json!([block_hashes]); - - let response: Vec>> = self - .rpc_request( - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, - params, - ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?; - - Ok(response - .into_iter() - .map(|opt_json| opt_json.map(|v2| JsonExecutionPayloadBody::V2(v2).into())) + .map(|opt_json| opt_json.map(From::from)) .collect()) } @@ -1032,7 +1019,7 @@ impl HttpJsonRpc { &self, start: u64, count: u64, - ) -> Result>>, Error> { + ) -> Result>>, Error> { #[derive(Serialize)] #[serde(transparent)] struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); @@ -1048,31 +1035,7 @@ impl HttpJsonRpc { Ok(response .into_iter() - .map(|opt_json| opt_json.map(|v1| JsonExecutionPayloadBody::V1(v1).into())) - .collect()) - } - - pub async fn get_payload_bodies_by_range_v2( - &self, - start: u64, - count: u64, - ) -> Result>>, Error> { - #[derive(Serialize)] - #[serde(transparent)] - struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); - - let params = json!([Quantity(start), Quantity(count)]); - let response: Vec>> = self - .rpc_request( - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, - params, - ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?; - - Ok(response - .into_iter() - .map(|opt_json| opt_json.map(|v2| JsonExecutionPayloadBody::V2(v2).into())) + .map(|opt_json| opt_json.map(From::from)) .collect()) } @@ -1099,10 +1062,6 @@ impl HttpJsonRpc { .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1), get_payload_bodies_by_range_v1: capabilities .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), - get_payload_bodies_by_hash_v2: capabilities - .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2), - get_payload_bodies_by_range_v2: capabilities - .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2), get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), @@ -1278,39 +1237,6 @@ impl HttpJsonRpc { } } - pub async fn get_payload_bodies_by_hash( - &self, - block_hashes: Vec, - ) -> Result>>, Error> { - let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.get_payload_bodies_by_hash_v2 { - self.get_payload_bodies_by_hash_v2(block_hashes).await - } else if engine_capabilities.get_payload_bodies_by_hash_v1 { - self.get_payload_bodies_by_hash_v1(block_hashes).await - } else { - Err(Error::RequiredMethodUnsupported( - "engine_getPayloadBodiesByHash", - )) - 
} - } - - pub async fn get_payload_bodies_by_range( - &self, - start: u64, - count: u64, - ) -> Result>>, Error> { - let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.get_payload_bodies_by_range_v2 { - self.get_payload_bodies_by_range_v2(start, count).await - } else if engine_capabilities.get_payload_bodies_by_range_v1 { - self.get_payload_bodies_by_range_v1(start, count).await - } else { - Err(Error::RequiredMethodUnsupported( - "engine_getPayloadBodiesByRange", - )) - } - } - // automatically selects the latest version of // forkchoice_updated that the execution engine supports pub async fn forkchoice_updated( diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index a05d584cfca..753554c149a 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,11 +1,13 @@ use super::*; use alloy_rlp::RlpEncodable; use serde::{Deserialize, Serialize}; +use ssz::Decode; use strum::EnumString; use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobsList; -use types::{DepositRequest, FixedVector, PublicKeyBytes, Signature, Unsigned, WithdrawalRequest}; +use types::execution_requests::{ConsolidationRequests, DepositRequests, WithdrawalRequests}; +use types::{FixedVector, Unsigned}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -104,14 +106,6 @@ pub struct JsonExecutionPayload { #[superstruct(only(V3, V4))] #[serde(with = "serde_utils::u64_hex_be")] pub excess_blob_gas: u64, - #[superstruct(only(V4))] - pub deposit_requests: VariableList, - #[superstruct(only(V4))] - pub withdrawal_requests: - VariableList, - #[superstruct(only(V4))] - pub consolidation_requests: - VariableList, } impl From> for JsonExecutionPayloadV1 { @@ -214,24 +208,6 @@ impl From> for JsonExecutionPayloadV4 .into(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - deposit_requests: payload - .deposit_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - withdrawal_requests: payload - .withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - consolidation_requests: payload - .consolidation_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), } } } @@ -348,24 +324,6 @@ impl From> for ExecutionPayloadElectra .into(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - deposit_requests: payload - .deposit_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - withdrawal_requests: payload - .withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - consolidation_requests: payload - .consolidation_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), } } } @@ -381,6 +339,71 @@ impl From> for ExecutionPayload { } } +/// This is used to index into the `execution_requests` array. +#[derive(Debug, Copy, Clone)] +enum RequestPrefix { + Deposit, + Withdrawal, + Consolidation, +} + +impl RequestPrefix { + pub fn from_prefix(prefix: u8) -> Option { + match prefix { + 0 => Some(Self::Deposit), + 1 => Some(Self::Withdrawal), + 2 => Some(Self::Consolidation), + _ => None, + } + } +} + +/// Format of `ExecutionRequests` received over the engine api. +/// +/// Array of ssz-encoded requests list encoded as hex bytes. 
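+/// (i.e. the JSON value is an array of hex strings, one entry per request type).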
+/// The prefix of the request type is used to index into the array.
+///
+/// For example, given [0xab, 0xcd, 0xef]:
+/// 0xab is the deposits bytes (prefix and index == 0),
+/// 0xcd is the withdrawals bytes (prefix and index == 1),
+/// 0xef is the consolidations bytes (prefix and index == 2).
+#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct JsonExecutionRequests(pub Vec<String>);
+
+impl<E: EthSpec> TryFrom<JsonExecutionRequests> for ExecutionRequests<E> {
+    type Error = String;
+
+    fn try_from(value: JsonExecutionRequests) -> Result<Self, Self::Error> {
+        let mut requests = ExecutionRequests::default();
+
+        for (i, request) in value.0.into_iter().enumerate() {
+            // hex string
+            let decoded_bytes = hex::decode(request).map_err(|e| format!("Invalid hex {:?}", e))?;
+            match RequestPrefix::from_prefix(i as u8) {
+                Some(RequestPrefix::Deposit) => {
+                    requests.deposits = DepositRequests::<E>::from_ssz_bytes(&decoded_bytes)
+                        .map_err(|e| format!("Failed to decode DepositRequest from EL: {:?}", e))?;
+                }
+                Some(RequestPrefix::Withdrawal) => {
+                    requests.withdrawals = WithdrawalRequests::<E>::from_ssz_bytes(&decoded_bytes)
+                        .map_err(|e| {
+                            format!("Failed to decode WithdrawalRequest from EL: {:?}", e)
+                        })?;
+                }
+                Some(RequestPrefix::Consolidation) => {
+                    requests.consolidations =
+                        ConsolidationRequests::<E>::from_ssz_bytes(&decoded_bytes).map_err(
+                            |e| format!("Failed to decode ConsolidationRequest from EL: {:?}", e),
+                        )?;
+                }
+                None => return Err("Empty requests string".to_string()),
+            }
+        }
+        Ok(requests)
+    }
+}
+
 #[superstruct(
     variants(V1, V2, V3, V4),
     variant_attributes(
@@ -407,38 +430,42 @@ pub struct JsonGetPayloadResponse<E: EthSpec> {
     pub blobs_bundle: JsonBlobsBundleV1<E>,
     #[superstruct(only(V3, V4))]
     pub should_override_builder: bool,
+    #[superstruct(only(V4))]
+    pub requests: JsonExecutionRequests,
 }
 
-impl<E: EthSpec> From<JsonGetPayloadResponse<E>> for GetPayloadResponse<E> {
-    fn from(json_get_payload_response: JsonGetPayloadResponse<E>) -> Self {
+impl<E: EthSpec> TryFrom<JsonGetPayloadResponse<E>> for GetPayloadResponse<E> {
+    type Error = String;
+    fn try_from(json_get_payload_response: JsonGetPayloadResponse<E>) -> Result<Self, Self::Error> {
         match json_get_payload_response {
             JsonGetPayloadResponse::V1(response) => {
-                GetPayloadResponse::Bellatrix(GetPayloadResponseBellatrix {
+                Ok(GetPayloadResponse::Bellatrix(GetPayloadResponseBellatrix {
                     execution_payload: response.execution_payload.into(),
                     block_value: response.block_value,
-                })
+                }))
             }
             JsonGetPayloadResponse::V2(response) => {
-                GetPayloadResponse::Capella(GetPayloadResponseCapella {
+                Ok(GetPayloadResponse::Capella(GetPayloadResponseCapella {
                     execution_payload: response.execution_payload.into(),
                     block_value: response.block_value,
-                })
+                }))
             }
             JsonGetPayloadResponse::V3(response) => {
-                GetPayloadResponse::Deneb(GetPayloadResponseDeneb {
+                Ok(GetPayloadResponse::Deneb(GetPayloadResponseDeneb {
                     execution_payload: response.execution_payload.into(),
                     block_value: response.block_value,
                     blobs_bundle: response.blobs_bundle.into(),
                     should_override_builder: response.should_override_builder,
-                })
+                }))
             }
             JsonGetPayloadResponse::V4(response) => {
-                GetPayloadResponse::Electra(GetPayloadResponseElectra {
+                Ok(GetPayloadResponse::Electra(GetPayloadResponseElectra {
                     execution_payload: response.execution_payload.into(),
                     block_value: response.block_value,
                     blobs_bundle: response.blobs_bundle.into(),
                     should_override_builder: response.should_override_builder,
-                })
+                    requests: response.requests.try_into()?,
+                }))
             }
         }
     }
@@ -754,36 +781,20 @@ impl From<ForkchoiceUpdatedResponse> for JsonForkchoiceUpdatedV1Response {
     }
 }
 
-#[superstruct(
-    variants(V1, V2),
-    variant_attributes(
-        derive(Clone, Debug,
Serialize, Deserialize), - serde(bound = "E: EthSpec", rename_all = "camelCase"), - ), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") -)] -#[derive(Clone, Debug, Serialize)] -#[serde(bound = "E: EthSpec", rename_all = "camelCase", untagged)] -pub struct JsonExecutionPayloadBody { +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct JsonExecutionPayloadBodyV1 { #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, pub withdrawals: Option>, - #[superstruct(only(V2))] - pub deposit_requests: Option>, - #[superstruct(only(V2))] - pub withdrawal_requests: - Option>, - #[superstruct(only(V2))] - pub consolidation_requests: - Option>, } -impl From> for JsonExecutionPayloadBodyV1 { - fn from(value: ExecutionPayloadBodyV1) -> Self { +impl From> for ExecutionPayloadBodyV1 { + fn from(value: JsonExecutionPayloadBodyV1) -> Self { Self { transactions: value.transactions, withdrawals: value.withdrawals.map(|json_withdrawals| { - VariableList::from( + Withdrawals::::from( json_withdrawals .into_iter() .map(Into::into) @@ -794,82 +805,12 @@ impl From> for JsonExecutionPayloadBodyV1< } } -impl From> for JsonExecutionPayloadBodyV2 { - fn from(value: ExecutionPayloadBodyV2) -> Self { +impl From> for JsonExecutionPayloadBodyV1 { + fn from(value: ExecutionPayloadBodyV1) -> Self { Self { transactions: value.transactions, - withdrawals: value.withdrawals.map(|json_withdrawals| { - VariableList::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - deposit_requests: value.deposit_requests.map(|receipts| { - VariableList::from(receipts.into_iter().map(Into::into).collect::>()) - }), - withdrawal_requests: value.withdrawal_requests.map(|withdrawal_requests| { - VariableList::from( - withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - consolidation_requests: value.consolidation_requests.map(|consolidation_requests| { - VariableList::from( - consolidation_requests - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - } - } -} - -impl From> for ExecutionPayloadBody { - fn from(value: JsonExecutionPayloadBody) -> Self { - match value { - JsonExecutionPayloadBody::V1(body_v1) => Self::V1(ExecutionPayloadBodyV1 { - transactions: body_v1.transactions, - withdrawals: body_v1.withdrawals.map(|json_withdrawals| { - Withdrawals::::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - }), - JsonExecutionPayloadBody::V2(body_v2) => Self::V2(ExecutionPayloadBodyV2 { - transactions: body_v2.transactions, - withdrawals: body_v2.withdrawals.map(|json_withdrawals| { - Withdrawals::::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - deposit_requests: body_v2.deposit_requests.map(|json_receipts| { - DepositRequests::::from( - json_receipts - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - withdrawal_requests: body_v2.withdrawal_requests.map(|json_withdrawal_requests| { - WithdrawalRequests::::from( - json_withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - consolidation_requests: body_v2.consolidation_requests, + withdrawals: value.withdrawals.map(|withdrawals| { + VariableList::from(withdrawals.into_iter().map(Into::into).collect::>()) }), } } @@ -950,96 +891,3 @@ impl TryFrom for ClientVersionV1 { }) } } - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct JsonDepositRequest { - 
pub pubkey: PublicKeyBytes, - pub withdrawal_credentials: Hash256, - #[serde(with = "serde_utils::u64_hex_be")] - pub amount: u64, - pub signature: Signature, - #[serde(with = "serde_utils::u64_hex_be")] - pub index: u64, -} - -impl From for JsonDepositRequest { - fn from(deposit: DepositRequest) -> Self { - Self { - pubkey: deposit.pubkey, - withdrawal_credentials: deposit.withdrawal_credentials, - amount: deposit.amount, - signature: deposit.signature, - index: deposit.index, - } - } -} - -impl From for DepositRequest { - fn from(json_deposit: JsonDepositRequest) -> Self { - Self { - pubkey: json_deposit.pubkey, - withdrawal_credentials: json_deposit.withdrawal_credentials, - amount: json_deposit.amount, - signature: json_deposit.signature, - index: json_deposit.index, - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct JsonWithdrawalRequest { - pub source_address: Address, - pub validator_pubkey: PublicKeyBytes, - #[serde(with = "serde_utils::u64_hex_be")] - pub amount: u64, -} - -impl From for JsonWithdrawalRequest { - fn from(withdrawal_request: WithdrawalRequest) -> Self { - Self { - source_address: withdrawal_request.source_address, - validator_pubkey: withdrawal_request.validator_pubkey, - amount: withdrawal_request.amount, - } - } -} - -impl From for WithdrawalRequest { - fn from(json_withdrawal_request: JsonWithdrawalRequest) -> Self { - Self { - source_address: json_withdrawal_request.source_address, - validator_pubkey: json_withdrawal_request.validator_pubkey, - amount: json_withdrawal_request.amount, - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct JsonConsolidationRequest { - pub source_address: Address, - pub source_pubkey: PublicKeyBytes, - pub target_pubkey: PublicKeyBytes, -} - -impl From for JsonConsolidationRequest { - fn from(consolidation_request: ConsolidationRequest) -> Self { - Self { - source_address: consolidation_request.source_address, - source_pubkey: consolidation_request.source_pubkey, - target_pubkey: consolidation_request.target_pubkey, - } - } -} - -impl From for ConsolidationRequest { - fn from(json_consolidation_request: JsonConsolidationRequest) -> Self { - Self { - source_address: json_consolidation_request.source_address, - source_pubkey: json_consolidation_request.source_pubkey, - target_pubkey: json_consolidation_request.target_pubkey, - } - } -} diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index 8d2e3d5ad06..318779b7f3e 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -9,7 +9,7 @@ use types::{ }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, + ExecutionPayloadElectra, ExecutionRequests, }; #[superstruct( @@ -43,6 +43,8 @@ pub struct NewPayloadRequest<'block, E: EthSpec> { pub versioned_hashes: Vec, #[superstruct(only(Deneb, Electra))] pub parent_beacon_block_root: Hash256, + #[superstruct(only(Electra))] + pub execution_requests_list: &'block ExecutionRequests, } impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { @@ -183,6 +185,7 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> .map(kzg_commitment_to_versioned_hash) .collect(), parent_beacon_block_root: block_ref.parent_root, + execution_requests_list: 
&block_ref.body.execution_requests, })), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 648963a320e..f7e490233fe 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -48,7 +48,8 @@ use types::builder_bid::BuilderBid; use types::non_zero_usize::new_non_zero_usize; use types::payload::BlockProductionVersion; use types::{ - AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, KzgProofs, SignedBlindedBeaconBlock, + AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, ExecutionRequests, KzgProofs, + SignedBlindedBeaconBlock, }; use types::{ BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadBellatrix, @@ -112,12 +113,15 @@ impl TryFrom> for ProvenancedPayload BlockProposalContents::PayloadAndBlobs { payload: ExecutionPayloadHeader::Electra(builder_bid.header).into(), block_value: builder_bid.value, kzg_commitments: builder_bid.blob_kzg_commitments, blobs_and_proofs: None, + // TODO(electra): update this with builder api returning the requests + requests: None, }, }; Ok(ProvenancedPayload::Builder( @@ -194,6 +198,8 @@ pub enum BlockProposalContents> { kzg_commitments: KzgCommitments, /// `None` for blinded `PayloadAndBlobs`. blobs_and_proofs: Option<(BlobsList, KzgProofs)>, + // TODO(electra): this should probably be a separate variant/superstruct + requests: Option>, }, } @@ -214,11 +220,13 @@ impl From>> block_value, kzg_commitments, blobs_and_proofs: _, + requests, } => BlockProposalContents::PayloadAndBlobs { payload: payload.execution_payload().into(), block_value, kzg_commitments, blobs_and_proofs: None, + requests, }, } } @@ -230,13 +238,14 @@ impl> TryFrom> type Error = Error; fn try_from(response: GetPayloadResponse) -> Result { - let (execution_payload, block_value, maybe_bundle) = response.into(); + let (execution_payload, block_value, maybe_bundle, maybe_requests) = response.into(); match maybe_bundle { Some(bundle) => Ok(Self::PayloadAndBlobs { payload: execution_payload.into(), block_value, kzg_commitments: bundle.commitments, blobs_and_proofs: Some((bundle.blobs, bundle.proofs)), + requests: maybe_requests, }), None => Ok(Self::Payload { payload: execution_payload.into(), @@ -265,22 +274,25 @@ impl> BlockProposalContents>, Option<(BlobsList, KzgProofs)>, + Option>, Uint256, ) { match self { Self::Payload { payload, block_value, - } => (payload, None, None, block_value), + } => (payload, None, None, None, block_value), Self::PayloadAndBlobs { payload, block_value, kzg_commitments, blobs_and_proofs, + requests, } => ( payload, Some(kzg_commitments), blobs_and_proofs, + requests, block_value, ), } @@ -1772,10 +1784,10 @@ impl ExecutionLayer { pub async fn get_payload_bodies_by_hash( &self, hashes: Vec, - ) -> Result>>, Error> { + ) -> Result>>, Error> { self.engine() .request(|engine: &Engine| async move { - engine.api.get_payload_bodies_by_hash(hashes).await + engine.api.get_payload_bodies_by_hash_v1(hashes).await }) .await .map_err(Box::new) @@ -1786,11 +1798,14 @@ impl ExecutionLayer { &self, start: u64, count: u64, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE); self.engine() .request(|engine: &Engine| async move { - engine.api.get_payload_bodies_by_range(start, count).await + engine + .api + .get_payload_bodies_by_range_v1(start, count) + .await }) .await .map_err(Box::new) @@ -1823,9 +1838,7 @@ impl ExecutionLayer { // Use efficient payload bodies by 
range method if supported. let capabilities = self.get_engine_capabilities(None).await?; - if capabilities.get_payload_bodies_by_range_v1 - || capabilities.get_payload_bodies_by_range_v2 - { + if capabilities.get_payload_bodies_by_range_v1 { let mut payload_bodies = self.get_payload_bodies_by_range(block_number, 1).await?; if payload_bodies.len() != 1 { diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 184031af4d0..ab1a22677f3 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub const HIT: &str = "hit"; diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 6094e0d6960..4deb91e0567 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -652,30 +652,23 @@ impl ExecutionBlockGenerator { withdrawals: pa.withdrawals.clone().into(), blob_gas_used: 0, excess_blob_gas: 0, - // TODO(electra): consider how to test these fields below - deposit_requests: vec![].into(), - withdrawal_requests: vec![].into(), - consolidation_requests: vec![].into(), }), _ => unreachable!(), }, }; - match execution_payload.fork_name() { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => {} - ForkName::Deneb | ForkName::Electra => { - // get random number between 0 and Max Blobs - let mut rng = self.rng.lock(); - let num_blobs = rng.gen::() % (E::max_blobs_per_block() + 1); - let (bundle, transactions) = generate_blobs(num_blobs)?; - for tx in Vec::from(transactions) { - execution_payload - .transactions_mut() - .push(tx) - .map_err(|_| "transactions are full".to_string())?; - } - self.blobs_bundles.insert(id, bundle); + if execution_payload.fork_name().deneb_enabled() { + // get random number between 0 and Max Blobs + let mut rng = self.rng.lock(); + let num_blobs = rng.gen::() % (E::max_blobs_per_block() + 1); + let (bundle, transactions) = generate_blobs(num_blobs)?; + for tx in Vec::from(transactions) { + execution_payload + .transactions_mut() + .push(tx) + .map_err(|_| "transactions are full".to_string())?; } + self.blobs_bundles.insert(id, bundle); } *execution_payload.block_hash_mut() = @@ -862,8 +855,7 @@ pub fn generate_pow_block( #[cfg(test)] mod test { use super::*; - use eth2_network_config::TRUSTED_SETUP_BYTES; - use kzg::TrustedSetup; + use kzg::{trusted_setup::get_trusted_setup, TrustedSetup}; use types::{MainnetEthSpec, MinimalEthSpec}; #[test] @@ -951,8 +943,9 @@ mod test { } fn load_kzg() -> Result { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) - .map_err(|e| format!("Unable to read trusted setup file: {e:?}"))?; + let trusted_setup: TrustedSetup = + serde_json::from_reader(get_trusted_setup().as_slice()) + .map_err(|e| format!("Unable to read trusted setup file: {e:?}"))?; Kzg::new_from_trusted_setup(trusted_setup) .map_err(|e| format!("Failed to load trusted setup: {e:?}")) } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index f36cb9797d3..786ac9ad9c9 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -373,6 +373,8 @@ pub async fn handle_rpc( ))? 
.into(), should_override_builder: false, + // TODO(electra): add EL requests in mock el + requests: Default::default(), }) .unwrap() } @@ -561,60 +563,11 @@ pub async fn handle_rpc( match maybe_payload { Some(payload) => { - assert!( - !payload.fork_name().electra_enabled(), - "payload bodies V1 is not supported for Electra blocks" - ); - let payload_body = ExecutionPayloadBodyV1 { + let payload_body: ExecutionPayloadBodyV1 = ExecutionPayloadBodyV1 { transactions: payload.transactions().clone(), withdrawals: payload.withdrawals().ok().cloned(), }; - let json_payload_body = JsonExecutionPayloadBody::V1( - JsonExecutionPayloadBodyV1::::from(payload_body), - ); - response.push(Some(json_payload_body)); - } - None => response.push(None), - } - } - - Ok(serde_json::to_value(response).unwrap()) - } - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2 => { - #[derive(Deserialize)] - #[serde(transparent)] - struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] pub u64); - - let start = get_param::(params, 0) - .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? - .0; - let count = get_param::(params, 1) - .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? - .0; - - let mut response = vec![]; - for block_num in start..(start + count) { - let maybe_payload = ctx - .execution_block_generator - .read() - .execution_payload_by_number(block_num); - - match maybe_payload { - Some(payload) => { - // TODO(electra): add testing for: - // deposit_requests - // withdrawal_requests - // consolidation_requests - let payload_body = ExecutionPayloadBodyV2 { - transactions: payload.transactions().clone(), - withdrawals: payload.withdrawals().ok().cloned(), - deposit_requests: payload.deposit_requests().ok().cloned(), - withdrawal_requests: payload.withdrawal_requests().ok().cloned(), - consolidation_requests: payload.consolidation_requests().ok().cloned(), - }; - let json_payload_body = JsonExecutionPayloadBody::V2( - JsonExecutionPayloadBodyV2::::from(payload_body), - ); + let json_payload_body = JsonExecutionPayloadBodyV1::from(payload_body); response.push(Some(json_payload_body)); } None => response.push(None), diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 46830256b09..341daedbc8d 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -20,9 +20,9 @@ use types::builder_bid::{ }; use types::{ Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload, - ExecutionPayloadHeaderRefMut, FixedBytesExtended, ForkName, ForkVersionedResponse, Hash256, - PublicKeyBytes, Signature, SignedBlindedBeaconBlock, SignedRoot, - SignedValidatorRegistrationData, Slot, Uint256, + ExecutionPayloadHeaderRefMut, ExecutionRequests, FixedBytesExtended, ForkName, + ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock, + SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, }; use types::{ExecutionBlockHash, SecretKey}; use warp::{Filter, Rejection}; @@ -209,7 +209,7 @@ impl BidStuff for BuilderBid { pub struct MockBuilder { el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, - spec: ChainSpec, + spec: Arc, val_registration_cache: Arc>>, builder_sk: SecretKey, operations: Arc>>, @@ -220,7 +220,7 @@ impl MockBuilder { pub fn new_for_testing( mock_el_url: SensitiveUrl, beacon_url: SensitiveUrl, - spec: ChainSpec, + spec: Arc, executor: TaskExecutor, ) -> (Self, (SocketAddr, impl Future)) { let file = NamedTempFile::new().unwrap(); @@ 
-252,7 +252,7 @@ impl MockBuilder { pub fn new( el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, - spec: ChainSpec, + spec: Arc, ) -> Self { let sk = SecretKey::random(); Self { @@ -479,16 +479,18 @@ pub fn serve( let prev_randao = head_state .get_randao_mix(head_state.current_epoch()) .map_err(|_| reject("couldn't get prev randao"))?; - let expected_withdrawals = match fork { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix => None, - ForkName::Capella | ForkName::Deneb | ForkName::Electra => Some( + + let expected_withdrawals = if fork.capella_enabled() { + Some( builder .beacon_client .get_expected_withdrawals(&StateId::Head) .await .unwrap() .data, - ), + ) + } else { + None }; let payload_attributes = match fork { @@ -540,10 +542,12 @@ pub fn serve( let mut message = match payload_response_type { crate::GetPayloadResponseType::Full(payload_response) => { - let (payload, _block_value, maybe_blobs_bundle): ( + #[allow(clippy::type_complexity)] + let (payload, _block_value, maybe_blobs_bundle, _maybe_requests): ( ExecutionPayload, Uint256, Option>, + Option>, ) = payload_response.into(); match fork { @@ -591,10 +595,12 @@ pub fn serve( } } crate::GetPayloadResponseType::Blinded(payload_response) => { - let (payload, _block_value, maybe_blobs_bundle): ( + #[allow(clippy::type_complexity)] + let (payload, _block_value, maybe_blobs_bundle, _maybe_requests): ( ExecutionPayload, Uint256, Option>, + Option>, ) = payload_response.into(); match fork { ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index fe847ec3eda..be99b380543 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -47,9 +47,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { forkchoice_updated_v2: true, forkchoice_updated_v3: true, get_payload_bodies_by_hash_v1: true, - get_payload_bodies_by_hash_v2: true, get_payload_bodies_by_range_v1: true, - get_payload_bodies_by_range_v2: true, get_payload_v1: true, get_payload_v2: true, get_payload_v3: true, diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 3347f6c6c2a..3981833a5cb 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -43,7 +43,7 @@ impl Eth1GenesisService { /// Creates a new service. Does not attempt to connect to the Eth1 node. /// /// Modifies the given `config` to make it more suitable to the task of listening to genesis. - pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Result { + pub fn new(config: Eth1Config, log: Logger, spec: Arc) -> Result { let config = Eth1Config { // Truncating the block cache makes searching for genesis more // complicated. @@ -100,9 +100,9 @@ impl Eth1GenesisService { pub async fn wait_for_genesis_state( &self, update_interval: Duration, - spec: ChainSpec, ) -> Result, String> { let eth1_service = &self.eth1_service; + let spec = eth1_service.chain_spec(); let log = ð1_service.log; let mut sync_blocks = false; @@ -180,13 +180,13 @@ impl Eth1GenesisService { // Scan the new eth1 blocks, searching for genesis. if let Some(genesis_state) = - self.scan_new_blocks::(&mut highest_processed_block, &spec)? + self.scan_new_blocks::(&mut highest_processed_block, spec)? 
{ info!( log, "Genesis ceremony complete"; "genesis_validators" => genesis_state - .get_active_validator_indices(E::genesis_epoch(), &spec) + .get_active_validator_indices(E::genesis_epoch(), spec) .map_err(|e| format!("Genesis validators error: {:?}", e))? .len(), "genesis_time" => genesis_state.genesis_time(), @@ -203,7 +203,7 @@ impl Eth1GenesisService { let latest_timestamp = self.stats.latest_timestamp.load(Ordering::Relaxed); // Perform some logging. - if timestamp_can_trigger_genesis(latest_timestamp, &spec)? { + if timestamp_can_trigger_genesis(latest_timestamp, spec)? { // Indicate that we are awaiting adequate active validators. if (active_validator_count as u64) < spec.min_genesis_active_validator_count { info!( diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index b5c6d85afeb..6cc7517aa44 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -5,6 +5,7 @@ use eth1_test_rig::{AnvilEth1Instance, DelayThenDeposit, Middleware}; use genesis::{Eth1Config, Eth1GenesisService}; use sensitive_url::SensitiveUrl; use state_processing::is_valid_genesis_state; +use std::sync::Arc; use std::time::Duration; use types::{ test_utils::generate_deterministic_keypair, FixedBytesExtended, Hash256, MinimalEthSpec, @@ -24,7 +25,10 @@ pub fn new_env() -> Environment { fn basic() { let env = new_env(); let log = env.core_context().log().clone(); - let mut spec = env.eth2_config().spec.clone(); + let mut spec = (*env.eth2_config().spec).clone(); + spec.min_genesis_time = 0; + spec.min_genesis_active_validator_count = 8; + let spec = Arc::new(spec); env.runtime().block_on(async { let eth1 = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()) @@ -60,9 +64,6 @@ fn basic() { // you're experiencing failures, try increasing the update_interval. 
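+        // (The genesis service polls the eth1 node once per `update_interval`.)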
let update_interval = Duration::from_millis(500); - spec.min_genesis_time = 0; - spec.min_genesis_active_validator_count = 8; - let deposits = (0..spec.min_genesis_active_validator_count + 2) .map(|i| { deposit_contract.deposit_helper::( @@ -79,8 +80,7 @@ fn basic() { let deposit_future = deposit_contract.deposit_multiple(deposits); - let wait_future = - service.wait_for_genesis_state::(update_interval, spec.clone()); + let wait_future = service.wait_for_genesis_state::(update_interval); let state = futures::try_join!(deposit_future, wait_future) .map(|(_, state)| state) diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index f3779f0e4ac..638fe0f2192 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -20,7 +20,7 @@ lighthouse_network = { workspace = true } eth1 = { workspace = true } state_processing = { workspace = true } lighthouse_version = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } warp_utils = { workspace = true } slot_clock = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/beacon_node/http_api/src/build_block_contents.rs b/beacon_node/http_api/src/build_block_contents.rs index 05a6735b327..c2ccb6695eb 100644 --- a/beacon_node/http_api/src/build_block_contents.rs +++ b/beacon_node/http_api/src/build_block_contents.rs @@ -11,11 +11,9 @@ pub fn build_block_contents( BeaconBlockResponseWrapper::Blinded(block) => { Ok(ProduceBlockV3Response::Blinded(block.block)) } - BeaconBlockResponseWrapper::Full(block) => match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => Ok( - ProduceBlockV3Response::Full(FullBlockContents::Block(block.block)), - ), - ForkName::Deneb | ForkName::Electra => { + + BeaconBlockResponseWrapper::Full(block) => { + if fork_name.deneb_enabled() { let BeaconBlockResponse { block, state: _, @@ -37,7 +35,11 @@ pub fn build_block_contents( blobs, }), )) + } else { + Ok(ProduceBlockV3Response::Full(FullBlockContents::Block( + block.block, + ))) } - }, + } } } diff --git a/beacon_node/http_api/src/builder_states.rs b/beacon_node/http_api/src/builder_states.rs index 54f2c0efa8d..40b38157365 100644 --- a/beacon_node/http_api/src/builder_states.rs +++ b/beacon_node/http_api/src/builder_states.rs @@ -4,7 +4,7 @@ use safe_arith::SafeArith; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::state_advance::partial_state_advance; use std::sync::Arc; -use types::{BeaconState, EthSpec, ForkName, Slot, Withdrawals}; +use types::{BeaconState, EthSpec, Slot, Withdrawals}; const MAX_EPOCH_LOOKAHEAD: u64 = 2; @@ -53,7 +53,8 @@ fn get_next_withdrawals_sanity_checks( } let fork = chain.spec.fork_name_at_slot::(proposal_slot); - if let ForkName::Base | ForkName::Altair | ForkName::Bellatrix = fork { + + if !fork.capella_enabled() { return Err(warp_utils::reject::custom_bad_request( "the specified state is a pre-capella state.".to_string(), )); diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 998114f565e..307584b82d4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -146,13 +146,13 @@ pub struct Config { pub listen_port: u16, pub allow_origin: Option, pub tls_config: Option, - pub spec_fork_name: Option, pub data_dir: PathBuf, pub sse_capacity_multiplier: usize, pub enable_beacon_processor: bool, #[serde(with = "eth2::types::serde_status_code")] pub duplicate_block_status_code: StatusCode, pub 
enable_light_client_server: bool, + pub target_peers: usize, } impl Default for Config { @@ -163,12 +163,12 @@ impl Default for Config { listen_port: 5052, allow_origin: None, tls_config: None, - spec_fork_name: None, data_dir: PathBuf::from(DEFAULT_ROOT_DIR), sse_capacity_multiplier: 1, enable_beacon_processor: true, duplicate_block_status_code: StatusCode::ACCEPTED, enable_light_client_server: false, + target_peers: 100, } } } @@ -1296,7 +1296,7 @@ pub fn serve( task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, - ProvenancedBlock::local(block_contents), + ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, log, @@ -1338,7 +1338,7 @@ pub fn serve( })?; publish_blocks::publish_block( None, - ProvenancedBlock::local(block_contents), + ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, log, @@ -1373,7 +1373,7 @@ pub fn serve( task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, - ProvenancedBlock::local(block_contents), + ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, log, @@ -1417,7 +1417,7 @@ pub fn serve( })?; publish_blocks::publish_block( None, - ProvenancedBlock::local(block_contents), + ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, log, @@ -2641,7 +2641,6 @@ pub fn serve( ); // GET config/spec - let spec_fork_name = ctx.config.spec_fork_name; let get_config_spec = config_path .and(warp::path("spec")) .and(warp::path::end()) @@ -2651,7 +2650,7 @@ pub fn serve( move |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { let config_and_preset = - ConfigAndPreset::from_chain_spec::(&chain.spec, spec_fork_name); + ConfigAndPreset::from_chain_spec::(&chain.spec, None); Ok(api_types::GenericResponse::from(config_and_preset)) }) }, @@ -2934,8 +2933,16 @@ pub fn serve( let is_optimistic = head_execution_status.is_optimistic_or_invalid(); + // When determining sync status, make an exception for single-node + // testnets with 0 peers. 
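// Editor's note: a self-contained sketch (stand-in types, not the diff's own
// code) of the sync-status rule added above: a node reports itself synced
// either when sync has completed, or when sync is stalled *and* it is
// configured with zero target peers, since a single-node testnet can never
// find peers and "stalled" is its steady state.
enum SyncState {
    Synced,
    Stalled,
    Syncing,
}

fn reports_synced(state: &SyncState, target_peers: usize) -> bool {
    matches!(state, SyncState::Synced)
        || (matches!(state, SyncState::Stalled) && target_peers == 0)
}

#[test]
fn single_node_testnet_is_synced_while_stalled() {
    assert!(reports_synced(&SyncState::Stalled, 0));
    assert!(!reports_synced(&SyncState::Stalled, 100));
}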
+ let sync_state = network_globals.sync_state.read(); + let is_synced = sync_state.is_synced() + || (sync_state.is_stalled() + && network_globals.config.target_peers == 0); + drop(sync_state); + let syncing_data = api_types::SyncingData { - is_syncing: !network_globals.sync_state.read().is_synced(), + is_syncing: !is_synced, is_optimistic, el_offline, head_slot, diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs index 970eef8dd07..b6a53b26c69 100644 --- a/beacon_node/http_api/src/metrics.rs +++ b/beacon_node/http_api/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static HTTP_API_PATHS_TOTAL: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index ad7cb3081ea..fceeb2dd231 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,13 +1,17 @@ use crate::metrics; -use beacon_chain::block_verification_types::{AsBlock, BlockContentsError}; +use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; +use beacon_chain::block_verification_types::AsBlock; +use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ - AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, - IntoGossipVerifiedBlockContents, NotifyExecutionLayer, + build_blob_data_column_sidecars, AvailabilityProcessingStatus, BeaconChain, BeaconChainError, + BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, NotifyExecutionLayer, +}; +use eth2::types::{ + BlobsBundle, BroadcastValidation, ErrorMessage, ExecutionPayloadAndBlobs, FullPayloadContents, + PublishBlockRequest, SignedBlockContents, }; -use eth2::types::{into_full_block_and_blobs, BroadcastValidation, ErrorMessage}; -use eth2::types::{FullPayloadContents, PublishBlockRequest}; use execution_layer::ProvenancedPayload; use lighthouse_network::{NetworkGlobals, PubsubMessage}; use network::NetworkMessage; @@ -15,39 +19,62 @@ use rand::seq::SliceRandom; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::marker::PhantomData; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, BeaconBlockRef, BlobSidecarList, BlockImportSource, DataColumnSidecarList, - DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, - FullPayloadBellatrix, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, VariableList, + AbstractExecPayload, BeaconBlockRef, BlobSidecar, BlobsList, BlockImportSource, + DataColumnSidecarList, DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, + FullPayload, FullPayloadBellatrix, Hash256, KzgProofs, SignedBeaconBlock, + SignedBlindedBeaconBlock, }; use warp::http::StatusCode; use warp::{reply::Response, Rejection, Reply}; -pub enum ProvenancedBlock> { +pub type UnverifiedBlobs = Option<( + KzgProofs<::EthSpec>, + BlobsList<::EthSpec>, +)>; + +pub enum ProvenancedBlock> { /// The payload was built using a local EE. - Local(B, PhantomData), + Local(B, UnverifiedBlobs, PhantomData), /// The payload was build using a remote builder (e.g., via a mev-boost /// compatible relay). 
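// Editor's note: a compact sketch (simplified stand-in types; the real enum
// also carries a `PhantomData`) of the new shape of `ProvenancedBlock`: both
// variants now hold the optional unverified (proofs, blobs) pair alongside
// the block, so callers destructure them uniformly into
// (block, blobs, is_locally_built).
struct KzgProof;
struct Blob;
type UnverifiedBlobs = Option<(Vec<KzgProof>, Vec<Blob>)>;

enum ProvenancedBlock<B> {
    Local(B, UnverifiedBlobs),
    Builder(B, UnverifiedBlobs),
}

fn into_parts<B>(p: ProvenancedBlock<B>) -> (B, UnverifiedBlobs, bool) {
    match p {
        ProvenancedBlock::Local(block, blobs) => (block, blobs, true),
        ProvenancedBlock::Builder(block, blobs) => (block, blobs, false),
    }
}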
- Builder(B, PhantomData), + Builder(B, UnverifiedBlobs, PhantomData), } -impl> ProvenancedBlock { - pub fn local(block: B) -> Self { - Self::Local(block, PhantomData) +impl> ProvenancedBlock { + pub fn local(block: B, blobs: UnverifiedBlobs) -> Self { + Self::Local(block, blobs, PhantomData) } - pub fn builder(block: B) -> Self { - Self::Builder(block, PhantomData) + pub fn builder(block: B, blobs: UnverifiedBlobs) -> Self { + Self::Builder(block, blobs, PhantomData) + } +} + +impl ProvenancedBlock>> { + pub fn local_from_publish_request(request: PublishBlockRequest) -> Self { + match request { + PublishBlockRequest::Block(block) => Self::local(block, None), + PublishBlockRequest::BlockContents(block_contents) => { + let SignedBlockContents { + signed_block, + kzg_proofs, + blobs, + } = block_contents; + Self::local(signed_block, Some((kzg_proofs, blobs))) + } + } } } /// Handles a request from the HTTP API for full blocks. #[allow(clippy::too_many_arguments)] -pub async fn publish_block>( +pub async fn publish_block>( block_root: Option, provenanced_block: ProvenancedBlock, chain: Arc>, @@ -59,28 +86,29 @@ pub async fn publish_block Result { let seen_timestamp = timestamp_now(); - let (block_contents, is_locally_built_block) = match provenanced_block { - ProvenancedBlock::Local(block_contents, _) => (block_contents, true), - ProvenancedBlock::Builder(block_contents, _) => (block_contents, false), + let (unverified_block, unverified_blobs, is_locally_built_block) = match provenanced_block { + ProvenancedBlock::Local(block, blobs, _) => (block, blobs, true), + ProvenancedBlock::Builder(block, blobs, _) => (block, blobs, false), }; let provenance = if is_locally_built_block { "local" } else { "builder" }; - let block = block_contents.inner_block().clone(); - let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + let block = unverified_block.inner_block(); debug!(log, "Signed block received in HTTP API"; "slot" => block.slot()); let malicious_withhold_count = chain.config.malicious_withhold_count; let chain_cloned = chain.clone(); /* actually publish a block */ - let publish_block = move |block: Arc>, - blobs_opt: Option>, - data_cols_opt: Option>, - sender, - log, - seen_timestamp| { + let publish_block_p2p = move |block: Arc>, + should_publish_block: bool, + blob_sidecars: Vec>>, + mut data_column_sidecars: DataColumnSidecarList, + sender, + log, + seen_timestamp| + -> Result<(), BlockError> { let publish_timestamp = timestamp_now(); let publish_delay = publish_timestamp .checked_sub(seen_timestamp) @@ -92,55 +120,48 @@ pub async fn publish_block block.slot(), - "publish_delay_ms" => publish_delay.as_millis() - ); + let mut pubsub_messages = if should_publish_block { + info!( + log, + "Signed block published to network via HTTP API"; + "slot" => block.slot(), + "blobs_published" => blob_sidecars.len(), + "publish_delay_ms" => publish_delay.as_millis(), + ); + vec![PubsubMessage::BeaconBlock(block.clone())] + } else { + vec![] + }; match block.as_ref() { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) | SignedBeaconBlock::Bellatrix(_) | SignedBeaconBlock::Capella(_) => { - crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block)) + crate::publish_pubsub_messages(&sender, pubsub_messages) .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; } SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => { - let mut pubsub_messages = vec![PubsubMessage::BeaconBlock(block)]; - if let Some(blob_sidecars) = 
blobs_opt { - // Publish blob sidecars - for (blob_index, blob) in blob_sidecars.into_iter().enumerate() { - pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new(( - blob_index as u64, - blob, - )))); - } + for blob in blob_sidecars.into_iter() { + pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new((blob.index, blob)))); } - if let Some(data_col_sidecars) = data_cols_opt { - let mut data_col_sidecars = data_col_sidecars.to_vec(); - if malicious_withhold_count > 0 { - let columns_to_keep = data_col_sidecars - .len() - .saturating_sub(malicious_withhold_count); - // Randomize columns before dropping the last malicious_withhold_count items - data_col_sidecars.shuffle(&mut rand::thread_rng()); - data_col_sidecars = data_col_sidecars - .into_iter() - .take(columns_to_keep) - .collect::>(); - } - - for data_col in data_col_sidecars { - let subnet = DataColumnSubnetId::from_column_index::( - data_col.index as usize, - &chain_cloned.spec, - ); - pubsub_messages.push(PubsubMessage::DataColumnSidecar(Box::new(( - subnet, data_col, - )))); - } + if malicious_withhold_count > 0 { + let columns_to_keep = data_column_sidecars + .len() + .saturating_sub(malicious_withhold_count); + // Randomize columns before dropping the last malicious_withhold_count items + data_column_sidecars.shuffle(&mut rand::thread_rng()); + drop(data_column_sidecars.drain(columns_to_keep..)); + } + + for data_col in data_column_sidecars { + let subnet = DataColumnSubnetId::from_column_index::( + data_col.index as usize, + &chain_cloned.spec, + ); + pubsub_messages.push(PubsubMessage::DataColumnSidecar(Box::new(( + subnet, data_col, + )))); } crate::publish_pubsub_messages(&sender, pubsub_messages) .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; @@ -150,72 +171,162 @@ pub async fn publish_block b, - Err(BlockContentsError::BlockError(BlockError::BlockIsAlreadyKnown(_))) - | Err(BlockContentsError::BlobError( - beacon_chain::blob_verification::GossipBlobError::RepeatBlob { .. }, - )) => { - // Allow the status code for duplicate blocks to be overridden based on config. - return Ok(warp::reply::with_status( - warp::reply::json(&ErrorMessage { - code: duplicate_status_code.as_u16(), - message: "duplicate block".to_string(), - stacktraces: vec![], - }), - duplicate_status_code, - ) - .into_response()); - } - Err(e) => { - warn!( - log, - "Not publishing block - not gossip verified"; - "slot" => slot, - "error" => %e - ); - return Err(warp_utils::reject::custom_bad_request(e.to_string())); - } - }; - // TODO(das): We could potentially get rid of these conversions and pass `GossipVerified` types - // to `publish_block`, i.e. have `GossipVerified` types in `PubsubMessage`? - // This saves us from extra code and provides guarantee that published - // components are verified. - // Clone here, so we can take advantage of the `Arc`. The block in `BlockContents` is not, - // `Arc`'d but blobs are. - let block = gossip_verified_block.block.block_cloned(); - let blobs_opt = gossip_verified_blobs.as_ref().map(|gossip_verified_blobs| { - let blobs = gossip_verified_blobs - .into_iter() - .map(|b| b.clone_blob()) - .collect::>(); - VariableList::from(blobs) - }); - let data_cols_opt = gossip_verified_data_columns - .as_ref() - .map(|gossip_verified_data_columns| { - gossip_verified_data_columns + // Convert blobs to either: + // + // 1. Blob sidecars if prior to peer DAS, or + // 2. Data column sidecars if post peer DAS. 
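// Editor's note: a sketch of the branching described in the comment above,
// with stand-in types. Pre-PeerDAS, each (proof, blob) pair becomes an
// indexed blob sidecar; post-PeerDAS the blobs are instead re-encoded as
// data column sidecars; no blobs yields neither.
struct Proof;
struct Blob;
struct BlobSidecar { index: u64, proof: Proof, blob: Blob }
struct DataColumnSidecar;

fn build_columns(_blobs: Vec<Blob>) -> Vec<DataColumnSidecar> {
    // Stand-in for the erasure-coding step; details elided.
    vec![]
}

fn to_sidecars(
    peer_das_enabled: bool,
    blobs: Option<(Vec<Proof>, Vec<Blob>)>,
) -> (Vec<BlobSidecar>, Vec<DataColumnSidecar>) {
    match blobs {
        Some((proofs, blobs)) if !peer_das_enabled => {
            let sidecars = proofs
                .into_iter()
                .zip(blobs)
                .enumerate()
                .map(|(i, (proof, blob))| BlobSidecar { index: i as u64, proof, blob })
                .collect();
            (sidecars, vec![])
        }
        Some((_proofs, blobs)) => (vec![], build_columns(blobs)),
        None => (vec![], vec![]),
    }
}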
+ let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); + + let (blob_sidecars, data_column_sidecars) = match unverified_blobs { + // Pre-PeerDAS: construct blob sidecars for the network. + Some((kzg_proofs, blobs)) if !peer_das_enabled => { + let blob_sidecars = kzg_proofs .into_iter() - .map(|col| col.clone_data_column()) - .collect::>() - }); + .zip(blobs) + .enumerate() + .map(|(i, (proof, unverified_blob))| { + let _timer = metrics::start_timer( + &beacon_chain::metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION, + ); + let blob_sidecar = + BlobSidecar::new(i, unverified_blob, &block, proof).map(Arc::new); + blob_sidecar.map_err(|e| { + error!( + log, + "Invalid blob - not publishing block"; + "error" => ?e, + "blob_index" => i, + "slot" => slot, + ); + warp_utils::reject::custom_bad_request(format!("{e:?}")) + }) + }) + .collect::, Rejection>>()?; + (blob_sidecars, vec![]) + } + // Post PeerDAS: construct data columns. + Some((_, blobs)) => { + // TODO(das): this is sub-optimal and should likely not be happening prior to gossip + // block publishing. + let data_column_sidecars = build_blob_data_column_sidecars(&chain, &block, blobs) + .map_err(|e| { + error!( + log, + "Invalid data column - not publishing block"; + "error" => ?e, + "slot" => slot + ); + warp_utils::reject::custom_bad_request(format!("{e:?}")) + })?; + (vec![], data_column_sidecars) + } + None => (vec![], vec![]), + }; - let block_root = block_root.unwrap_or(gossip_verified_block.block_root); + // Gossip verify the block and blobs/data columns separately. + let gossip_verified_block_result = unverified_block.into_gossip_verified_block(&chain); + let gossip_verified_blobs = blob_sidecars + .into_iter() + .map(|blob_sidecar| { + let gossip_verified_blob = + GossipVerifiedBlob::new(blob_sidecar.clone(), blob_sidecar.index, &chain); + + match gossip_verified_blob { + Ok(blob) => Ok(Some(blob)), + Err(GossipBlobError::RepeatBlob { proposer, .. }) => { + // Log the error but do not abort publication, we may need to publish the block + // or some of the other blobs if the block & blobs are only partially published + // by the other publisher. + debug!( + log, + "Blob for publication already known"; + "blob_index" => blob_sidecar.index, + "slot" => slot, + "proposer" => proposer, + ); + Ok(None) + } + Err(e) => { + error!( + log, + "Blob for publication is gossip-invalid"; + "blob_index" => blob_sidecar.index, + "slot" => slot, + "error" => ?e, + ); + Err(warp_utils::reject::custom_bad_request(e.to_string())) + } + } + }) + .collect::, Rejection>>()?; + + let gossip_verified_data_columns = data_column_sidecars + .into_iter() + .map(|data_column_sidecar| { + let column_index = data_column_sidecar.index as usize; + let subnet = + DataColumnSubnetId::from_column_index::(column_index, &chain.spec); + let gossip_verified_column = + GossipVerifiedDataColumn::new(data_column_sidecar, subnet.into(), &chain); + + match gossip_verified_column { + Ok(blob) => Ok(Some(blob)), + Err(GossipDataColumnError::PriorKnown { proposer, .. }) => { + // Log the error but do not abort publication, we may need to publish the block + // or some of the other data columns if the block & data columns are only + // partially published by the other publisher. 
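// Editor's note: the blob and data-column loops above share one policy; this
// stand-in sketch names it. An already-seen component maps to `Ok(None)`
// (another publisher may have sent it first, and the remaining components
// must still go out), while any other gossip failure rejects the request.
enum GossipError {
    AlreadyKnown,
    Invalid(String),
}

fn verify_tolerating_repeats<T>(result: Result<T, GossipError>) -> Result<Option<T>, String> {
    match result {
        Ok(verified) => Ok(Some(verified)),
        Err(GossipError::AlreadyKnown) => Ok(None), // log and continue
        Err(GossipError::Invalid(e)) => Err(e),     // abort publication
    }
}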
+ debug!( + log, + "Data column for publication already known"; + "column_index" => column_index, + "slot" => slot, + "proposer" => proposer, + ); + Ok(None) + } + Err(e) => { + error!( + log, + "Data column for publication is gossip-invalid"; + "column_index" => column_index, + "slot" => slot, + "error" => ?e, + ); + Err(warp_utils::reject::custom_bad_request(format!("{e:?}"))) + } + } + }) + .collect::, Rejection>>()?; + + let publishable_blobs = gossip_verified_blobs + .iter() + .flatten() + .map(|b| b.clone_blob()) + .collect::>(); + + let publishable_data_columns = gossip_verified_data_columns + .iter() + .flatten() + .map(|b| b.clone_data_column()) + .collect::>(); + + let block_root = block_root.unwrap_or_else(|| { + gossip_verified_block_result.as_ref().map_or_else( + |_| block.canonical_root(), + |verified_block| verified_block.block_root, + ) + }); + let should_publish_block = gossip_verified_block_result.is_ok(); if let BroadcastValidation::Gossip = validation_level { - publish_block( + publish_block_p2p( block.clone(), - blobs_opt.clone(), - data_cols_opt.clone(), + should_publish_block, + publishable_blobs.clone(), + publishable_data_columns.clone(), sender_clone.clone(), log.clone(), seen_timestamp, @@ -223,71 +334,80 @@ pub async fn publish_block Ok(()), - BroadcastValidation::Consensus => publish_block( - block_clone, - blobs_opt, - data_cols_opt, - sender_clone, - log_clone, - seen_timestamp, - ), - BroadcastValidation::ConsensusAndEquivocation => { - check_slashable( - &chain_clone, - &blobs_opt, - block_root, - &block_clone, - &log_clone, - )?; - publish_block( - block_clone, - blobs_opt, - data_cols_opt, - sender_clone, - log_clone, + let publish_fn_completed = Arc::new(AtomicBool::new(false)); + let block_to_publish = block.clone(); + let publish_fn = || { + match validation_level { + BroadcastValidation::Gossip => (), + BroadcastValidation::Consensus => publish_block_p2p( + block_to_publish.clone(), + should_publish_block, + publishable_blobs.clone(), + publishable_data_columns.clone(), + sender_clone.clone(), + log.clone(), seen_timestamp, - ) - } + )?, + BroadcastValidation::ConsensusAndEquivocation => { + check_slashable(&chain, block_root, &block_to_publish, &log)?; + publish_block_p2p( + block_to_publish.clone(), + should_publish_block, + publishable_blobs.clone(), + publishable_data_columns.clone(), + sender_clone.clone(), + log.clone(), + seen_timestamp, + )?; + } + }; + publish_fn_completed.store(true, Ordering::SeqCst); + Ok(()) }; - if let Some(gossip_verified_blobs) = gossip_verified_blobs { - for blob in gossip_verified_blobs { - if let Err(e) = Box::pin(chain.process_gossip_blob(blob)).await { - let msg = format!("Invalid blob: {e}"); - return if let BroadcastValidation::Gossip = validation_level { - Err(warp_utils::reject::broadcast_without_import(msg)) - } else { - error!( - log, - "Invalid blob provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::custom_bad_request(msg)) - }; - } + for blob in gossip_verified_blobs.into_iter().flatten() { + // Importing the blobs could trigger block import and network publication in the case + // where the block was already seen on gossip. 
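// Editor's note: a minimal sketch of the run-at-most-once publish hook used
// above. Importing a blob or column can complete a block seen earlier on
// gossip, which fires `publish_fn`; the shared flag lets the tail of the
// handler distinguish "we published something" from a fully redundant request.
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

fn run_import_with_hook(import_completes_block: bool) -> bool {
    let completed = Arc::new(AtomicBool::new(false));
    let completed_inner = completed.clone();
    let publish_fn = move || {
        // ... broadcast the block/blobs to the network here ...
        completed_inner.store(true, Ordering::SeqCst);
    };
    if import_completes_block {
        publish_fn();
    }
    completed.load(Ordering::SeqCst)
}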
+ if let Err(e) = Box::pin(chain.process_gossip_blob(blob, &publish_fn)).await { + let msg = format!("Invalid blob: {e}"); + return if let BroadcastValidation::Gossip = validation_level { + Err(warp_utils::reject::broadcast_without_import(msg)) + } else { + error!( + log, + "Invalid blob provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::custom_bad_request(msg)) + }; } } - if let Some(gossip_verified_data_columns) = gossip_verified_data_columns { - let custody_columns_indices = &network_globals.custody_columns; - - let custody_columns = gossip_verified_data_columns + if gossip_verified_data_columns + .iter() + .map(Option::is_some) + .count() + > 0 + { + let sampling_columns_indices = &network_globals.sampling_columns; + let sampling_columns = gossip_verified_data_columns .into_iter() - .filter(|data_column| custody_columns_indices.contains(&data_column.index())) + .flatten() + .filter(|data_column| sampling_columns_indices.contains(&data_column.index())) .collect(); - if let Err(e) = Box::pin(chain.process_gossip_data_columns(custody_columns)).await { + // Importing the columns could trigger block import and network publication in the case + // where the block was already seen on gossip. + if let Err(e) = + Box::pin(chain.process_gossip_data_columns(sampling_columns, publish_fn)).await + { let msg = format!("Invalid data column: {e}"); return if let BroadcastValidation::Gossip = validation_level { Err(warp_utils::reject::broadcast_without_import(msg)) } else { error!( log, - "Invalid blob provided to HTTP API"; + "Invalid data column during block publication"; "reason" => &msg ); Err(warp_utils::reject::custom_bad_request(msg)) @@ -295,23 +415,117 @@ pub async fn publish_block { + match gossip_verified_block_result { + Ok(gossip_verified_block) => { + let import_result = Box::pin(chain.process_block( + block_root, + gossip_verified_block, + NotifyExecutionLayer::Yes, + BlockImportSource::HttpApi, + publish_fn, + )) + .await; + post_block_import_logging_and_response( + import_result, + validation_level, + block, + is_locally_built_block, + seen_timestamp, + &chain, + &log, + ) + .await + } + Err(BlockError::DuplicateFullyImported(root)) => { + if publish_fn_completed.load(Ordering::SeqCst) { + post_block_import_logging_and_response( + Ok(AvailabilityProcessingStatus::Imported(root)), + validation_level, + block, + is_locally_built_block, + seen_timestamp, + &chain, + &log, + ) + .await + } else { + // None of the components provided in this HTTP request were new, so this was an + // entirely redundant duplicate request. Return a status code indicating this, + // which can be overridden based on config. 
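// Editor's note: a sketch (stand-in types) of the sampling filter above:
// every verified column was already published to its subnet, but only the
// columns in this node's sampling set are imported into the local store.
use std::collections::HashSet;

struct Column {
    index: u64,
}

fn columns_to_import(
    verified: Vec<Option<Column>>, // `None` marks columns already seen on gossip
    sampling: &HashSet<u64>,
) -> Vec<Column> {
    verified
        .into_iter()
        .flatten()
        .filter(|column| sampling.contains(&column.index))
        .collect()
}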
+ Ok(warp::reply::with_status( + warp::reply::json(&ErrorMessage { + code: duplicate_status_code.as_u16(), + message: "duplicate block".to_string(), + stacktraces: vec![], + }), + duplicate_status_code, + ) + .into_response()) + } + } + Err(BlockError::DuplicateImportStatusUnknown(root)) => { + debug!( + log, + "Block previously seen"; + "block_root" => ?root, + "slot" => block.slot(), + ); + let import_result = Box::pin(chain.process_block( + block_root, + block.clone(), + NotifyExecutionLayer::Yes, + BlockImportSource::HttpApi, + publish_fn, + )) + .await; + post_block_import_logging_and_response( + import_result, + validation_level, + block, + is_locally_built_block, + seen_timestamp, + &chain, + &log, + ) + .await + } + Err(e) => { + warn!( + log, + "Not publishing block - not gossip verified"; + "slot" => slot, + "error" => %e + ); + Err(warp_utils::reject::custom_bad_request(e.to_string())) + } + } +} + +async fn post_block_import_logging_and_response( + result: Result, + validation_level: BroadcastValidation, + block: Arc>, + is_locally_built_block: bool, + seen_timestamp: Duration, + chain: &Arc>, + log: &Logger, +) -> Result { + match result { + // The `DuplicateFullyImported` case here captures the case where the block finishes + // being imported after gossip verification. It could be that it finished imported as a + // result of the block being imported from gossip, OR it could be that it finished importing + // after processing of a gossip blob. In the latter case we MUST run fork choice to + // re-compute the head. + Ok(AvailabilityProcessingStatus::Imported(root)) + | Err(BlockError::DuplicateFullyImported(root)) => { + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); info!( log, "Valid block from HTTP API"; "block_delay" => ?delay, - "root" => format!("{}", root), - "proposer_index" => proposer_index, - "slot" =>slot, + "root" => %root, + "proposer_index" => block.message().proposer_index(), + "slot" => block.slot(), ); // Notify the validator monitor. @@ -330,7 +544,7 @@ pub async fn publish_block &msg + "reason" => ?e, ); Err(warp_utils::reject::custom_bad_request(format!( "Invalid block: {e}" @@ -385,7 +598,7 @@ pub async fn publish_blinded_block( network_globals: Arc>, ) -> Result { let block_root = blinded_block.canonical_root(); - let full_block: ProvenancedBlock> = + let full_block = reconstruct_block(chain.clone(), block_root, blinded_block, log.clone()).await?; publish_block::( Some(block_root), @@ -408,7 +621,7 @@ pub async fn reconstruct_block( block_root: Hash256, block: Arc>, log: Logger, -) -> Result>, Rejection> { +) -> Result>>, Rejection> { let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { warp_utils::reject::custom_server_error("Missing execution layer".to_string()) @@ -474,14 +687,17 @@ pub async fn reconstruct_block( match full_payload_opt { // A block without a payload is pre-merge and we consider it locally // built. 
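// Editor's note: a sketch of the configurable duplicate response used above,
// assuming `warp` and `serde_json` as dependencies; the field names mirror
// the `ErrorMessage` body in the diff. The status code comes from config
// (the tests below use `IM_A_TEAPOT`) instead of a hard-coded value.
use warp::http::StatusCode;
use warp::Reply;

fn duplicate_block_reply(duplicate_status_code: StatusCode) -> warp::reply::Response {
    warp::reply::with_status(
        warp::reply::json(&serde_json::json!({
            "code": duplicate_status_code.as_u16(),
            "message": "duplicate block",
            "stacktraces": [],
        })),
        duplicate_status_code,
    )
    .into_response()
}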
- None => into_full_block_and_blobs(block, None).map(ProvenancedBlock::local), + None => block + .try_into_full_block(None) + .ok_or("Failed to build full block with payload".to_string()) + .map(|full_block| ProvenancedBlock::local(Arc::new(full_block), None)), Some(ProvenancedPayload::Local(full_payload_contents)) => { - into_full_block_and_blobs(block, Some(full_payload_contents)) - .map(ProvenancedBlock::local) + into_full_block_and_blobs::(block, full_payload_contents) + .map(|(block, blobs)| ProvenancedBlock::local(block, blobs)) } Some(ProvenancedPayload::Builder(full_payload_contents)) => { - into_full_block_and_blobs(block, Some(full_payload_contents)) - .map(ProvenancedBlock::builder) + into_full_block_and_blobs::(block, full_payload_contents) + .map(|(block, blobs)| ProvenancedBlock::builder(block, blobs)) } } .map_err(|e| { @@ -540,28 +756,11 @@ fn late_block_logging>( /// Check if any of the blobs or the block are slashable. Returns `BlockError::Slashable` if so. fn check_slashable( chain_clone: &BeaconChain, - blobs_opt: &Option>, block_root: Hash256, block_clone: &SignedBeaconBlock>, log_clone: &Logger, ) -> Result<(), BlockError> { let slashable_cache = chain_clone.observed_slashable.read(); - if let Some(blobs) = blobs_opt.as_ref() { - blobs.iter().try_for_each(|blob| { - if slashable_cache - .is_slashable(blob.slot(), blob.block_proposer_index(), blob.block_root()) - .map_err(|e| BlockError::BeaconChainError(e.into()))? - { - warn!( - log_clone, - "Not publishing equivocating blob"; - "slot" => block_clone.slot() - ); - return Err(BlockError::Slashable); - } - Ok(()) - })?; - }; if slashable_cache .is_slashable( block_clone.slot(), @@ -579,3 +778,38 @@ fn check_slashable( } Ok(()) } + +/// Converting from a `SignedBlindedBeaconBlock` into a full `SignedBlockContents`. 
+#[allow(clippy::type_complexity)] +pub fn into_full_block_and_blobs( + blinded_block: SignedBlindedBeaconBlock, + maybe_full_payload_contents: FullPayloadContents, +) -> Result<(Arc>, UnverifiedBlobs), String> { + match maybe_full_payload_contents { + // This variant implies a pre-deneb block + FullPayloadContents::Payload(execution_payload) => { + let signed_block = blinded_block + .try_into_full_block(Some(execution_payload)) + .ok_or("Failed to build full block with payload".to_string())?; + Ok((Arc::new(signed_block), None)) + } + // This variant implies a post-deneb block + FullPayloadContents::PayloadAndBlobs(payload_and_blobs) => { + let ExecutionPayloadAndBlobs { + execution_payload, + blobs_bundle, + } = payload_and_blobs; + let signed_block = blinded_block + .try_into_full_block(Some(execution_payload)) + .ok_or("Failed to build full block with payload".to_string())?; + + let BlobsBundle { + commitments: _, + proofs, + blobs, + } = blobs_bundle; + + Ok((Arc::new(signed_block), Some((proofs, blobs)))) + } + } +} diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index dcd494a880f..7b48d64e36f 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -16,7 +16,7 @@ use lighthouse_network::{ }, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, - ConnectedPoint, Enr, NetworkGlobals, PeerId, PeerManager, + ConnectedPoint, Enr, NetworkConfig, NetworkGlobals, PeerId, PeerManager, }; use logging::test_logger; use network::{NetworkReceivers, NetworkSenders}; @@ -61,7 +61,8 @@ type Mutator = BoxedMutator, MemoryStore>; impl InteractiveTester { pub async fn new(spec: Option, validator_count: usize) -> Self { - Self::new_with_initializer_and_mutator(spec, validator_count, None, None).await + Self::new_with_initializer_and_mutator(spec, validator_count, None, None, Config::default()) + .await } pub async fn new_with_initializer_and_mutator( @@ -69,9 +70,10 @@ impl InteractiveTester { validator_count: usize, initializer: Option>, mutator: Option>, + config: Config, ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) - .spec_or_default(spec) + .spec_or_default(spec.map(Arc::new)) .logger(test_logger()) .mock_execution_layer(); @@ -99,8 +101,9 @@ impl InteractiveTester { listening_socket, network_rx, .. - } = create_api_server( + } = create_api_server_with_config( harness.chain.clone(), + config, &harness.runtime, harness.logger().clone(), ) @@ -131,6 +134,15 @@ pub async fn create_api_server( chain: Arc>, test_runtime: &TestRuntime, log: Logger, +) -> ApiServer> { + create_api_server_with_config(chain, Config::default(), test_runtime, log).await +} + +pub async fn create_api_server_with_config( + chain: Arc>, + http_config: Config, + test_runtime: &TestRuntime, + log: Logger, ) -> ApiServer> { // Use port 0 to allocate a new unused port. let port = 0; @@ -145,12 +157,14 @@ pub async fn create_api_server( }); let enr_key = CombinedKey::generate_secp256k1(); let enr = Enr::builder().build(&enr_key).unwrap(); + let network_config = Arc::new(NetworkConfig::default()); let network_globals = Arc::new(NetworkGlobals::new( enr.clone(), meta_data, vec![], false, &log, + network_config, chain.spec.clone(), )); @@ -218,12 +232,14 @@ pub async fn create_api_server( .unwrap(); let ctx = Arc::new(Context { + // Override several config fields with defaults. If these need to be tweaked in future + // we could remove these overrides. 
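// Editor's note: a tiny sketch of the struct-update pattern used in the test
// server config above: the harness pins the fields it must control and takes
// everything else from the caller-supplied config. Stand-in struct, not the
// real `Config`.
#[derive(Default)]
struct HttpConfig {
    enabled: bool,
    listen_port: u16,
    duplicate_block_status_code: u16,
}

fn test_server_config(http_config: HttpConfig) -> HttpConfig {
    HttpConfig {
        enabled: true,
        listen_port: 0, // port 0 lets the OS allocate an unused port
        ..http_config
    }
}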
config: Config { enabled: true, listen_port: port, data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), enable_light_client_server: true, - ..Config::default() + ..http_config }, chain: Some(chain), network_senders: Some(network_senders), diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 59cdbb1c99e..f55983ec66a 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,13 +1,16 @@ +use beacon_chain::blob_verification::GossipVerifiedBlob; use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy}, - GossipVerifiedBlock, IntoGossipVerifiedBlockContents, + GossipVerifiedBlock, IntoGossipVerifiedBlock, }; use eth2::reqwest::StatusCode; use eth2::types::{BroadcastValidation, PublishBlockRequest}; use http_api::test_utils::InteractiveTester; -use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock}; +use http_api::{publish_blinded_block, publish_block, reconstruct_block, Config, ProvenancedBlock}; use std::sync::Arc; -use types::{Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, Slot}; +use types::{ + BlobSidecar, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, Slot, +}; use warp::Rejection; use warp_utils::reject::CustomBadRequest; @@ -81,7 +84,7 @@ pub async fn gossip_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. @@ -266,7 +269,7 @@ pub async fn consensus_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. 
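// Editor's note: the tests that follow call `into_gossip_verified_block`
// directly on the signed block (block-only gossip verification, with blobs
// handled separately). A stand-in sketch of that trait shape, under the
// assumption that verification needs only the chain handle:
struct Chain;
struct GossipVerified<B>(B);
struct GossipError;

trait IntoGossipVerifiedBlock: Sized {
    fn into_gossip_verified_block(
        self,
        chain: &Chain,
    ) -> Result<GossipVerified<Self>, GossipError>;
}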
@@ -360,10 +363,9 @@ pub async fn consensus_partial_pass_only_consensus() { ); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) - .into_gossip_verified_block(&tester.harness.chain); - assert!(gossip_block_contents_b.is_ok()); - let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); + let gossip_block_b = block_b.into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_b.is_ok()); + let gossip_block_a = block_a.into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_a.is_err()); /* submit `block_b` which should induce equivocation */ @@ -372,7 +374,7 @@ pub async fn consensus_partial_pass_only_consensus() { let publication_result = publish_block( None, - ProvenancedBlock::local(gossip_block_contents_b.unwrap()), + ProvenancedBlock::local(gossip_block_b.unwrap(), blobs_b), tester.harness.chain.clone(), &channel.0, test_logger, @@ -382,7 +384,7 @@ pub async fn consensus_partial_pass_only_consensus() { ) .await; - assert!(publication_result.is_ok()); + assert!(publication_result.is_ok(), "{publication_result:?}"); assert!(tester .harness .chain @@ -481,7 +483,7 @@ pub async fn equivocation_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. @@ -555,10 +557,7 @@ pub async fn equivocation_consensus_early_equivocation() { let error_response: eth2::Error = response.err().unwrap(); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error( - error_response, - "BAD_REQUEST: BlockError(Slashable)".to_string(), - ); + assert_server_message_error(error_response, "BAD_REQUEST: Slashable".to_string()); } /// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. 
@@ -642,7 +641,7 @@ pub async fn equivocation_consensus_late_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let ((block_a, blobs_a), mut state_after_a) = + let ((block_a, _blobs_a), mut state_after_a) = tester.harness.make_block(state_a.clone(), slot_b).await; let ((block_b, blobs_b), mut state_after_b) = tester.harness.make_block(state_a, slot_b).await; @@ -657,19 +656,18 @@ pub async fn equivocation_consensus_late_equivocation() { ); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) - .into_gossip_verified_block(&tester.harness.chain); - assert!(gossip_block_contents_b.is_ok()); - let gossip_block_contents_a = PublishBlockRequest::new(block_a, blobs_a) - .into_gossip_verified_block(&tester.harness.chain); - assert!(gossip_block_contents_a.is_err()); + let gossip_block_b = block_b.into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_b.is_ok()); + + let gossip_block_a = block_a.into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); let network_globals = tester.ctx.network_globals.clone().unwrap(); let publication_result = publish_block( None, - ProvenancedBlock::local(gossip_block_contents_b.unwrap()), + ProvenancedBlock::local(gossip_block_b.unwrap(), blobs_b), tester.harness.chain, &channel.0, test_logger, @@ -686,8 +684,8 @@ pub async fn equivocation_consensus_late_equivocation() { assert!(publication_error.find::().is_some()); assert_eq!( - *publication_error.find::().unwrap().0, - "proposal for this slot and proposer has already been seen".to_string() + publication_error.find::().unwrap().0, + "proposal for this slot and proposer has already been seen" ); } @@ -783,7 +781,7 @@ pub async fn blinded_gossip_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. @@ -961,7 +959,7 @@ pub async fn blinded_consensus_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. 
@@ -1099,7 +1097,7 @@ pub async fn blinded_equivocation_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. @@ -1169,10 +1167,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { let error_response: eth2::Error = response.err().unwrap(); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error( - error_response, - "BAD_REQUEST: BlockError(Slashable)".to_string(), - ); + assert_server_message_error(error_response, "BAD_REQUEST: Slashable".to_string()); } /// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. @@ -1295,19 +1290,17 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { .unwrap(); let inner_block_a = match unblinded_block_a { - ProvenancedBlock::Local(a, _) => a, - ProvenancedBlock::Builder(a, _) => a, + ProvenancedBlock::Local(a, _, _) => a, + ProvenancedBlock::Builder(a, _, _) => a, }; let inner_block_b = match unblinded_block_b { - ProvenancedBlock::Local(b, _) => b, - ProvenancedBlock::Builder(b, _) => b, + ProvenancedBlock::Local(b, _, _) => b, + ProvenancedBlock::Builder(b, _, _) => b, }; - let gossip_block_b = - GossipVerifiedBlock::new(inner_block_b.clone().deconstruct().0, &tester.harness.chain); + let gossip_block_b = GossipVerifiedBlock::new(inner_block_b, &tester.harness.chain); assert!(gossip_block_b.is_ok()); - let gossip_block_a = - GossipVerifiedBlock::new(inner_block_a.clone().deconstruct().0, &tester.harness.chain); + let gossip_block_a = GossipVerifiedBlock::new(inner_block_a, &tester.harness.chain); assert!(gossip_block_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); @@ -1374,6 +1367,438 @@ pub async fn blinded_equivocation_full_pass() { .block_is_known_to_fork_choice(&block.canonical_root())); } +/// This test checks that an HTTP POST request with the block & blobs succeeds with a 200 response +/// even if the block has already been seen on gossip without any blobs. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn block_seen_on_gossip_without_blobs() { + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; + let blobs = blobs.expect("should have some blobs"); + assert_ne!(blobs.0.len(), 0); + + // Simulate the block being seen on gossip. + block + .clone() + .into_gossip_verified_block(&tester.harness.chain) + .unwrap(); + + // It should not yet be added to fork choice because blobs have not been seen. + assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); + + // Post the block *and* blobs to the HTTP API. + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block.clone(), Some(blobs)), + validation_level, + ) + .await; + + // This should result in the block being fully imported. + response.unwrap(); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that an HTTP POST request with the block & blobs succeeds with a 200 response +/// even if the block has already been seen on gossip without all blobs. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn block_seen_on_gossip_with_some_blobs() { + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; + let blobs = blobs.expect("should have some blobs"); + assert!( + blobs.0.len() >= 2, + "need at least 2 blobs for partial reveal" + ); + + let partial_kzg_proofs = vec![blobs.0.get(0).unwrap().clone()]; + let partial_blobs = vec![blobs.1.get(0).unwrap().clone()]; + + // Simulate the block being seen on gossip. + block + .clone() + .into_gossip_verified_block(&tester.harness.chain) + .unwrap(); + + // Simulate some of the blobs being seen on gossip. + for (i, (kzg_proof, blob)) in partial_kzg_proofs + .into_iter() + .zip(partial_blobs) + .enumerate() + { + let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); + let gossip_blob = + GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); + tester + .harness + .chain + .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .await + .unwrap(); + } + + // It should not yet be added to fork choice because all blobs have not been seen. + assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); + + // Post the block *and* all blobs to the HTTP API. 
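// Editor's note: the `|| panic!(...)` closures above lean on a contract worth
// spelling out: the publish hook passed to `process_gossip_blob` runs only if
// importing that component completes a block. A stand-in sketch of the
// contract, not the real signature:
fn process_component<F: FnOnce()>(completes_block: bool, publish_fn: F) {
    if completes_block {
        // Only now is it safe (and necessary) to publish the block.
        publish_fn();
    }
}

#[test]
fn hook_is_not_called_for_partial_imports() {
    process_component(false, || panic!("should not publish block yet"));
}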
+ let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block.clone(), Some(blobs)), + validation_level, + ) + .await; + + // This should result in the block being fully imported. + response.unwrap(); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that an HTTP POST request with the block & blobs succeeds with a 200 response +/// even if the blobs have already been seen on gossip. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blobs_seen_on_gossip_without_block() { + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; + let (kzg_proofs, blobs) = blobs.expect("should have some blobs"); + + // Simulate the blobs being seen on gossip. + for (i, (kzg_proof, blob)) in kzg_proofs + .clone() + .into_iter() + .zip(blobs.clone()) + .enumerate() + { + let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); + let gossip_blob = + GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); + tester + .harness + .chain + .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .await + .unwrap(); + } + + // It should not yet be added to fork choice because the block has not been seen. + assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); + + // Post the block *and* all blobs to the HTTP API. + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block.clone(), Some((kzg_proofs, blobs))), + validation_level, + ) + .await; + + // This should result in the block being fully imported. + response.unwrap(); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that an HTTP POST request with the block succeeds with a 200 response +/// if just the blobs have already been seen on gossip. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; + let (kzg_proofs, blobs) = blobs.expect("should have some blobs"); + assert!(!blobs.is_empty()); + + // Simulate the blobs being seen on gossip. + for (i, (kzg_proof, blob)) in kzg_proofs + .clone() + .into_iter() + .zip(blobs.clone()) + .enumerate() + { + let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); + let gossip_blob = + GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); + tester + .harness + .chain + .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .await + .unwrap(); + } + + // It should not yet be added to fork choice because the block has not been seen. + assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); + + // Post just the block to the HTTP API (blob lists are empty). + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2( + &PublishBlockRequest::new( + block.clone(), + Some((Default::default(), Default::default())), + ), + validation_level, + ) + .await; + + // This should result in the block being fully imported. + response.unwrap(); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn slashable_blobs_seen_on_gossip_cause_failure() { + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block_a, blobs_a), _) = tester.harness.make_block(state_a.clone(), slot_b).await; + let ((block_b, blobs_b), _) = tester.harness.make_block(state_a, slot_b).await; + let (kzg_proofs_a, blobs_a) = blobs_a.expect("should have some blobs"); + let (kzg_proofs_b, blobs_b) = blobs_b.expect("should have some blobs"); + + // Simulate the blobs of block B being seen on gossip. + for (i, (kzg_proof, blob)) in kzg_proofs_b.into_iter().zip(blobs_b).enumerate() { + let sidecar = Arc::new(BlobSidecar::new(i, blob, &block_b, kzg_proof).unwrap()); + let gossip_blob = + GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); + tester + .harness + .chain + .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .await + .unwrap(); + } + + // It should not yet be added to fork choice because block B has not been seen. 
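// Editor's note: a sketch (stand-in cache) of the equivocation rule this test
// exercises: once any component for a (slot, proposer) pair with one block
// root has been seen, a second distinct root for the same pair is slashable
// and must not be published.
use std::collections::HashMap;

#[derive(Default)]
struct ObservedSlashable {
    seen: HashMap<(u64, u64), [u8; 32]>, // (slot, proposer) -> block root
}

impl ObservedSlashable {
    fn is_slashable(&mut self, slot: u64, proposer: u64, root: [u8; 32]) -> bool {
        match self.seen.get(&(slot, proposer)) {
            Some(known) => *known != root,
            None => {
                self.seen.insert((slot, proposer), root);
                false
            }
        }
    }
}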
+ assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block_b.canonical_root())); + + // Post block A *and* all its blobs to the HTTP API. + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block_a.clone(), Some((kzg_proofs_a, blobs_a))), + validation_level, + ) + .await; + + // This should not result in block A being fully imported. + response.unwrap_err(); + assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block_a.canonical_root())); +} + +/// This test checks that an HTTP POST request with a duplicate block & blobs results in the +/// `duplicate_status_code` being returned. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn duplicate_block_status_code() { + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let duplicate_block_status_code = StatusCode::IM_A_TEAPOT; + let tester = InteractiveTester::::new_with_initializer_and_mutator( + Some(spec), + validator_count, + None, + None, + Config { + duplicate_block_status_code, + ..Config::default() + }, + ) + .await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; + let (kzg_proofs, blobs) = blobs.expect("should have some blobs"); + + // Post the block blobs to the HTTP API once. + let block_request = PublishBlockRequest::new(block.clone(), Some((kzg_proofs, blobs))); + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block_request, validation_level) + .await; + + // This should result in the block being fully imported. + response.unwrap(); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); + + // Post again. 
+/// This test checks that an HTTP POST request with a duplicate block & blobs results in the +/// `duplicate_status_code` being returned. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn duplicate_block_status_code() { + let validation_level: Option<BroadcastValidation> = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let duplicate_block_status_code = StatusCode::IM_A_TEAPOT; + let tester = InteractiveTester::<E>::new_with_initializer_and_mutator( + Some(spec), + validator_count, + None, + None, + Config { + duplicate_block_status_code, + ..Config::default() + }, + ) + .await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; + let (kzg_proofs, blobs) = blobs.expect("should have some blobs"); + + // Post the block & blobs to the HTTP API once. + let block_request = PublishBlockRequest::new(block.clone(), Some((kzg_proofs, blobs))); + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block_request, validation_level) + .await; + + // This should result in the block being fully imported. + response.unwrap(); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); + + // Post again. + let duplicate_response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block_request, validation_level) + .await; + let err = duplicate_response.unwrap_err(); + assert_eq!(err.status().unwrap(), duplicate_block_status_code); +} + fn assert_server_message_error(error_response: eth2::Error, expected_message: String) { let eth2::Error::ServerMessage(err) = error_response else { panic!("Not a eth2::Error::ServerMessage");
diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index b5b3edf892e..8cb6053e9ff 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -386,6 +386,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { .genesis_state_ephemeral_store(genesis_state) })), None, + Default::default(), ) .await; let harness = &tester.harness;
diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 5034492e250..c3ed3347821 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -72,6 +72,7 @@ async fn state_by_root_pruned_from_fork_choice() { }) })), None, + Default::default(), ) .await; @@ -427,6 +428,7 @@ pub async fn proposer_boost_re_org_test( DisallowedReOrgOffsets::new::<E>(disallowed_offsets).unwrap(), ) })), + Default::default(), ) .await; let harness = &tester.harness;
diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 058d3ff01ac..f4c7a4fdb44 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -122,7 +122,7 @@ impl ApiTester { } pub async fn new_from_config(config: ApiTesterConfig) -> Self { - let spec = config.spec; + let spec = Arc::new(config.spec); let mut harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone())
diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index f835d13fb66..97ba72a2ac6 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -14,7 +14,7 @@ beacon_chain = { workspace = true } store = { workspace = true } lighthouse_network = { workspace = true } slot_clock = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } lighthouse_version = { workspace = true } warp_utils = { workspace = true } malloc_utils = { workspace = true }
diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index d68efff4320..d751c51e4c9 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,8 +1,8 @@ use crate::Context; use beacon_chain::BeaconChainTypes; -use lighthouse_metrics::TextEncoder; use lighthouse_network::prometheus_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; +use metrics::TextEncoder; pub fn gather_prometheus_metrics<T: BeaconChainTypes>( ctx: &Context<T>, @@ -17,13 +17,13 @@ pub fn gather_prometheus_metrics<T: BeaconChainTypes>( // - Statically updated: things which are only updated at the time of the scrape (used where we // can avoid cluttering up code with metrics calls). // // The `lighthouse_metrics` crate has a `DEFAULT_REGISTRY` global singleton - // The `metrics` crate has a `DEFAULT_REGISTRY` global singleton wait
// // We proceed by, first updating all the static metrics using `scrape_for_metrics(..)`. Then, - // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into + // using `metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into // a string that can be returned via HTTP. if let Some(beacon_chain) = ctx.chain.as_ref() { @@ -48,7 +48,7 @@ pub fn gather_prometheus_metrics<T: BeaconChainTypes>( } encoder - .encode_utf8(&lighthouse_metrics::gather(), &mut buffer) + .encode_utf8(&metrics::gather(), &mut buffer) .unwrap(); // encode gossipsub metrics also if they exist if let Some(registry) = ctx.gossipsub_registry.as_ref() {
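The scrape flow described in the comment above boils down to: refresh the statically-updated metrics, then encode the global registry. A minimal sketch, assuming the renamed `metrics` crate keeps the `gather()`/`TextEncoder` API that the hunk above uses (`TextEncoder::new()` is an assumption; only `encode_utf8` and `gather` appear in the diff):

fn render_metrics() -> String {
    let mut buffer = String::new();
    let encoder = metrics::TextEncoder::new();
    // Statically-updated metrics would be refreshed first (`scrape_for_metrics(..)`),
    // then the global DEFAULT_REGISTRY is encoded to the Prometheus text format.
    encoder
        .encode_utf8(&metrics::gather(), &mut buffer)
        .expect("UTF-8 encoding of metrics should not fail");
    buffer
}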
diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index b0f5b9a5e1c..c4fad997025 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -21,7 +21,7 @@ futures = { workspace = true } error-chain = { workspace = true } dirs = { workspace = true } fnv = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } smallvec = { workspace = true } tokio-io-timeout = "1" lru = { workspace = true }
diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index 996f701e890..c50e76e7f2c 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -1812,9 +1812,6 @@ where // Calculate the message id on the transformed data. let msg_id = self.config.message_id(&message); - // Broadcast IDONTWANT messages. - self.send_idontwant(&raw_message, &msg_id, propagation_source); - // Check the validity of the message // Peers get penalized if this message is invalid. We don't add it to the duplicate cache // and instead continually penalize peers that repeatedly send this message. @@ -1830,6 +1827,12 @@ where self.mcache.observe_duplicate(&msg_id, propagation_source); return; } + + // Broadcast IDONTWANT messages + if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold() { + self.send_idontwant(&raw_message, &msg_id, propagation_source); + } + tracing::debug!( message=%msg_id, "Put message in duplicate_cache and resolve promises" @@ -3348,6 +3351,8 @@ where }; if let Some(metrics) = self.metrics.as_mut() { metrics.register_idontwant(message_ids.len()); + let idontwant_size = message_ids.iter().map(|id| id.0.len()).sum(); + metrics.register_idontwant_bytes(idontwant_size); } for message_id in message_ids { peer.dont_send.insert(message_id, Instant::now());
diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs index 00de3ba2dbc..62f026b568a 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs @@ -5266,13 +5266,14 @@ fn sends_idontwant() { let message = RawMessage { source: Some(peers[1]), - data: vec![12], + data: vec![12u8; 1024], sequence_number: Some(0), topic: topic_hashes[0].clone(), signature: None, key: None, validated: true, }; + gs.handle_received_message(message.clone(), &local_id); assert_eq!( receivers @@ -5292,6 +5293,48 @@ ); } +#[test] +fn doesnt_sends_idontwant_for_lower_message_size() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(5) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + + gs.handle_received_message(message.clone(), &local_id); + assert_eq!( + receivers + .into_iter() + .fold(0, |mut idontwants, (peer_id, c)| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if let Ok(RpcOut::IDontWant(_)) = non_priority.try_recv() { + assert_ne!(peer_id, peers[1]); + idontwants += 1; + } + } + idontwants + }), + 0, + "IDONTWANT was sent" + ); +} + /// Test that a node doesn't send IDONTWANT messages to the mesh peers /// that don't run Gossipsub v1.2. #[test] @@ -5316,6 +5359,7 @@ fn doesnt_send_idontwant() { key: None, validated: true, }; + gs.handle_received_message(message.clone(), &local_id); assert_eq!( receivers
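Condensing the gate added in `handle_received_message` above: IDONTWANT is only broadcast for messages whose raw protobuf size exceeds the configured threshold (1000 bytes by default), which the two tests exercise from both sides. A standalone restatement (not the actual behaviour code):

fn should_send_idontwant(raw_protobuf_len: usize, threshold: usize) -> bool {
    // Mirrors `raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold()`.
    raw_protobuf_len > threshold
}

fn main() {
    assert!(should_send_idontwant(1024, 1000)); // the 1 KiB message in `sends_idontwant`
    assert!(!should_send_idontwant(1, 1000)); // the tiny message in the small-message test
}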
diff --git a/beacon_node/lighthouse_network/gossipsub/src/config.rs b/beacon_node/lighthouse_network/gossipsub/src/config.rs index 1296e614c89..eb8dd432a33 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/config.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/config.rs @@ -98,6 +98,7 @@ pub struct Config { connection_handler_queue_len: usize, connection_handler_publish_duration: Duration, connection_handler_forward_duration: Duration, + idontwant_message_size_threshold: usize, } impl Config { @@ -370,6 +371,16 @@ impl Config { pub fn forward_queue_duration(&self) -> Duration { self.connection_handler_forward_duration } + + // The message size threshold for which IDONTWANT messages are sent. + // Sending IDONTWANT messages for small messages can have a negative effect on the overall + // traffic and CPU load. This acts as a lower-bound cutoff: IDONTWANT is not sent to peers + // for messages smaller than this threshold. It only takes effect for peers that support + // Gossipsub v1.2 + // (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md#idontwant-message). + // The default is 1 kB. + pub fn idontwant_message_size_threshold(&self) -> usize { + self.idontwant_message_size_threshold + } } impl Default for Config { @@ -440,6 +451,7 @@ impl Default for ConfigBuilder { connection_handler_queue_len: 5000, connection_handler_publish_duration: Duration::from_secs(5), connection_handler_forward_duration: Duration::from_millis(1000), + idontwant_message_size_threshold: 1000, }, invalid_protocol: false, } @@ -825,6 +837,17 @@ impl ConfigBuilder { self } + + // The message size threshold for which IDONTWANT messages are sent. + // Sending IDONTWANT messages for small messages can have a negative effect on the overall + // traffic and CPU load. This acts as a lower-bound cutoff: IDONTWANT is not sent to peers + // for messages smaller than this threshold. It only takes effect for peers that support + // Gossipsub v1.2 + // (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md#idontwant-message). + // The default is 1 kB. + pub fn idontwant_message_size_threshold(&mut self, size: usize) -> &mut Self { + self.config.idontwant_message_size_threshold = size; + self + } + /// Constructs a [`Config`] from the given configuration and validates the settings. pub fn build(&self) -> Result { // check all constraints on config @@ -895,6 +918,10 @@ impl std::fmt::Debug for Config { "published_message_ids_cache_time", &self.published_message_ids_cache_time, ); + let _ = builder.field( + "idontwant_message_size_threshold", + &self.idontwant_message_size_threshold, + ); builder.finish() } }
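A usage sketch of the new knob (module path taken from `gossipsub_config` below; the builder method, `build()` chain, and getter are the ones added above):

// Raise the cutoff so only messages larger than 16 KiB trigger IDONTWANT broadcasts.
let config = gossipsub::ConfigBuilder::default()
    .idontwant_message_size_threshold(16 * 1024)
    .build()
    .expect("valid gossipsub configuration");
assert_eq!(config.idontwant_message_size_threshold(), 16 * 1024);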
diff --git a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs index 7e1cdac18ba..a4ac389a748 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs @@ -185,6 +185,9 @@ pub(crate) struct Metrics { /// The number of msg_id's we have received in every IDONTWANT control message. idontwant_msgs_ids: Counter, + /// The number of bytes we have received in every IDONTWANT control message. + idontwant_bytes: Counter, + /// The size of the priority queue. priority_queue_size: Histogram, /// The size of the non-priority queue. @@ -338,6 +341,16 @@ impl Metrics { metric }; + let idontwant_bytes = { + let metric = Counter::default(); + registry.register( + "idontwant_bytes", + "The total bytes we have received in IDONTWANT control messages", + metric.clone(), + ); + metric + }; + let memcache_misses = { let metric = Counter::default(); registry.register( @@ -390,6 +403,7 @@ impl Metrics { memcache_misses, topic_iwant_msgs, idontwant_msgs, + idontwant_bytes, idontwant_msgs_ids, priority_queue_size, non_priority_queue_size, @@ -589,6 +603,11 @@ impl Metrics { } } + /// Register receiving the total bytes of an IDONTWANT control message. + pub(crate) fn register_idontwant_bytes(&mut self, bytes: usize) { + self.idontwant_bytes.inc_by(bytes as u64); + } + /// Register receiving an IDONTWANT msg for this topic. pub(crate) fn register_idontwant(&mut self, msgs: usize) { self.idontwant_msgs.inc();
diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 7c95977140e..d70e50b1da3 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -19,6 +19,7 @@ pub const DEFAULT_IPV4_ADDRESS: Ipv4Addr = Ipv4Addr::UNSPECIFIED; pub const DEFAULT_TCP_PORT: u16 = 9000u16; pub const DEFAULT_DISC_PORT: u16 = 9000u16; pub const DEFAULT_QUIC_PORT: u16 = 9001u16; +pub const DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD: usize = 1000usize; /// The maximum size of gossip messages. pub fn gossip_max_size(is_merge_enabled: bool, gossip_max_size: usize) -> usize { @@ -141,6 +142,10 @@ pub struct Config { /// Configuration for the inbound rate limiter (requests received by this node). pub inbound_rate_limiter_config: Option<InboundRateLimiterConfig>, + + /// Configuration for the minimum message size for which IDONTWANT messages are sent in the mesh. + /// Lowering the value reduces the optimization effect of the IDONTWANT messages. + pub idontwant_message_size_threshold: usize, } impl Config { @@ -352,6 +357,7 @@ impl Default for Config { outbound_rate_limiter_config: None, invalid_block_storage: None, inbound_rate_limiter_config: None, + idontwant_message_size_threshold: DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD, } } } @@ -433,6 +439,7 @@ pub fn gossipsub_config( gossipsub_config_params: GossipsubConfigParams, seconds_per_slot: u64, slots_per_epoch: u64, + idontwant_message_size_threshold: usize, ) -> gossipsub::Config { fn prefix( prefix: [u8; 4], @@ -440,28 +447,22 @@ pub fn gossipsub_config( fork_context: Arc<ForkContext>, ) -> Vec<u8> { let topic_bytes = message.topic.as_str().as_bytes(); - match fork_context.current_fork() { - ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb - | ForkName::Electra => { - let topic_len_bytes = topic_bytes.len().to_le_bytes(); - let mut vec = Vec::with_capacity( - prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), - ); - vec.extend_from_slice(&prefix); - vec.extend_from_slice(&topic_len_bytes); - vec.extend_from_slice(topic_bytes); - vec.extend_from_slice(&message.data); - vec - } - ForkName::Base => { - let mut vec = Vec::with_capacity(prefix.len() + message.data.len()); - vec.extend_from_slice(&prefix); - vec.extend_from_slice(&message.data); - vec - } + + if fork_context.current_fork().altair_enabled() { + let topic_len_bytes = topic_bytes.len().to_le_bytes(); + let mut vec = Vec::with_capacity( + prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), + ); + vec.extend_from_slice(&prefix); + vec.extend_from_slice(&topic_len_bytes); + vec.extend_from_slice(topic_bytes); + vec.extend_from_slice(&message.data); + vec + } else { + let mut vec = Vec::with_capacity(prefix.len() + message.data.len()); + vec.extend_from_slice(&prefix); + vec.extend_from_slice(&message.data); + vec } } let message_domain_valid_snappy = gossipsub_config_params.message_domain_valid_snappy; @@ -504,6 +505,7 @@ pub fn gossipsub_config( .duplicate_cache_time(duplicate_cache_time) .message_id_fn(gossip_message_id) .allow_self_origin(true) + .idontwant_message_size_threshold(idontwant_message_size_threshold) .build() .expect("valid gossipsub configuration") }
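The `prefix` refactor above keeps the same message-id pre-image layout while replacing the exhaustive fork match with `altair_enabled()`. A condensed, standalone sketch of the byte layout (not the exact function):

fn message_id_preimage(prefix: [u8; 4], topic: &str, data: &[u8], altair_enabled: bool) -> Vec<u8> {
    let mut vec = Vec::new();
    vec.extend_from_slice(&prefix);
    if altair_enabled {
        // Post-Altair: prefix || topic_len (LE bytes) || topic || data.
        vec.extend_from_slice(&topic.len().to_le_bytes());
        vec.extend_from_slice(topic.as_bytes());
    }
    // Base (phase0): prefix || data only.
    vec.extend_from_slice(data);
    vec
}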
diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index e1cea3153ac..d57c67bacb8 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1215,10 +1215,11 @@ mod tests { } async fn build_discovery() -> Discovery<E> { - let spec = ChainSpec::default(); + let spec = Arc::new(ChainSpec::default()); let keypair = secp256k1::Keypair::generate(); let mut config = NetworkConfig::default(); config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); + let config = Arc::new(config); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); let enr: Enr = build_enr::<E>(&enr_key, &config, &EnrForkId::default(), &spec).unwrap(); let log = build_log(slog::Level::Debug, false); @@ -1232,6 +1233,7 @@ mod tests { vec![], false, &log, + config.clone(), spec.clone(), ); let keypair = keypair.into();
diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 5c12290b970..ced803add80 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -122,6 +122,6 @@ pub use peer_manager::{ ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; // pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME} -pub use service::api_types::{PeerRequestId, Request, Response}; +pub use service::api_types::{PeerRequestId, Response}; pub use service::utils::*; pub use service::{Gossipsub, NetworkEvent};
diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index c3f64a5a1f4..15445c7d645 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static NAT_OPEN: LazyLock> = LazyLock::new(|| {
diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 320bbc4d638..c1e72d250ff 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -2,11 +2,9 @@ use crate::discovery::enr_ext::EnrExt; use crate::discovery::peer_id_to_node_id; -use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; +use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RpcErrorResponse}; use crate::service::TARGET_SUBNET_PEERS; -use crate::{error, metrics, Gossipsub}; -use crate::{NetworkGlobals, PeerId}; -use crate::{Subnet, SubnetDiscovery}; +use crate::{error, metrics, Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery}; use delay_map::HashSetDelay; use discv5::Enr; use libp2p::identify::Info as IdentifyInfo; @@ -340,15 +338,15 @@ impl PeerManager { { // This should be updated with the peer dialing. In fact created once the peer is // dialed + let peer_id = enr.peer_id(); if let Some(min_ttl) = min_ttl { self.network_globals .peers .write() - .update_min_ttl(&enr.peer_id(), min_ttl); + .update_min_ttl(&peer_id, min_ttl); } - let peer_id = enr.peer_id(); if self.dial_peer(enr) { - debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); + debug!(self.log, "Added discovered ENR peer to dial queue"; "peer_id" => %peer_id); to_dial_peers += 1; } } @@ -449,18 +447,6 @@ impl PeerManager { self.network_globals.peers.read().is_connected(peer_id) } - /// Reports whether the peer limit is reached in which case we stop allowing new incoming - /// connections.
- pub fn peer_limit_reached(&self, count_dialing: bool) -> bool { - if count_dialing { - // This is an incoming connection so limit by the standard max peers - self.network_globals.connected_or_dialing_peers() >= self.max_peers() - } else { - // We dialed this peer, allow up to max_outbound_dialing_peers - self.network_globals.connected_peers() >= self.max_outbound_dialing_peers() - } - } - /// Updates `PeerInfo` with `identify` information. pub fn identify(&mut self, peer_id: &PeerId, info: &IdentifyInfo) { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { @@ -528,8 +514,8 @@ impl PeerManager { PeerAction::HighToleranceError } RPCError::ErrorResponse(code, _) => match code { - RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError, - RPCResponseErrorCode::ResourceUnavailable => { + RpcErrorResponse::Unknown => PeerAction::HighToleranceError, + RpcErrorResponse::ResourceUnavailable => { // Don't ban on this because we want to retry with a block by root request. if matches!( protocol, @@ -560,9 +546,9 @@ impl PeerManager { ConnectionDirection::Incoming => return, } } - RPCResponseErrorCode::ServerError => PeerAction::MidToleranceError, - RPCResponseErrorCode::InvalidRequest => PeerAction::LowToleranceError, - RPCResponseErrorCode::RateLimited => match protocol { + RpcErrorResponse::ServerError => PeerAction::MidToleranceError, + RpcErrorResponse::InvalidRequest => PeerAction::LowToleranceError, + RpcErrorResponse::RateLimited => match protocol { Protocol::Ping => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, @@ -572,6 +558,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => return, Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, @@ -579,7 +566,7 @@ impl PeerManager { Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, }, - RPCResponseErrorCode::BlobsNotFoundForBlock => PeerAction::LowToleranceError, + RpcErrorResponse::BlobsNotFoundForBlock => PeerAction::LowToleranceError, }, RPCError::SSZDecodeError(_) => PeerAction::Fatal, RPCError::UnsupportedProtocol => { @@ -599,6 +586,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => return, Protocol::MetaData => PeerAction::Fatal, Protocol::Status => PeerAction::Fatal, } @@ -620,6 +608,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => return, Protocol::Goodbye => return, Protocol::MetaData => return, Protocol::Status => return, @@ -1452,6 +1441,7 @@ enum ConnectingType { #[cfg(test)] mod tests { use super::*; + use crate::NetworkConfig; use slog::{o, Drain}; use types::MainnetEthSpec as E; @@ -1468,15 +1458,7 @@ mod tests { } async fn build_peer_manager(target_peer_count: usize) -> PeerManager { - let config = config::Config { - target_peer_count, - discovery_enabled: false, - ..Default::default() - }; - let log = build_log(slog::Level::Debug, false); - let spec = 
E::default_spec(); - let globals = NetworkGlobals::new_test_globals(vec![], &log, spec); - PeerManager::new(config, Arc::new(globals), &log).unwrap() + build_peer_manager_with_trusted_peers(vec![], target_peer_count).await } async fn build_peer_manager_with_trusted_peers( @@ -1488,9 +1470,13 @@ mod tests { discovery_enabled: false, ..Default::default() }; + let network_config = Arc::new(NetworkConfig { + target_peers: target_peer_count, + ..Default::default() + }); let log = build_log(slog::Level::Debug, false); - let spec = E::default_spec(); - let globals = NetworkGlobals::new_test_globals(trusted_peers, &log, spec); + let spec = Arc::new(E::default_spec()); + let globals = NetworkGlobals::new_test_globals(trusted_peers, &log, network_config, spec); PeerManager::new(config, Arc::new(globals), &log).unwrap() } @@ -2345,16 +2331,6 @@ mod tests { gossipsub_score: f64, } - // generate an arbitrary f64 while preventing NaN values - fn arbitrary_f64(g: &mut Gen) -> f64 { - loop { - let val = f64::arbitrary(g); - if !val.is_nan() { - return val; - } - } - } - impl Arbitrary for PeerCondition { fn arbitrary(g: &mut Gen) -> Self { let attestation_net_bitfield = { @@ -2380,9 +2356,9 @@ mod tests { outgoing: bool::arbitrary(g), attestation_net_bitfield, sync_committee_net_bitfield, - score: arbitrary_f64(g), + score: f64::arbitrary(g), trusted: bool::arbitrary(g), - gossipsub_score: arbitrary_f64(g), + gossipsub_score: f64::arbitrary(g), } } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index b7fd5b5e5d7..c40f78b4b08 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -15,7 +15,6 @@ use slog::{debug, error, trace}; use types::EthSpec; use crate::discovery::enr_ext::EnrExt; -use crate::rpc::GoodbyeReason; use crate::types::SyncState; use crate::{metrics, ClearDialError}; @@ -94,26 +93,20 @@ impl NetworkBehaviour for PeerManager { } if let Some(enr) = self.peers_to_dial.pop() { - let peer_id = enr.peer_id(); - self.inject_peer_connection(&peer_id, ConnectingType::Dialing, Some(enr.clone())); - - let quic_multiaddrs = if self.quic_enabled { - let quic_multiaddrs = enr.multiaddr_quic(); - if !quic_multiaddrs.is_empty() { - debug!(self.log, "Dialing QUIC supported peer"; "peer_id"=> %peer_id, "quic_multiaddrs" => ?quic_multiaddrs); - } - quic_multiaddrs - } else { - Vec::new() - }; + self.inject_peer_connection(&enr.peer_id(), ConnectingType::Dialing, Some(enr.clone())); // Prioritize Quic connections over Tcp ones. - let multiaddrs = quic_multiaddrs - .into_iter() - .chain(enr.multiaddr_tcp()) - .collect(); + let multiaddrs = [ + self.quic_enabled + .then_some(enr.multiaddr_quic()) + .unwrap_or_default(), + enr.multiaddr_tcp(), + ] + .concat(); + + debug!(self.log, "Dialing peer"; "peer_id"=> %enr.peer_id(), "multiaddrs" => ?multiaddrs); return Poll::Ready(ToSwarm::Dial { - opts: DialOpts::peer_id(peer_id) + opts: DialOpts::peer_id(enr.peer_id()) .condition(PeerCondition::Disconnected) .addresses(multiaddrs) .build(), @@ -130,14 +123,7 @@ impl NetworkBehaviour for PeerManager { endpoint, other_established, .. - }) => { - // NOTE: We still need to handle the [`ConnectionEstablished`] because the - // [`NetworkBehaviour::handle_established_inbound_connection`] and - // [`NetworkBehaviour::handle_established_outbound_connection`] are fallible. 
This - // means another behaviour can kill the connection early, and we can't assume a - // peer as connected until this event is received. - self.on_connection_established(peer_id, endpoint, other_established) - } + }) => self.on_connection_established(peer_id, endpoint, other_established), FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, endpoint, @@ -206,6 +192,21 @@ impl NetworkBehaviour for PeerManager { "Connection to peer rejected: peer has a bad score", )); } + + // Check the connection limits + if self.network_globals.connected_or_dialing_peers() >= self.max_peers() + && self + .network_globals + .peers + .read() + .peer_info(&peer_id) + .map_or(true, |peer| !peer.has_future_duty()) + { + return Err(ConnectionDenied::new( + "Connection to peer rejected: too many connections", + )); + } + Ok(ConnectionHandler) } @@ -218,13 +219,26 @@ impl NetworkBehaviour for PeerManager { _port_use: PortUse, ) -> Result, libp2p::swarm::ConnectionDenied> { trace!(self.log, "Outbound connection"; "peer_id" => %peer_id, "multiaddr" => %addr); - match self.ban_status(&peer_id) { - Some(cause) => { - error!(self.log, "Connected a banned peer. Rejecting connection"; "peer_id" => %peer_id); - Err(ConnectionDenied::new(cause)) - } - None => Ok(ConnectionHandler), + if let Some(cause) = self.ban_status(&peer_id) { + error!(self.log, "Connected a banned peer. Rejecting connection"; "peer_id" => %peer_id); + return Err(ConnectionDenied::new(cause)); } + + // Check the connection limits + if self.network_globals.connected_peers() >= self.max_outbound_dialing_peers() + && self + .network_globals + .peers + .read() + .peer_info(&peer_id) + .map_or(true, |peer| !peer.has_future_duty()) + { + return Err(ConnectionDenied::new( + "Connection to peer rejected: too many connections", + )); + } + + Ok(ConnectionHandler) } } @@ -233,7 +247,7 @@ impl PeerManager { &mut self, peer_id: PeerId, endpoint: &ConnectedPoint, - other_established: usize, + _other_established: usize, ) { debug!(self.log, "Connection established"; "peer_id" => %peer_id, "multiaddr" => %endpoint.get_remote_address(), @@ -247,26 +261,6 @@ impl PeerManager { self.update_peer_count_metrics(); } - // Count dialing peers in the limit if the peer dialed us. - let count_dialing = endpoint.is_listener(); - // Check the connection limits - if self.peer_limit_reached(count_dialing) - && self - .network_globals - .peers - .read() - .peer_info(&peer_id) - .map_or(true, |peer| !peer.has_future_duty()) - { - // Gracefully disconnect the peer. - self.disconnect_peer(peer_id, GoodbyeReason::TooManyPeers); - return; - } - - if other_established == 0 { - self.events.push(PeerManagerEvent::MetaData(peer_id)); - } - // NOTE: We don't register peers that we are disconnecting immediately. The network service // does not need to know about these peers. 
match endpoint {
diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 08d9e5209c8..d2effd4d037 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,8 +1,8 @@ use crate::discovery::enr::PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY; use crate::discovery::{peer_id_to_node_id, CombinedKey}; use crate::{metrics, multiaddr::Multiaddr, types::Subnet, Enr, EnrExt, Gossipsub, PeerId}; +use itertools::Itertools; use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; -use rand::seq::SliceRandom; use score::{PeerAction, ReportSource, Score, ScoreState}; use slog::{crit, debug, error, trace, warn}; use std::net::IpAddr; @@ -290,15 +290,11 @@ impl PeerDB { /// Returns a vector of all connected peers sorted by score beginning with the worst scores. /// Ties get broken randomly. pub fn worst_connected_peers(&self) -> Vec<(&PeerId, &PeerInfo)> { - let mut connected = self - .peers + self.peers .iter() .filter(|(_, info)| info.is_connected()) - .collect::<Vec<_>>(); - - connected.shuffle(&mut rand::thread_rng()); - connected.sort_by_key(|(_, info)| info.score()); - connected + .sorted_by(|(_, info_a), (_, info_b)| info_a.score().total_cmp(&info_b.score(), false)) + .collect::<Vec<_>>() } /// Returns a vector containing peers (their ids and info), sorted by @@ -307,13 +303,11 @@ impl PeerDB { where F: Fn(&PeerInfo) -> bool, { - let mut by_status = self - .peers + self.peers .iter() .filter(|(_, info)| is_status(info)) - .collect::<Vec<_>>(); - by_status.sort_by_key(|(_, info)| info.score()); - by_status.into_iter().rev().collect() + .sorted_by(|(_, info_a), (_, info_b)| info_a.score().total_cmp(&info_b.score(), true)) + .collect::<Vec<_>>() } /// Returns the peer with highest reputation that satisfies `is_status` @@ -324,7 +318,7 @@ impl PeerDB { self.peers .iter() .filter(|(_, info)| is_status(info)) - .max_by_key(|(_, info)| info.score()) + .max_by(|(_, info_a), (_, info_b)| info_a.score().total_cmp(&info_b.score(), false)) .map(|(id, _)| id) }
diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index c8425fc104d..995ebf90646 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -7,6 +7,7 @@ //! The scoring algorithms are currently experimental. use crate::service::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; use serde::Serialize; +use std::cmp::Ordering; use std::sync::LazyLock; use std::time::Instant; use strum::AsRefStr; @@ -260,7 +261,7 @@ impl RealScore { } } -#[derive(PartialEq, Clone, Debug, Serialize)] +#[derive(Clone, Debug, Serialize)] pub enum Score { Max, Real(RealScore), @@ -323,21 +324,25 @@ impl Score { Self::Real(score) => score.is_good_gossipsub_peer(), } } -} - -impl Eq for Score {} -impl PartialOrd for Score { - fn partial_cmp(&self, other: &Score) -> Option<std::cmp::Ordering> { - Some(self.cmp(other)) - } -} - -impl Ord for Score { - fn cmp(&self, other: &Score) -> std::cmp::Ordering { - self.score() - .partial_cmp(&other.score()) - .unwrap_or(std::cmp::Ordering::Equal) + /// Instead of implementing `Ord` for `Score`, which ultimately wraps an `f64` underneath, + /// follow the std convention and implement `Score::total_cmp`, similar to `f64::total_cmp`.
+ pub fn total_cmp(&self, other: &Score, reverse: bool) -> Ordering { + match self.score().partial_cmp(&other.score()) { + Some(v) => { + // Only reverse when none of the items is NAN, + // so that NAN's are never considered. + if reverse { + v.reverse() + } else { + v + } + } + None if self.score().is_nan() && !other.score().is_nan() => Ordering::Less, + None if !self.score().is_nan() && other.score().is_nan() => Ordering::Greater, + // Both are NAN. + None => Ordering::Equal, + } } } diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 224fb8a5f71..9bdecab70b1 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -2,7 +2,7 @@ use crate::rpc::methods::*; use crate::rpc::protocol::{ Encoding, ProtocolId, RPCError, SupportedProtocol, ERROR_TYPE_MAX, ERROR_TYPE_MIN, }; -use crate::rpc::{InboundRequest, OutboundRequest}; +use crate::rpc::RequestType; use libp2p::bytes::BufMut; use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; @@ -18,9 +18,9 @@ use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, - RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, - SignedBeaconBlockElectra, + LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, }; use unsigned_varint::codec::Uvi; @@ -61,23 +61,24 @@ impl SSZSnappyInboundCodec { /// Encodes RPC Responses sent to peers. 
fn encode_response( &mut self, - item: RPCCodedResponse, + item: RpcResponse, dst: &mut BytesMut, ) -> Result<(), RPCError> { let bytes = match &item { - RPCCodedResponse::Success(resp) => match &resp { - RPCResponse::Status(res) => res.as_ssz_bytes(), - RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), - RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), - RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), - RPCResponse::BlobsByRoot(res) => res.as_ssz_bytes(), - RPCResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), - RPCResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), - RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), - RPCResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), - RPCResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), - RPCResponse::Pong(res) => res.data.as_ssz_bytes(), - RPCResponse::MetaData(res) => + RpcResponse::Success(resp) => match &resp { + RpcSuccessResponse::Status(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlocksByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlobsByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlobsByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientUpdatesByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::Pong(res) => res.data.as_ssz_bytes(), + RpcSuccessResponse::MetaData(res) => // Encode the correct version of the MetaData response based on the negotiated version. { match self.protocol.versioned_protocol { @@ -92,8 +93,8 @@ impl SSZSnappyInboundCodec { } } }, - RPCCodedResponse::Error(_, err) => err.as_ssz_bytes(), - RPCCodedResponse::StreamTermination(_) => { + RpcResponse::Error(_, err) => err.as_ssz_bytes(), + RpcResponse::StreamTermination(_) => { unreachable!("Code error - attempting to encode a stream termination") } }; @@ -126,10 +127,10 @@ impl SSZSnappyInboundCodec { } // Encoder for inbound streams: Encodes RPC Responses sent to peers. 
-impl Encoder> for SSZSnappyInboundCodec { +impl Encoder> for SSZSnappyInboundCodec { type Error = RPCError; - fn encode(&mut self, item: RPCCodedResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RpcResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { dst.clear(); dst.reserve(1); dst.put_u8( @@ -142,18 +143,18 @@ impl Encoder> for SSZSnappyInboundCodec { // Decoder for inbound streams: Decodes RPC requests from peers impl Decoder for SSZSnappyInboundCodec { - type Item = InboundRequest; + type Item = RequestType; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV1 { - return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v1()))); + return Ok(Some(RequestType::MetaData(MetadataRequest::new_v1()))); } if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV2 { - return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2()))); + return Ok(Some(RequestType::MetaData(MetadataRequest::new_v2()))); } if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV3 { - return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v3()))); + return Ok(Some(RequestType::MetaData(MetadataRequest::new_v3()))); } let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else { return Ok(None); @@ -231,7 +232,10 @@ impl SSZSnappyOutboundCodec { } // Decode an Rpc response. - fn decode_response(&mut self, src: &mut BytesMut) -> Result>, RPCError> { + fn decode_response( + &mut self, + src: &mut BytesMut, + ) -> Result>, RPCError> { // Read the context bytes if required if self.protocol.has_context_bytes() && self.fork_name.is_none() { if src.len() >= CONTEXT_BYTES_LEN { @@ -318,28 +322,34 @@ impl SSZSnappyOutboundCodec { } // Encoder for outbound streams: Encodes RPC Requests to peers -impl Encoder> for SSZSnappyOutboundCodec { +impl Encoder> for SSZSnappyOutboundCodec { type Error = RPCError; - fn encode(&mut self, item: OutboundRequest, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RequestType, dst: &mut BytesMut) -> Result<(), Self::Error> { let bytes = match item { - OutboundRequest::Status(req) => req.as_ssz_bytes(), - OutboundRequest::Goodbye(req) => req.as_ssz_bytes(), - OutboundRequest::BlocksByRange(r) => match r { + RequestType::Status(req) => req.as_ssz_bytes(), + RequestType::Goodbye(req) => req.as_ssz_bytes(), + RequestType::BlocksByRange(r) => match r { OldBlocksByRangeRequest::V1(req) => req.as_ssz_bytes(), OldBlocksByRangeRequest::V2(req) => req.as_ssz_bytes(), }, - OutboundRequest::BlocksByRoot(r) => match r { + RequestType::BlocksByRoot(r) => match r { BlocksByRootRequest::V1(req) => req.block_roots.as_ssz_bytes(), BlocksByRootRequest::V2(req) => req.block_roots.as_ssz_bytes(), }, - OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(), - OutboundRequest::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), - OutboundRequest::DataColumnsByRange(req) => req.as_ssz_bytes(), - OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), - OutboundRequest::Ping(req) => req.as_ssz_bytes(), - OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode + RequestType::BlobsByRange(req) => req.as_ssz_bytes(), + RequestType::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), + RequestType::DataColumnsByRange(req) => req.as_ssz_bytes(), + RequestType::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), + RequestType::Ping(req) => 
req.as_ssz_bytes(), + RequestType::LightClientBootstrap(req) => req.as_ssz_bytes(), + RequestType::LightClientUpdatesByRange(req) => req.as_ssz_bytes(), + // no metadata to encode + RequestType::MetaData(_) + | RequestType::LightClientOptimisticUpdate + | RequestType::LightClientFinalityUpdate => return Ok(()), }; + // SSZ encoded bytes should be within `max_packet_size` if bytes.len() > self.max_packet_size { return Err(RPCError::InternalError( @@ -369,7 +379,7 @@ impl Encoder> for SSZSnappyOutboundCodec { // We prefer to decode blocks and attestations with extra knowledge about the chain to perform // faster verification checks before decoding entire blocks/attestations. impl Decoder for SSZSnappyOutboundCodec { - type Item = RPCCodedResponse; + type Item = RpcResponse; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -385,15 +395,15 @@ impl Decoder for SSZSnappyOutboundCodec { }); let inner_result = { - if RPCCodedResponse::::is_response(response_code) { + if RpcResponse::::is_response(response_code) { // decode an actual response and mutates the buffer if enough bytes have been read // returning the result. self.decode_response(src) - .map(|r| r.map(RPCCodedResponse::Success)) + .map(|r| r.map(RpcResponse::Success)) } else { // decode an error self.decode_error(src) - .map(|r| r.map(|resp| RPCCodedResponse::from_error(response_code, resp))) + .map(|r| r.map(|resp| RpcResponse::from_error(response_code, resp))) } }; // if the inner decoder was capable of decoding a chunk, we need to reset the current @@ -437,14 +447,14 @@ fn handle_error( fn context_bytes( protocol: &ProtocolId, fork_context: &ForkContext, - resp: &RPCCodedResponse, + resp: &RpcResponse, ) -> Option<[u8; CONTEXT_BYTES_LEN]> { // Add the context bytes if required if protocol.has_context_bytes() { - if let RPCCodedResponse::Success(rpc_variant) = resp { + if let RpcResponse::Success(rpc_variant) = resp { match rpc_variant { - RPCResponse::BlocksByRange(ref_box_block) - | RPCResponse::BlocksByRoot(ref_box_block) => { + RpcSuccessResponse::BlocksByRange(ref_box_block) + | RpcSuccessResponse::BlocksByRoot(ref_box_block) => { return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! @@ -468,10 +478,11 @@ fn context_bytes( } }; } - RPCResponse::BlobsByRange(_) | RPCResponse::BlobsByRoot(_) => { + RpcSuccessResponse::BlobsByRange(_) | RpcSuccessResponse::BlobsByRoot(_) => { return fork_context.to_context_bytes(ForkName::Deneb); } - RPCResponse::DataColumnsByRoot(d) | RPCResponse::DataColumnsByRange(d) => { + RpcSuccessResponse::DataColumnsByRoot(d) + | RpcSuccessResponse::DataColumnsByRange(d) => { // TODO(das): Remove deneb fork after `peerdas-devnet-2`. 
return if matches!( fork_context.spec.fork_name_at_slot::(d.slot()), @@ -482,20 +493,26 @@ fn context_bytes( fork_context.to_context_bytes(ForkName::Electra) }; } - RPCResponse::LightClientBootstrap(lc_bootstrap) => { + RpcSuccessResponse::LightClientBootstrap(lc_bootstrap) => { return lc_bootstrap .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); } - RPCResponse::LightClientOptimisticUpdate(lc_optimistic_update) => { + RpcSuccessResponse::LightClientOptimisticUpdate(lc_optimistic_update) => { return lc_optimistic_update .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); } - RPCResponse::LightClientFinalityUpdate(lc_finality_update) => { + RpcSuccessResponse::LightClientFinalityUpdate(lc_finality_update) => { return lc_finality_update .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); } + RpcSuccessResponse::LightClientUpdatesByRange(lc_update) => { + return lc_update + .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); + } // These will not pass the has_context_bytes() check - RPCResponse::Status(_) | RPCResponse::Pong(_) | RPCResponse::MetaData(_) => { + RpcSuccessResponse::Status(_) + | RpcSuccessResponse::Pong(_) + | RpcSuccessResponse::MetaData(_) => { return None; } } @@ -536,21 +553,21 @@ fn handle_rpc_request( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], spec: &ChainSpec, -) -> Result>, RPCError> { +) -> Result>, RPCError> { match versioned_protocol { - SupportedProtocol::StatusV1 => Ok(Some(InboundRequest::Status( + SupportedProtocol::StatusV1 => Ok(Some(RequestType::Status( StatusMessage::from_ssz_bytes(decoded_buffer)?, ))), - SupportedProtocol::GoodbyeV1 => Ok(Some(InboundRequest::Goodbye( + SupportedProtocol::GoodbyeV1 => Ok(Some(RequestType::Goodbye( GoodbyeReason::from_ssz_bytes(decoded_buffer)?, ))), - SupportedProtocol::BlocksByRangeV2 => Ok(Some(InboundRequest::BlocksByRange( + SupportedProtocol::BlocksByRangeV2 => Ok(Some(RequestType::BlocksByRange( OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2::from_ssz_bytes(decoded_buffer)?), ))), - SupportedProtocol::BlocksByRangeV1 => Ok(Some(InboundRequest::BlocksByRange( + SupportedProtocol::BlocksByRangeV1 => Ok(Some(RequestType::BlocksByRange( OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1::from_ssz_bytes(decoded_buffer)?), ))), - SupportedProtocol::BlocksByRootV2 => Ok(Some(InboundRequest::BlocksByRoot( + SupportedProtocol::BlocksByRootV2 => Ok(Some(RequestType::BlocksByRoot( BlocksByRootRequest::V2(BlocksByRootRequestV2 { block_roots: RuntimeVariableList::from_ssz_bytes( decoded_buffer, @@ -558,7 +575,7 @@ fn handle_rpc_request( )?, }), ))), - SupportedProtocol::BlocksByRootV1 => Ok(Some(InboundRequest::BlocksByRoot( + SupportedProtocol::BlocksByRootV1 => Ok(Some(RequestType::BlocksByRoot( BlocksByRootRequest::V1(BlocksByRootRequestV1 { block_roots: RuntimeVariableList::from_ssz_bytes( decoded_buffer, @@ -566,21 +583,21 @@ fn handle_rpc_request( )?, }), ))), - SupportedProtocol::BlobsByRangeV1 => Ok(Some(InboundRequest::BlobsByRange( + SupportedProtocol::BlobsByRangeV1 => Ok(Some(RequestType::BlobsByRange( BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), SupportedProtocol::BlobsByRootV1 => { - Ok(Some(InboundRequest::BlobsByRoot(BlobsByRootRequest { + Ok(Some(RequestType::BlobsByRoot(BlobsByRootRequest { blob_ids: RuntimeVariableList::from_ssz_bytes( decoded_buffer, spec.max_request_blob_sidecars as usize, )?, }))) } - SupportedProtocol::DataColumnsByRangeV1 => 
Ok(Some(InboundRequest::DataColumnsByRange( + SupportedProtocol::DataColumnsByRangeV1 => Ok(Some(RequestType::DataColumnsByRange( DataColumnsByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), - SupportedProtocol::DataColumnsByRootV1 => Ok(Some(InboundRequest::DataColumnsByRoot( + SupportedProtocol::DataColumnsByRootV1 => Ok(Some(RequestType::DataColumnsByRoot( DataColumnsByRootRequest { data_column_ids: RuntimeVariableList::from_ssz_bytes( decoded_buffer, @@ -588,19 +605,24 @@ fn handle_rpc_request( )?, }, ))), - SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping { + SupportedProtocol::PingV1 => Ok(Some(RequestType::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - SupportedProtocol::LightClientBootstrapV1 => Ok(Some( - InboundRequest::LightClientBootstrap(LightClientBootstrapRequest { + SupportedProtocol::LightClientBootstrapV1 => Ok(Some(RequestType::LightClientBootstrap( + LightClientBootstrapRequest { root: Hash256::from_ssz_bytes(decoded_buffer)?, - }), - )), + }, + ))), SupportedProtocol::LightClientOptimisticUpdateV1 => { - Ok(Some(InboundRequest::LightClientOptimisticUpdate)) + Ok(Some(RequestType::LightClientOptimisticUpdate)) } SupportedProtocol::LightClientFinalityUpdateV1 => { - Ok(Some(InboundRequest::LightClientFinalityUpdate)) + Ok(Some(RequestType::LightClientFinalityUpdate)) + } + SupportedProtocol::LightClientUpdatesByRangeV1 => { + Ok(Some(RequestType::LightClientUpdatesByRange( + LightClientUpdatesByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))) } // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. @@ -610,7 +632,7 @@ fn handle_rpc_request( "Metadata requests shouldn't reach decoder", )) } else { - Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v3()))) + Ok(Some(RequestType::MetaData(MetadataRequest::new_v3()))) } } SupportedProtocol::MetaDataV2 => { @@ -619,14 +641,14 @@ fn handle_rpc_request( "Metadata requests shouldn't reach decoder", )) } else { - Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2()))) + Ok(Some(RequestType::MetaData(MetadataRequest::new_v2()))) } } SupportedProtocol::MetaDataV1 => { if !decoded_buffer.is_empty() { Err(RPCError::InvalidData("Metadata request".to_string())) } else { - Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v1()))) + Ok(Some(RequestType::MetaData(MetadataRequest::new_v1()))) } } } @@ -642,31 +664,33 @@ fn handle_rpc_response( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], fork_name: Option, -) -> Result>, RPCError> { +) -> Result>, RPCError> { match versioned_protocol { - SupportedProtocol::StatusV1 => Ok(Some(RPCResponse::Status( + SupportedProtocol::StatusV1 => Ok(Some(RpcSuccessResponse::Status( StatusMessage::from_ssz_bytes(decoded_buffer)?, ))), // This case should be unreachable as `Goodbye` has no response. 
SupportedProtocol::GoodbyeV1 => Err(RPCError::InvalidData( "Goodbye RPC message has no valid response".to_string(), )), - SupportedProtocol::BlocksByRangeV1 => Ok(Some(RPCResponse::BlocksByRange(Arc::new( - SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), - )))), - SupportedProtocol::BlocksByRootV1 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SupportedProtocol::BlocksByRangeV1 => { + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), + )))) + } + SupportedProtocol::BlocksByRootV1 => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), SupportedProtocol::BlobsByRangeV1 => match fork_name { - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRange(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlobsByRange(Arc::new( BlobSidecar::from_ssz_bytes(decoded_buffer)?, )))), Some(_) => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for blobs by range".to_string(), )), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -674,15 +698,15 @@ fn handle_rpc_response( )), }, SupportedProtocol::BlobsByRootV1 => match fork_name { - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRoot(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlobsByRoot(Arc::new( BlobSidecar::from_ssz_bytes(decoded_buffer)?, )))), Some(_) => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for blobs by root".to_string(), )), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -695,18 +719,18 @@ fn handle_rpc_response( // does not advertise the topic on deneb, simply allows it to decode it. Advertise // logic is in `SupportedTopic::currently_supported`. 
if fork_name.deneb_enabled() { - Ok(Some(RPCResponse::DataColumnsByRoot(Arc::new( + Ok(Some(RpcSuccessResponse::DataColumnsByRoot(Arc::new( DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, )))) } else { Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for data columns by root".to_string(), )) } } None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -716,36 +740,36 @@ fn handle_rpc_response( SupportedProtocol::DataColumnsByRangeV1 => match fork_name { Some(fork_name) => { if fork_name.deneb_enabled() { - Ok(Some(RPCResponse::DataColumnsByRange(Arc::new( + Ok(Some(RpcSuccessResponse::DataColumnsByRange(Arc::new( DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, )))) } else { Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for data columns by range".to_string(), )) } } None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol ), )), }, - SupportedProtocol::PingV1 => Ok(Some(RPCResponse::Pong(Ping { + SupportedProtocol::PingV1 => Ok(Some(RpcSuccessResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - SupportedProtocol::MetaDataV1 => Ok(Some(RPCResponse::MetaData(MetaData::V1( + SupportedProtocol::MetaDataV1 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V1( MetaDataV1::from_ssz_bytes(decoded_buffer)?, )))), SupportedProtocol::LightClientBootstrapV1 => match fork_name { - Some(fork_name) => Ok(Some(RPCResponse::LightClientBootstrap(Arc::new( + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientBootstrap(Arc::new( LightClientBootstrap::from_ssz_bytes(decoded_buffer, fork_name)?, )))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -753,11 +777,14 @@ fn handle_rpc_response( )), }, SupportedProtocol::LightClientOptimisticUpdateV1 => match fork_name { - Some(fork_name) => Ok(Some(RPCResponse::LightClientOptimisticUpdate(Arc::new( - LightClientOptimisticUpdate::from_ssz_bytes(decoded_buffer, fork_name)?, - )))), + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientOptimisticUpdate( + Arc::new(LightClientOptimisticUpdate::from_ssz_bytes( + decoded_buffer, + fork_name, + )?), + ))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -765,11 +792,29 @@ fn handle_rpc_response( )), }, SupportedProtocol::LightClientFinalityUpdateV1 => match fork_name { - Some(fork_name) => Ok(Some(RPCResponse::LightClientFinalityUpdate(Arc::new( - LightClientFinalityUpdate::from_ssz_bytes(decoded_buffer, fork_name)?, - )))), + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientFinalityUpdate( + Arc::new(LightClientFinalityUpdate::from_ssz_bytes( + decoded_buffer, + fork_name, + )?), + ))), + None => Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, + SupportedProtocol::LightClientUpdatesByRangeV1 => match fork_name { + Some(fork_name) => 
Ok(Some(RpcSuccessResponse::LightClientUpdatesByRange( + Arc::new(LightClientUpdate::from_ssz_bytes( + decoded_buffer, + &fork_name, + )?), + ))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -777,40 +822,40 @@ fn handle_rpc_response( )), }, // MetaData V2/V3 responses have no context bytes, so behave similarly to V1 responses - SupportedProtocol::MetaDataV3 => Ok(Some(RPCResponse::MetaData(MetaData::V3( + SupportedProtocol::MetaDataV3 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V3( MetaDataV3::from_ssz_bytes(decoded_buffer)?, )))), - SupportedProtocol::MetaDataV2 => Ok(Some(RPCResponse::MetaData(MetaData::V2( + SupportedProtocol::MetaDataV2 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V2( MetaDataV2::from_ssz_bytes(decoded_buffer)?, )))), SupportedProtocol::BlocksByRangeV2 => match fork_name { - Some(ForkName::Altair) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Altair) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Base) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Bellatrix) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Bellatrix) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Bellatrix(SignedBeaconBlockBellatrix::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Capella) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Electra) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Electra) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Electra(SignedBeaconBlockElectra::from_ssz_bytes( decoded_buffer, )?), )))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -818,32 +863,32 @@ fn handle_rpc_response( )), }, SupportedProtocol::BlocksByRootV2 => match fork_name { - Some(ForkName::Altair) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Altair) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Base) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Bellatrix) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Bellatrix) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Bellatrix(SignedBeaconBlockBellatrix::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + 
Some(ForkName::Capella) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Electra) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Electra) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Electra(SignedBeaconBlockElectra::from_ssz_bytes( decoded_buffer, )?), )))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -864,7 +909,7 @@ fn context_bytes_to_fork_name( .ok_or_else(|| { let encoded = hex::encode(context_bytes); RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "Context bytes {} do not correspond to a valid fork", encoded @@ -1063,7 +1108,7 @@ mod tests { /// Encodes the given protocol response as bytes. fn encode_response( protocol: SupportedProtocol, - message: RPCCodedResponse, + message: RpcResponse, fork_name: ForkName, spec: &ChainSpec, ) -> Result { @@ -1113,7 +1158,7 @@ mod tests { message: &mut BytesMut, fork_name: ForkName, spec: &ChainSpec, - ) -> Result>, RPCError> { + ) -> Result>, RPCError> { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); @@ -1126,20 +1171,16 @@ mod tests { /// Encodes the provided protocol message as bytes and tries to decode the encoding bytes. fn encode_then_decode_response( protocol: SupportedProtocol, - message: RPCCodedResponse, + message: RpcResponse, fork_name: ForkName, spec: &ChainSpec, - ) -> Result>, RPCError> { + ) -> Result>, RPCError> { let mut encoded = encode_response(protocol, message, fork_name, spec)?; decode_response(protocol, &mut encoded, fork_name, spec) } /// Verifies that requests we send are encoded in a way that we would correctly decode too. 
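The `Result<Option<_>, _>` these codec test helpers return is deliberately tri-state. A hedged sketch of how a caller reads it; `handle`, `wait_for_more_bytes`, and `penalize_peer` are illustrative stand-ins:

match decode_response(protocol, &mut encoded, fork_name, &chain_spec) {
    // A complete response chunk was decoded from the buffer.
    Ok(Some(response)) => handle(response),
    // Not enough bytes yet: keep the buffer and wait for more, as the
    // truncated context-bytes test further down relies on.
    Ok(None) => wait_for_more_bytes(),
    // Protocol violation, e.g. context bytes that match no known fork.
    Err(rpc_error) => penalize_peer(rpc_error),
}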
- fn encode_then_decode_request( - req: OutboundRequest, - fork_name: ForkName, - spec: &ChainSpec, - ) { + fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); @@ -1162,35 +1203,48 @@ mod tests { ) }); match req { - OutboundRequest::Status(status) => { - assert_eq!(decoded, InboundRequest::Status(status)) + RequestType::Status(status) => { + assert_eq!(decoded, RequestType::Status(status)) + } + RequestType::Goodbye(goodbye) => { + assert_eq!(decoded, RequestType::Goodbye(goodbye)) } - OutboundRequest::Goodbye(goodbye) => { - assert_eq!(decoded, InboundRequest::Goodbye(goodbye)) + RequestType::BlocksByRange(bbrange) => { + assert_eq!(decoded, RequestType::BlocksByRange(bbrange)) } - OutboundRequest::BlocksByRange(bbrange) => { - assert_eq!(decoded, InboundRequest::BlocksByRange(bbrange)) + RequestType::BlocksByRoot(bbroot) => { + assert_eq!(decoded, RequestType::BlocksByRoot(bbroot)) } - OutboundRequest::BlocksByRoot(bbroot) => { - assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) + RequestType::BlobsByRange(blbrange) => { + assert_eq!(decoded, RequestType::BlobsByRange(blbrange)) } - OutboundRequest::BlobsByRange(blbrange) => { - assert_eq!(decoded, InboundRequest::BlobsByRange(blbrange)) + RequestType::BlobsByRoot(bbroot) => { + assert_eq!(decoded, RequestType::BlobsByRoot(bbroot)) } - OutboundRequest::BlobsByRoot(bbroot) => { - assert_eq!(decoded, InboundRequest::BlobsByRoot(bbroot)) + RequestType::DataColumnsByRoot(dcbroot) => { + assert_eq!(decoded, RequestType::DataColumnsByRoot(dcbroot)) } - OutboundRequest::DataColumnsByRoot(dcbroot) => { - assert_eq!(decoded, InboundRequest::DataColumnsByRoot(dcbroot)) + RequestType::DataColumnsByRange(dcbrange) => { + assert_eq!(decoded, RequestType::DataColumnsByRange(dcbrange)) } - OutboundRequest::DataColumnsByRange(dcbrange) => { - assert_eq!(decoded, InboundRequest::DataColumnsByRange(dcbrange)) + RequestType::Ping(ping) => { + assert_eq!(decoded, RequestType::Ping(ping)) } - OutboundRequest::Ping(ping) => { - assert_eq!(decoded, InboundRequest::Ping(ping)) + RequestType::MetaData(metadata) => { + assert_eq!(decoded, RequestType::MetaData(metadata)) + } + RequestType::LightClientBootstrap(light_client_bootstrap_request) => { + assert_eq!( + decoded, + RequestType::LightClientBootstrap(light_client_bootstrap_request) + ) } - OutboundRequest::MetaData(metadata) => { - assert_eq!(decoded, InboundRequest::MetaData(metadata)) + RequestType::LightClientOptimisticUpdate | RequestType::LightClientFinalityUpdate => {} + RequestType::LightClientUpdatesByRange(light_client_updates_by_range) => { + assert_eq!( + decoded, + RequestType::LightClientUpdatesByRange(light_client_updates_by_range) + ) } } } @@ -1203,31 +1257,33 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::StatusV1, - RPCCodedResponse::Success(RPCResponse::Status(status_message())), + RpcResponse::Success(RpcSuccessResponse::Status(status_message())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::Status(status_message()))) + Ok(Some(RpcSuccessResponse::Status(status_message()))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::PingV1, - RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), + RpcResponse::Success(RpcSuccessResponse::Pong(ping_message())), ForkName::Base, 
&chain_spec, ), - Ok(Some(RPCResponse::Pong(ping_message()))) + Ok(Some(RpcSuccessResponse::Pong(ping_message()))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1236,7 +1292,9 @@ mod tests { matches!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + altair_block() + ))), ForkName::Altair, &chain_spec, ) @@ -1249,20 +1307,24 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot( - Arc::new(empty_base_block()) - ))) + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + )))) ); assert!( matches!( encode_then_decode_response( SupportedProtocol::BlocksByRootV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot( + Arc::new(altair_block()) + )), ForkName::Altair, &chain_spec, ) @@ -1275,65 +1337,65 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV1, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata()))), + Ok(Some(RpcSuccessResponse::MetaData(metadata()))), ); // A MetaDataV2 still encodes as a MetaDataV1 since version is Version::V1 assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV1, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata_v2())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata()))), + Ok(Some(RpcSuccessResponse::MetaData(metadata()))), ); // A MetaDataV3 still encodes as a MetaDataV2 since version is Version::V2 assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v3())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata_v3())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata_v2()))), + Ok(Some(RpcSuccessResponse::MetaData(metadata_v2()))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRangeV1, - RPCCodedResponse::Success(RPCResponse::BlobsByRange(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), ForkName::Deneb, &chain_spec ), - Ok(Some(RPCResponse::BlobsByRange(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, - RPCCodedResponse::Success(RPCResponse::BlobsByRoot(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), ForkName::Deneb, &chain_spec ), - 
Ok(Some(RPCResponse::BlobsByRoot(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, - RPCCodedResponse::Success(RPCResponse::DataColumnsByRange( + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( empty_data_column_sidecar() )), ForkName::Deneb, &chain_spec ), - Ok(Some(RPCResponse::DataColumnsByRange( + Ok(Some(RpcSuccessResponse::DataColumnsByRange( empty_data_column_sidecar() ))), ); @@ -1341,13 +1403,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, - RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot( + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( empty_data_column_sidecar() )), ForkName::Deneb, &chain_spec ), - Ok(Some(RPCResponse::DataColumnsByRoot( + Ok(Some(RpcSuccessResponse::DataColumnsByRoot( empty_data_column_sidecar() ))), ); @@ -1361,11 +1423,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1376,11 +1440,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block() + ))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1388,11 +1454,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new(altair_block())))) + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( + altair_block() + )))) ); let bellatrix_block_small = @@ -1403,13 +1471,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new( + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( bellatrix_block_small.clone() ))), ForkName::Bellatrix, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( bellatrix_block_small.clone() )))) ); @@ -1435,13 +1503,15 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot( - Arc::new(empty_base_block()) - ))), + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + )))), ); // Decode the smallest possible base block when current fork is altair @@ -1450,35 +1520,39 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - 
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + ))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot( - Arc::new(empty_base_block()) - ))) + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + )))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot(Arc::new(altair_block())))) + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + altair_block() + )))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new( + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( bellatrix_block_small.clone() ))), ForkName::Bellatrix, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( bellatrix_block_small )))) ); @@ -1505,21 +1579,21 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata_v2()))) + Ok(Some(RpcSuccessResponse::MetaData(metadata_v2()))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata_v2())), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata_v2()))) + Ok(Some(RpcSuccessResponse::MetaData(metadata_v2()))) ); } @@ -1533,7 +1607,9 @@ mod tests { // Removing context bytes for v2 messages should error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block(), + ))), ForkName::Base, &chain_spec, ) @@ -1549,12 +1625,14 @@ mod tests { &chain_spec, ) .unwrap_err(), - RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + RPCError::ErrorResponse(RpcErrorResponse::InvalidRequest, _), )); let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block(), + ))), ForkName::Base, &chain_spec, ) @@ -1570,13 +1648,15 @@ mod tests { &chain_spec, ) .unwrap_err(), - RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + RPCError::ErrorResponse(RpcErrorResponse::InvalidRequest, _), )); // Trying to decode a base block with altair context bytes should give ssz decoding error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block(), + ))), ForkName::Altair, &chain_spec, ) @@ -1601,7 +1681,7 @@ mod tests { // Trying to decode an altair block with base context bytes should give ssz decoding error let mut encoded_bytes = encode_response( 
SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, &chain_spec, ) @@ -1628,7 +1708,7 @@ mod tests { encoded_bytes.extend_from_slice( &encode_response( SupportedProtocol::MetaDataV2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata())), ForkName::Altair, &chain_spec, ) @@ -1646,7 +1726,9 @@ mod tests { // Sending context bytes which do not correspond to any fork should return an error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block(), + ))), ForkName::Altair, &chain_spec, ) @@ -1664,13 +1746,15 @@ mod tests { &chain_spec, ) .unwrap_err(), - RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + RPCError::ErrorResponse(RpcErrorResponse::InvalidRequest, _), )); // Sending bytes less than context bytes length should wait for more bytes by returning `Ok(None)` let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block(), + ))), ForkName::Altair, &chain_spec, ) @@ -1693,20 +1777,20 @@ mod tests { fn test_encode_then_decode_request() { let chain_spec = Spec::default_spec(); - let requests: &[OutboundRequest] = &[ - OutboundRequest::Ping(ping_message()), - OutboundRequest::Status(status_message()), - OutboundRequest::Goodbye(GoodbyeReason::Fault), - OutboundRequest::BlocksByRange(bbrange_request_v1()), - OutboundRequest::BlocksByRange(bbrange_request_v2()), - OutboundRequest::BlocksByRoot(bbroot_request_v1(&chain_spec)), - OutboundRequest::BlocksByRoot(bbroot_request_v2(&chain_spec)), - OutboundRequest::MetaData(MetadataRequest::new_v1()), - OutboundRequest::BlobsByRange(blbrange_request()), - OutboundRequest::BlobsByRoot(blbroot_request(&chain_spec)), - OutboundRequest::DataColumnsByRange(dcbrange_request()), - OutboundRequest::DataColumnsByRoot(dcbroot_request(&chain_spec)), - OutboundRequest::MetaData(MetadataRequest::new_v2()), + let requests: &[RequestType] = &[ + RequestType::Ping(ping_message()), + RequestType::Status(status_message()), + RequestType::Goodbye(GoodbyeReason::Fault), + RequestType::BlocksByRange(bbrange_request_v1()), + RequestType::BlocksByRange(bbrange_request_v2()), + RequestType::BlocksByRoot(bbroot_request_v1(&chain_spec)), + RequestType::BlocksByRoot(bbroot_request_v2(&chain_spec)), + RequestType::MetaData(MetadataRequest::new_v1()), + RequestType::BlobsByRange(blbrange_request()), + RequestType::BlobsByRoot(blbroot_request(&chain_spec)), + RequestType::DataColumnsByRange(dcbrange_request()), + RequestType::DataColumnsByRoot(dcbroot_request(&chain_spec)), + RequestType::MetaData(MetadataRequest::new_v2()), ]; for req in requests.iter() { diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index fcb9c986048..42ece6dc4ff 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -96,6 +96,7 @@ pub struct RateLimiterConfig { pub(super) light_client_bootstrap_quota: Quota, pub(super) light_client_optimistic_update_quota: 
Quota, pub(super) light_client_finality_update_quota: Quota, + pub(super) light_client_updates_by_range_quota: Quota, } impl RateLimiterConfig { @@ -121,6 +122,7 @@ impl RateLimiterConfig { pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); + pub const DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA: Quota = Quota::one_every(10); } impl Default for RateLimiterConfig { @@ -140,6 +142,7 @@ impl Default for RateLimiterConfig { light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, light_client_finality_update_quota: Self::DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA, + light_client_updates_by_range_quota: Self::DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA, } } } @@ -198,6 +201,7 @@ impl FromStr for RateLimiterConfig { let mut light_client_bootstrap_quota = None; let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; + let mut light_client_updates_by_range_quota = None; for proto_def in s.split(';') { let ProtocolQuota { protocol, quota } = proto_def.parse()?; @@ -228,6 +232,10 @@ impl FromStr for RateLimiterConfig { light_client_finality_update_quota = light_client_finality_update_quota.or(quota) } + Protocol::LightClientUpdatesByRange => { + light_client_updates_by_range_quota = + light_client_updates_by_range_quota.or(quota) + } } } Ok(RateLimiterConfig { @@ -252,6 +260,8 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA), light_client_finality_update_quota: light_client_finality_update_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA), + light_client_updates_by_range_quota: light_client_updates_by_range_quota + .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA), }) } } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 08e55e50c9c..e76d6d27866 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -1,11 +1,12 @@ #![allow(clippy::type_complexity)] #![allow(clippy::cognitive_complexity)] -use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode}; +use super::methods::{GoodbyeReason, RpcErrorResponse, RpcResponse}; use super::outbound::OutboundRequestContainer; -use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol}; -use super::{RPCReceived, RPCResponse, RPCSend, ReqId}; -use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; +use super::protocol::{InboundOutput, Protocol, RPCError, RPCProtocol, RequestType}; +use super::RequestId; +use super::{RPCReceived, RPCSend, ReqId, Request}; +use crate::rpc::outbound::OutboundFramed; use crate::rpc::protocol::InboundFramed; use fnv::FnvHashMap; use futures::prelude::*; @@ -14,8 +15,7 @@ use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; -use libp2p::swarm::{ConnectionId, Stream}; -use libp2p::PeerId; +use libp2p::swarm::Stream; use slog::{crit, debug, trace}; use smallvec::SmallVec; use std::{ @@ -89,12 +89,6 @@ pub struct RPCHandler where E: EthSpec, { - /// This `ConnectionId`. - id: ConnectionId, - - /// The matching `PeerId` of this connection. 
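The new quota also threads through `RateLimiterConfig`'s string parser above. A hedged usage sketch, assuming the existing `ProtocolQuota` syntax of `<protocol_name>:<tokens>/<seconds>`; protocols left unmentioned fall back to their `DEFAULT_*` quotas:

// One light_client_updates_by_range request every 10 seconds (the default).
let config: RateLimiterConfig = "light_client_updates_by_range:1/10"
    .parse()
    .expect("valid quota string");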
- peer_id: PeerId, - /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, ()>, @@ -102,7 +96,7 @@ where events_out: SmallVec<[HandlerEvent; 4]>, /// Queue of outbound substreams to open. - dial_queue: SmallVec<[(Id, OutboundRequest); 4]>, + dial_queue: SmallVec<[(Id, RequestType); 4]>, /// Current number of concurrent outbound substreams being opened. dial_negotiated: u32, @@ -166,7 +160,7 @@ struct InboundInfo { /// State of the substream. state: InboundState, /// Responses queued for sending. - pending_items: VecDeque>, + pending_items: VecDeque>, /// Protocol of the original request we received from the peer. protocol: Protocol, /// Responses that the peer is still expecting from us. @@ -212,7 +206,7 @@ pub enum OutboundSubstreamState { /// The framed negotiated substream. substream: Box>, /// Keeps track of the actual request sent. - request: OutboundRequest, + request: RequestType, }, /// Closing an outbound substream> Closing(Box>), @@ -225,16 +219,12 @@ where E: EthSpec, { pub fn new( - id: ConnectionId, - peer_id: PeerId, listen_protocol: SubstreamProtocol, ()>, fork_context: Arc, log: &slog::Logger, resp_timeout: Duration, ) -> Self { RPCHandler { - id, - peer_id, listen_protocol, events_out: SmallVec::new(), dial_queue: SmallVec::new(), @@ -274,7 +264,7 @@ where // Queue our goodbye message. if let Some((id, reason)) = goodbye_reason { - self.dial_queue.push((id, OutboundRequest::Goodbye(reason))); + self.dial_queue.push((id, RequestType::Goodbye(reason))); } self.state = HandlerState::ShuttingDown(Box::pin(sleep(Duration::from_secs( @@ -284,7 +274,7 @@ where } /// Opens an outbound substream with a request. - fn send_request(&mut self, id: Id, req: OutboundRequest) { + fn send_request(&mut self, id: Id, req: RequestType) { match self.state { HandlerState::Active => { self.dial_queue.push((id, req)); @@ -302,10 +292,10 @@ where /// Sends a response to a peer's request. // NOTE: If the substream has closed due to inactivity, or the substream is in the // wrong state a response will fail silently. - fn send_response(&mut self, inbound_id: SubstreamId, response: RPCCodedResponse) { + fn send_response(&mut self, inbound_id: SubstreamId, response: RpcResponse) { // check if the stream matching the response still exists let Some(inbound_info) = self.inbound_substreams.get_mut(&inbound_id) else { - if !matches!(response, RPCCodedResponse::StreamTermination(..)) { + if !matches!(response, RpcResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses trace!(self.log, "Inbound stream has expired. 
Response not sent"; "response" => %response, "id" => inbound_id); @@ -313,7 +303,7 @@ where return; }; // If the response we are sending is an error, report back for handling - if let RPCCodedResponse::Error(ref code, ref reason) = response { + if let RpcResponse::Error(ref code, ref reason) = response { self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { error: RPCError::ErrorResponse(*code, reason.to_string()), proto: inbound_info.protocol, @@ -340,7 +330,7 @@ where type ToBehaviour = HandlerEvent; type InboundProtocol = RPCProtocol; type OutboundProtocol = OutboundRequestContainer; - type OutboundOpenInfo = (Id, OutboundRequest); // Keep track of the id and the request + type OutboundOpenInfo = (Id, RequestType); // Keep track of the id and the request type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { @@ -414,8 +404,8 @@ where if info.pending_items.back().map(|l| l.close_after()) == Some(false) { // if the last chunk does not close the stream, append an error - info.pending_items.push_back(RPCCodedResponse::Error( - RPCResponseErrorCode::ServerError, + info.pending_items.push_back(RpcResponse::Error( + RpcErrorResponse::ServerError, "Request timed out".into(), )); } @@ -683,13 +673,13 @@ where let proto = entry.get().proto; let received = match response { - RPCCodedResponse::StreamTermination(t) => { + RpcResponse::StreamTermination(t) => { HandlerEvent::Ok(RPCReceived::EndOfStream(id, t)) } - RPCCodedResponse::Success(resp) => { + RpcResponse::Success(resp) => { HandlerEvent::Ok(RPCReceived::Response(id, resp)) } - RPCCodedResponse::Error(ref code, ref r) => { + RpcResponse::Error(ref code, ref r) => { HandlerEvent::Err(HandlerErr::Outbound { id, proto, @@ -899,30 +889,23 @@ where } // If we received a goodbye, shutdown the connection. - if let InboundRequest::Goodbye(_) = req { + if let RequestType::Goodbye(_) = req { self.shutdown(None); } - // If we received a Ping, we queue a Pong response. - if let InboundRequest::Ping(ping) = req { - trace!(self.log, "Received Ping, queueing Pong";"connection_id" => %self.id, "peer_id" => %self.peer_id); - self.send_response( - self.current_inbound_substream_id, - RPCCodedResponse::Success(RPCResponse::Pong(ping)), - ); - } - - self.events_out.push(HandlerEvent::Ok(RPCReceived::Request( - self.current_inbound_substream_id, - req, - ))); + self.events_out + .push(HandlerEvent::Ok(RPCReceived::Request(Request { + id: RequestId::next(), + substream_id: self.current_inbound_substream_id, + r#type: req, + }))); self.current_inbound_substream_id.0 += 1; } fn on_fully_negotiated_outbound( &mut self, substream: OutboundFramed, - (id, request): (Id, OutboundRequest), + (id, request): (Id, RequestType), ) { self.dial_negotiated -= 1; // Reset any io-retries counter. @@ -978,7 +961,7 @@ where } fn on_dial_upgrade_error( &mut self, - request_info: (Id, OutboundRequest), + request_info: (Id, RequestType), error: StreamUpgradeError, ) { let (id, req) = request_info; @@ -1036,15 +1019,15 @@ impl slog::Value for SubstreamId { /// error that occurred with sending a message is reported also. 
async fn send_message_to_inbound_substream( mut substream: InboundSubstream, - message: RPCCodedResponse, + message: RpcResponse, last_chunk: bool, ) -> Result<(InboundSubstream, bool), RPCError> { - if matches!(message, RPCCodedResponse::StreamTermination(_)) { + if matches!(message, RpcResponse::StreamTermination(_)) { substream.close().await.map(|_| (substream, true)) } else { // chunks that are not stream terminations get sent, and the stream is closed if // the response is an error - let is_error = matches!(message, RPCCodedResponse::Error(..)); + let is_error = matches!(message, RpcResponse::Error(..)); let send_result = substream.send(message).await; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 6e1ba9cd302..bb8bfb0e206 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -14,10 +14,11 @@ use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; +use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, + LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, }; /// Maximum length of error message. @@ -477,11 +478,39 @@ impl DataColumnsByRootRequest { } } +/// Request a number of light client updates from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct LightClientUpdatesByRangeRequest { + /// The starting period to request light client updates. + pub start_period: u64, + /// The number of periods from `start_period`. + pub count: u64, +} + +impl LightClientUpdatesByRangeRequest { + pub fn max_requested(&self) -> u64 { + MAX_REQUEST_LIGHT_CLIENT_UPDATES + } + + pub fn ssz_min_len() -> usize { + LightClientUpdatesByRangeRequest { + start_period: 0, + count: 0, + } + .as_ssz_bytes() + .len() + } + + pub fn ssz_max_len() -> usize { + Self::ssz_min_len() + } +} + /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages #[derive(Debug, Clone, PartialEq)] -pub enum RPCResponse { +pub enum RpcSuccessResponse { /// A HELLO message. Status(StatusMessage), @@ -504,6 +533,9 @@ pub enum RPCResponse { /// A response to a get LIGHT_CLIENT_FINALITY_UPDATE request. LightClientFinalityUpdate(Arc>), + /// A response to a get LIGHT_CLIENT_UPDATES_BY_RANGE request. + LightClientUpdatesByRange(Arc>), + /// A response to a get BLOBS_BY_ROOT request. BlobsByRoot(Arc>), @@ -540,16 +572,19 @@ pub enum ResponseTermination { /// Data column sidecars by range stream termination. DataColumnsByRange, + + /// Light client updates by range stream termination. + LightClientUpdatesByRange, } /// The structured response containing a result/code indicating success or failure /// and the contents of the response #[derive(Debug, Clone)] -pub enum RPCCodedResponse { +pub enum RpcResponse { /// The response is successful. - Success(RPCResponse), + Success(RpcSuccessResponse), - Error(RPCResponseErrorCode, ErrorType), + Error(RpcErrorResponse, ErrorType), /// Received a stream termination indicating which response is being terminated.
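`LightClientUpdatesByRangeRequest` is two fixed-width `u64` fields, so its SSZ encoding is a constant 16 bytes; that is why `ssz_max_len()` simply returns `ssz_min_len()`. An illustrative check (assumes `ssz::Encode` is in scope):

let req = LightClientUpdatesByRangeRequest {
    start_period: 0,
    count: 0,
};
// Two u64s at 8 bytes each under SSZ.
assert_eq!(req.as_ssz_bytes().len(), 16);
assert_eq!(
    LightClientUpdatesByRangeRequest::ssz_min_len(),
    LightClientUpdatesByRangeRequest::ssz_max_len()
);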
StreamTermination(ResponseTermination), @@ -564,7 +599,7 @@ pub struct LightClientBootstrapRequest { /// The code assigned to an erroneous `RpcResponse`. #[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] -pub enum RPCResponseErrorCode { +pub enum RpcErrorResponse { RateLimited, BlobsNotFoundForBlock, InvalidRequest, @@ -574,13 +609,13 @@ pub enum RPCResponseErrorCode { Unknown, } -impl RPCCodedResponse { +impl RpcResponse { /// Used to encode the response in the codec. pub fn as_u8(&self) -> Option { match self { - RPCCodedResponse::Success(_) => Some(0), - RPCCodedResponse::Error(code, _) => Some(code.as_u8()), - RPCCodedResponse::StreamTermination(_) => None, + RpcResponse::Success(_) => Some(0), + RpcResponse::Error(code, _) => Some(code.as_u8()), + RpcResponse::StreamTermination(_) => None, } } @@ -592,64 +627,67 @@ impl RPCCodedResponse { /// Builds an `RpcResponse` error from a response code and an `ErrorType` pub fn from_error(response_code: u8, err: ErrorType) -> Self { let code = match response_code { - 1 => RPCResponseErrorCode::InvalidRequest, - 2 => RPCResponseErrorCode::ServerError, - 3 => RPCResponseErrorCode::ResourceUnavailable, - 139 => RPCResponseErrorCode::RateLimited, - 140 => RPCResponseErrorCode::BlobsNotFoundForBlock, - _ => RPCResponseErrorCode::Unknown, + 1 => RpcErrorResponse::InvalidRequest, + 2 => RpcErrorResponse::ServerError, + 3 => RpcErrorResponse::ResourceUnavailable, + 139 => RpcErrorResponse::RateLimited, + 140 => RpcErrorResponse::BlobsNotFoundForBlock, + _ => RpcErrorResponse::Unknown, }; - RPCCodedResponse::Error(code, err) + RpcResponse::Error(code, err) } /// Returns true if this response always terminates the stream. pub fn close_after(&self) -> bool { - !matches!(self, RPCCodedResponse::Success(_)) + !matches!(self, RpcResponse::Success(_)) } } -impl RPCResponseErrorCode { +impl RpcErrorResponse { fn as_u8(&self) -> u8 { match self { - RPCResponseErrorCode::InvalidRequest => 1, - RPCResponseErrorCode::ServerError => 2, - RPCResponseErrorCode::ResourceUnavailable => 3, - RPCResponseErrorCode::Unknown => 255, - RPCResponseErrorCode::RateLimited => 139, - RPCResponseErrorCode::BlobsNotFoundForBlock => 140, + RpcErrorResponse::InvalidRequest => 1, + RpcErrorResponse::ServerError => 2, + RpcErrorResponse::ResourceUnavailable => 3, + RpcErrorResponse::Unknown => 255, + RpcErrorResponse::RateLimited => 139, + RpcErrorResponse::BlobsNotFoundForBlock => 140, } } } use super::Protocol; -impl RPCResponse { +impl RpcSuccessResponse { pub fn protocol(&self) -> Protocol { match self { - RPCResponse::Status(_) => Protocol::Status, - RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange, - RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, - RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, - RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, - RPCResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, - RPCResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, - RPCResponse::Pong(_) => Protocol::Ping, - RPCResponse::MetaData(_) => Protocol::MetaData, - RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, - RPCResponse::LightClientOptimisticUpdate(_) => Protocol::LightClientOptimisticUpdate, - RPCResponse::LightClientFinalityUpdate(_) => Protocol::LightClientFinalityUpdate, + RpcSuccessResponse::Status(_) => Protocol::Status, + RpcSuccessResponse::BlocksByRange(_) => Protocol::BlocksByRange, + RpcSuccessResponse::BlocksByRoot(_) => Protocol::BlocksByRoot,
RpcSuccessResponse::BlobsByRange(_) => Protocol::BlobsByRange, + RpcSuccessResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, + RpcSuccessResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, + RpcSuccessResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, + RpcSuccessResponse::Pong(_) => Protocol::Ping, + RpcSuccessResponse::MetaData(_) => Protocol::MetaData, + RpcSuccessResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, + RpcSuccessResponse::LightClientOptimisticUpdate(_) => { + Protocol::LightClientOptimisticUpdate + } + RpcSuccessResponse::LightClientFinalityUpdate(_) => Protocol::LightClientFinalityUpdate, + RpcSuccessResponse::LightClientUpdatesByRange(_) => Protocol::LightClientUpdatesByRange, } } } -impl std::fmt::Display for RPCResponseErrorCode { +impl std::fmt::Display for RpcErrorResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { - RPCResponseErrorCode::InvalidRequest => "The request was invalid", - RPCResponseErrorCode::ResourceUnavailable => "Resource unavailable", - RPCResponseErrorCode::ServerError => "Server error occurred", - RPCResponseErrorCode::Unknown => "Unknown error occurred", - RPCResponseErrorCode::RateLimited => "Rate limited", - RPCResponseErrorCode::BlobsNotFoundForBlock => "No blobs for the given root", + RpcErrorResponse::InvalidRequest => "The request was invalid", + RpcErrorResponse::ResourceUnavailable => "Resource unavailable", + RpcErrorResponse::ServerError => "Server error occurred", + RpcErrorResponse::Unknown => "Unknown error occurred", + RpcErrorResponse::RateLimited => "Rate limited", + RpcErrorResponse::BlobsNotFoundForBlock => "No blobs for the given root", }; f.write_str(repr) } @@ -661,61 +699,70 @@ impl std::fmt::Display for StatusMessage { } } -impl std::fmt::Display for RPCResponse { +impl std::fmt::Display for RpcSuccessResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - RPCResponse::Status(status) => write!(f, "{}", status), - RPCResponse::BlocksByRange(block) => { + RpcSuccessResponse::Status(status) => write!(f, "{}", status), + RpcSuccessResponse::BlocksByRange(block) => { write!(f, "BlocksByRange: Block slot: {}", block.slot()) } - RPCResponse::BlocksByRoot(block) => { + RpcSuccessResponse::BlocksByRoot(block) => { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } - RPCResponse::BlobsByRange(blob) => { + RpcSuccessResponse::BlobsByRange(blob) => { write!(f, "BlobsByRange: Blob slot: {}", blob.slot()) } - RPCResponse::BlobsByRoot(sidecar) => { + RpcSuccessResponse::BlobsByRoot(sidecar) => { write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot()) } - RPCResponse::DataColumnsByRoot(sidecar) => { + RpcSuccessResponse::DataColumnsByRoot(sidecar) => { write!(f, "DataColumnsByRoot: Data column slot: {}", sidecar.slot()) } - RPCResponse::DataColumnsByRange(sidecar) => { + RpcSuccessResponse::DataColumnsByRange(sidecar) => { write!( f, "DataColumnsByRange: Data column slot: {}", sidecar.slot() ) } - RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), - RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), - RPCResponse::LightClientBootstrap(bootstrap) => { + RpcSuccessResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), + RpcSuccessResponse::MetaData(metadata) => { + write!(f, "Metadata: {}", metadata.seq_number()) + } + RpcSuccessResponse::LightClientBootstrap(bootstrap) => { write!(f, "LightClientBootstrap Slot: {}", bootstrap.get_slot()) } - 
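The `as_u8`/`from_error` pair above fixes the wire bytes for error codes (1, 2, 3, 139, 140), with every unassigned byte collapsing to `Unknown`. A hedged round-trip sketch:

// 139 is the RateLimited wire byte; an unassigned byte such as 42 becomes Unknown.
let limited: RpcResponse<MainnetEthSpec> = RpcResponse::from_error(139, "slow down".into());
assert!(matches!(
    limited,
    RpcResponse::Error(RpcErrorResponse::RateLimited, _)
));
let unknown: RpcResponse<MainnetEthSpec> = RpcResponse::from_error(42, "?".into());
assert!(matches!(
    unknown,
    RpcResponse::Error(RpcErrorResponse::Unknown, _)
));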
RPCResponse::LightClientOptimisticUpdate(update) => { + RpcSuccessResponse::LightClientOptimisticUpdate(update) => { write!( f, "LightClientOptimisticUpdate Slot: {}", update.signature_slot() ) } - RPCResponse::LightClientFinalityUpdate(update) => { + RpcSuccessResponse::LightClientFinalityUpdate(update) => { write!( f, "LightClientFinalityUpdate Slot: {}", update.signature_slot() ) } + RpcSuccessResponse::LightClientUpdatesByRange(update) => { + write!( + f, + "LightClientUpdatesByRange Slot: {}", + update.signature_slot(), + ) + } } } } -impl std::fmt::Display for RPCCodedResponse { +impl std::fmt::Display for RpcResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - RPCCodedResponse::Success(res) => write!(f, "{}", res), - RPCCodedResponse::Error(code, err) => write!(f, "{}: {}", code, err), - RPCCodedResponse::StreamTermination(_) => write!(f, "Stream Termination"), + RpcResponse::Success(res) => write!(f, "{}", res), + RpcResponse::Error(code, err) => write!(f, "{}: {}", code, err), + RpcResponse::StreamTermination(_) => write!(f, "Stream Termination"), } } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index eae206e022d..7d091da7660 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -14,8 +14,9 @@ use libp2p::swarm::{ use libp2p::swarm::{ConnectionClosed, FromSwarm, SubstreamProtocol, THandlerInEvent}; use libp2p::PeerId; use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; -use slog::{crit, debug, o}; +use slog::{crit, debug, o, trace}; use std::marker::PhantomData; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; @@ -23,16 +24,15 @@ use types::{EthSpec, ForkContext}; pub(crate) use handler::{HandlerErr, HandlerEvent}; pub(crate) use methods::{ - MetaData, MetaDataV1, MetaDataV2, MetaDataV3, Ping, RPCCodedResponse, RPCResponse, + MetaData, MetaDataV1, MetaDataV2, MetaDataV3, Ping, RpcResponse, RpcSuccessResponse, }; -pub(crate) use protocol::InboundRequest; +pub use protocol::RequestType; pub use handler::SubstreamId; pub use methods::{ BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, - RPCResponseErrorCode, ResponseTermination, StatusMessage, + ResponseTermination, RpcErrorResponse, StatusMessage, }; -pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; use self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig}; @@ -48,6 +48,8 @@ mod protocol; mod rate_limiter; mod self_limiter; +static NEXT_REQUEST_ID: AtomicUsize = AtomicUsize::new(1); + /// Composite trait for a request id. pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {} impl ReqId for T where T: Send + 'static + std::fmt::Debug + Copy + Clone {} @@ -59,13 +61,13 @@ pub enum RPCSend { /// /// The `Id` is given by the application making the request. These /// go over *outbound* connections. - Request(Id, OutboundRequest), + Request(Id, RequestType), /// A response sent from Lighthouse. /// /// The `SubstreamId` must correspond to the RPC-given ID of the original request received from the /// peer. The second parameter is a single chunk of a response. These go over *inbound* /// connections. - Response(SubstreamId, RPCCodedResponse), + Response(SubstreamId, RpcResponse), /// Lighthouse has requested to terminate the connection with a goodbye message. 
Shutdown(Id, GoodbyeReason), } @@ -77,17 +79,46 @@ pub enum RPCReceived { /// /// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the /// *inbound* substream over which it is managed. - Request(SubstreamId, InboundRequest), + Request(Request), /// A response received from the outside. /// /// The `Id` corresponds to the application given ID of the original request sent to the /// peer. The second parameter is a single chunk of a response. These go over *outbound* /// connections. - Response(Id, RPCResponse), + Response(Id, RpcSuccessResponse), /// Marks a request as completed EndOfStream(Id, ResponseTermination), } +/// Rpc `Request` identifier. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct RequestId(usize); + +impl RequestId { + /// Returns the next available [`RequestId`]. + pub fn next() -> Self { + Self(NEXT_REQUEST_ID.fetch_add(1, Ordering::SeqCst)) + } + + /// Creates an _unchecked_ [`RequestId`]. + /// + /// [`Rpc`] enforces that [`RequestId`]s are unique and not reused. + /// This constructor does not, hence the _unchecked_. + /// + /// It is primarily meant for manual construction in tests. + pub fn new_unchecked(id: usize) -> Self { + Self(id) + } +} + +/// An Rpc Request. +#[derive(Debug, Clone)] +pub struct Request { + pub id: RequestId, + pub substream_id: SubstreamId, + pub r#type: RequestType, +} + impl std::fmt::Display for RPCSend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -132,6 +163,8 @@ pub struct RPC { log: slog::Logger, /// Networking constant values network_params: NetworkParams, + /// The local node's metadata sequence number, advertised to peers in Ping and Pong messages. + seq_number: u64, } impl RPC { @@ -142,6 +175,7 @@ impl RPC { outbound_rate_limiter_config: Option, log: slog::Logger, network_params: NetworkParams, + seq_number: u64, ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); @@ -163,6 +197,7 @@ impl RPC { enable_light_client_server, log, network_params, + seq_number, } } @@ -173,7 +208,8 @@ impl RPC { &mut self, peer_id: PeerId, id: (ConnectionId, SubstreamId), - event: RPCCodedResponse, + _request_id: RequestId, + event: RpcResponse, ) { self.events.push(ToSwarm::NotifyHandler { peer_id, @@ -185,7 +221,7 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. - pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: OutboundRequest) { + pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: RequestType) { let event = if let Some(self_limiter) = self.self_limiter.as_mut() { match self_limiter.allows(peer_id, request_id, req) { Ok(event) => event, @@ -214,6 +250,19 @@ impl RPC { event: RPCSend::Shutdown(id, reason), }); } + + pub fn update_seq_number(&mut self, seq_number: u64) { + self.seq_number = seq_number + } + + /// Send a Ping request to the destination `PeerId`.
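`RequestId::next()` hands out process-wide ids from the `NEXT_REQUEST_ID` atomic above, and `Ord` is derived, so ids compare in allocation order. Illustrative only:

let first = RequestId::next();
let second = RequestId::next();
// fetch_add returns the previous value, so consecutive ids strictly increase.
assert!(first < second);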
+ pub fn ping(&mut self, peer_id: PeerId, id: Id) { + let ping = Ping { + data: self.seq_number, + }; + trace!(self.log, "Sending Ping"; "peer_id" => %peer_id); + self.send_request(peer_id, id, RequestType::Ping(ping)); + } } impl NetworkBehaviour for RPC @@ -245,8 +294,6 @@ where .log .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); let handler = RPCHandler::new( - connection_id, - peer_id, protocol, self.fork_context.clone(), &log, @@ -280,8 +327,6 @@ where .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); let handler = RPCHandler::new( - connection_id, - peer_id, protocol, self.fork_context.clone(), &log, @@ -355,21 +400,17 @@ where event: ::ToBehaviour, ) { match event { - HandlerEvent::Ok(RPCReceived::Request(id, req)) => { + HandlerEvent::Ok(RPCReceived::Request(Request { + id, + substream_id, + r#type, + })) => { if let Some(limiter) = self.limiter.as_mut() { // check if the request is conformant to the quota - match limiter.allows(&peer_id, &req) { - Ok(()) => { - // send the event to the user - self.events.push(ToSwarm::GenerateEvent(RPCMessage { - peer_id, - conn_id, - message: Ok(RPCReceived::Request(id, req)), - })) - } + match limiter.allows(&peer_id, &r#type) { Err(RateLimitedErr::TooLarge) => { // we set the batch sizes, so this is a coding/config err for most protocols - let protocol = req.versioned_protocol().protocol(); + let protocol = r#type.versioned_protocol().protocol(); if matches!( protocol, Protocol::BlocksByRange @@ -379,7 +420,7 @@ where | Protocol::BlobsByRoot | Protocol::DataColumnsByRoot ) { - debug!(self.log, "Request too large to process"; "request" => %req, "protocol" => %protocol); + debug!(self.log, "Request too large to process"; "request" => %r#type, "protocol" => %protocol); } else { // Other protocols shouldn't be sending large messages, we should flag the peer kind crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); @@ -388,36 +429,58 @@ where // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, + (conn_id, substream_id), + id, + RpcResponse::Error( + RpcErrorResponse::RateLimited, "Rate limited. Request too large".into(), ), ); + return; } Err(RateLimitedErr::TooSoon(wait_time)) => { debug!(self.log, "Request exceeds the rate limit"; - "request" => %req, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis()); + "request" => %r#type, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis()); // send an error code to the peer. // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, + (conn_id, substream_id), + id, + RpcResponse::Error( + RpcErrorResponse::RateLimited, format!("Wait {:?}", wait_time).into(), ), ); + return; } + // No rate limiting, continue. + Ok(()) => {} } - } else { - // No rate limiting, send the event to the user - self.events.push(ToSwarm::GenerateEvent(RPCMessage { + } + + // If we received a Ping, we queue a Pong response. 
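With ping handling lifted out of the handler, the behaviour now owns the metadata sequence number on both sides of the exchange. A hedged sketch of the outbound half; `rpc`, `local_metadata`, and `request_id` are assumed bindings:

// Keep the behaviour's counter fresh, then ping a peer. The Ping payload is
// the seq_number, and inbound Pings are answered below with a Pong carrying
// the same counter.
rpc.update_seq_number(local_metadata.seq_number());
rpc.ping(peer_id, request_id);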
+ if let RequestType::Ping(_) = r#type { + trace!(self.log, "Received Ping, queueing Pong";"connection_id" => %conn_id, "peer_id" => %peer_id); + self.send_response( peer_id, - conn_id, - message: Ok(RPCReceived::Request(id, req)), - })) + (conn_id, substream_id), + id, + RpcResponse::Success(RpcSuccessResponse::Pong(Ping { + data: self.seq_number, + })), + ); } + + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + message: Ok(RPCReceived::Request(Request { + id, + substream_id, + r#type, + })), + })); } HandlerEvent::Ok(rpc) => { self.events.push(ToSwarm::GenerateEvent(RPCMessage { @@ -477,8 +540,8 @@ where match &self.message { Ok(received) => { let (msg_kind, protocol) = match received { - RPCReceived::Request(_, req) => { - ("request", req.versioned_protocol().protocol()) + RPCReceived::Request(Request { r#type, .. }) => { + ("request", r#type.versioned_protocol().protocol()) } RPCReceived::Response(_, res) => ("response", res.protocol()), RPCReceived::EndOfStream(_, end) => ( @@ -490,6 +553,9 @@ where ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, + ResponseTermination::LightClientUpdatesByRange => { + Protocol::LightClientUpdatesByRange + } }, ), }; diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 2bfa42ccac9..b614313a84b 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -1,7 +1,6 @@ -use super::methods::*; use super::protocol::ProtocolId; -use super::protocol::SupportedProtocol; use super::RPCError; +use super::RequestType; use crate::rpc::codec::SSZSnappyOutboundCodec; use crate::rpc::protocol::Encoding; use futures::future::BoxFuture; @@ -21,25 +20,11 @@ use types::{EthSpec, ForkContext}; #[derive(Debug, Clone)] pub struct OutboundRequestContainer { - pub req: OutboundRequest, + pub req: RequestType, pub fork_context: Arc, pub max_rpc_size: usize, } -#[derive(Debug, Clone, PartialEq)] -pub enum OutboundRequest { - Status(StatusMessage), - Goodbye(GoodbyeReason), - BlocksByRange(OldBlocksByRangeRequest), - BlocksByRoot(BlocksByRootRequest), - BlobsByRange(BlobsByRangeRequest), - BlobsByRoot(BlobsByRootRequest), - DataColumnsByRoot(DataColumnsByRootRequest), - DataColumnsByRange(DataColumnsByRangeRequest), - Ping(Ping), - MetaData(MetadataRequest), -} - impl UpgradeInfo for OutboundRequestContainer { type Info = ProtocolId; type InfoIter = Vec; @@ -50,133 +35,6 @@ impl UpgradeInfo for OutboundRequestContainer { } } -/// Implements the encoding per supported protocol for `RPCRequest`. 
-impl OutboundRequest { - pub fn supported_protocols(&self) -> Vec { - match self { - // add more protocols when versions/encodings are supported - OutboundRequest::Status(_) => vec![ProtocolId::new( - SupportedProtocol::StatusV1, - Encoding::SSZSnappy, - )], - OutboundRequest::Goodbye(_) => vec![ProtocolId::new( - SupportedProtocol::GoodbyeV1, - Encoding::SSZSnappy, - )], - OutboundRequest::BlocksByRange(_) => vec![ - ProtocolId::new(SupportedProtocol::BlocksByRangeV2, Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy), - ], - OutboundRequest::BlocksByRoot(_) => vec![ - ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy), - ], - OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new( - SupportedProtocol::BlobsByRangeV1, - Encoding::SSZSnappy, - )], - OutboundRequest::BlobsByRoot(_) => vec![ProtocolId::new( - SupportedProtocol::BlobsByRootV1, - Encoding::SSZSnappy, - )], - OutboundRequest::DataColumnsByRoot(_) => vec![ProtocolId::new( - SupportedProtocol::DataColumnsByRootV1, - Encoding::SSZSnappy, - )], - OutboundRequest::DataColumnsByRange(_) => vec![ProtocolId::new( - SupportedProtocol::DataColumnsByRangeV1, - Encoding::SSZSnappy, - )], - OutboundRequest::Ping(_) => vec![ProtocolId::new( - SupportedProtocol::PingV1, - Encoding::SSZSnappy, - )], - OutboundRequest::MetaData(_) => vec![ - ProtocolId::new(SupportedProtocol::MetaDataV3, Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::MetaDataV2, Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::MetaDataV1, Encoding::SSZSnappy), - ], - } - } - /* These functions are used in the handler for stream management */ - - /// Maximum number of responses expected for this request. - pub fn max_responses(&self) -> u64 { - match self { - OutboundRequest::Status(_) => 1, - OutboundRequest::Goodbye(_) => 0, - OutboundRequest::BlocksByRange(req) => *req.count(), - OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, - OutboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), - OutboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, - OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, - OutboundRequest::DataColumnsByRange(req) => req.max_requested::(), - OutboundRequest::Ping(_) => 1, - OutboundRequest::MetaData(_) => 1, - } - } - - pub fn expect_exactly_one_response(&self) -> bool { - match self { - OutboundRequest::Status(_) => true, - OutboundRequest::Goodbye(_) => false, - OutboundRequest::BlocksByRange(_) => false, - OutboundRequest::BlocksByRoot(_) => false, - OutboundRequest::BlobsByRange(_) => false, - OutboundRequest::BlobsByRoot(_) => false, - OutboundRequest::DataColumnsByRoot(_) => false, - OutboundRequest::DataColumnsByRange(_) => false, - OutboundRequest::Ping(_) => true, - OutboundRequest::MetaData(_) => true, - } - } - - /// Gives the corresponding `SupportedProtocol` to this request. 
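Everything the removed `OutboundRequest` impl provided is assumed to live on the unified `RequestType`, which the rest of this diff already exercises (the codec tests call `req.versioned_protocol()` on it). A brief illustrative use:

// The same value now serves both the inbound and outbound paths.
let req: RequestType<MainnetEthSpec> = RequestType::Ping(Ping { data: 1 });
assert!(matches!(req.versioned_protocol(), SupportedProtocol::PingV1));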
- pub fn versioned_protocol(&self) -> SupportedProtocol { - match self { - OutboundRequest::Status(_) => SupportedProtocol::StatusV1, - OutboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1, - OutboundRequest::BlocksByRange(req) => match req { - OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1, - OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2, - }, - OutboundRequest::BlocksByRoot(req) => match req { - BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, - BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, - }, - OutboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, - OutboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, - OutboundRequest::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, - OutboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, - OutboundRequest::Ping(_) => SupportedProtocol::PingV1, - OutboundRequest::MetaData(req) => match req { - MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, - MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2, - MetadataRequest::V3(_) => SupportedProtocol::MetaDataV3, - }, - } - } - - /// Returns the `ResponseTermination` type associated with the request if a stream gets - /// terminated. - pub fn stream_termination(&self) -> ResponseTermination { - match self { - // this only gets called after `multiple_responses()` returns true. Therefore, only - // variants that have `multiple_responses()` can have values. - OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, - OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, - OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, - OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, - OutboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, - OutboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, - OutboundRequest::Status(_) => unreachable!(), - OutboundRequest::Goodbye(_) => unreachable!(), - OutboundRequest::Ping(_) => unreachable!(), - OutboundRequest::MetaData(_) => unreachable!(), - } - } -} - /* RPC Response type - used for outbound upgrades */ /* Outbound upgrades */ @@ -211,22 +69,3 @@ where .boxed() } } - -impl std::fmt::Display for OutboundRequest { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - OutboundRequest::Status(status) => write!(f, "Status Message: {}", status), - OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), - OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), - OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), - OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), - OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), - OutboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), - OutboundRequest::DataColumnsByRange(req) => { - write!(f, "Data columns by range: {:?}", req) - } - OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), - OutboundRequest::MetaData(_) => write!(f, "MetaData request"), - } - } -} diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 09a18e5de6b..d0dbffe9326 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -21,7 +21,8 @@ use types::{ 
    BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, ForkContext, ForkName,
    LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate,
    LightClientFinalityUpdateAltair, LightClientOptimisticUpdate,
-    LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, SignedBeaconBlock,
+    LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, Signature,
+    SignedBeaconBlock,
 };
 
 // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is
@@ -143,6 +144,13 @@ pub static LIGHT_CLIENT_BOOTSTRAP_ELECTRA_MAX: LazyLock<usize> = LazyLock::new(|| {
     LightClientBootstrap::<MainnetEthSpec>::ssz_max_len_for_fork(ForkName::Electra)
 });
 
+pub static LIGHT_CLIENT_UPDATES_BY_RANGE_CAPELLA_MAX: LazyLock<usize> =
+    LazyLock::new(|| LightClientUpdate::<MainnetEthSpec>::ssz_max_len_for_fork(ForkName::Capella));
+pub static LIGHT_CLIENT_UPDATES_BY_RANGE_DENEB_MAX: LazyLock<usize> =
+    LazyLock::new(|| LightClientUpdate::<MainnetEthSpec>::ssz_max_len_for_fork(ForkName::Deneb));
+pub static LIGHT_CLIENT_UPDATES_BY_RANGE_ELECTRA_MAX: LazyLock<usize> =
+    LazyLock::new(|| LightClientUpdate::<MainnetEthSpec>::ssz_max_len_for_fork(ForkName::Electra));
+
 /// The protocol prefix the RPC protocol id.
 const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";
 /// The number of seconds to wait for the first bytes of a request once a protocol has been
@@ -151,12 +159,10 @@ const REQUEST_TIMEOUT: u64 = 15;
 
 /// Returns the maximum bytes that can be sent across the RPC.
 pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize {
-    match fork_context.current_fork() {
-        ForkName::Altair | ForkName::Base => max_chunk_size / 10,
-        ForkName::Bellatrix => max_chunk_size,
-        ForkName::Capella => max_chunk_size,
-        ForkName::Deneb => max_chunk_size,
-        ForkName::Electra => max_chunk_size,
+    if fork_context.current_fork().bellatrix_enabled() {
+        max_chunk_size
+    } else {
+        max_chunk_size / 10
     }
 }
 
@@ -192,6 +198,26 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits {
     }
 }
 
+fn rpc_light_client_updates_by_range_limits_by_fork(current_fork: ForkName) -> RpcLimits {
+    let altair_fixed_len = LightClientFinalityUpdateAltair::<MainnetEthSpec>::ssz_fixed_len();
+
+    match &current_fork {
+        ForkName::Base => RpcLimits::new(0, 0),
+        ForkName::Altair | ForkName::Bellatrix => {
+            RpcLimits::new(altair_fixed_len, altair_fixed_len)
+        }
+        ForkName::Capella => {
+            RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_CAPELLA_MAX)
+        }
+        ForkName::Deneb => {
+            RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_DENEB_MAX)
+        }
+        ForkName::Electra => {
+            RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_ELECTRA_MAX)
+        }
+    }
+}
+
 fn rpc_light_client_finality_update_limits_by_fork(current_fork: ForkName) -> RpcLimits {
     let altair_fixed_len = LightClientFinalityUpdateAltair::<MainnetEthSpec>::ssz_fixed_len();
 
@@ -288,6 +314,9 @@ pub enum Protocol {
     /// The `LightClientFinalityUpdate` protocol name.
#[strum(serialize = "light_client_finality_update")] LightClientFinalityUpdate, + /// The `LightClientUpdatesByRange` protocol name + #[strum(serialize = "light_client_updates_by_range")] + LightClientUpdatesByRange, } impl Protocol { @@ -306,6 +335,7 @@ impl Protocol { Protocol::LightClientBootstrap => None, Protocol::LightClientOptimisticUpdate => None, Protocol::LightClientFinalityUpdate => None, + Protocol::LightClientUpdatesByRange => None, } } } @@ -336,6 +366,7 @@ pub enum SupportedProtocol { LightClientBootstrapV1, LightClientOptimisticUpdateV1, LightClientFinalityUpdateV1, + LightClientUpdatesByRangeV1, } impl SupportedProtocol { @@ -358,6 +389,7 @@ impl SupportedProtocol { SupportedProtocol::LightClientBootstrapV1 => "1", SupportedProtocol::LightClientOptimisticUpdateV1 => "1", SupportedProtocol::LightClientFinalityUpdateV1 => "1", + SupportedProtocol::LightClientUpdatesByRangeV1 => "1", } } @@ -382,6 +414,7 @@ impl SupportedProtocol { Protocol::LightClientOptimisticUpdate } SupportedProtocol::LightClientFinalityUpdateV1 => Protocol::LightClientFinalityUpdate, + SupportedProtocol::LightClientUpdatesByRangeV1 => Protocol::LightClientUpdatesByRange, } } @@ -544,6 +577,10 @@ impl ProtocolId { ), Protocol::LightClientOptimisticUpdate => RpcLimits::new(0, 0), Protocol::LightClientFinalityUpdate => RpcLimits::new(0, 0), + Protocol::LightClientUpdatesByRange => RpcLimits::new( + LightClientUpdatesByRangeRequest::ssz_min_len(), + LightClientUpdatesByRangeRequest::ssz_max_len(), + ), Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty } } @@ -579,6 +616,9 @@ impl ProtocolId { Protocol::LightClientFinalityUpdate => { rpc_light_client_finality_update_limits_by_fork(fork_context.current_fork()) } + Protocol::LightClientUpdatesByRange => { + rpc_light_client_updates_by_range_limits_by_fork(fork_context.current_fork()) + } } } @@ -594,7 +634,8 @@ impl ProtocolId { | SupportedProtocol::DataColumnsByRangeV1 | SupportedProtocol::LightClientBootstrapV1 | SupportedProtocol::LightClientOptimisticUpdateV1 - | SupportedProtocol::LightClientFinalityUpdateV1 => true, + | SupportedProtocol::LightClientFinalityUpdateV1 + | SupportedProtocol::LightClientUpdatesByRangeV1 => true, SupportedProtocol::StatusV1 | SupportedProtocol::BlocksByRootV1 | SupportedProtocol::BlocksByRangeV1 @@ -645,7 +686,7 @@ pub fn rpc_data_column_limits() -> RpcLimits { // The inbound protocol reads the request, decodes it and returns the stream to the protocol // handler to respond to once ready. 
-pub type InboundOutput<TSocket, E> = (InboundRequest<E>, InboundFramed<TSocket, E>);
+pub type InboundOutput<TSocket, E> = (RequestType<E>, InboundFramed<TSocket, E>);
 
 pub type InboundFramed<TSocket, E> =
     Framed<std::pin::Pin<Box<TimeoutStream<Compat<TSocket>>>>, SSZSnappyInboundCodec<E>>;
 
@@ -679,19 +720,19 @@ where
             // MetaData requests should be empty, return the stream
             match versioned_protocol {
                 SupportedProtocol::MetaDataV1 => {
-                    Ok((InboundRequest::MetaData(MetadataRequest::new_v1()), socket))
+                    Ok((RequestType::MetaData(MetadataRequest::new_v1()), socket))
                 }
                 SupportedProtocol::MetaDataV2 => {
-                    Ok((InboundRequest::MetaData(MetadataRequest::new_v2()), socket))
+                    Ok((RequestType::MetaData(MetadataRequest::new_v2()), socket))
                 }
                 SupportedProtocol::MetaDataV3 => {
-                    Ok((InboundRequest::MetaData(MetadataRequest::new_v3()), socket))
+                    Ok((RequestType::MetaData(MetadataRequest::new_v3()), socket))
                 }
                 SupportedProtocol::LightClientOptimisticUpdateV1 => {
-                    Ok((InboundRequest::LightClientOptimisticUpdate, socket))
+                    Ok((RequestType::LightClientOptimisticUpdate, socket))
                 }
                 SupportedProtocol::LightClientFinalityUpdateV1 => {
-                    Ok((InboundRequest::LightClientFinalityUpdate, socket))
+                    Ok((RequestType::LightClientFinalityUpdate, socket))
                 }
                 _ => {
                     match tokio::time::timeout(
@@ -713,7 +754,7 @@ where
 }
 
 #[derive(Debug, Clone, PartialEq)]
-pub enum InboundRequest<E: EthSpec> {
+pub enum RequestType<E: EthSpec> {
     Status(StatusMessage),
     Goodbye(GoodbyeReason),
     BlocksByRange(OldBlocksByRangeRequest),
     BlocksByRoot(BlocksByRootRequest),
     BlobsByRange(BlobsByRangeRequest),
     BlobsByRoot(BlobsByRootRequest),
     DataColumnsByRoot(DataColumnsByRootRequest),
     DataColumnsByRange(DataColumnsByRangeRequest),
     LightClientBootstrap(LightClientBootstrapRequest),
     LightClientOptimisticUpdate,
     LightClientFinalityUpdate,
+    LightClientUpdatesByRange(LightClientUpdatesByRangeRequest),
     Ping(Ping),
     MetaData(MetadataRequest<E>),
 }
 
 /// Implements the encoding per supported protocol for `RPCRequest`.
-impl<E: EthSpec> InboundRequest<E> {
+impl<E: EthSpec> RequestType<E> {
     /* These functions are used in the handler for stream management */
 
     /// Maximum number of responses expected for this request.
     pub fn max_responses(&self) -> u64 {
         match self {
-            InboundRequest::Status(_) => 1,
-            InboundRequest::Goodbye(_) => 0,
-            InboundRequest::BlocksByRange(req) => *req.count(),
-            InboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64,
-            InboundRequest::BlobsByRange(req) => req.max_blobs_requested::<E>(),
-            InboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64,
-            InboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64,
-            InboundRequest::DataColumnsByRange(req) => req.max_requested::<E>(),
-            InboundRequest::Ping(_) => 1,
-            InboundRequest::MetaData(_) => 1,
-            InboundRequest::LightClientBootstrap(_) => 1,
-            InboundRequest::LightClientOptimisticUpdate => 1,
-            InboundRequest::LightClientFinalityUpdate => 1,
+            RequestType::Status(_) => 1,
+            RequestType::Goodbye(_) => 0,
+            RequestType::BlocksByRange(req) => *req.count(),
+            RequestType::BlocksByRoot(req) => req.block_roots().len() as u64,
+            RequestType::BlobsByRange(req) => req.max_blobs_requested::<E>(),
+            RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64,
+            RequestType::DataColumnsByRoot(req) => req.data_column_ids.len() as u64,
+            RequestType::DataColumnsByRange(req) => req.max_requested::<E>(),
+            RequestType::Ping(_) => 1,
+            RequestType::MetaData(_) => 1,
+            RequestType::LightClientBootstrap(_) => 1,
+            RequestType::LightClientOptimisticUpdate => 1,
+            RequestType::LightClientFinalityUpdate => 1,
+            RequestType::LightClientUpdatesByRange(req) => req.count,
         }
     }
 
     /// Gives the corresponding `SupportedProtocol` to this request.
pub fn versioned_protocol(&self) -> SupportedProtocol { match self { - InboundRequest::Status(_) => SupportedProtocol::StatusV1, - InboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1, - InboundRequest::BlocksByRange(req) => match req { + RequestType::Status(_) => SupportedProtocol::StatusV1, + RequestType::Goodbye(_) => SupportedProtocol::GoodbyeV1, + RequestType::BlocksByRange(req) => match req { OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1, OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2, }, - InboundRequest::BlocksByRoot(req) => match req { + RequestType::BlocksByRoot(req) => match req { BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, }, - InboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, - InboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, - InboundRequest::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, - InboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, - InboundRequest::Ping(_) => SupportedProtocol::PingV1, - InboundRequest::MetaData(req) => match req { + RequestType::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, + RequestType::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, + RequestType::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, + RequestType::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, + RequestType::Ping(_) => SupportedProtocol::PingV1, + RequestType::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2, MetadataRequest::V3(_) => SupportedProtocol::MetaDataV3, }, - InboundRequest::LightClientBootstrap(_) => SupportedProtocol::LightClientBootstrapV1, - InboundRequest::LightClientOptimisticUpdate => { + RequestType::LightClientBootstrap(_) => SupportedProtocol::LightClientBootstrapV1, + RequestType::LightClientOptimisticUpdate => { SupportedProtocol::LightClientOptimisticUpdateV1 } - InboundRequest::LightClientFinalityUpdate => { + RequestType::LightClientFinalityUpdate => { SupportedProtocol::LightClientFinalityUpdateV1 } + RequestType::LightClientUpdatesByRange(_) => { + SupportedProtocol::LightClientUpdatesByRangeV1 + } } } @@ -791,19 +837,102 @@ impl InboundRequest { match self { // this only gets called after `multiple_responses()` returns true. Therefore, only // variants that have `multiple_responses()` can have values. 
- InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, - InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, - InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, - InboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, - InboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, - InboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, - InboundRequest::Status(_) => unreachable!(), - InboundRequest::Goodbye(_) => unreachable!(), - InboundRequest::Ping(_) => unreachable!(), - InboundRequest::MetaData(_) => unreachable!(), - InboundRequest::LightClientBootstrap(_) => unreachable!(), - InboundRequest::LightClientFinalityUpdate => unreachable!(), - InboundRequest::LightClientOptimisticUpdate => unreachable!(), + RequestType::BlocksByRange(_) => ResponseTermination::BlocksByRange, + RequestType::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + RequestType::BlobsByRange(_) => ResponseTermination::BlobsByRange, + RequestType::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, + RequestType::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, + RequestType::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, + RequestType::Status(_) => unreachable!(), + RequestType::Goodbye(_) => unreachable!(), + RequestType::Ping(_) => unreachable!(), + RequestType::MetaData(_) => unreachable!(), + RequestType::LightClientBootstrap(_) => unreachable!(), + RequestType::LightClientFinalityUpdate => unreachable!(), + RequestType::LightClientOptimisticUpdate => unreachable!(), + RequestType::LightClientUpdatesByRange(_) => unreachable!(), + } + } + + pub fn supported_protocols(&self) -> Vec { + match self { + // add more protocols when versions/encodings are supported + RequestType::Status(_) => vec![ProtocolId::new( + SupportedProtocol::StatusV1, + Encoding::SSZSnappy, + )], + RequestType::Goodbye(_) => vec![ProtocolId::new( + SupportedProtocol::GoodbyeV1, + Encoding::SSZSnappy, + )], + RequestType::BlocksByRange(_) => vec![ + ProtocolId::new(SupportedProtocol::BlocksByRangeV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy), + ], + RequestType::BlocksByRoot(_) => vec![ + ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy), + ], + RequestType::BlobsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::BlobsByRangeV1, + Encoding::SSZSnappy, + )], + RequestType::BlobsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::BlobsByRootV1, + Encoding::SSZSnappy, + )], + RequestType::DataColumnsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRootV1, + Encoding::SSZSnappy, + )], + RequestType::DataColumnsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRangeV1, + Encoding::SSZSnappy, + )], + RequestType::Ping(_) => vec![ProtocolId::new( + SupportedProtocol::PingV1, + Encoding::SSZSnappy, + )], + RequestType::MetaData(_) => vec![ + ProtocolId::new(SupportedProtocol::MetaDataV3, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::MetaDataV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::MetaDataV1, Encoding::SSZSnappy), + ], + RequestType::LightClientBootstrap(_) => vec![ProtocolId::new( + SupportedProtocol::LightClientBootstrapV1, + Encoding::SSZSnappy, + )], + RequestType::LightClientOptimisticUpdate => vec![ProtocolId::new( + 
+                SupportedProtocol::LightClientOptimisticUpdateV1,
+                Encoding::SSZSnappy,
+            )],
+            RequestType::LightClientFinalityUpdate => vec![ProtocolId::new(
+                SupportedProtocol::LightClientFinalityUpdateV1,
+                Encoding::SSZSnappy,
+            )],
+            RequestType::LightClientUpdatesByRange(_) => vec![ProtocolId::new(
+                SupportedProtocol::LightClientUpdatesByRangeV1,
+                Encoding::SSZSnappy,
+            )],
+        }
+    }
+
+    pub fn expect_exactly_one_response(&self) -> bool {
+        match self {
+            RequestType::Status(_) => true,
+            RequestType::Goodbye(_) => false,
+            RequestType::BlocksByRange(_) => false,
+            RequestType::BlocksByRoot(_) => false,
+            RequestType::BlobsByRange(_) => false,
+            RequestType::BlobsByRoot(_) => false,
+            RequestType::DataColumnsByRoot(_) => false,
+            RequestType::DataColumnsByRange(_) => false,
+            RequestType::Ping(_) => true,
+            RequestType::MetaData(_) => true,
+            RequestType::LightClientBootstrap(_) => true,
+            RequestType::LightClientOptimisticUpdate => true,
+            RequestType::LightClientFinalityUpdate => true,
+            RequestType::LightClientUpdatesByRange(_) => true,
+        }
+    }
 }
@@ -819,7 +948,7 @@ pub enum RPCError {
     /// IO Error.
     IoError(String),
     /// The peer returned a valid response but the response indicated an error.
-    ErrorResponse(RPCResponseErrorCode, String),
+    ErrorResponse(RpcErrorResponse, String),
     /// Timed out waiting for a response.
     StreamTimeout,
     /// Peer does not support the protocol.
@@ -898,30 +1027,33 @@ impl std::error::Error for RPCError {
     }
 }
 
-impl<E: EthSpec> std::fmt::Display for InboundRequest<E> {
+impl<E: EthSpec> std::fmt::Display for RequestType<E> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
-            InboundRequest::Status(status) => write!(f, "Status Message: {}", status),
-            InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
-            InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
-            InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
-            InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
-            InboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req),
-            InboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req),
-            InboundRequest::DataColumnsByRange(req) => {
+            RequestType::Status(status) => write!(f, "Status Message: {}", status),
+            RequestType::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
+            RequestType::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
+            RequestType::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
+            RequestType::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
+            RequestType::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req),
+            RequestType::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req),
+            RequestType::DataColumnsByRange(req) => {
                 write!(f, "Data columns by range: {:?}", req)
             }
-            InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
-            InboundRequest::MetaData(_) => write!(f, "MetaData request"),
-            InboundRequest::LightClientBootstrap(bootstrap) => {
+            RequestType::Ping(ping) => write!(f, "Ping: {}", ping.data),
+            RequestType::MetaData(_) => write!(f, "MetaData request"),
+            RequestType::LightClientBootstrap(bootstrap) => {
                 write!(f, "Light client bootstrap: {}", bootstrap.root)
             }
-            InboundRequest::LightClientOptimisticUpdate => {
+            RequestType::LightClientOptimisticUpdate => {
                 write!(f, "Light client optimistic update request")
            }
-            InboundRequest::LightClientFinalityUpdate => {
+            RequestType::LightClientFinalityUpdate => {
                 write!(f, "Light client finality update request")
             }
+            RequestType::LightClientUpdatesByRange(_) => {
+                write!(f, "Light client updates by range request")
+            }
         }
     }
 }
diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs
index 523b891a009..ecbacc8c112 100644
--- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs
+++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs
@@ -107,6 +107,8 @@ pub struct RPCRateLimiter {
     lc_optimistic_update_rl: Limiter,
     /// LightClientFinalityUpdate rate limiter.
     lc_finality_update_rl: Limiter,
+    /// LightClientUpdatesByRange rate limiter.
+    lc_updates_by_range_rl: Limiter,
 }
 
 /// Error type for non conformant requests
@@ -147,6 +149,8 @@ pub struct RPCRateLimiterBuilder {
     lc_optimistic_update_quota: Option<Quota>,
     /// Quota for the LightClientFinalityUpdate protocol.
     lc_finality_update_quota: Option<Quota>,
+    /// Quota for the LightClientUpdatesByRange protocol.
+    lc_updates_by_range_quota: Option<Quota>,
 }
 
 impl RPCRateLimiterBuilder {
@@ -167,6 +171,7 @@ impl RPCRateLimiterBuilder {
             Protocol::LightClientBootstrap => self.lcbootstrap_quota = q,
             Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q,
             Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q,
+            Protocol::LightClientUpdatesByRange => self.lc_updates_by_range_quota = q,
         }
         self
     }
@@ -192,6 +197,9 @@ impl RPCRateLimiterBuilder {
         let lc_finality_update_quota = self
             .lc_finality_update_quota
             .ok_or("LightClientFinalityUpdate quota not specified")?;
+        let lc_updates_by_range_quota = self
+            .lc_updates_by_range_quota
+            .ok_or("LightClientUpdatesByRange quota not specified")?;
 
         let blbrange_quota = self
             .blbrange_quota
@@ -222,6 +230,7 @@ impl RPCRateLimiterBuilder {
         let lc_bootstrap_rl = Limiter::from_quota(lc_bootstrap_quota)?;
         let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?;
         let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?;
+        let lc_updates_by_range_rl = Limiter::from_quota(lc_updates_by_range_quota)?;
 
         // check for peers to prune every 30 seconds, starting in 30 seconds
         let prune_every = tokio::time::Duration::from_secs(30);
@@ -242,6 +251,7 @@ impl RPCRateLimiterBuilder {
             lc_bootstrap_rl,
             lc_optimistic_update_rl,
             lc_finality_update_rl,
+            lc_updates_by_range_rl,
             init_time: Instant::now(),
         })
     }
@@ -252,7 +262,7 @@ pub trait RateLimiterItem {
     fn max_responses(&self) -> u64;
 }
 
-impl<E: EthSpec> RateLimiterItem for super::InboundRequest<E> {
+impl<E: EthSpec> RateLimiterItem for super::RequestType<E> {
     fn protocol(&self) -> Protocol {
         self.versioned_protocol().protocol()
     }
@@ -262,15 +272,6 @@
     fn max_responses(&self) -> u64 {
         self.max_responses()
     }
 }
 
-impl<E: EthSpec> RateLimiterItem for super::OutboundRequest<E> {
-    fn protocol(&self) -> Protocol {
-        self.versioned_protocol().protocol()
-    }
-
-    fn max_responses(&self) -> u64 {
-        self.max_responses()
-    }
-}
 impl RPCRateLimiter {
     pub fn new_with_config(config: RateLimiterConfig) -> Result<Self, &'static str> {
         // Destructure to make sure every configuration value is used.
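For readers unfamiliar with how these per-protocol quotas behave once wired in: a quota is easiest to read as a token bucket that replenishes over a fixed period and charges each request by its expected response count. The sketch below illustrates that accounting only; `Quota`, `Limiter`, `from_quota`, and `allows` here are illustrative stand-ins under that assumption, not this crate's actual types or signatures.

    use std::time::{Duration, Instant};

    // Illustrative quota: at most `max_tokens` request-units per `replenish_all_every`.
    struct Quota {
        max_tokens: u64,
        replenish_all_every: Duration,
    }

    struct Limiter {
        quota: Quota,
        tokens: f64,
        last: Instant,
    }

    impl Limiter {
        fn from_quota(quota: Quota) -> Self {
            Self {
                tokens: quota.max_tokens as f64,
                last: Instant::now(),
                quota,
            }
        }

        // A request costing `cost` units (e.g. its `max_responses`) is allowed if the
        // bucket holds enough tokens after replenishing for the elapsed time.
        fn allows(&mut self, cost: u64) -> bool {
            let rate = self.quota.max_tokens as f64 / self.quota.replenish_all_every.as_secs_f64();
            self.tokens = (self.tokens + rate * self.last.elapsed().as_secs_f64())
                .min(self.quota.max_tokens as f64);
            self.last = Instant::now();
            if self.tokens >= cost as f64 {
                self.tokens -= cost as f64;
                true
            } else {
                false
            }
        }
    }

    fn main() {
        // E.g. allow up to 500 response-units per 10 seconds for one protocol.
        let mut limiter = Limiter::from_quota(Quota {
            max_tokens: 500,
            replenish_all_every: Duration::from_secs(10),
        });
        assert!(limiter.allows(128)); // a by-range request charged by its count
        assert!(!limiter.allows(500)); // not enough budget left right now
    }

Charging by response count rather than per request is the design choice that lets one quota cover both cheap single-response protocols and expensive streamed ones.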
@@ -288,6 +289,7 @@ impl RPCRateLimiter { light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, + light_client_updates_by_range_quota, } = config; Self::builder() @@ -310,6 +312,10 @@ impl RPCRateLimiter { Protocol::LightClientFinalityUpdate, light_client_finality_update_quota, ) + .set_quota( + Protocol::LightClientUpdatesByRange, + light_client_updates_by_range_quota, + ) .build() } @@ -342,6 +348,7 @@ impl RPCRateLimiter { Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, + Protocol::LightClientUpdatesByRange => &mut self.lc_updates_by_range_rl, }; check(limiter) } diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index 77caecb16df..e968ad11e3d 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -14,13 +14,13 @@ use types::EthSpec; use super::{ config::OutboundRateLimiterConfig, rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}, - BehaviourAction, OutboundRequest, Protocol, RPCSend, ReqId, + BehaviourAction, Protocol, RPCSend, ReqId, RequestType, }; /// A request that was rate limited or waiting on rate limited requests for the same peer and /// protocol. struct QueuedRequest { - req: OutboundRequest, + req: RequestType, request_id: Id, } @@ -70,7 +70,7 @@ impl SelfRateLimiter { &mut self, peer_id: PeerId, request_id: Id, - req: OutboundRequest, + req: RequestType, ) -> Result, Error> { let protocol = req.versioned_protocol().protocol(); // First check that there are not already other requests waiting to be sent. 
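For orientation on the self limiter touched below: it treats each (peer, protocol) pair as an independent lane, and a new outbound request queues behind any requests already waiting in that lane instead of racing them to the limiter. This is a minimal sketch of that queue-first check with stand-in types (the real code keys on libp2p's `PeerId` and this crate's `Protocol`); the names are illustrative, not the crate's API.

    use std::collections::{HashMap, VecDeque};

    type PeerId = u64; // stand-in; the real code uses libp2p's PeerId

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    enum Protocol {
        Ping,
        LightClientUpdatesByRange,
    }

    struct QueuedRequest {
        request_id: usize,
    }

    #[derive(Default)]
    struct SelfLimiterSketch {
        // Requests parked per (peer, protocol) lane while that lane is rate-limited.
        delayed: HashMap<(PeerId, Protocol), VecDeque<QueuedRequest>>,
    }

    impl SelfLimiterSketch {
        // If earlier requests for this lane are already waiting, queue behind them to
        // preserve ordering; otherwise hand the request back so the caller can try
        // the rate limiter directly.
        fn allow_request(
            &mut self,
            peer: PeerId,
            proto: Protocol,
            req: QueuedRequest,
        ) -> Option<QueuedRequest> {
            match self.delayed.get_mut(&(peer, proto)) {
                Some(queue) => {
                    queue.push_back(req);
                    None
                }
                None => Some(req),
            }
        }
    }

    fn main() {
        let mut limiter = SelfLimiterSketch::default();
        // No pending lane yet: the caller proceeds to the rate limiter.
        assert!(limiter
            .allow_request(7, Protocol::Ping, QueuedRequest { request_id: 0 })
            .is_some());
        // A rate-limited request parks itself in the lane...
        limiter
            .delayed
            .entry((7, Protocol::Ping))
            .or_default()
            .push_back(QueuedRequest { request_id: 1 });
        // ...so later requests for the same (peer, protocol) queue behind it.
        assert!(limiter
            .allow_request(7, Protocol::Ping, QueuedRequest { request_id: 2 })
            .is_none());
    }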
@@ -101,7 +101,7 @@ impl<Id: ReqId, E: EthSpec> SelfRateLimiter<Id, E> {
         limiter: &mut RateLimiter,
         peer_id: PeerId,
         request_id: Id,
-        req: OutboundRequest<E>,
+        req: RequestType<E>,
         log: &Logger,
     ) -> Result<BehaviourAction<Id, E>, (QueuedRequest<Id, E>, Duration)> {
         match limiter.allows(&peer_id, &req) {
@@ -211,7 +211,7 @@ mod tests {
     use crate::rpc::config::{OutboundRateLimiterConfig, RateLimiterConfig};
     use crate::rpc::rate_limiter::Quota;
     use crate::rpc::self_limiter::SelfRateLimiter;
-    use crate::rpc::{OutboundRequest, Ping, Protocol};
+    use crate::rpc::{Ping, Protocol, RequestType};
     use crate::service::api_types::{AppRequestId, RequestId, SyncRequestId};
     use libp2p::PeerId;
     use std::time::Duration;
@@ -235,7 +235,7 @@ mod tests {
                 RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs {
                     id: i,
                 })),
-                OutboundRequest::Ping(Ping { data: i as u64 }),
+                RequestType::Ping(Ping { data: i as u64 }),
             );
         }
diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs
index 30400db3b66..cb228153908 100644
--- a/beacon_node/lighthouse_network/src/service/api_types.rs
+++ b/beacon_node/lighthouse_network/src/service/api_types.rs
@@ -3,19 +3,12 @@ use std::sync::Arc;
 use libp2p::swarm::ConnectionId;
 use types::{
     BlobSidecar, DataColumnSidecar, EthSpec, Hash256, LightClientBootstrap,
-    LightClientFinalityUpdate, LightClientOptimisticUpdate, SignedBeaconBlock,
+    LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock,
 };
 
-use crate::rpc::methods::{
-    BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest,
-};
 use crate::rpc::{
-    methods::{
-        BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest,
-        OldBlocksByRangeRequest, OldBlocksByRangeRequestV1, OldBlocksByRangeRequestV2,
-        RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage,
-    },
-    OutboundRequest, SubstreamId,
+    methods::{ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage},
+    SubstreamId,
 };
 
 /// Identifier of requests sent by a peer.
@@ -29,11 +22,6 @@ pub struct SingleLookupReqId {
     pub req_id: Id,
 }
 
-/// Request ID for data_columns_by_root requests. Block lookup do not issue this requests directly.
-/// Wrapping this particular req_id, ensures not mixing this requests with a custody req_id.
-#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)]
-pub struct DataColumnsByRootRequestId(pub Id);
-
 /// Id of rpc requests sent by sync to the network.
 #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)]
 pub enum SyncRequestId {
@@ -42,11 +30,19 @@ pub enum SyncRequestId {
     /// Request searching for a set of blobs given a hash.
     SingleBlob { id: SingleLookupReqId },
     /// Request searching for a set of data columns given a hash and list of column indices.
-    DataColumnsByRoot(DataColumnsByRootRequestId, DataColumnsByRootRequester),
+    DataColumnsByRoot(DataColumnsByRootRequestId),
     /// Range request that is composed by both a block range request and a blob range request.
     RangeBlockAndBlobs { id: Id },
 }
 
+/// Request ID for data_columns_by_root requests. Block lookups do not issue this request directly.
+/// Wrapping this particular req_id ensures it is not mixed up with a custody req_id.
+#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct DataColumnsByRootRequestId { + pub id: Id, + pub requester: DataColumnsByRootRequester, +} + #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum DataColumnsByRootRequester { Sampling(SamplingId), @@ -93,69 +89,6 @@ pub enum RequestId { Internal, } -/// The type of RPC requests the Behaviour informs it has received and allows for sending. -/// -// NOTE: This is an application-level wrapper over the lower network level requests that can be -// sent. The main difference is the absence of the Ping, Metadata and Goodbye protocols, which don't -// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`. -#[derive(Debug, Clone, PartialEq)] -pub enum Request { - /// A Status message. - Status(StatusMessage), - /// A blocks by range request. - BlocksByRange(BlocksByRangeRequest), - /// A blobs by range request. - BlobsByRange(BlobsByRangeRequest), - /// A request blocks root request. - BlocksByRoot(BlocksByRootRequest), - // light client bootstrap request - LightClientBootstrap(LightClientBootstrapRequest), - // light client optimistic update request - LightClientOptimisticUpdate, - // light client finality update request - LightClientFinalityUpdate, - /// A request blobs root request. - BlobsByRoot(BlobsByRootRequest), - /// A request data columns root request. - DataColumnsByRoot(DataColumnsByRootRequest), - /// A request data columns by range request. - DataColumnsByRange(DataColumnsByRangeRequest), -} - -impl std::convert::From for OutboundRequest { - fn from(req: Request) -> OutboundRequest { - match req { - Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r), - Request::BlocksByRange(r) => match r { - BlocksByRangeRequest::V1(req) => OutboundRequest::BlocksByRange( - OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { - start_slot: req.start_slot, - count: req.count, - step: 1, - }), - ), - BlocksByRangeRequest::V2(req) => OutboundRequest::BlocksByRange( - OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { - start_slot: req.start_slot, - count: req.count, - step: 1, - }), - ), - }, - Request::LightClientBootstrap(_) - | Request::LightClientOptimisticUpdate - | Request::LightClientFinalityUpdate => { - unreachable!("Lighthouse never makes an outbound light client request") - } - Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), - Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r), - Request::DataColumnsByRoot(r) => OutboundRequest::DataColumnsByRoot(r), - Request::DataColumnsByRange(r) => OutboundRequest::DataColumnsByRange(r), - Request::Status(s) => OutboundRequest::Status(s), - } - } -} - /// The type of RPC responses the Behaviour informs it has received, and allows for sending. /// // NOTE: This is an application-level wrapper over the lower network level responses that can be @@ -184,47 +117,53 @@ pub enum Response { LightClientOptimisticUpdate(Arc>), /// A response to a LightClientFinalityUpdate request. LightClientFinalityUpdate(Arc>), + /// A response to a LightClientUpdatesByRange request. 
+    LightClientUpdatesByRange(Option<Arc<LightClientUpdate<E>>>),
 }
 
-impl<E: EthSpec> std::convert::From<Response<E>> for RPCCodedResponse<E> {
-    fn from(resp: Response<E>) -> RPCCodedResponse<E> {
+impl<E: EthSpec> std::convert::From<Response<E>> for RpcResponse<E> {
+    fn from(resp: Response<E>) -> RpcResponse<E> {
         match resp {
             Response::BlocksByRoot(r) => match r {
-                Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRoot(b)),
-                None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot),
+                Some(b) => RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(b)),
+                None => RpcResponse::StreamTermination(ResponseTermination::BlocksByRoot),
             },
             Response::BlocksByRange(r) => match r {
-                Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)),
-                None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange),
+                Some(b) => RpcResponse::Success(RpcSuccessResponse::BlocksByRange(b)),
+                None => RpcResponse::StreamTermination(ResponseTermination::BlocksByRange),
             },
             Response::BlobsByRoot(r) => match r {
-                Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRoot(b)),
-                None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRoot),
+                Some(b) => RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(b)),
+                None => RpcResponse::StreamTermination(ResponseTermination::BlobsByRoot),
             },
             Response::BlobsByRange(r) => match r {
-                Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)),
-                None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange),
+                Some(b) => RpcResponse::Success(RpcSuccessResponse::BlobsByRange(b)),
+                None => RpcResponse::StreamTermination(ResponseTermination::BlobsByRange),
             },
             Response::DataColumnsByRoot(r) => match r {
-                Some(d) => RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot(d)),
-                None => RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRoot),
+                Some(d) => RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot(d)),
+                None => RpcResponse::StreamTermination(ResponseTermination::DataColumnsByRoot),
             },
             Response::DataColumnsByRange(r) => match r {
-                Some(d) => RPCCodedResponse::Success(RPCResponse::DataColumnsByRange(d)),
-                None => {
-                    RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRange)
-                }
+                Some(d) => RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange(d)),
+                None => RpcResponse::StreamTermination(ResponseTermination::DataColumnsByRange),
             },
-            Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)),
+            Response::Status(s) => RpcResponse::Success(RpcSuccessResponse::Status(s)),
             Response::LightClientBootstrap(b) => {
-                RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b))
+                RpcResponse::Success(RpcSuccessResponse::LightClientBootstrap(b))
             }
             Response::LightClientOptimisticUpdate(o) => {
-                RPCCodedResponse::Success(RPCResponse::LightClientOptimisticUpdate(o))
+                RpcResponse::Success(RpcSuccessResponse::LightClientOptimisticUpdate(o))
             }
             Response::LightClientFinalityUpdate(f) => {
-                RPCCodedResponse::Success(RPCResponse::LightClientFinalityUpdate(f))
+                RpcResponse::Success(RpcSuccessResponse::LightClientFinalityUpdate(f))
             }
+            Response::LightClientUpdatesByRange(f) => match f {
+                Some(d) => RpcResponse::Success(RpcSuccessResponse::LightClientUpdatesByRange(d)),
+                None => {
+                    RpcResponse::StreamTermination(ResponseTermination::LightClientUpdatesByRange)
+                }
+            },
         }
     }
 }
@@ -245,8 +184,9 @@ impl slog::Value for RequestId {
     }
 }
 
+// This custom impl reduces log boilerplate by not printing `DataColumnsByRootRequestId` on each id log
 impl std::fmt::Display for DataColumnsByRootRequestId {
     fn fmt(&self, f:
&mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) + write!(f, "{} {:?}", self.id, self.requester) } } diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs deleted file mode 100644 index ab2e43630bb..00000000000 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::discovery::Discovery; -use crate::peer_manager::PeerManager; -use crate::rpc::RPC; -use crate::types::SnappyTransform; - -use libp2p::identify; -use libp2p::swarm::behaviour::toggle::Toggle; -use libp2p::swarm::NetworkBehaviour; -use libp2p::upnp::tokio::Behaviour as Upnp; -use types::EthSpec; - -use super::api_types::RequestId; - -pub type SubscriptionFilter = - gossipsub::MaxCountSubscriptionFilter; -pub type Gossipsub = gossipsub::Behaviour; - -#[derive(NetworkBehaviour)] -pub(crate) struct Behaviour -where - E: EthSpec, -{ - /// Keep track of active and pending connections to enforce hard limits. - pub connection_limits: libp2p::connection_limits::Behaviour, - /// The peer manager that keeps track of peer's reputation and status. - pub peer_manager: PeerManager, - /// The Eth2 RPC specified in the wire-0 protocol. - pub eth2_rpc: RPC, - /// Discv5 Discovery protocol. - pub discovery: Discovery, - /// Keep regular connection to peers and disconnect if absent. - // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. - /// Provides IP addresses and peer information. - pub identify: identify::Behaviour, - /// Libp2p UPnP port mapping. - pub upnp: Toggle, - /// The routing pub-sub mechanism for eth2. - pub gossipsub: Gossipsub, -} diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index a97157ff0a4..056b6be24d3 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,4 +1,3 @@ -use self::behaviour::Behaviour; use self::gossip_cache::GossipCache; use crate::config::{gossipsub_config, GossipsubConfigParams, NetworkLoad}; use crate::discovery::{ @@ -11,12 +10,9 @@ use crate::peer_manager::{ use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; use crate::rpc::methods::MetadataRequest; use crate::rpc::{ - methods, BlocksByRangeRequest, GoodbyeReason, HandlerErr, InboundRequest, NetworkParams, - OutboundRequest, Protocol, RPCCodedResponse, RPCError, RPCMessage, RPCReceived, RPCResponse, - RPCResponseErrorCode, ResponseTermination, RPC, + self, GoodbyeReason, HandlerErr, NetworkParams, Protocol, RPCError, RPCMessage, RPCReceived, + RequestType, ResponseTermination, RpcErrorResponse, RpcResponse, RpcSuccessResponse, RPC, }; -use crate::service::behaviour::BehaviourEvent; -pub use crate::service::behaviour::Gossipsub; use crate::types::{ attestation_sync_committee_topics, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, @@ -25,7 +21,7 @@ use crate::types::{ use crate::EnrExt; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; -use api_types::{AppRequestId, PeerRequestId, Request, RequestId, Response}; +use api_types::{AppRequestId, PeerRequestId, RequestId, Response}; use futures::stream::StreamExt; use gossipsub::{ IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, @@ -34,7 +30,8 @@ use gossipsub::{ use 
gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use libp2p::multiaddr::{self, Multiaddr, Protocol as MProtocol}; use libp2p::swarm::behaviour::toggle::Toggle; -use libp2p::swarm::{Swarm, SwarmEvent}; +use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p::upnp::tokio::Behaviour as Upnp; use libp2p::{identify, PeerId, SwarmBuilder}; use slog::{crit, debug, info, o, trace, warn}; use std::num::{NonZeroU8, NonZeroUsize}; @@ -48,10 +45,9 @@ use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; use types::{ChainSpec, ForkName}; -use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; +use utils::{build_transport, strip_peer_id, Context as ServiceContext}; pub mod api_types; -mod behaviour; mod gossip_cache; pub mod gossipsub_scoring_parameters; pub mod utils; @@ -84,7 +80,7 @@ pub enum NetworkEvent { /// Identifier of the request. All responses to this request must use this id. id: PeerRequestId, /// Request the peer sent. - request: Request, + request: rpc::Request, }, ResponseReceived { /// Peer that sent the response. @@ -110,6 +106,41 @@ pub enum NetworkEvent { ZeroListeners, } +pub type Gossipsub = gossipsub::Behaviour; +pub type SubscriptionFilter = + gossipsub::MaxCountSubscriptionFilter; + +#[derive(NetworkBehaviour)] +pub(crate) struct Behaviour +where + E: EthSpec, +{ + // NOTE: The order of the following list of behaviours has meaning, + // `NetworkBehaviour::handle_{pending, established}_{inbound, outbound}` methods + // are called sequentially for each behaviour and they are fallible, + // therefore we want `connection_limits` and `peer_manager` running first, + // which are the behaviours that may reject a connection, so that + // when the subsequent behaviours are called they are certain the connection won't be rejected. + + // + /// Keep track of active and pending connections to enforce hard limits. + pub connection_limits: libp2p::connection_limits::Behaviour, + /// The peer manager that keeps track of peer's reputation and status. + pub peer_manager: PeerManager, + /// The Eth2 RPC specified in the wire-0 protocol. + pub eth2_rpc: RPC, + /// Discv5 Discovery protocol. + pub discovery: Discovery, + /// Keep regular connection to peers and disconnect if absent. + // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. + /// Provides IP addresses and peer information. + pub identify: identify::Behaviour, + /// Libp2p UPnP port mapping. + pub upnp: Toggle, + /// The routing pub-sub mechanism for eth2. + pub gossipsub: Gossipsub, +} + /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. 
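The ordering NOTE in the relocated `Behaviour` struct above is the key design point: the derived `NetworkBehaviour` invokes each sub-behaviour's fallible per-connection callbacks sequentially in declaration order, so gate-keeping behaviours must come first. Below is a toy model of that short-circuiting, with plain closures standing in for sub-behaviours; this is not libp2p's actual trait machinery, only an illustration of the sequencing.

    // Each closure stands in for one sub-behaviour's connection-admission callback.
    fn admit_connection(gates: &[(&str, Box<dyn Fn() -> Result<(), String>>)]) -> Result<(), String> {
        for (name, gate) in gates {
            // Stop at the first behaviour that rejects; later ones never run.
            gate().map_err(|e| format!("{name} rejected the connection: {e}"))?;
        }
        Ok(())
    }

    fn main() {
        let over_limit = true;
        let gates: Vec<(&str, Box<dyn Fn() -> Result<(), String>>)> = vec![
            // connection_limits runs first: it can reject cheaply...
            (
                "connection_limits",
                Box::new(move || {
                    if over_limit {
                        Err("limit reached".into())
                    } else {
                        Ok(())
                    }
                }),
            ),
            // ...so gossipsub/identify/etc. never see a connection that would be dropped.
            ("gossipsub", Box::new(|| Ok(()))),
        ];
        assert!(admit_connection(&gates).is_err());
    }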
@@ -159,37 +190,36 @@ impl Network { .collect(); // set up a collection of variables accessible outside of the network crate - let network_globals = { - // Create an ENR or load from disk if appropriate - let enr = crate::discovery::enr::build_or_load_enr::( - local_keypair.clone(), - &config, - &ctx.enr_fork_id, - &log, - ctx.chain_spec, - )?; - // Construct the metadata - let custody_subnet_count = if ctx.chain_spec.is_peer_das_scheduled() { - if config.subscribe_all_data_column_subnets { - Some(ctx.chain_spec.data_column_sidecar_subnet_count) - } else { - Some(ctx.chain_spec.custody_requirement) - } + // Create an ENR or load from disk if appropriate + let enr = crate::discovery::enr::build_or_load_enr::( + local_keypair.clone(), + &config, + &ctx.enr_fork_id, + &log, + &ctx.chain_spec, + )?; + + // Construct the metadata + let custody_subnet_count = ctx.chain_spec.is_peer_das_scheduled().then(|| { + if config.subscribe_all_data_column_subnets { + ctx.chain_spec.data_column_sidecar_subnet_count } else { - None - }; - let meta_data = - utils::load_or_build_metadata(&config.network_dir, custody_subnet_count, &log); - let globals = NetworkGlobals::new( - enr, - meta_data, - trusted_peers, - config.disable_peer_scoring, - &log, - ctx.chain_spec.clone(), - ); - Arc::new(globals) - }; + ctx.chain_spec.custody_requirement + } + }); + let meta_data = + utils::load_or_build_metadata(&config.network_dir, custody_subnet_count, &log); + let seq_number = *meta_data.seq_number(); + let globals = NetworkGlobals::new( + enr, + meta_data, + trusted_peers, + config.disable_peer_scoring, + &log, + config.clone(), + ctx.chain_spec.clone(), + ); + let network_globals = Arc::new(globals); // Grab our local ENR FORK ID let enr_fork_id = network_globals @@ -207,9 +237,10 @@ impl Network { gossipsub_config_params, ctx.chain_spec.seconds_per_slot, E::slots_per_epoch(), + config.idontwant_message_size_threshold, ); - let score_settings = PeerScoreSettings::new(ctx.chain_spec, gs_config.mesh_n()); + let score_settings = PeerScoreSettings::new(&ctx.chain_spec, gs_config.mesh_n()); let gossip_cache = { let slot_duration = std::time::Duration::from_secs(ctx.chain_spec.seconds_per_slot); @@ -337,6 +368,7 @@ impl Network { config.outbound_rate_limiter_config.clone(), log.clone(), network_params, + seq_number, ); let discovery = { @@ -346,7 +378,7 @@ impl Network { &config, network_globals.clone(), &log, - ctx.chain_spec, + &ctx.chain_spec, ) .await?; // start searching for peers @@ -397,7 +429,7 @@ impl Network { (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) .ceil() as u32, )) - .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); + .with_max_established_per_peer(Some(1)); libp2p::connection_limits::Behaviour::new(limits) }; @@ -934,25 +966,28 @@ impl Network { &mut self, peer_id: PeerId, request_id: AppRequestId, - request: Request, + request: RequestType, ) -> Result<(), (AppRequestId, RPCError)> { // Check if the peer is connected before sending an RPC request if !self.swarm.is_connected(&peer_id) { return Err((request_id, RPCError::Disconnected)); } - self.eth2_rpc_mut().send_request( - peer_id, - RequestId::Application(request_id), - request.into(), - ); + self.eth2_rpc_mut() + .send_request(peer_id, RequestId::Application(request_id), request); Ok(()) } /// Send a successful response to a peer over RPC. 
- pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { + pub fn send_response( + &mut self, + peer_id: PeerId, + id: PeerRequestId, + request_id: rpc::RequestId, + response: Response, + ) { self.eth2_rpc_mut() - .send_response(peer_id, id, response.into()) + .send_response(peer_id, id, request_id, response.into()) } /// Inform the peer that their request produced an error. @@ -960,13 +995,15 @@ impl Network { &mut self, peer_id: PeerId, id: PeerRequestId, - error: RPCResponseErrorCode, + request_id: rpc::RequestId, + error: RpcErrorResponse, reason: String, ) { self.eth2_rpc_mut().send_response( peer_id, id, - RPCCodedResponse::Error(error, reason.into()), + request_id, + RpcResponse::Error(error, reason.into()), ) } @@ -1103,33 +1140,26 @@ impl Network { .sync_committee_bitfield::() .expect("Local discovery must have sync committee bitfield"); - { - // write lock scope - let mut meta_data = self.network_globals.local_metadata.write(); + // write lock scope + let mut meta_data_w = self.network_globals.local_metadata.write(); - *meta_data.seq_number_mut() += 1; - *meta_data.attnets_mut() = local_attnets; - if let Ok(syncnets) = meta_data.syncnets_mut() { - *syncnets = local_syncnets; - } + *meta_data_w.seq_number_mut() += 1; + *meta_data_w.attnets_mut() = local_attnets; + if let Ok(syncnets) = meta_data_w.syncnets_mut() { + *syncnets = local_syncnets; } + let seq_number = *meta_data_w.seq_number(); + let meta_data = meta_data_w.clone(); + + drop(meta_data_w); + self.eth2_rpc_mut().update_seq_number(seq_number); // Save the updated metadata to disk - utils::save_metadata_to_disk( - &self.network_dir, - self.network_globals.local_metadata.read().clone(), - &self.log, - ); + utils::save_metadata_to_disk(&self.network_dir, meta_data, &self.log); } /// Sends a Ping request to the peer. fn ping(&mut self, peer_id: PeerId) { - let ping = crate::rpc::Ping { - data: *self.network_globals.local_metadata.read().seq_number(), - }; - trace!(self.log, "Sending Ping"; "peer_id" => %peer_id); - let id = RequestId::Internal; - self.eth2_rpc_mut() - .send_request(peer_id, id, OutboundRequest::Ping(ping)); + self.eth2_rpc_mut().ping(peer_id, RequestId::Internal); } /// Sends a METADATA request to a peer. @@ -1137,10 +1167,10 @@ impl Network { let event = if self.fork_context.spec.is_peer_das_scheduled() { // Nodes with higher custody will probably start advertising it // before peerdas is activated - OutboundRequest::MetaData(MetadataRequest::new_v3()) + RequestType::MetaData(MetadataRequest::new_v3()) } else { // We always prefer sending V2 requests otherwise - OutboundRequest::MetaData(MetadataRequest::new_v2()) + RequestType::MetaData(MetadataRequest::new_v2()) }; self.eth2_rpc_mut() .send_request(peer_id, RequestId::Internal, event); @@ -1151,12 +1181,14 @@ impl Network { &mut self, _req: MetadataRequest, id: PeerRequestId, + request_id: rpc::RequestId, peer_id: PeerId, ) { let metadata = self.network_globals.local_metadata.read().clone(); // The encoder is responsible for sending the negotiated version of the metadata - let event = RPCCodedResponse::Success(RPCResponse::MetaData(metadata)); - self.eth2_rpc_mut().send_response(peer_id, id, event); + let event = RpcResponse::Success(RpcSuccessResponse::MetaData(metadata)); + self.eth2_rpc_mut() + .send_response(peer_id, id, request_id, event); } // RPC Propagation methods @@ -1178,56 +1210,6 @@ impl Network { } } - /// Convenience function to propagate a request. 
-    #[must_use = "actually return the event"]
-    fn build_request(
-        &mut self,
-        id: PeerRequestId,
-        peer_id: PeerId,
-        request: Request<E>,
-    ) -> NetworkEvent<E> {
-        // Increment metrics
-        match &request {
-            Request::Status(_) => {
-                metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"])
-            }
-            Request::LightClientBootstrap(_) => {
-                metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["light_client_bootstrap"])
-            }
-            Request::LightClientOptimisticUpdate => metrics::inc_counter_vec(
-                &metrics::TOTAL_RPC_REQUESTS,
-                &["light_client_optimistic_update"],
-            ),
-            Request::LightClientFinalityUpdate => metrics::inc_counter_vec(
-                &metrics::TOTAL_RPC_REQUESTS,
-                &["light_client_finality_update"],
-            ),
-            Request::BlocksByRange { .. } => {
-                metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"])
-            }
-            Request::BlocksByRoot { .. } => {
-                metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"])
-            }
-            Request::BlobsByRange { .. } => {
-                metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"])
-            }
-            Request::BlobsByRoot { .. } => {
-                metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"])
-            }
-            Request::DataColumnsByRoot { .. } => {
-                metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_root"])
-            }
-            Request::DataColumnsByRange { .. } => {
-                metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_range"])
-            }
-        }
-        NetworkEvent::RequestReceived {
-            peer_id,
-            id,
-            request,
-        }
-    }
-
     /// Dial cached Enrs in discovery service that are in the given `subnet_id` and aren't
     /// in Connected, Dialing or Banned state.
     fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet, spec: Arc<ChainSpec>) {
@@ -1249,7 +1231,7 @@
                 self.discovery_mut().remove_cached_enr(&enr.peer_id());
                 let peer_id = enr.peer_id();
                 if self.peer_manager_mut().dial_peer(enr) {
-                    debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id);
+                    debug!(self.log, "Added cached ENR peer to dial queue"; "peer_id" => %peer_id);
                 }
             }
         }
@@ -1399,8 +1381,12 @@
     fn inject_rpc_event(&mut self, event: RPCMessage<RequestId, E>) -> Option<NetworkEvent<E>> {
         let peer_id = event.peer_id;
-        // Do not permit Inbound events from peers that are being disconnected, or RPC requests.
-        if !self.peer_manager().is_connected(&peer_id) {
+        // Do not permit Inbound events from peers that are being disconnected or RPC requests,
+        // but allow `RpcFailed` and `HandlerErr::Outbound` to bubble up to sync for state management.
+        if !self.peer_manager().is_connected(&peer_id)
+            && (matches!(event.message, Err(HandlerErr::Inbound { ..
})) + || matches!(event.message, Ok(RPCReceived::Request(..)))) + { debug!( self.log, "Ignoring rpc message of disconnecting peer"; @@ -1409,7 +1395,7 @@ impl Network { return None; } - let handler_id = event.conn_id; + let connection_id = event.conn_id; // The METADATA and PING RPC responses are handled within the behaviour and not propagated match event.message { Err(handler_err) => { @@ -1447,21 +1433,25 @@ impl Network { } } } - Ok(RPCReceived::Request(id, request)) => { - let peer_request_id = (handler_id, id); - match request { + Ok(RPCReceived::Request(request)) => { + match request.r#type { /* Behaviour managed protocols: Ping and Metadata */ - InboundRequest::Ping(ping) => { + RequestType::Ping(ping) => { // inform the peer manager and send the response self.peer_manager_mut().ping_request(&peer_id, ping.data); None } - InboundRequest::MetaData(req) => { + RequestType::MetaData(req) => { // send the requested meta-data - self.send_meta_data_response(req, (handler_id, id), peer_id); + self.send_meta_data_response( + req, + (connection_id, request.substream_id), + request.id, + peer_id, + ); None } - InboundRequest::Goodbye(reason) => { + RequestType::Goodbye(reason) => { // queue for disconnection without a goodbye message debug!( self.log, "Peer sent Goodbye"; @@ -1476,17 +1466,19 @@ impl Network { None } /* Protocols propagated to the Network */ - InboundRequest::Status(msg) => { + RequestType::Status(_) => { // inform the peer manager that we have received a status from a peer self.peer_manager_mut().peer_statusd(&peer_id); + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]); // propagate the STATUS message upwards - let event = - self.build_request(peer_request_id, peer_id, Request::Status(msg)); - Some(event) + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::BlocksByRange(req) => { + RequestType::BlocksByRange(ref req) => { // Still disconnect the peer if the request is naughty. - let mut count = *req.count(); if *req.step() == 0 { self.peer_manager_mut().handle_rpc_error( &peer_id, @@ -1498,135 +1490,164 @@ impl Network { ); return None; } - // return just one block in case the step parameter is used. 
https://github.com/ethereum/consensus-specs/pull/2856 - if *req.step() > 1 { - count = 1; - } - let request = match req { - methods::OldBlocksByRangeRequest::V1(req) => Request::BlocksByRange( - BlocksByRangeRequest::new_v1(req.start_slot, count), - ), - methods::OldBlocksByRangeRequest::V2(req) => Request::BlocksByRange( - BlocksByRangeRequest::new(req.start_slot, count), - ), - }; - let event = self.build_request(peer_request_id, peer_id, request); - Some(event) - } - InboundRequest::BlocksByRoot(req) => { - let event = self.build_request( - peer_request_id, - peer_id, - Request::BlocksByRoot(req), + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["blocks_by_range"], ); - Some(event) - } - InboundRequest::BlobsByRange(req) => { - let event = self.build_request( - peer_request_id, + Some(NetworkEvent::RequestReceived { peer_id, - Request::BlobsByRange(req), - ); - Some(event) + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::BlobsByRoot(req) => { - let event = - self.build_request(peer_request_id, peer_id, Request::BlobsByRoot(req)); - Some(event) + RequestType::BlocksByRoot(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]); + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::DataColumnsByRoot(req) => { - let event = self.build_request( - peer_request_id, + RequestType::BlobsByRange(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]); + Some(NetworkEvent::RequestReceived { peer_id, - Request::DataColumnsByRoot(req), - ); - Some(event) + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::DataColumnsByRange(req) => { - let event = self.build_request( - peer_request_id, + RequestType::BlobsByRoot(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]); + Some(NetworkEvent::RequestReceived { peer_id, - Request::DataColumnsByRange(req), - ); - Some(event) + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::LightClientBootstrap(req) => { - let event = self.build_request( - peer_request_id, + RequestType::DataColumnsByRoot(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["data_columns_by_root"], + ); + Some(NetworkEvent::RequestReceived { peer_id, - Request::LightClientBootstrap(req), + id: (connection_id, request.substream_id), + request, + }) + } + RequestType::DataColumnsByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["data_columns_by_range"], ); - Some(event) + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::LightClientOptimisticUpdate => { - let event = self.build_request( - peer_request_id, + RequestType::LightClientBootstrap(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_bootstrap"], + ); + Some(NetworkEvent::RequestReceived { peer_id, - Request::LightClientOptimisticUpdate, + id: (connection_id, request.substream_id), + request, + }) + } + RequestType::LightClientOptimisticUpdate => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_optimistic_update"], ); - Some(event) + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::LightClientFinalityUpdate => { - let event = self.build_request( - peer_request_id, + 
RequestType::LightClientFinalityUpdate => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_finality_update"], + ); + Some(NetworkEvent::RequestReceived { peer_id, - Request::LightClientFinalityUpdate, + id: (connection_id, request.substream_id), + request, + }) + } + RequestType::LightClientUpdatesByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_updates_by_range"], ); - Some(event) + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } } } Ok(RPCReceived::Response(id, resp)) => { match resp { /* Behaviour managed protocols */ - RPCResponse::Pong(ping) => { + RpcSuccessResponse::Pong(ping) => { self.peer_manager_mut().pong_response(&peer_id, ping.data); None } - RPCResponse::MetaData(meta_data) => { + RpcSuccessResponse::MetaData(meta_data) => { self.peer_manager_mut() .meta_data_response(&peer_id, meta_data); None } /* Network propagated protocols */ - RPCResponse::Status(msg) => { + RpcSuccessResponse::Status(msg) => { // inform the peer manager that we have received a status from a peer self.peer_manager_mut().peer_statusd(&peer_id); // propagate the STATUS message upwards self.build_response(id, peer_id, Response::Status(msg)) } - RPCResponse::BlocksByRange(resp) => { + RpcSuccessResponse::BlocksByRange(resp) => { self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) } - RPCResponse::BlobsByRange(resp) => { + RpcSuccessResponse::BlobsByRange(resp) => { self.build_response(id, peer_id, Response::BlobsByRange(Some(resp))) } - RPCResponse::BlocksByRoot(resp) => { + RpcSuccessResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } - RPCResponse::BlobsByRoot(resp) => { + RpcSuccessResponse::BlobsByRoot(resp) => { self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp))) } - RPCResponse::DataColumnsByRoot(resp) => { + RpcSuccessResponse::DataColumnsByRoot(resp) => { self.build_response(id, peer_id, Response::DataColumnsByRoot(Some(resp))) } - RPCResponse::DataColumnsByRange(resp) => { + RpcSuccessResponse::DataColumnsByRange(resp) => { self.build_response(id, peer_id, Response::DataColumnsByRange(Some(resp))) } // Should never be reached - RPCResponse::LightClientBootstrap(bootstrap) => { + RpcSuccessResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) } - RPCResponse::LightClientOptimisticUpdate(update) => self.build_response( + RpcSuccessResponse::LightClientOptimisticUpdate(update) => self.build_response( id, peer_id, Response::LightClientOptimisticUpdate(update), ), - RPCResponse::LightClientFinalityUpdate(update) => self.build_response( + RpcSuccessResponse::LightClientFinalityUpdate(update) => self.build_response( id, peer_id, Response::LightClientFinalityUpdate(update), ), + RpcSuccessResponse::LightClientUpdatesByRange(update) => self.build_response( + id, + peer_id, + Response::LightClientUpdatesByRange(Some(update)), + ), } } Ok(RPCReceived::EndOfStream(id, termination)) => { @@ -1637,6 +1658,9 @@ impl Network { ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), ResponseTermination::DataColumnsByRange => Response::DataColumnsByRange(None), + ResponseTermination::LightClientUpdatesByRange => { + Response::LightClientUpdatesByRange(None) + } }; self.build_response(id, peer_id, response) } diff --git 
a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs
index 8b6a84ae0cb..f4988e68cd5 100644
--- a/beacon_node/lighthouse_network/src/service/utils.rs
+++ b/beacon_node/lighthouse_network/src/service/utils.rs
@@ -24,16 +24,14 @@ use types::{
 };
 
 pub const NETWORK_KEY_FILENAME: &str = "key";
-/// The maximum simultaneous libp2p connections per peer.
-pub const MAX_CONNECTIONS_PER_PEER: u32 = 1;
 /// The filename to store our local metadata.
 pub const METADATA_FILENAME: &str = "metadata";
 
 pub struct Context<'a> {
-    pub config: &'a NetworkConfig,
+    pub config: Arc<NetworkConfig>,
     pub enr_fork_id: EnrForkId,
     pub fork_context: Arc<ForkContext>,
-    pub chain_spec: &'a ChainSpec,
+    pub chain_spec: Arc<ChainSpec>,
     pub libp2p_registry: Option<&'a mut Registry>,
 }
diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs
index ac78e2cb01e..bcebd02a0ed 100644
--- a/beacon_node/lighthouse_network/src/types/globals.rs
+++ b/beacon_node/lighthouse_network/src/types/globals.rs
@@ -2,12 +2,11 @@
 use crate::peer_manager::peerdb::PeerDB;
 use crate::rpc::{MetaData, MetaDataV3};
 use crate::types::{BackFillState, SyncState};
-use crate::Client;
-use crate::EnrExt;
-use crate::{Enr, GossipTopic, Multiaddr, PeerId};
+use crate::{Client, Enr, EnrExt, GossipTopic, Multiaddr, NetworkConfig, PeerId};
 use itertools::Itertools;
 use parking_lot::RwLock;
 use std::collections::HashSet;
+use std::sync::Arc;
 use types::{ChainSpec, ColumnIndex, DataColumnSubnetId, EthSpec};
 
 pub struct NetworkGlobals<E: EthSpec> {
@@ -27,10 +26,13 @@ pub struct NetworkGlobals<E: EthSpec> {
     pub sync_state: RwLock<SyncState>,
     /// The current state of the backfill sync.
     pub backfill_state: RwLock<BackFillState>,
-    /// The computed custody subnets and columns is stored to avoid re-computing.
-    pub custody_subnets: Vec<DataColumnSubnetId>,
-    pub custody_columns: Vec<ColumnIndex>,
-    pub spec: ChainSpec,
+    /// The computed sampling subnets and columns are stored to avoid re-computing.
+    pub sampling_subnets: Vec<DataColumnSubnetId>,
+    pub sampling_columns: Vec<ColumnIndex>,
+    /// Network-related configuration. Immutable after initialization.
+    pub config: Arc<NetworkConfig>,
+    /// Ethereum chain configuration. Immutable after initialization.
+ pub spec: Arc, } impl NetworkGlobals { @@ -40,26 +42,34 @@ impl NetworkGlobals { trusted_peers: Vec, disable_peer_scoring: bool, log: &slog::Logger, - spec: ChainSpec, + config: Arc, + spec: Arc, ) -> Self { - let (custody_subnets, custody_columns) = if spec.is_peer_das_scheduled() { + let (sampling_subnets, sampling_columns) = if spec.is_peer_das_scheduled() { + let node_id = enr.node_id().raw(); + let custody_subnet_count = local_metadata .custody_subnet_count() .copied() .expect("custody subnet count must be set if PeerDAS is scheduled"); - let custody_subnets = DataColumnSubnetId::compute_custody_subnets::( - enr.node_id().raw(), - custody_subnet_count, + + let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); + + let sampling_subnets = DataColumnSubnetId::compute_custody_subnets::( + node_id, + subnet_sampling_size, &spec, ) - .expect("custody subnet count must be valid") + .expect("sampling subnet count must be valid") .collect::>(); - let custody_columns = custody_subnets + + let sampling_columns = sampling_subnets .iter() .flat_map(|subnet| subnet.columns::(&spec)) .sorted() .collect(); - (custody_subnets, custody_columns) + + (sampling_subnets, sampling_columns) } else { (vec![], vec![]) }; @@ -73,8 +83,9 @@ impl NetworkGlobals { gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::NotRequired), - custody_subnets, - custody_columns, + sampling_subnets, + sampling_columns, + config, spec, } } @@ -160,7 +171,8 @@ impl NetworkGlobals { pub fn new_test_globals( trusted_peers: Vec, log: &slog::Logger, - spec: ChainSpec, + config: Arc, + spec: Arc, ) -> NetworkGlobals { let metadata = MetaData::V3(MetaDataV3 { seq_number: 0, @@ -168,20 +180,21 @@ impl NetworkGlobals { syncnets: Default::default(), custody_subnet_count: spec.custody_requirement, }); - Self::new_test_globals_with_metadata(trusted_peers, metadata, log, spec) + Self::new_test_globals_with_metadata(trusted_peers, metadata, log, config, spec) } pub(crate) fn new_test_globals_with_metadata( trusted_peers: Vec, metadata: MetaData, log: &slog::Logger, - spec: ChainSpec, + config: Arc, + spec: Arc, ) -> NetworkGlobals { use crate::CombinedKeyExt; let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair); let enr = discv5::enr::Enr::builder().build(&enr_key).unwrap(); - NetworkGlobals::new(enr, metadata, trusted_peers, false, log, spec) + NetworkGlobals::new(enr, metadata, trusted_peers, false, log, config, spec) } } @@ -191,32 +204,51 @@ mod test { use types::{Epoch, EthSpec, MainnetEthSpec as E}; #[test] - fn test_custody_subnets() { + fn test_sampling_subnets() { let log = logging::test_logger(); let mut spec = E::default_spec(); spec.eip7594_fork_epoch = Some(Epoch::new(0)); let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; + let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); let metadata = get_metadata(custody_subnet_count); + let config = Arc::new(NetworkConfig::default()); - let globals = - NetworkGlobals::::new_test_globals_with_metadata(vec![], metadata, &log, spec); - assert_eq!(globals.custody_subnets.len(), custody_subnet_count as usize); + let globals = NetworkGlobals::::new_test_globals_with_metadata( + vec![], + metadata, + &log, + config, + Arc::new(spec), + ); + assert_eq!( + globals.sampling_subnets.len(), + subnet_sampling_size as usize + 
); } #[test] - fn test_custody_columns() { + fn test_sampling_columns() { let log = logging::test_logger(); let mut spec = E::default_spec(); spec.eip7594_fork_epoch = Some(Epoch::new(0)); let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; - let custody_columns_count = spec.number_of_columns / 2; + let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); let metadata = get_metadata(custody_subnet_count); + let config = Arc::new(NetworkConfig::default()); - let globals = - NetworkGlobals::::new_test_globals_with_metadata(vec![], metadata, &log, spec); - assert_eq!(globals.custody_columns.len(), custody_columns_count); + let globals = NetworkGlobals::::new_test_globals_with_metadata( + vec![], + metadata, + &log, + config, + Arc::new(spec), + ); + assert_eq!( + globals.sampling_columns.len(), + subnet_sampling_size as usize + ); } fn get_metadata(custody_subnet_count: u64) -> MetaData { diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 1bc99f9a6c4..9f68278e284 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -252,28 +252,25 @@ impl PubsubMessage { Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } GossipKind::BlobSidecar(blob_index) => { - match fork_context.from_context_bytes(gossip_topic.fork_digest) { - Some(ForkName::Deneb | ForkName::Electra) => { + if let Some(fork_name) = + fork_context.from_context_bytes(gossip_topic.fork_digest) + { + if fork_name.deneb_enabled() { let blob_sidecar = Arc::new( BlobSidecar::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ); - Ok(PubsubMessage::BlobSidecar(Box::new(( + return Ok(PubsubMessage::BlobSidecar(Box::new(( *blob_index, blob_sidecar, - )))) + )))); } - Some( - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella, - ) - | None => Err(format!( - "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}", - gossip_topic.fork_digest - )), } + + Err(format!( + "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}", + gossip_topic.fork_digest + )) } GossipKind::DataColumnSidecar(subnet_id) => { match fork_context.from_context_bytes(gossip_topic.fork_digest) { diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index b82e63bd9c0..4322763fc58 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -91,6 +91,14 @@ impl SyncState { pub fn is_synced(&self) -> bool { matches!(self, SyncState::Synced | SyncState::BackFillSyncing { .. }) } + + /// Returns true if the node is *stalled*, i.e. has no synced peers. + /// + /// Usually this state is treated as unsynced, except in some places where we make an exception + /// for single-node testnets where having 0 peers is desired. 
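
[Review note, not part of the patch: the globals.rs change above sizes the sampled subnet set as the larger of the node's custody requirement and `spec.samples_per_slot`, which is what both updated tests assert. A minimal sketch of that sizing rule; the concrete numbers are illustrative assumptions, not values taken from this diff:]

    // The rule from `NetworkGlobals::new`: subscribe to enough subnets to
    // satisfy both custody and per-slot sampling.
    fn subnet_sampling_size(custody_subnet_count: u64, samples_per_slot: u64) -> u64 {
        std::cmp::max(custody_subnet_count, samples_per_slot)
    }

    #[test]
    fn sampling_covers_at_least_samples_per_slot() {
        // E.g. a custody requirement of 4 subnets with 8 samples per slot:
        // the node must sample 8 subnets, not 4.
        assert_eq!(subnet_sampling_size(4, 8), 8);
        // A supernode custodying 64 subnets already exceeds the sampling need.
        assert_eq!(subnet_sampling_size(64, 8), 64);
    }
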
+ pub fn is_stalled(&self) -> bool { + matches!(self, SyncState::Stalled) + } } impl std::fmt::Display for SyncState { diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 660d786169f..84e19c81d04 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -76,7 +76,7 @@ pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { } } -pub fn build_config(mut boot_nodes: Vec) -> NetworkConfig { +pub fn build_config(mut boot_nodes: Vec) -> Arc { let mut config = NetworkConfig::default(); // Find unused ports by using the 0 port. @@ -92,7 +92,7 @@ pub fn build_config(mut boot_nodes: Vec) -> NetworkConfig { config.enr_address = (Some(std::net::Ipv4Addr::LOCALHOST), None); config.boot_nodes_enr.append(&mut boot_nodes); config.network_dir = path.into_path(); - config + Arc::new(config) } pub async fn build_libp2p_instance( @@ -100,7 +100,7 @@ pub async fn build_libp2p_instance( boot_nodes: Vec, log: slog::Logger, fork_name: ForkName, - spec: &ChainSpec, + chain_spec: Arc, ) -> Libp2pInstance { let config = build_config(boot_nodes); // launch libp2p service @@ -109,10 +109,10 @@ pub async fn build_libp2p_instance( let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx); let libp2p_context = lighthouse_network::Context { - config: &config, + config, enr_fork_id: EnrForkId::default(), fork_context: Arc::new(fork_context(fork_name)), - chain_spec: spec, + chain_spec, libp2p_registry: None, }; Libp2pInstance( @@ -142,14 +142,16 @@ pub async fn build_node_pair( rt: Weak, log: &slog::Logger, fork_name: ForkName, - spec: &ChainSpec, + spec: Arc, protocol: Protocol, ) -> (Libp2pInstance, Libp2pInstance) { let sender_log = log.new(o!("who" => "sender")); let receiver_log = log.new(o!("who" => "receiver")); - let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name, spec).await; - let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name, spec).await; + let mut sender = + build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name, spec.clone()).await; + let mut receiver = + build_libp2p_instance(rt, vec![], receiver_log, fork_name, spec.clone()).await; // let the two nodes set up listeners let sender_fut = async { @@ -218,11 +220,13 @@ pub async fn build_linear( log: slog::Logger, n: usize, fork_name: ForkName, - spec: &ChainSpec, + spec: Arc, ) -> Vec { let mut nodes = Vec::with_capacity(n); for _ in 0..n { - nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name, spec).await); + nodes.push( + build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name, spec.clone()).await, + ); } let multiaddrs: Vec = nodes diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 25d249960d2..f721c8477cf 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -3,9 +3,9 @@ mod common; use common::Protocol; -use lighthouse_network::rpc::methods::*; +use lighthouse_network::rpc::{methods::*, RequestType}; use lighthouse_network::service::api_types::AppRequestId; -use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Request, Response}; +use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Response}; use slog::{debug, warn, Level}; use ssz::Encode; use ssz_types::VariableList; 
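
[Review note, not part of the patch: the test helpers above now take `Arc<NetworkConfig>` and `Arc<ChainSpec>` instead of borrowed references, and `build_node_pair`/`build_linear` clone the `Arc` per node. A sketch of why that shape suits these tests; `Spec` is a stand-in for the real `ChainSpec`:]

    use std::sync::Arc;

    // Cloning an Arc copies a pointer, so every spawned node owns a cheap
    // handle to one shared allocation, with no lifetime plumbing through
    // async tasks.
    struct Spec {
        seconds_per_slot: u64,
    }

    fn build_two(spec: Arc<Spec>) -> (Arc<Spec>, Arc<Spec>) {
        (spec.clone(), spec) // both share one allocation
    }
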
@@ -61,7 +61,7 @@ fn test_tcp_status_rpc() { let log = common::build_log(log_level, enable_logging); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); rt.block_on(async { // get sender/receiver @@ -69,13 +69,13 @@ fn test_tcp_status_rpc() { Arc::downgrade(&rt), &log, ForkName::Base, - &spec, + spec, Protocol::Tcp, ) .await; // Dummy STATUS RPC message - let rpc_request = Request::Status(StatusMessage { + let rpc_request = RequestType::Status(StatusMessage { fork_digest: [0; 4], finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), @@ -128,10 +128,10 @@ fn test_tcp_status_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response debug!(log, "Receiver Received"); - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response(peer_id, id, request.id, rpc_response.clone()); } } _ => {} // Ignore other events @@ -163,7 +163,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { let rt = Arc::new(Runtime::new().unwrap()); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); rt.block_on(async { // get sender/receiver @@ -171,15 +171,18 @@ fn test_tcp_blocks_by_range_chunked_rpc() { Arc::downgrade(&rt), &log, ForkName::Bellatrix, - &spec, + spec.clone(), Protocol::Tcp, ) .await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); - - let spec = E::default_spec(); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: 0, + count: messages_to_send, + step: 1, + })); // BlocksByRange Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); @@ -249,7 +252,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for i in 0..messages_to_send { @@ -262,10 +265,20 @@ fn test_tcp_blocks_by_range_chunked_rpc() { } else { rpc_response_bellatrix_small.clone() }; - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlocksByRange(None), + ); } } _ => {} // Ignore other events @@ -300,18 +313,18 @@ fn test_blobs_by_range_chunked_rpc() { rt.block_on(async { // get sender/receiver - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), &log, ForkName::Deneb, - &spec, + spec.clone(), Protocol::Tcp, ) .await; // BlobsByRange Request - let rpc_request = Request::BlobsByRange(BlobsByRangeRequest { + let rpc_request = RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: 0, count: slot_count, }); @@ -369,16 +382,26 @@ fn test_blobs_by_range_chunked_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 0..messages_to_send { // Send first third of responses as base blocks, // second as altair and third as bellatrix. 
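
[Review note, not part of the patch: the recurring test change in this file is that `NetworkEvent::RequestReceived` now carries a full request object, so the receiver compares `request.r#type` against the expected `RequestType` and must echo `request.id` back with every response chunk and with the terminating `None`. A toy sketch of that receiver shape, with stand-in types:]

    struct InboundRequest {
        r#type: u8, // stand-in for RequestType
        id: u64,    // stand-in for the rpc RequestId
    }

    fn handle(req: InboundRequest, expected_type: u8, mut send: impl FnMut(u64, Option<&str>)) {
        if req.r#type == expected_type {
            send(req.id, Some("chunk")); // one call per response chunk
            send(req.id, None);          // stream termination
        }
    }
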
- receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlobsByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlobsByRange(None), + ); } } _ => {} // Ignore other events @@ -410,7 +433,7 @@ fn test_tcp_blocks_by_range_over_limit() { let rt = Arc::new(Runtime::new().unwrap()); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); rt.block_on(async { // get sender/receiver @@ -418,13 +441,18 @@ fn test_tcp_blocks_by_range_over_limit() { Arc::downgrade(&rt), &log, ForkName::Bellatrix, - &spec, + spec.clone(), Protocol::Tcp, ) .await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { + start_slot: 0, + count: messages_to_send, + step: 1, + })); // BlocksByRange Response let full_block = bellatrix_block_large(&common::fork_context(ForkName::Bellatrix), &spec); @@ -462,15 +490,25 @@ fn test_tcp_blocks_by_range_over_limit() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 0..messages_to_send { let rpc_response = rpc_response_bellatrix_large.clone(); - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlocksByRange(None), + ); } } _ => {} // Ignore other events @@ -502,7 +540,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { let rt = Arc::new(Runtime::new().unwrap()); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); rt.block_on(async { // get sender/receiver @@ -510,16 +548,20 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { Arc::downgrade(&rt), &log, ForkName::Base, - &spec, + spec.clone(), Protocol::Tcp, ) .await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: 0, + count: messages_to_send, + step: 1, + })); // BlocksByRange Response - let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); let rpc_response = Response::BlocksByRange(Some(Arc::new(empty_signed))); @@ -586,10 +628,10 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { }, _, )) => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); - message_info = Some((peer_id, id)); + message_info = Some((peer_id, id, request.id)); } } futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required @@ -599,8 +641,8 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { // if we need to send messages send them here. 
This will happen after a delay if message_info.is_some() { messages_sent += 1; - let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); + let (peer_id, stream_id, request_id) = message_info.as_ref().unwrap(); + receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages @@ -631,7 +673,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { let log = common::build_log(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); rt.block_on(async { // get sender/receiver @@ -639,16 +681,20 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { Arc::downgrade(&rt), &log, ForkName::Base, - &spec, + spec.clone(), Protocol::Tcp, ) .await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, 10)); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: 0, + count: 10, + step: 1, + })); // BlocksByRange Response - let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); let rpc_response = Response::BlocksByRange(Some(Arc::new(empty_signed))); @@ -700,15 +746,25 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 1..=messages_to_send { - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlocksByRange(None), + ); } } _ => {} // Ignore other events @@ -739,7 +795,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { let messages_to_send = 6; let log = common::build_log(log_level, enable_logging); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver @@ -748,13 +804,13 @@ fn test_tcp_blocks_by_root_chunked_rpc() { Arc::downgrade(&rt), &log, ForkName::Bellatrix, - &spec, + spec.clone(), Protocol::Tcp, ) .await; // BlocksByRoot Request - let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( + let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new( vec![ Hash256::zero(), Hash256::zero(), @@ -831,7 +887,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response debug!(log, "Receiver got request"); @@ -844,11 +900,16 @@ fn test_tcp_blocks_by_root_chunked_rpc() { } else { rpc_response_bellatrix_small.clone() }; - receiver.send_response(peer_id, id, rpc_response); + receiver.send_response(peer_id, id, request.id, rpc_response); debug!(log, "Sending message"); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlocksByRange(None), + ); debug!(log, "Send stream term"); } } @@ -877,7 +938,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { let 
extra_messages_to_send: u64 = 10; let log = common::build_log(log_level, enable_logging); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver @@ -886,13 +947,13 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { Arc::downgrade(&rt), &log, ForkName::Base, - &spec, + spec.clone(), Protocol::Tcp, ) .await; // BlocksByRoot Request - let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( + let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new( vec![ Hash256::zero(), Hash256::zero(), @@ -975,10 +1036,10 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { }, _, )) => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); - message_info = Some((peer_id, id)); + message_info = Some((peer_id, id, request.id)); } } futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required @@ -988,8 +1049,8 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { // if we need to send messages send them here. This will happen after a delay if message_info.is_some() { messages_sent += 1; - let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); + let (peer_id, stream_id, request_id) = message_info.as_ref().unwrap(); + receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages @@ -1016,12 +1077,12 @@ fn goodbye_test(log_level: Level, enable_logging: bool, protocol: Protocol) { let rt = Arc::new(Runtime::new().unwrap()); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec, protocol) + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, spec, protocol) .await; // build the sender future diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 6a81eb33f08..500cd23faeb 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -8,10 +8,14 @@ edition = { workspace = true } sloggers = { workspace = true } genesis = { workspace = true } matches = "0.1.8" +serde_json = { workspace = true } slog-term = { workspace = true } slog-async = { workspace = true } eth2 = { workspace = true } gossipsub = { workspace = true } +eth2_network_config = { workspace = true } +kzg = { workspace = true } +bls = { workspace = true } [dependencies] alloy-primitives = { workspace = true } @@ -34,7 +38,7 @@ smallvec = { workspace = true } rand = { workspace = true } fnv = { workspace = true } alloy-rlp = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } logging = { workspace = true } task_executor = { workspace = true } igd-next = "0.14" @@ -54,3 +58,4 @@ disable-backfill = [] fork_from_env = ["beacon_chain/fork_from_env"] portable = ["beacon_chain/portable"] test_logger = [] +ci_logger = [] diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 9e42aa8e924..4b7e8a50a36 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -5,11 +5,11 @@ use beacon_chain::{ sync_committee_verification::Error as SyncCommitteeError, 
}; use fnv::FnvHashMap; -pub use lighthouse_metrics::*; use lighthouse_network::{ peer_manager::peerdb::client::ClientKind, types::GossipKind, GossipTopic, Gossipsub, NetworkGlobals, }; +pub use metrics::*; use std::sync::{Arc, LazyLock}; use strum::IntoEnumIterator; use types::EthSpec; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 62f1371c811..4d875cb4a14 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,6 +4,7 @@ use crate::{ service::NetworkMessage, sync::SyncMessage, }; +use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::store::Error; @@ -18,13 +19,7 @@ use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, NotifyExecutionLayer, }; -use beacon_chain::{ - blob_verification::{GossipBlobError, GossipVerifiedBlob}, - data_availability_checker::DataColumnsToPublish, -}; -use lighthouse_network::{ - Client, MessageAcceptance, MessageId, PeerAction, PeerId, PubsubMessage, ReportSource, -}; +use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use operation_pool::ReceivedPreCapella; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; @@ -171,26 +166,6 @@ impl NetworkBeaconProcessor { }) } - pub(crate) fn handle_data_columns_to_publish( - &self, - data_columns_to_publish: DataColumnsToPublish, - ) { - if let Some(data_columns_to_publish) = data_columns_to_publish { - self.send_network_message(NetworkMessage::Publish { - messages: data_columns_to_publish - .iter() - .map(|d| { - let subnet = DataColumnSubnetId::from_column_index::( - d.index as usize, - &self.chain.spec, - ); - PubsubMessage::DataColumnSidecar(Box::new((subnet, d.clone()))) - }) - .collect(), - }); - } - } - /// Send a message on `message_tx` that the `message_id` sent by `peer_id` should be propagated on /// the gossip network. /// @@ -696,8 +671,7 @@ impl NetworkBeaconProcessor { column_sidecar, )); } - GossipDataColumnError::KzgNotInitialized - | GossipDataColumnError::PubkeyCacheTimeout + GossipDataColumnError::PubkeyCacheTimeout | GossipDataColumnError::BeaconChainError(_) => { crit!( self.log, @@ -712,6 +686,9 @@ impl NetworkBeaconProcessor { | GossipDataColumnError::InvalidSubnetId { .. } | GossipDataColumnError::InvalidInclusionProof { .. } | GossipDataColumnError::InvalidKzgProof { .. } + | GossipDataColumnError::UnexpectedDataColumn + | GossipDataColumnError::InvalidColumnIndex(_) + | GossipDataColumnError::InconsistentCommitmentsOrProofLength | GossipDataColumnError::NotFinalizedDescendant { .. 
} => { debug!( self.log, @@ -839,9 +816,7 @@ impl NetworkBeaconProcessor { blob_sidecar, )); } - GossipBlobError::KzgNotInitialized - | GossipBlobError::PubkeyCacheTimeout - | GossipBlobError::BeaconChainError(_) => { + GossipBlobError::PubkeyCacheTimeout | GossipBlobError::BeaconChainError(_) => { crit!( self.log, "Internal error when verifying blob sidecar"; @@ -939,7 +914,10 @@ impl NetworkBeaconProcessor { let blob_slot = verified_blob.slot(); let blob_index = verified_blob.id().index; - let result = self.chain.process_gossip_blob(verified_blob).await; + let result = self + .chain + .process_gossip_blob(verified_blob, || Ok(())) + .await; match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { @@ -966,7 +944,7 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); } - Err(BlockError::BlockIsAlreadyKnown(_)) => { + Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, "Ignoring gossip blob already imported"; @@ -1016,12 +994,10 @@ impl NetworkBeaconProcessor { match self .chain - .process_gossip_data_columns(vec![verified_data_column]) + .process_gossip_data_columns(vec![verified_data_column], || Ok(())) .await { - Ok((availability, data_columns_to_publish)) => { - self.handle_data_columns_to_publish(data_columns_to_publish); - + Ok(availability) => { match availability { AvailabilityProcessingStatus::Imported(block_root) => { // Note: Reusing block imported metric here @@ -1049,11 +1025,11 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); - // Potentially trigger reconstruction + self.attempt_data_column_reconstruction(block_root).await; } } } - Err(BlockError::BlockIsAlreadyKnown(_)) => { + Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, "Ignoring gossip column already imported"; @@ -1245,7 +1221,10 @@ impl NetworkBeaconProcessor { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } - Err(BlockError::BlockIsAlreadyKnown(_)) => { + Err( + BlockError::DuplicateFullyImported(_) + | BlockError::DuplicateImportStatusUnknown(..), + ) => { debug!( self.log, "Gossip block is already known"; diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 7f551c544c7..76f5e886ff2 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -2,27 +2,33 @@ use crate::sync::manager::BlockProcessType; use crate::sync::SamplingId; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::{builder::Witness, eth1_chain::CachingEth1Backend, BeaconChain}; +use beacon_chain::{ + builder::Witness, eth1_chain::CachingEth1Backend, AvailabilityProcessingStatus, BeaconChain, +}; use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer}; use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorChannels, BeaconProcessorSend, DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, }; +use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, + LightClientUpdatesByRangeRequest, }; +use lighthouse_network::rpc::{RequestId, SubstreamId}; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, - Client, MessageId, 
NetworkGlobals, PeerId, PeerRequestId, + Client, MessageId, NetworkGlobals, PeerId, PubsubMessage, }; -use slog::{debug, Logger}; +use slog::{debug, error, trace, Logger}; use slot_clock::ManualSlotClock; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use store::MemoryStore; use task_executor::TaskExecutor; +use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::{self, error::TrySendError}; use types::*; @@ -596,13 +602,21 @@ impl NetworkBeaconProcessor { pub fn send_blocks_by_range_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { processor - .handle_blocks_by_range_request(peer_id, request_id, request) + .handle_blocks_by_range_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) .await; }; @@ -616,13 +630,21 @@ impl NetworkBeaconProcessor { pub fn send_blocks_by_roots_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { processor - .handle_blocks_by_root_request(peer_id, request_id, request) + .handle_blocks_by_root_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) .await; }; @@ -636,12 +658,21 @@ impl NetworkBeaconProcessor { pub fn send_blobs_by_range_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_blobs_by_range_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_blobs_by_range_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -653,12 +684,21 @@ impl NetworkBeaconProcessor { pub fn send_blobs_by_roots_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_blobs_by_root_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_blobs_by_root_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -670,12 +710,21 @@ impl NetworkBeaconProcessor { pub fn send_data_columns_by_roots_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: DataColumnsByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_data_columns_by_root_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_data_columns_by_root_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -687,12 +736,21 @@ impl NetworkBeaconProcessor { pub fn send_data_columns_by_range_request( self: &Arc, peer_id: PeerId, - request_id: 
PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: DataColumnsByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_data_columns_by_range_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_data_columns_by_range_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -704,12 +762,21 @@ impl NetworkBeaconProcessor { pub fn send_light_client_bootstrap_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: LightClientBootstrapRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_light_client_bootstrap(peer_id, request_id, request); + let process_fn = move || { + processor.handle_light_client_bootstrap( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -721,11 +788,19 @@ impl NetworkBeaconProcessor { pub fn send_light_client_optimistic_update_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_light_client_optimistic_update(peer_id, request_id); + let process_fn = move || { + processor.handle_light_client_optimistic_update( + peer_id, + connection_id, + substream_id, + request_id, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -737,10 +812,19 @@ impl NetworkBeaconProcessor { pub fn send_light_client_finality_update_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || processor.handle_light_client_finality_update(peer_id, request_id); + let process_fn = move || { + processor.handle_light_client_finality_update( + peer_id, + connection_id, + substream_id, + request_id, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -748,10 +832,36 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process a `LightClientUpdatesByRange` request from the RPC network. + pub fn send_light_client_updates_by_range_request( + self: &Arc, + peer_id: PeerId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + request: LightClientUpdatesByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_light_client_updates_by_range( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::LightClientUpdatesByRangeRequest(Box::new(process_fn)), + }) + } + /// Send a message to `sync_tx`. /// /// Creates a log if there is an internal error. 
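
[Review note, not part of the patch: every `send_*_request` helper above replaces the single `PeerRequestId` with the triple `(ConnectionId, SubstreamId, RequestId)`. A sketch of what the triple jointly addresses; the struct and names are illustrative, not the crate's types:]

    struct ConnectionId(u64); // which connection to the peer
    struct SubstreamId(u64);  // which substream within that connection
    struct RequestId(u64);    // which request within that substream

    /// Enough information to route a response back to the exact inbound stream.
    struct InboundRequestKey {
        connection: ConnectionId,
        substream: SubstreamId,
        request: RequestId,
    }
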
-    fn send_sync_message(&self, message: SyncMessage<T::EthSpec>) {
+    pub(crate) fn send_sync_message(&self, message: SyncMessage<T::EthSpec>) {
         self.sync_tx.send(message).unwrap_or_else(|e| {
             debug!(self.log, "Could not send message to the sync service";
@@ -767,6 +877,75 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
                 "error" => %e)
         });
     }
+
+    /// Attempt to reconstruct all data columns if the following conditions are satisfied:
+    /// - Our custody requirement is all columns
+    /// - We have >= 50% of columns, but not all columns
+    ///
+    /// Returns `Some(AvailabilityProcessingStatus)` if reconstruction is successfully performed,
+    /// otherwise returns `None`.
+    async fn attempt_data_column_reconstruction(
+        &self,
+        block_root: Hash256,
+    ) -> Option<AvailabilityProcessingStatus> {
+        let result = self.chain.reconstruct_data_columns(block_root).await;
+        match result {
+            Ok(Some((availability_processing_status, data_columns_to_publish))) => {
+                self.send_network_message(NetworkMessage::Publish {
+                    messages: data_columns_to_publish
+                        .iter()
+                        .map(|d| {
+                            let subnet = DataColumnSubnetId::from_column_index::<T::EthSpec>(
+                                d.index as usize,
+                                &self.chain.spec,
+                            );
+                            PubsubMessage::DataColumnSidecar(Box::new((subnet, d.clone())))
+                        })
+                        .collect(),
+                });
+
+                match &availability_processing_status {
+                    AvailabilityProcessingStatus::Imported(hash) => {
+                        debug!(
+                            self.log,
+                            "Block components available via reconstruction";
+                            "result" => "imported block and custody columns",
+                            "block_hash" => %hash,
+                        );
+                        self.chain.recompute_head_at_current_slot().await;
+                    }
+                    AvailabilityProcessingStatus::MissingComponents(_, _) => {
+                        debug!(
+                            self.log,
+                            "Block components still missing after reconstruction";
+                            "result" => "imported all custody columns",
+                            "block_hash" => %block_root,
+                        );
+                    }
+                }
+
+                Some(availability_processing_status)
+            }
+            Ok(None) => {
+                // reason is tracked via the `KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL` metric
+                trace!(
+                    self.log,
+                    "Reconstruction not required for block";
+                    "block_hash" => %block_root,
+                );
+                None
+            }
+            Err(e) => {
+                error!(
+                    self.log,
+                    "Error during data column reconstruction";
+                    "block_root" => %block_root,
+                    "error" => ?e
+                );
+                None
+            }
+        }
+    }
 }
 
 type TestBeaconChainType =
@@ -779,6 +958,7 @@ impl<E: EthSpec> NetworkBeaconProcessor<TestBeaconChainType<E>> {
     // processor (but not much else).
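
[Review note, not part of the patch: the doc comment on `attempt_data_column_reconstruction` states the precondition in prose. A self-contained sketch of that condition; the column count of 128 is an assumption for illustration, not a value taken from this diff:]

    const NUMBER_OF_COLUMNS: usize = 128; // assumed PeerDAS column count

    // Reconstruction only makes sense for a supernode that custodies all
    // columns and currently holds at least half of them, but not yet all.
    fn should_attempt_reconstruction(custody_column_count: usize, columns_held: usize) -> bool {
        custody_column_count == NUMBER_OF_COLUMNS     // we custody everything
            && columns_held * 2 >= NUMBER_OF_COLUMNS  // >= 50% held, enough to erasure-decode
            && columns_held < NUMBER_OF_COLUMNS       // something is actually missing
    }
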
pub fn null_for_testing( network_globals: Arc>, + sync_tx: UnboundedSender>, chain: Arc>>, executor: TaskExecutor, log: Logger, @@ -791,7 +971,6 @@ impl NetworkBeaconProcessor> { } = <_>::default(); let (network_tx, _network_rx) = mpsc::unbounded_channel(); - let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); let network_beacon_processor = Self { beacon_processor_send: beacon_processor_tx, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 0c98f5c17e5..6d32806713d 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -4,18 +4,20 @@ use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use itertools::process_results; +use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, }; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; +use methods::LightClientUpdatesByRangeRequest; use slog::{debug, error, warn}; use slot_clock::SlotClock; use std::collections::{hash_map::Entry, HashMap}; use std::sync::Arc; use tokio_stream::StreamExt; use types::blob_sidecar::BlobIdentifier; -use types::{Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, Slot}; +use types::{Epoch, EthSpec, FixedBytesExtended, Hash256, Slot}; impl NetworkBeaconProcessor { /* Auxiliary functions */ @@ -33,11 +35,14 @@ impl NetworkBeaconProcessor { &self, peer_id: PeerId, response: Response, - id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) { self.send_network_message(NetworkMessage::SendResponse { peer_id, - id, + request_id, + id: (connection_id, substream_id), response, }) } @@ -45,15 +50,17 @@ impl NetworkBeaconProcessor { pub fn send_error_response( &self, peer_id: PeerId, - error: RPCResponseErrorCode, + error: RpcErrorResponse, reason: String, id: PeerRequestId, + request_id: RequestId, ) { self.send_network_message(NetworkMessage::SendErrorResponse { peer_id, error, reason, id, + request_id, }) } @@ -131,14 +138,24 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_root_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRootRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, self.clone() - .handle_blocks_by_root_request_inner(peer_id, request_id, request) + .handle_blocks_by_root_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) .await, Response::BlocksByRoot, ); @@ -148,9 +165,11 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_root_request_inner( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRootRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { let log_results = |peer_id, requested_blocks, send_block_count| { debug!( self.log, @@ -169,10 +188,7 @@ impl NetworkBeaconProcessor { Ok(block_stream) => block_stream, Err(e) => { error!(self.log, "Error getting block stream"; 
"error" => ?e); - return Err(( - RPCResponseErrorCode::ServerError, - "Error getting block stream", - )); + return Err((RpcErrorResponse::ServerError, "Error getting block stream")); } }; // Fetching blocks is async because it may have to hit the execution layer for payloads. @@ -183,6 +199,8 @@ impl NetworkBeaconProcessor { self.send_response( peer_id, Response::BlocksByRoot(Some(block.clone())), + connection_id, + substream_id, request_id, ); send_block_count += 1; @@ -204,7 +222,7 @@ impl NetworkBeaconProcessor { ); log_results(peer_id, requested_blocks, send_block_count); return Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Execution layer not synced", )); } @@ -228,13 +246,23 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_root_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRootRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, - self.handle_blobs_by_root_request_inner(peer_id, request_id, request), + self.handle_blobs_by_root_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ), Response::BlobsByRoot, ); } @@ -243,9 +271,11 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_root_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRootRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { let Some(requested_root) = request.blob_ids.as_slice().first().map(|id| id.block_root) else { // No blob ids requested. @@ -263,7 +293,13 @@ impl NetworkBeaconProcessor { for id in request.blob_ids.as_slice() { // First attempt to get the blobs from the RPC cache. 
if let Ok(Some(blob)) = self.chain.data_availability_checker.get_blob(id) { - self.send_response(peer_id, Response::BlobsByRoot(Some(blob)), request_id); + self.send_response( + peer_id, + Response::BlobsByRoot(Some(blob)), + connection_id, + substream_id, + request_id, + ); send_blob_count += 1; } else { let BlobIdentifier { @@ -285,6 +321,8 @@ impl NetworkBeaconProcessor { self.send_response( peer_id, Response::BlobsByRoot(Some(blob_sidecar.clone())), + connection_id, + substream_id, request_id, ); send_blob_count += 1; @@ -320,13 +358,23 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_root_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: DataColumnsByRootRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, - self.handle_data_columns_by_root_request_inner(peer_id, request_id, request), + self.handle_data_columns_by_root_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ), Response::DataColumnsByRoot, ); } @@ -335,9 +383,11 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_root_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: DataColumnsByRootRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { let mut send_data_column_count = 0; for data_column_id in request.data_column_ids.as_slice() { @@ -350,6 +400,8 @@ impl NetworkBeaconProcessor { self.send_response( peer_id, Response::DataColumnsByRoot(Some(data_column)), + connection_id, + substream_id, request_id, ); } @@ -361,10 +413,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err(( - RPCResponseErrorCode::ServerError, - "Error getting data column", - )); + return Err((RpcErrorResponse::ServerError, "Error getting data column")); } } } @@ -380,20 +429,123 @@ impl NetworkBeaconProcessor { Ok(()) } + pub fn handle_light_client_updates_by_range( + self: &Arc, + peer_id: PeerId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + request: LightClientUpdatesByRangeRequest, + ) { + self.terminate_response_stream( + peer_id, + connection_id, + substream_id, + request_id, + self.clone() + .handle_light_client_updates_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ), + Response::LightClientUpdatesByRange, + ); + } + + /// Handle a `LightClientUpdatesByRange` request from the peer. 
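
[Review note, not part of the patch: every handler in this file is now split into an outer wrapper and an `_inner` function returning `Result<(), (RpcErrorResponse, &'static str)>`; the wrapper hands that result to `terminate_response_stream`, which sends either the final `None` stream terminator or an error response. A toy sketch of that contract, with simplified stand-in types:]

    enum Outcome {
        Terminator,            // e.g. send Response::BlobsByRange(None)
        Error(&'static str),   // send an RPC error response with the reason
    }

    fn terminate_response_stream(result: Result<(), &'static str>) -> Outcome {
        match result {
            Ok(()) => Outcome::Terminator,
            Err(reason) => Outcome::Error(reason),
        }
    }
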
+    pub fn handle_light_client_updates_by_range_request_inner(
+        self: Arc<Self>,
+        peer_id: PeerId,
+        connection_id: ConnectionId,
+        substream_id: SubstreamId,
+        request_id: RequestId,
+        req: LightClientUpdatesByRangeRequest,
+    ) -> Result<(), (RpcErrorResponse, &'static str)> {
+        debug!(self.log, "Received LightClientUpdatesByRange Request";
+            "peer_id" => %peer_id,
+            "count" => req.count,
+            "start_period" => req.start_period,
+        );
+
+        // Should not send more than max light client updates
+        let max_request_size: u64 = req.max_requested();
+        if req.count > max_request_size {
+            return Err((
+                RpcErrorResponse::InvalidRequest,
+                "Request exceeded max size",
+            ));
+        }
+
+        let lc_updates = match self
+            .chain
+            .get_light_client_updates(req.start_period, req.count)
+        {
+            Ok(lc_updates) => lc_updates,
+            Err(e) => {
+                error!(self.log, "Unable to obtain light client updates";
+                    "request" => ?req,
+                    "peer" => %peer_id,
+                    "error" => ?e
+                );
+                return Err((RpcErrorResponse::ServerError, "Database error"));
+            }
+        };
+
+        for lc_update in lc_updates.iter() {
+            self.send_network_message(NetworkMessage::SendResponse {
+                peer_id,
+                response: Response::LightClientUpdatesByRange(Some(Arc::new(lc_update.clone()))),
+                request_id,
+                id: (connection_id, substream_id),
+            });
+        }
+
+        let lc_updates_sent = lc_updates.len();
+
+        if lc_updates_sent < req.count as usize {
+            debug!(
+                self.log,
+                "LightClientUpdatesByRange outgoing response processed";
+                "peer" => %peer_id,
+                "info" => "Failed to return all requested light client updates. The peer may have requested data ahead of what's currently available",
+                "start_period" => req.start_period,
+                "requested" => req.count,
+                "returned" => lc_updates_sent
+            );
+        } else {
+            debug!(
+                self.log,
+                "LightClientUpdatesByRange outgoing response processed";
+                "peer" => %peer_id,
+                "start_period" => req.start_period,
+                "requested" => req.count,
+                "returned" => lc_updates_sent
+            );
+        }
+
+        Ok(())
+    }
+
     /// Handle a `LightClientBootstrap` request from the peer.
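
[Review note, not part of the patch: the new handler above caps `req.count` at `req.max_requested()` before touching the database. A sketch of that check in isolation; `MAX_REQUEST_LIGHT_CLIENT_UPDATES` is an assumed stand-in for whatever `max_requested()` returns in the real crate:]

    const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; // assumed cap

    // Reject oversized requests up front with the same reason string the
    // handler uses, rather than streaming a partial result.
    fn validate_count(count: u64) -> Result<(), &'static str> {
        if count > MAX_REQUEST_LIGHT_CLIENT_UPDATES {
            Err("Request exceeded max size")
        } else {
            Ok(())
        }
    }
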
pub fn handle_light_client_bootstrap( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: LightClientBootstrapRequest, ) { self.terminate_response_single_item( peer_id, + connection_id, + substream_id, request_id, match self.chain.get_light_client_bootstrap(&request.root) { Ok(Some((bootstrap, _))) => Ok(Arc::new(bootstrap)), Ok(None) => Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Bootstrap not available".to_string(), )), Err(e) => { @@ -402,10 +554,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - Err(( - RPCResponseErrorCode::ResourceUnavailable, - format!("{:?}", e), - )) + Err((RpcErrorResponse::ResourceUnavailable, format!("{:?}", e))) } }, Response::LightClientBootstrap, @@ -416,10 +565,14 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_optimistic_update( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) { self.terminate_response_single_item( peer_id, + connection_id, + substream_id, request_id, match self .chain @@ -428,7 +581,7 @@ impl NetworkBeaconProcessor { { Some(update) => Ok(Arc::new(update)), None => Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Latest optimistic update not available".to_string(), )), }, @@ -440,10 +593,14 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_finality_update( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) { self.terminate_response_single_item( peer_id, + connection_id, + substream_id, request_id, match self .chain @@ -452,7 +609,7 @@ impl NetworkBeaconProcessor { { Some(update) => Ok(Arc::new(update)), None => Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Latest finality update not available".to_string(), )), }, @@ -464,14 +621,24 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_range_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlocksByRangeRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, self.clone() - .handle_blocks_by_range_request_inner(peer_id, request_id, req) + .handle_blocks_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + req, + ) .await, Response::BlocksByRange, ); @@ -481,9 +648,11 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_range_request_inner( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlocksByRangeRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!(self.log, "Received BlocksByRange Request"; "peer_id" => %peer_id, "count" => req.count(), @@ -495,19 +664,15 @@ impl NetworkBeaconProcessor { self.chain .epoch() .map_or(self.chain.spec.max_request_blocks, |epoch| { - match self.chain.spec.fork_name_at_epoch(epoch) { - ForkName::Deneb | ForkName::Electra => { - self.chain.spec.max_request_blocks_deneb - } - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella => self.chain.spec.max_request_blocks, + if 
self.chain.spec.fork_name_at_epoch(epoch).deneb_enabled() { + self.chain.spec.max_request_blocks_deneb + } else { + self.chain.spec.max_request_blocks } }); if *req.count() > max_request_size { return Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Request exceeded max size", )); } @@ -527,7 +692,7 @@ impl NetworkBeaconProcessor { "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot ); - return Err((RPCResponseErrorCode::ResourceUnavailable, "Backfilling")); + return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { error!(self.log, "Unable to obtain root iter"; @@ -535,7 +700,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -566,7 +731,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Iteration error")); + return Err((RpcErrorResponse::ServerError, "Iteration error")); } }; @@ -607,7 +772,7 @@ impl NetworkBeaconProcessor { Ok(block_stream) => block_stream, Err(e) => { error!(self.log, "Error getting block stream"; "error" => ?e); - return Err((RPCResponseErrorCode::ServerError, "Iterator error")); + return Err((RpcErrorResponse::ServerError, "Iterator error")); } }; @@ -624,8 +789,9 @@ impl NetworkBeaconProcessor { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: Response::BlocksByRange(Some(block.clone())), - id: request_id, + id: (connection_id, substream_id), }); } } @@ -638,7 +804,7 @@ impl NetworkBeaconProcessor { "request_root" => ?root ); log_results(req, peer_id, blocks_sent); - return Err((RPCResponseErrorCode::ServerError, "Database inconsistency")); + return Err((RpcErrorResponse::ServerError, "Database inconsistency")); } Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { debug!( @@ -650,7 +816,7 @@ impl NetworkBeaconProcessor { log_results(req, peer_id, blocks_sent); // send the stream terminator return Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Execution layer not synced", )); } @@ -677,7 +843,7 @@ impl NetworkBeaconProcessor { } log_results(req, peer_id, blocks_sent); // send the stream terminator - return Err((RPCResponseErrorCode::ServerError, "Failed fetching blocks")); + return Err((RpcErrorResponse::ServerError, "Failed fetching blocks")); } } } @@ -690,13 +856,23 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_range_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlobsByRangeRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, - self.handle_blobs_by_range_request_inner(peer_id, request_id, req), + self.handle_blobs_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + req, + ), Response::BlobsByRange, ); } @@ -705,9 +881,11 @@ impl NetworkBeaconProcessor { fn handle_blobs_by_range_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlobsByRangeRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!(self.log, "Received BlobsByRange Request"; "peer_id" => %peer_id, "count" => 
req.count, @@ -717,7 +895,7 @@ impl NetworkBeaconProcessor { // Should not send more than max request blocks if req.max_blobs_requested::() > self.chain.spec.max_request_blob_sidecars { return Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", )); } @@ -728,10 +906,7 @@ impl NetworkBeaconProcessor { Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), None => { debug!(self.log, "Deneb fork is disabled"); - return Err(( - RPCResponseErrorCode::InvalidRequest, - "Deneb fork is disabled", - )); + return Err((RpcErrorResponse::InvalidRequest, "Deneb fork is disabled")); } }; @@ -752,12 +927,12 @@ impl NetworkBeaconProcessor { return if data_availability_boundary_slot < oldest_blob_slot { Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "blobs pruned within boundary", )) } else { Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Req outside availability period", )) }; @@ -776,7 +951,7 @@ impl NetworkBeaconProcessor { "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot ); - return Err((RPCResponseErrorCode::ResourceUnavailable, "Backfilling")); + return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { error!(self.log, "Unable to obtain root iter"; @@ -784,7 +959,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -821,7 +996,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -854,7 +1029,8 @@ impl NetworkBeaconProcessor { self.send_network_message(NetworkMessage::SendResponse { peer_id, response: Response::BlobsByRange(Some(blob_sidecar.clone())), - id: request_id, + request_id, + id: (connection_id, substream_id), }); } } @@ -870,7 +1046,7 @@ impl NetworkBeaconProcessor { log_results(peer_id, req, blobs_sent); return Err(( - RPCResponseErrorCode::ServerError, + RpcErrorResponse::ServerError, "No blobs and failed fetching corresponding block", )); } @@ -885,13 +1061,23 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_range_request( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: DataColumnsByRangeRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, - self.handle_data_columns_by_range_request_inner(peer_id, request_id, req), + self.handle_data_columns_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + req, + ), Response::DataColumnsByRange, ); } @@ -900,9 +1086,11 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_range_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: DataColumnsByRangeRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!(self.log, "Received DataColumnsByRange Request"; "peer_id" => %peer_id, "count" => req.count, @@ -912,7 +1100,7 @@ impl NetworkBeaconProcessor { // Should not send more than max request data columns if req.max_requested::() > 
self.chain.spec.max_request_data_column_sidecars { return Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", )); } @@ -923,10 +1111,7 @@ impl NetworkBeaconProcessor { Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), None => { debug!(self.log, "Deneb fork is disabled"); - return Err(( - RPCResponseErrorCode::InvalidRequest, - "Deneb fork is disabled", - )); + return Err((RpcErrorResponse::InvalidRequest, "Deneb fork is disabled")); } }; @@ -948,12 +1133,12 @@ impl NetworkBeaconProcessor { return if data_availability_boundary_slot < oldest_data_column_slot { Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "blobs pruned within boundary", )) } else { Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Req outside availability period", )) }; @@ -972,7 +1157,7 @@ impl NetworkBeaconProcessor { "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot ); - return Err((RPCResponseErrorCode::ResourceUnavailable, "Backfilling")); + return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { error!(self.log, "Unable to obtain root iter"; @@ -980,7 +1165,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -1017,7 +1202,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -1032,10 +1217,11 @@ impl NetworkBeaconProcessor { data_columns_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: Response::DataColumnsByRange(Some( data_column_sidecar.clone(), )), - id: request_id, + id: (connection_id, substream_id), }); } Ok(None) => {} // no-op @@ -1049,7 +1235,7 @@ impl NetworkBeaconProcessor { "error" => ?e ); return Err(( - RPCResponseErrorCode::ServerError, + RpcErrorResponse::ServerError, "No data columns and failed fetching corresponding block", )); } @@ -1080,8 +1266,10 @@ impl NetworkBeaconProcessor { fn terminate_response_single_item Response>( &self, peer_id: PeerId, - request_id: PeerRequestId, - result: Result, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + result: Result, into_response: F, ) { match result { @@ -1091,12 +1279,19 @@ impl NetworkBeaconProcessor { // https://github.com/sigp/lighthouse/blob/3058b96f2560f1da04ada4f9d8ba8e5651794ff6/beacon_node/lighthouse_network/src/rpc/handler.rs#L555-L558 self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: into_response(resp), - id: request_id, + id: (connection_id, substream_id), }); } Err((error_code, reason)) => { - self.send_error_response(peer_id, error_code, reason, request_id); + self.send_error_response( + peer_id, + error_code, + reason, + (connection_id, substream_id), + request_id, + ); } } } @@ -1106,18 +1301,27 @@ impl NetworkBeaconProcessor { fn terminate_response_stream) -> Response>( &self, peer_id: PeerId, - request_id: PeerRequestId, - result: Result<(), (RPCResponseErrorCode, &'static str)>, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + result: Result<(), (RpcErrorResponse, &'static str)>, into_response: F, ) { match result { Ok(_) => 
self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: into_response(None), - id: request_id, + id: (connection_id, substream_id), }), Err((error_code, reason)) => { - self.send_error_response(peer_id, error_code, reason.into(), request_id); + self.send_error_response( + peer_id, + error_code, + reason.into(), + (connection_id, substream_id), + request_id, + ); } } } diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index c21054dab50..82d06c20f8e 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -294,7 +294,7 @@ impl NetworkBeaconProcessor { "slot" => %slot, ); } - Err(BlockError::BlockIsAlreadyKnown(_)) => { + Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, "Blobs have already been imported"; @@ -327,35 +327,38 @@ impl NetworkBeaconProcessor { _seen_timestamp: Duration, process_type: BlockProcessType, ) { - let result = self + let mut result = self .chain .process_rpc_custody_columns(custody_columns) .await; match &result { - Ok((availability, data_columns_to_publish)) => { - self.handle_data_columns_to_publish(data_columns_to_publish.clone()); - - match availability { - AvailabilityProcessingStatus::Imported(hash) => { - debug!( - self.log, - "Block components retrieved"; - "result" => "imported block and custody columns", - "block_hash" => %hash, - ); - self.chain.recompute_head_at_current_slot().await; - } - AvailabilityProcessingStatus::MissingComponents(_, _) => { - debug!( - self.log, - "Missing components over rpc"; - "block_hash" => %block_root, - ); + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(hash) => { + debug!( + self.log, + "Block components retrieved"; + "result" => "imported block and custody columns", + "block_hash" => %hash, + ); + self.chain.recompute_head_at_current_slot().await; + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + self.log, + "Missing components over rpc"; + "block_hash" => %block_root, + ); + // Attempt reconstruction here before notifying sync, to avoid sending out more requests + // that we may no longer need. 
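The reconstruction fallback described in the comment above re-assigns `result` so that a successful reconstruction upgrades `MissingComponents` to an imported status before sync is notified (the `if let` that follows). A minimal standalone sketch of that upgrade-in-place pattern, assuming a simplified `Availability` enum and a `reconstruct` closure in place of the real `AvailabilityProcessingStatus` and chain APIs:

#[derive(Debug, Clone, PartialEq)]
enum Availability {
    Imported(u64),          // stand-in for AvailabilityProcessingStatus::Imported(block_root)
    MissingComponents(u64), // stand-in for MissingComponents(slot, block_root)
}

// If components are still missing, attempt reconstruction and upgrade the
// result in place, so sync is never asked to issue requests it no longer needs.
fn upgrade_with_reconstruction(
    mut result: Result<Availability, String>,
    reconstruct: impl Fn(u64) -> Option<Availability>,
) -> Result<Availability, String> {
    if let Ok(Availability::MissingComponents(root)) = result.clone() {
        if let Some(availability) = reconstruct(root) {
            result = Ok(availability);
        }
    }
    result
}

fn main() {
    let reconstruct = |root| Some(Availability::Imported(root));
    let upgraded =
        upgrade_with_reconstruction(Ok(Availability::MissingComponents(7)), reconstruct);
    assert_eq!(upgraded, Ok(Availability::Imported(7)));
}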
+ if let Some(availability) = + self.attempt_data_column_reconstruction(block_root).await + { + result = Ok(availability) } } - } - Err(BlockError::BlockIsAlreadyKnown(_)) => { + }, + Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, "Custody columns have already been imported"; @@ -374,7 +377,7 @@ impl NetworkBeaconProcessor { self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, - result: result.map(|(r, _)| r).into(), + result: result.into(), }); } @@ -385,8 +388,8 @@ impl NetworkBeaconProcessor { data_columns: Vec>>, _seen_timestamp: Duration, ) -> Result<(), String> { - let kzg = self.chain.kzg.as_ref().ok_or("Kzg not initialized")?; - verify_kzg_for_data_column_list(data_columns.iter(), kzg).map_err(|err| format!("{err:?}")) + verify_kzg_for_data_column_list(data_columns.iter(), &self.chain.kzg) + .map_err(|err| format!("{err:?}")) } /// Process a sampling completed event, inserting it into fork-choice @@ -561,8 +564,7 @@ impl NetworkBeaconProcessor { }) .collect::>(), Err(e) => match e { - AvailabilityCheckError::StoreError(_) - | AvailabilityCheckError::KzgNotInitialized => { + AvailabilityCheckError::StoreError(_) => { return ( 0, Err(ChainSegmentFailed { @@ -716,7 +718,8 @@ impl NetworkBeaconProcessor { peer_action: Some(PeerAction::LowToleranceError), }) } - BlockError::BlockIsAlreadyKnown(_) => { + BlockError::DuplicateFullyImported(_) + | BlockError::DuplicateImportStatusUnknown(..) => { // This can happen for many reasons. Head sync's can download multiples and parent // lookups can download blocks before range sync Ok(()) diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 391175ccd41..9d774d97c15 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -16,12 +16,12 @@ use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::BlobsByRangeRequest; -use lighthouse_network::rpc::SubstreamId; +use lighthouse_network::rpc::{RequestId, SubstreamId}; use lighthouse_network::{ discv5::enr::{self, CombinedKey}, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, - Client, MessageId, NetworkGlobals, PeerId, Response, + Client, MessageId, NetworkConfig, NetworkGlobals, PeerId, Response, }; use slot_clock::SlotClock; use std::iter::Iterator; @@ -91,6 +91,7 @@ impl TestRig { // This allows for testing voluntary exits without building out a massive chain. 
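The `verify_kzg_for_data_column_list(..., &self.chain.kzg)` change in this hunk drops an `Option` around the KZG context, which is why `AvailabilityCheckError::KzgNotInitialized` disappears as well. A minimal before/after sketch of that shape, with `Kzg` as a hypothetical stand-in type:

use std::sync::Arc;

struct Kzg; // hypothetical stand-in

struct ChainBefore { kzg: Option<Kzg> }
struct ChainAfter { kzg: Arc<Kzg> }

fn verify_before(chain: &ChainBefore) -> Result<(), String> {
    let _kzg = chain.kzg.as_ref().ok_or("Kzg not initialized")?;
    Ok(())
}

fn verify_after(chain: &ChainAfter) -> Result<(), String> {
    let _kzg = &chain.kzg; // always present; the error path no longer exists
    Ok(())
}

fn main() {
    assert!(verify_before(&ChainBefore { kzg: None }).is_err());
    assert!(verify_after(&ChainAfter { kzg: Arc::new(Kzg) }).is_ok());
}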
let mut spec = test_spec::(); spec.shard_committee_period = 2; + let spec = Arc::new(spec); let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) @@ -204,12 +205,14 @@ impl TestRig { }); let enr_key = CombinedKey::generate_secp256k1(); let enr = enr::Enr::builder().build(&enr_key).unwrap(); + let network_config = Arc::new(NetworkConfig::default()); let network_globals = Arc::new(NetworkGlobals::new( enr, meta_data, vec![], false, &log, + network_config, spec, )); @@ -357,7 +360,9 @@ impl TestRig { self.network_beacon_processor .send_blobs_by_range_request( PeerId::random(), - (ConnectionId::new_unchecked(42), SubstreamId::new(24)), + ConnectionId::new_unchecked(42), + SubstreamId::new(24), + RequestId::new_unchecked(0), BlobsByRangeRequest { start_slot: 0, count, @@ -1134,6 +1139,7 @@ async fn test_blobs_by_range() { peer_id: _, response: Response::BlobsByRange(blob), id: _, + request_id: _, } = next { if blob.is_some() { diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index 522ff0536eb..1e1420883e8 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -81,7 +81,8 @@ mod tests { MinimalEthSpec, MemoryStore, MemoryStore, - > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap(); + > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal().into(), log) + .unwrap(); let enrs = vec![Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap()]; store .put_item(&DHT_DB_KEY, &PersistedDht { enrs: enrs.clone() }) diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 26c1d14f020..e1badfda9d5 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -15,10 +15,12 @@ use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, }; use futures::prelude::*; +use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::*; use lighthouse_network::{ + rpc, service::api_types::{AppRequestId, SyncRequestId}, - MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, + MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Response, }; use logging::TimeLatch; use slog::{crit, debug, o, trace}; @@ -56,7 +58,7 @@ pub enum RouterMessage { RPCRequestReceived { peer_id: PeerId, id: PeerRequestId, - request: Request, + request: rpc::Request, }, /// An RPC response has been received. RPCResponseReceived { @@ -191,51 +193,136 @@ impl Router { /* RPC - Related functionality */ /// A new RPC request has been received from the network. 
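The test changes here wrap `spec` and the network config in `Arc`s (matching the `Arc<ChainSpec>`/`Arc<NetworkConfig>` signatures elsewhere in this diff), so long-lived services can share one allocation instead of borrowing. A minimal sketch with hypothetical stand-in types:

use std::sync::Arc;

#[derive(Default)]
struct NetworkConfig {
    upnp_enabled: bool,
}

struct NetworkService {
    config: Arc<NetworkConfig>,
}

impl NetworkService {
    // Taking Arc<NetworkConfig> instead of &NetworkConfig lets the service
    // outlive the caller's stack frame while sharing a single allocation.
    fn build(config: Arc<NetworkConfig>) -> Self {
        Self { config }
    }
}

fn main() {
    let config = Arc::new(NetworkConfig::default());
    let service = NetworkService::build(config.clone());
    assert_eq!(Arc::strong_count(&config), 2);
    assert!(!service.config.upnp_enabled);
}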
- fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: PeerRequestId, request: Request) { + fn handle_rpc_request( + &mut self, + peer_id: PeerId, + request_id: PeerRequestId, + rpc_request: rpc::Request, + ) { if !self.network_globals.peers.read().is_connected(&peer_id) { - debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?request); + debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?rpc_request); return; } - match request { - Request::Status(status_message) => { - self.on_status_request(peer_id, request_id, status_message) - } - Request::BlocksByRange(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_blocks_by_range_request(peer_id, request_id, request), + match rpc_request.r#type { + RequestType::Status(status_message) => self.on_status_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + status_message, ), - Request::BlocksByRoot(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_blocks_by_roots_request(peer_id, request_id, request), + RequestType::BlocksByRange(request) => { + // return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856 + let mut count = *request.count(); + if *request.step() > 1 { + count = 1; + } + let blocks_request = match request { + methods::OldBlocksByRangeRequest::V1(req) => { + BlocksByRangeRequest::new_v1(req.start_slot, count) + } + methods::OldBlocksByRangeRequest::V2(req) => { + BlocksByRangeRequest::new(req.start_slot, count) + } + }; + + self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blocks_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + blocks_request, + ), + ) + } + RequestType::BlocksByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blocks_by_roots_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::BlobsByRange(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_blobs_by_range_request(peer_id, request_id, request), + RequestType::BlobsByRange(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blobs_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::BlobsByRoot(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_blobs_by_roots_request(peer_id, request_id, request), + RequestType::BlobsByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blobs_by_roots_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::DataColumnsByRoot(request) => self.handle_beacon_processor_send_result( + RequestType::DataColumnsByRoot(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_data_columns_by_roots_request(peer_id, request_id, request), + .send_data_columns_by_roots_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::DataColumnsByRange(request) => self.handle_beacon_processor_send_result( + RequestType::DataColumnsByRange(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_data_columns_by_range_request(peer_id, request_id, request), + 
.send_data_columns_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( + RequestType::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_bootstrap_request(peer_id, request_id, request), + .send_light_client_bootstrap_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::LightClientOptimisticUpdate => self.handle_beacon_processor_send_result( + RequestType::LightClientOptimisticUpdate => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_optimistic_update_request(peer_id, request_id), + .send_light_client_optimistic_update_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + ), ), - Request::LightClientFinalityUpdate => self.handle_beacon_processor_send_result( + RequestType::LightClientFinalityUpdate => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_finality_update_request(peer_id, request_id), + .send_light_client_finality_update_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + ), ), + RequestType::LightClientUpdatesByRange(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_light_client_updates_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), + ), + _ => {} } } @@ -275,7 +362,8 @@ impl Router { // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) - | Response::LightClientFinalityUpdate(_) => unreachable!(), + | Response::LightClientFinalityUpdate(_) + | Response::LightClientUpdatesByRange(_) => unreachable!(), } } @@ -461,7 +549,7 @@ impl Router { let status_message = status_message(&self.chain); debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); self.network - .send_processor_request(peer_id, Request::Status(status_message)); + .send_processor_request(peer_id, RequestType::Status(status_message)); } fn send_to_sync(&mut self, message: SyncMessage) { @@ -493,7 +581,9 @@ impl Router { pub fn on_status_request( &mut self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, status: StatusMessage, ) { debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); @@ -502,6 +592,7 @@ impl Router { self.network.send_response( peer_id, Response::Status(status_message(&self.chain)), + (connection_id, substream_id), request_id, ); @@ -745,7 +836,7 @@ impl HandlerNetworkContext { } /// Sends a request to the network task. - pub fn send_processor_request(&mut self, peer_id: PeerId, request: Request) { + pub fn send_processor_request(&mut self, peer_id: PeerId, request: RequestType) { self.inform_network(NetworkMessage::SendRequest { peer_id, request_id: AppRequestId::Router, @@ -754,8 +845,15 @@ impl HandlerNetworkContext { } /// Sends a response to the network task. 
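Every handler in this hunk now receives the `(ConnectionId, SubstreamId)` pair identifying the inbound substream plus the peer-assigned `RequestId`, and a response must carry all three back. A minimal sketch of that addressing with hypothetical stand-ins for the lighthouse_network types (the values mirror the test fixtures above: connection 42, substream 24, request 0):

#[derive(Debug, Clone, Copy, PartialEq)]
struct ConnectionId(u64);
#[derive(Debug, Clone, Copy, PartialEq)]
struct SubstreamId(u64);
#[derive(Debug, Clone, Copy, PartialEq)]
struct RequestId(u64);

#[derive(Debug)]
struct SendResponse {
    request_id: RequestId,           // echoes the peer-assigned id of the inbound request
    id: (ConnectionId, SubstreamId), // the exact substream to answer on
    payload: &'static str,
}

fn respond(
    connection_id: ConnectionId,
    substream_id: SubstreamId,
    request_id: RequestId,
    payload: &'static str,
) -> SendResponse {
    SendResponse { request_id, id: (connection_id, substream_id), payload }
}

fn main() {
    let msg = respond(ConnectionId(42), SubstreamId(24), RequestId(0), "status");
    assert_eq!(msg.id, (ConnectionId(42), SubstreamId(24)));
    assert_eq!(msg.request_id, RequestId(0));
    assert_eq!(msg.payload, "status");
}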
- pub fn send_response(&mut self, peer_id: PeerId, response: Response, id: PeerRequestId) { + pub fn send_response( + &mut self, + peer_id: PeerId, + response: Response, + id: PeerRequestId, + request_id: RequestId, + ) { self.inform_network(NetworkMessage::SendResponse { + request_id, peer_id, id, response, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 5782fb00b6c..5a66cb7f30d 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -14,12 +14,13 @@ use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; use futures::StreamExt; +use lighthouse_network::rpc::{RequestId, RequestType}; use lighthouse_network::service::Network; use lighthouse_network::types::GossipKind; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ - rpc::{GoodbyeReason, RPCResponseErrorCode}, - Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, + rpc::{GoodbyeReason, RpcErrorResponse}, + Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Response, Subnet, }; use lighthouse_network::{ service::api_types::AppRequestId, @@ -61,19 +62,21 @@ pub enum NetworkMessage { /// Send an RPC request to the libp2p service. SendRequest { peer_id: PeerId, - request: Request, + request: RequestType, request_id: AppRequestId, }, /// Send a successful Response to the libp2p service. SendResponse { peer_id: PeerId, + request_id: RequestId, response: Response, id: PeerRequestId, }, /// Sends an error response to an RPC request. SendErrorResponse { peer_id: PeerId, - error: RPCResponseErrorCode, + request_id: RequestId, + error: RpcErrorResponse, reason: String, id: PeerRequestId, }, @@ -205,7 +208,7 @@ pub struct NetworkService { impl NetworkService { async fn build( beacon_chain: Arc>, - config: &NetworkConfig, + config: Arc, executor: task_executor::TaskExecutor, libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, @@ -271,10 +274,10 @@ impl NetworkService { // construct the libp2p service context let service_context = Context { - config, + config: config.clone(), enr_fork_id, fork_context: fork_context.clone(), - chain_spec: &beacon_chain.spec, + chain_spec: beacon_chain.spec.clone(), libp2p_registry, }; @@ -318,12 +321,12 @@ impl NetworkService { let attestation_service = AttestationService::new( beacon_chain.clone(), network_globals.local_enr().node_id(), - config, + &config, &network_log, ); // sync committee subnet service let sync_committee_service = - SyncCommitteeService::new(beacon_chain.clone(), config, &network_log); + SyncCommitteeService::new(beacon_chain.clone(), &config, &network_log); // create a timer for updating network metrics let metrics_update = tokio::time::interval(Duration::from_secs(METRIC_UPDATE_INTERVAL)); @@ -368,7 +371,7 @@ impl NetworkService { #[allow(clippy::type_complexity)] pub async fn start( beacon_chain: Arc>, - config: &NetworkConfig, + config: Arc, executor: task_executor::TaskExecutor, libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, @@ -623,16 +626,19 @@ impl NetworkService { peer_id, response, id, + request_id, } => { - self.libp2p.send_response(peer_id, id, response); + self.libp2p.send_response(peer_id, id, request_id, response); } NetworkMessage::SendErrorResponse { peer_id, error, id, + request_id, reason, } => { - self.libp2p.send_error_response(peer_id, id, error, reason); + self.libp2p + 
.send_error_response(peer_id, id, request_id, error, reason); } NetworkMessage::ValidationResult { propagation_source, @@ -807,7 +813,7 @@ impl NetworkService { } } } else { - for column_subnet in &self.network_globals.custody_subnets { + for column_subnet in &self.network_globals.sampling_subnets { for fork_digest in self.required_gossip_fork_digests() { let gossip_kind = Subnet::DataColumn(*column_subnet).into(); let topic = diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index fec5f3f83f7..b55992c624e 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -73,6 +73,7 @@ mod tests { config.discv5_config.table_filter = |_| true; // Do not ignore local IPs config.upnp_enabled = false; config.boot_nodes_enr = enrs.clone(); + let config = Arc::new(config); runtime.block_on(async move { // Create a new network service which implicitly gets dropped at the // end of the block. @@ -86,7 +87,7 @@ mod tests { let _network_service = NetworkService::start( beacon_chain.clone(), - &config, + config, executor, None, beacon_processor_tx, @@ -125,7 +126,7 @@ mod tests { // Build beacon chain. let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec.clone()) + .spec(spec.clone().into()) .deterministic_keypairs(8) .fresh_ephemeral_store() .mock_execution_layer() @@ -149,12 +150,13 @@ mod tests { config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); config.discv5_config.table_filter = |_| true; // Do not ignore local IPs config.upnp_enabled = false; + let config = Arc::new(config); let beacon_processor_channels = BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); NetworkService::build( beacon_chain.clone(), - &config, + config, executor.clone(), None, beacon_processor_channels.beacon_processor_tx, diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index e8d9218ec4c..a784b05ea7a 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -2,6 +2,7 @@ use super::*; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::CachingEth1Backend, + test_utils::get_kzg, BeaconChain, }; use futures::prelude::*; @@ -37,7 +38,7 @@ pub struct TestBeaconChain { impl TestBeaconChain { pub fn new_with_system_clock() -> Self { - let spec = MainnetEthSpec::default_spec(); + let spec = Arc::new(MainnetEthSpec::default_spec()); let keypairs = generate_deterministic_keypairs(1); @@ -45,12 +46,14 @@ impl TestBeaconChain { let store = HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap(); + let kzg = get_kzg(&spec); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let test_runtime = TestRuntime::default(); let chain = Arc::new( - BeaconChainBuilder::new(MainnetEthSpec) + BeaconChainBuilder::new(MainnetEthSpec, kzg.clone()) .logger(log.clone()) .custom_spec(spec.clone()) .store(Arc::new(store)) diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index c7c043f53f8..5e336d9c38e 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -13,7 +13,7 @@ use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; use types::{DataColumnSidecarList, SignedBeaconBlock}; -use super::single_block_lookup::DownloadResult; +use 
super::single_block_lookup::{ComponentRequests, DownloadResult}; use super::SingleLookupId; #[derive(Debug, Copy, Clone)] @@ -42,7 +42,7 @@ pub trait RequestState { &self, id: Id, peer_id: PeerId, - downloaded_block: Option>>, + expected_blobs: usize, cx: &mut SyncNetworkContext, ) -> Result; @@ -61,7 +61,7 @@ pub trait RequestState { fn response_type() -> ResponseType; /// A getter for the `BlockRequestState` or `BlobRequestState` associated with this trait. - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self; + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str>; /// A getter for a reference to the `SingleLookupRequestState` associated with this trait. fn get_state(&self) -> &SingleLookupRequestState; @@ -77,7 +77,7 @@ impl RequestState for BlockRequestState { &self, id: SingleLookupId, peer_id: PeerId, - _: Option>>, + _: usize, cx: &mut SyncNetworkContext, ) -> Result { cx.block_lookup_request(id, peer_id, self.requested_block_root) @@ -107,8 +107,8 @@ impl RequestState for BlockRequestState { fn response_type() -> ResponseType { ResponseType::Block } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { - &mut request.block_request_state + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + Ok(&mut request.block_request_state) } fn get_state(&self) -> &SingleLookupRequestState { &self.state @@ -125,10 +125,10 @@ impl RequestState for BlobRequestState { &self, id: Id, peer_id: PeerId, - downloaded_block: Option>>, + expected_blobs: usize, cx: &mut SyncNetworkContext, ) -> Result { - cx.blob_lookup_request(id, peer_id, self.block_root, downloaded_block) + cx.blob_lookup_request(id, peer_id, self.block_root, expected_blobs) .map_err(LookupRequestError::SendFailedNetwork) } @@ -150,8 +150,13 @@ impl RequestState for BlobRequestState { fn response_type() -> ResponseType { ResponseType::Blob } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { - &mut request.blob_request_state + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + match &mut request.component_requests { + ComponentRequests::WaitingForBlock => Err("waiting for block"), + ComponentRequests::ActiveBlobRequest(request, _) => Ok(request), + ComponentRequests::ActiveCustodyRequest { .. } => Err("expecting custody request"), + ComponentRequests::NotNeeded { .. } => Err("not needed"), + } } fn get_state(&self) -> &SingleLookupRequestState { &self.state @@ -169,10 +174,10 @@ impl RequestState for CustodyRequestState { id: Id, // TODO(das): consider selecting peers that have custody but are in this set _peer_id: PeerId, - downloaded_block: Option>>, + _: usize, cx: &mut SyncNetworkContext, ) -> Result { - cx.custody_lookup_request(id, self.block_root, downloaded_block) + cx.custody_lookup_request(id, self.block_root) .map_err(LookupRequestError::SendFailedNetwork) } @@ -200,8 +205,13 @@ impl RequestState for CustodyRequestState { fn response_type() -> ResponseType { ResponseType::CustodyColumn } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { - &mut request.custody_request_state + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + match &mut request.component_requests { + ComponentRequests::WaitingForBlock => Err("waiting for block"), + ComponentRequests::ActiveBlobRequest { .. 
} => Err("expecting blob request"), + ComponentRequests::ActiveCustodyRequest(request) => Ok(request), + ComponentRequests::NotNeeded { .. } => Err("not needed"), + } } fn get_state(&self) -> &SingleLookupRequestState { &self.state diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index e31adb783c9..5a11bca4814 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -28,8 +28,11 @@ use super::network_context::{PeerGroup, RpcResponseError, SyncNetworkContext}; use crate::metrics; use crate::sync::block_lookups::common::ResponseType; use crate::sync::block_lookups::parent_chain::find_oldest_fork_ancestor; +use crate::sync::SyncMessage; use beacon_chain::block_verification_types::AsBlock; -use beacon_chain::data_availability_checker::AvailabilityCheckErrorCategory; +use beacon_chain::data_availability_checker::{ + AvailabilityCheckError, AvailabilityCheckErrorCategory, +}; use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; pub use common::RequestState; use fnv::FnvHashMap; @@ -47,13 +50,14 @@ use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; pub mod common; pub mod parent_chain; mod single_block_lookup; -#[cfg(test)] -mod tests; /// The maximum depth we will search for a parent block. In principle we should have sync'd any /// canonical chain to its head once the peer connects. A chain should not appear where it's depth /// is further back than the most recent head slot. -pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; +/// +/// Have the same value as range's sync tolerance to consider a peer synced. Once sync lookup +/// reaches the maximum depth it will force trigger range sync. +pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE; const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 4; @@ -252,22 +256,59 @@ impl BlockLookups { // blocks on top of A forming A -> C. The malicious peer forces us to fetch C // from it, which will result in parent A hitting the chain_too_long error. Then // the valid chain A -> B is dropped too. - if let Ok(block_to_drop) = find_oldest_fork_ancestor(parent_chains, chain_idx) { - // Drop all lookups descending from the child of the too long parent chain - if let Some((lookup_id, lookup)) = self + // + // `find_oldest_fork_ancestor` should never return Err, unwrapping to tip for + // complete-ness + let parent_chain_tip = parent_chain.tip; + let block_to_drop = + find_oldest_fork_ancestor(parent_chains, chain_idx).unwrap_or(parent_chain_tip); + // Drop all lookups descending from the child of the too long parent chain + if let Some((lookup_id, lookup)) = self + .single_block_lookups + .iter() + .find(|(_, l)| l.block_root() == block_to_drop) + { + // If a lookup chain is too long, we can't distinguish a valid chain from a + // malicious one. We must attempt to sync this chain to not lose liveness. If + // the chain grows too long, we stop lookup sync and transition this head to + // forward range sync. We need to tell range sync which head to sync to, and + // from which peers. The lookup of the very tip of this chain may contain zero + // peers if it's the parent-child lookup. 
So we do a bit of a trick here: + // - Tell range sync to sync to the tip's root (if available, else its ancestor) + // - But use all peers in the ancestor lookup, which should have at least one + // peer, and its peer set is a strict superset of the tip's lookup. + if let Some((_, tip_lookup)) = self .single_block_lookups .iter() - .find(|(_, l)| l.block_root() == block_to_drop) + .find(|(_, l)| l.block_root() == parent_chain_tip) { - for &peer_id in lookup.all_peers() { - cx.report_peer( - peer_id, - PeerAction::LowToleranceError, - "chain_too_long", - ); - } - self.drop_lookup_and_children(*lookup_id); + cx.send_sync_message(SyncMessage::AddPeersForceRangeSync { + peers: lookup.all_peers().copied().collect(), + head_slot: tip_lookup.peek_downloaded_block_slot(), + head_root: parent_chain_tip, + }); + } else { + // Should never happen, log error and continue the lookup drop + error!(self.log, "Unable to transition lookup to range sync"; + "error" => "Parent chain tip lookup not found", + "block_root" => ?parent_chain_tip + ); } + + // Do not downscore peers here. Because we can't distinguish a valid chain from + // a malicious one we may penalize honest peers for attempting to discover a + // valid chain for us. Until blocks_by_range allows specifying a tip, for example with + // https://github.com/ethereum/consensus-specs/pull/3845 we will have poor + // attributability. A peer can send us garbage blocks over blocks_by_root, and + // then correct blocks via blocks_by_range. + + self.drop_lookup_and_children(*lookup_id); + } else { + // Should never happen + error!(self.log, "Unable to transition lookup to range sync"; + "error" => "Block to drop lookup not found", + "block_root" => ?block_to_drop + ); } return false; @@ -407,7 +448,9 @@ impl BlockLookups { }; let block_root = lookup.block_root(); - let request_state = R::request_state_mut(lookup).get_state_mut(); + let request_state = R::request_state_mut(lookup) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))? + .get_state_mut(); match response { Ok((response, peer_group, seen_timestamp)) => { @@ -502,7 +545,9 @@ impl BlockLookups { }; let block_root = lookup.block_root(); - let request_state = R::request_state_mut(lookup).get_state_mut(); + let request_state = R::request_state_mut(lookup) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))? + .get_state_mut(); debug!( self.log, @@ -515,7 +560,7 @@ impl BlockLookups { let action = match result { BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) - | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown(_)) => { + | BlockProcessingResult::Err(BlockError::DuplicateFullyImported(..)) => { // Successfully imported request_state.on_processing_success()?; Action::Continue @@ -539,6 +584,16 @@ impl BlockLookups { Action::Retry } } + BlockProcessingResult::Err(BlockError::DuplicateImportStatusUnknown(..)) => { + // This is unreachable because RPC blocks do not undergo gossip verification, and + // this error can *only* come from gossip verification. + error!( + self.log, + "Single block lookup hit unreachable condition"; + "block_root" => ?block_root + ); + Action::Drop + } BlockProcessingResult::Ignored => { // Beacon processor signalled to ignore the block processing result. // This implies that the cpu is overloaded. Drop the request.
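A minimal standalone sketch of the "tip root, ancestor peers" choice described above, reduced to plain data with hypothetical `Lookup`, `Root`, and `PeerId` stand-ins:

use std::collections::HashSet;

type Root = u64;
type PeerId = u32;

struct Lookup {
    block_root: Root,
    peers: HashSet<PeerId>,
}

// The tip lookup may have zero peers (parent-child lookups), while the
// ancestor's peer set is a strict superset of the tip's. Sync to the tip's
// root, but advertise the ancestor's peers.
fn force_range_sync_target<'a>(
    ancestor: &'a Lookup,
    tip: &'a Lookup,
) -> (Root, &'a HashSet<PeerId>) {
    (tip.block_root, &ancestor.peers)
}

fn main() {
    let ancestor = Lookup { block_root: 1, peers: HashSet::from([10, 11]) };
    let tip = Lookup { block_root: 9, peers: HashSet::new() };
    let (head_root, peers) = force_range_sync_target(&ancestor, &tip);
    assert_eq!(head_root, 9);
    assert_eq!(peers.len(), 2);
}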
@@ -591,8 +646,16 @@ impl BlockLookups { other => { debug!(self.log, "Invalid lookup component"; "block_root" => ?block_root, "component" => ?R::response_type(), "error" => ?other); let peer_group = request_state.on_processing_failure()?; - // TOOD(das): only downscore peer subgroup that provided the invalid proof - for peer in peer_group.all() { + let peers_to_penalize: Vec<_> = match other { + // Note: currently only InvalidColumn errors have index granularity, + // but future errors may follow the same pattern. Generalize this + // pattern with https://github.com/sigp/lighthouse/pull/6321 + BlockError::AvailabilityCheck( + AvailabilityCheckError::InvalidColumn(index, _), + ) => peer_group.of_index(index as usize).collect(), + _ => peer_group.all().collect(), + }; + for peer in peers_to_penalize { cx.report_peer( *peer, PeerAction::MidToleranceError, diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 73ffcd43845..d701cbbb8d3 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -4,7 +4,7 @@ use crate::sync::network_context::{ LookupRequestResult, PeerGroup, ReqId, RpcRequestSendError, SendErrorProcessor, SyncNetworkContext, }; -use beacon_chain::BeaconChainTypes; +use beacon_chain::{BeaconChainTypes, BlockProcessStatus}; use derivative::Derivative; use lighthouse_network::service::api_types::Id; use rand::seq::IteratorRandom; @@ -15,7 +15,7 @@ use std::time::{Duration, Instant}; use store::Hash256; use strum::IntoStaticStr; use types::blob_sidecar::FixedBlobSidecarList; -use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock}; +use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock, Slot}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -62,8 +62,7 @@ pub enum LookupRequestError { pub struct SingleBlockLookup { pub id: Id, pub block_request_state: BlockRequestState, - pub blob_request_state: BlobRequestState, - pub custody_request_state: CustodyRequestState, + pub component_requests: ComponentRequests, /// Peers that claim to have imported this set of block components #[derivative(Debug(format_with = "fmt_peer_set_as_len"))] peers: HashSet, @@ -72,6 +71,16 @@ pub struct SingleBlockLookup { created: Instant, } +#[derive(Debug)] +pub(crate) enum ComponentRequests { + WaitingForBlock, + ActiveBlobRequest(BlobRequestState, usize), + ActiveCustodyRequest(CustodyRequestState), + // When printed in debug, this state displays the reason why it's not needed + #[allow(dead_code)] + NotNeeded(&'static str), +} + impl SingleBlockLookup { pub fn new( requested_block_root: Hash256, @@ -82,8 +91,7 @@ impl SingleBlockLookup { Self { id, block_request_state: BlockRequestState::new(requested_block_root), - blob_request_state: BlobRequestState::new(requested_block_root), - custody_request_state: CustodyRequestState::new(requested_block_root), + component_requests: ComponentRequests::WaitingForBlock, peers: HashSet::from_iter(peers.iter().copied()), block_root: requested_block_root, awaiting_parent, @@ -91,6 +99,14 @@ impl SingleBlockLookup { } } + /// Return the slot of this lookup's block if it's currently cached as `AwaitingProcessing` + pub fn peek_downloaded_block_slot(&self) -> Option { + self.block_request_state + .state + .peek_downloaded_data() + .map(|block| block.slot()) + } + /// Get the block root that is
being requested. pub fn block_root(&self) -> Hash256 { self.block_root @@ -142,16 +158,28 @@ impl SingleBlockLookup { /// Returns true if the block has already been downloaded. pub fn all_components_processed(&self) -> bool { self.block_request_state.state.is_processed() - && self.blob_request_state.state.is_processed() - && self.custody_request_state.state.is_processed() + && match &self.component_requests { + ComponentRequests::WaitingForBlock => false, + ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_processed(), + ComponentRequests::ActiveCustodyRequest(request) => request.state.is_processed(), + ComponentRequests::NotNeeded { .. } => true, + } } /// Returns true if this request is expecting some event to make progress pub fn is_awaiting_event(&self) -> bool { self.awaiting_parent.is_some() || self.block_request_state.state.is_awaiting_event() - || self.blob_request_state.state.is_awaiting_event() - || self.custody_request_state.state.is_awaiting_event() + || match &self.component_requests { + ComponentRequests::WaitingForBlock => true, + ComponentRequests::ActiveBlobRequest(request, _) => { + request.state.is_awaiting_event() + } + ComponentRequests::ActiveCustodyRequest(request) => { + request.state.is_awaiting_event() + } + ComponentRequests::NotNeeded { .. } => false, + } } /// Makes progress on all requests of this lookup. Any error is not recoverable and must result @@ -161,9 +189,66 @@ impl SingleBlockLookup { cx: &mut SyncNetworkContext, ) -> Result { // TODO: Check what's necessary to download, especially for blobs - self.continue_request::>(cx)?; - self.continue_request::>(cx)?; - self.continue_request::>(cx)?; + self.continue_request::>(cx, 0)?; + + if let ComponentRequests::WaitingForBlock = self.component_requests { + let downloaded_block = self + .block_request_state + .state + .peek_downloaded_data() + .cloned(); + + if let Some(block) = downloaded_block.or_else(|| { + // If the block is already being processed or fully validated, retrieve how many blobs + // it expects. Consider any stage of the block. If the block root has been validated, we + // can assert that this is the correct value of `blob_kzg_commitments_count`. + match cx.chain.get_block_process_status(&self.block_root) { + BlockProcessStatus::Unknown => None, + BlockProcessStatus::NotValidated(block) + | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), + } + }) { + let expected_blobs = block.num_expected_blobs(); + let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + if expected_blobs == 0 { + self.component_requests = ComponentRequests::NotNeeded("no data"); + } else if cx.chain.should_fetch_blobs(block_epoch) { + self.component_requests = ComponentRequests::ActiveBlobRequest( + BlobRequestState::new(self.block_root), + expected_blobs, + ); + } else if cx.chain.should_fetch_custody_columns(block_epoch) { + self.component_requests = ComponentRequests::ActiveCustodyRequest( + CustodyRequestState::new(self.block_root), + ); + } else { + self.component_requests = ComponentRequests::NotNeeded("outside da window"); + } + } else { + // Wait to download the block before downloading blobs. Then we can be sure that the + // block has data, so there's no need to do "blind" requests for all possible blobs and + // later handle the case where the peer sent no blobs and should be penalized. + // + // Lookup sync event safety: Reaching this code means that a block is not in any pre-import + // cache nor in the request state of this lookup.
Therefore, the block must either: (1) not + // be downloaded yet or (2) the block is already imported into the fork-choice. + // In case (1) the lookup must either successfully download the block or get dropped. + // In case (2) the block will be downloaded, processed, reach `DuplicateFullyImported` + // and get dropped as completed. + } + } + + match &self.component_requests { + ComponentRequests::WaitingForBlock => {} // do nothing + ComponentRequests::ActiveBlobRequest(_, expected_blobs) => { + self.continue_request::>(cx, *expected_blobs)? + } + ComponentRequests::ActiveCustodyRequest(_) => { + self.continue_request::>(cx, 0)? + } + ComponentRequests::NotNeeded { .. } => {} // do nothing + } // If all components of this lookup are already processed, there will be no future events // that can make progress so it must be dropped. Consider the lookup completed. @@ -179,15 +264,12 @@ impl SingleBlockLookup { fn continue_request>( &mut self, cx: &mut SyncNetworkContext, + expected_blobs: usize, ) -> Result<(), LookupRequestError> { let id = self.id; let awaiting_parent = self.awaiting_parent.is_some(); - let downloaded_block = self - .block_request_state - .state - .peek_downloaded_data() - .cloned(); - let request = R::request_state_mut(self); + let request = + R::request_state_mut(self).map_err(|e| LookupRequestError::BadState(e.to_owned()))?; // Attempt to progress awaiting downloads if request.get_state().is_awaiting_download() { @@ -206,13 +288,16 @@ impl SingleBlockLookup { // not receive any new peers for some time it will be dropped. If it receives a new // peer it must attempt to make progress. R::request_state_mut(self) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))? .get_state_mut() .update_awaiting_download_status("no peers"); return Ok(()); }; - let request = R::request_state_mut(self); - match request.make_request(id, peer_id, downloaded_block, cx)? { + let request = R::request_state_mut(self) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))?; + + match request.make_request(id, peer_id, expected_blobs, cx)? 
{ LookupRequestResult::RequestSent(req_id) => { // Lookup sync event safety: If make_request returns `RequestSent`, we are // guaranteed that `BlockLookups::on_download_response` will be called exactly diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index ed91c73d8bf..882f199b52d 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -35,10 +35,12 @@ use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; use super::block_lookups::BlockLookups; -use super::network_context::{BlockOrBlob, RangeRequestId, RpcEvent, SyncNetworkContext}; +use super::network_context::{ + BlockOrBlob, CustodyByRootResult, RangeRequestId, RpcEvent, SyncNetworkContext, +}; +use super::peer_sampling::{Sampling, SamplingConfig, SamplingResult}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; -use super::sampling::{Sampling, SamplingConfig, SamplingResult}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; @@ -55,8 +57,8 @@ use beacon_chain::{ use futures::StreamExt; use lighthouse_network::rpc::RPCError; use lighthouse_network::service::api_types::{ - DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SamplingId, SamplingRequester, - SingleLookupReqId, SyncRequestId, + CustodyRequester, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SamplingId, + SamplingRequester, SingleLookupReqId, SyncRequestId, }; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; @@ -69,6 +71,9 @@ use std::time::Duration; use tokio::sync::mpsc; use types::{BlobSidecar, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; +#[cfg(test)] +use types::ColumnIndex; + /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a /// fully sync'd peer. @@ -89,6 +94,15 @@ pub enum SyncMessage { /// A useful peer has been discovered. AddPeer(PeerId, SyncInfo), + /// Force trigger range sync for a set of peers given a head they claim to have imported. Used + /// by block lookup to trigger range sync if a parent chain grows too large. + AddPeersForceRangeSync { + peers: Vec, + head_root: Hash256, + /// Sync lookup may not know the Slot of this head. However this situation is very rare. + head_slot: Option, + }, + /// A block has been received from the RPC. RpcBlock { request_id: SyncRequestId, @@ -317,6 +331,13 @@ impl SyncManager { .collect() } + #[cfg(test)] + pub(crate) fn get_range_sync_chains( + &self, + ) -> Result, &'static str> { + self.range_sync.state() + } + #[cfg(test)] pub(crate) fn get_failed_chains(&mut self) -> Vec { self.block_lookups.get_failed_chains() @@ -332,6 +353,15 @@ impl SyncManager { self.sampling.active_sampling_requests() } + #[cfg(test)] + pub(crate) fn get_sampling_request_status( + &self, + block_root: Hash256, + index: &ColumnIndex, + ) -> Option { + self.sampling.get_request_status(block_root, index) + } + fn network_globals(&self) -> &NetworkGlobals { self.network.network_globals() } @@ -360,14 +390,76 @@ impl SyncManager { let sync_type = remote_sync_type(&local, &remote, &self.chain); // update the state of the peer. 
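A minimal sketch of the `AddPeersForceRangeSync` message introduced in this hunk, with hypothetical stand-ins for `PeerId`, `Hash256`, and `Slot`; `head_slot` stays optional because a lookup may only have learned the head root, in which case the manager falls back to the local head slot:

type PeerId = u32;
type Hash256 = [u8; 32];
type Slot = u64;

enum SyncMessage {
    AddPeersForceRangeSync {
        peers: Vec<PeerId>,
        head_root: Hash256,
        head_slot: Option<Slot>, // unknown when only the root was learned
    },
}

fn main() {
    let msg = SyncMessage::AddPeersForceRangeSync {
        peers: vec![1, 2],
        head_root: [0u8; 32],
        head_slot: None, // the manager substitutes the local head slot
    };
    let SyncMessage::AddPeersForceRangeSync { peers, head_slot, .. } = msg;
    assert_eq!(peers.len(), 2);
    assert!(head_slot.is_none());
}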
- let should_add = self.update_peer_sync_state(&peer_id, &local, &remote, &sync_type); - - if matches!(sync_type, PeerSyncType::Advanced) && should_add { - self.range_sync - .add_peer(&mut self.network, local, peer_id, remote); + let is_still_connected = self.update_peer_sync_state(&peer_id, &local, &remote, &sync_type); + if is_still_connected { + match sync_type { + PeerSyncType::Behind => {} // Do nothing + PeerSyncType::Advanced => { + self.range_sync + .add_peer(&mut self.network, local, peer_id, remote); + } + PeerSyncType::FullySynced => { + // Sync considers this peer close enough to the head to not trigger range sync. + // Range sync handles syncing large ranges of blocks well, of at least a few blocks. + // However, this peer may be on a fork that we should sync but have not discovered + // yet. If the head of the peer is unknown, attempt block lookup first. If the + // unknown head turns out to be on a longer fork, it will trigger range sync. + // + // A peer should always be considered `Advanced` if its finalized root is + // unknown and ahead of ours, so we don't check for that root here. + // + // TODO: This fork-choice check is potentially duplicated, review code + if !self.chain.block_is_known_to_fork_choice(&remote.head_root) { + self.handle_unknown_block_root(peer_id, remote.head_root); + } + } + } } self.update_sync_state(); + + // Try to make progress on custody requests that are waiting for peers + for (id, result) in self.network.continue_custody_by_root_requests() { + self.on_custody_by_root_result(id, result); + } + } + + /// Trigger range sync for a set of peers that claim to have imported a head unknown to us. + fn add_peers_force_range_sync( + &mut self, + peers: &[PeerId], + head_root: Hash256, + head_slot: Option, + ) { + let status = self.chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: status.head_root, + finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, + }; + + let head_slot = head_slot.unwrap_or_else(|| { + debug!(self.log, + "On add peers force range sync assuming local head_slot"; + "local_head_slot" => local.head_slot, + "head_root" => ?head_root + ); + local.head_slot + }); + + let remote = SyncInfo { + head_slot, + head_root, + // Set finalized to same as local to trigger Head sync + finalized_epoch: local.finalized_epoch, + finalized_root: local.finalized_root, + }; + + for peer_id in peers { + self.range_sync + .add_peer(&mut self.network, local.clone(), *peer_id, remote.clone()); + } } /// Handles RPC errors related to requests that were emitted from the sync manager. @@ -380,13 +472,9 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => { self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) } - SyncRequestId::DataColumnsByRoot(req_id, requester) => self - .on_data_columns_by_root_response( - req_id, - requester, - peer_id, - RpcEvent::RPCError(error), - ), + SyncRequestId::DataColumnsByRoot(req_id) => { + self.on_data_columns_by_root_response(req_id, peer_id, RpcEvent::RPCError(error)) + } SyncRequestId::RangeBlockAndBlobs { id } => { if let Some(sender_id) = self.network.range_request_failed(id) { match sender_id { @@ -444,9 +532,18 @@ impl SyncManager { self.update_sync_state(); } + /// Prune stale requests that are waiting for peers + fn prune_requests(&mut self) { + // continue_custody_by_root_requests attempts to make progress on all requests. If some + // exceed the stale duration limit they will fail and return a result. Re-using
Re-using + // `continue_custody_by_root_requests` is just a convenience to have less code. + for (id, result) in self.network.continue_custody_by_root_requests() { + self.on_custody_by_root_result(id, result); + } + } + /// Updates the syncing state of a peer. - /// Return whether the peer should be used for range syncing or not, according to its - /// connection status. + /// Return true if the peer is still connected and known to the peers DB fn update_peer_sync_state( &mut self, peer_id: &PeerId, @@ -624,6 +721,8 @@ impl SyncManager { // unless there is a bug. let mut prune_lookups_interval = tokio::time::interval(Duration::from_secs(15)); + let mut prune_requests = tokio::time::interval(Duration::from_secs(15)); + let mut register_metrics_interval = tokio::time::interval(Duration::from_secs(5)); // process any inbound messages @@ -638,6 +737,9 @@ impl SyncManager { _ = prune_lookups_interval.tick() => { self.block_lookups.prune_lookups(); } + _ = prune_requests.tick() => { + self.prune_requests(); + } _ = register_metrics_interval.tick() => { self.network.register_metrics(); } @@ -650,6 +752,13 @@ impl SyncManager { SyncMessage::AddPeer(peer_id, info) => { self.add_peer(peer_id, info); } + SyncMessage::AddPeersForceRangeSync { + peers, + head_root, + head_slot, + } => { + self.add_peers_force_range_sync(&peers, head_root, head_slot); + } SyncMessage::RpcBlock { request_id, peer_id, @@ -991,10 +1100,9 @@ impl SyncManager { seen_timestamp: Duration, ) { match request_id { - SyncRequestId::DataColumnsByRoot(req_id, requester) => { + SyncRequestId::DataColumnsByRoot(req_id) => { self.on_data_columns_by_root_response( req_id, - requester, peer_id, match data_column { Some(data_column) => RpcEvent::Response(data_column, seen_timestamp), @@ -1036,7 +1144,6 @@ impl SyncManager { fn on_data_columns_by_root_response( &mut self, req_id: DataColumnsByRootRequestId, - requester: DataColumnsByRootRequester, peer_id: PeerId, data_column: RpcEvent>>, ) { @@ -1044,7 +1151,7 @@ impl SyncManager { self.network .on_data_columns_by_root_response(req_id, peer_id, data_column) { - match requester { + match req_id.requester { DataColumnsByRootRequester::Sampling(id) => { if let Some((requester, result)) = self.sampling @@ -1054,26 +1161,32 @@ impl SyncManager { } } DataColumnsByRootRequester::Custody(custody_id) => { - if let Some(custody_columns) = self + if let Some(result) = self .network .on_custody_by_root_response(custody_id, req_id, peer_id, resp) { - // TODO(das): get proper timestamp - let seen_timestamp = timestamp_now(); - self.block_lookups - .on_download_response::>( - custody_id.requester.0, - custody_columns.map(|(columns, peer_group)| { - (columns, peer_group, seen_timestamp) - }), - &mut self.network, - ); + self.on_custody_by_root_result(custody_id.requester, result); } } } } } + fn on_custody_by_root_result( + &mut self, + requester: CustodyRequester, + response: CustodyByRootResult, + ) { + // TODO(das): get proper timestamp + let seen_timestamp = timestamp_now(); + self.block_lookups + .on_download_response::>( + requester.0, + response.map(|(columns, peer_group)| (columns, peer_group, seen_timestamp)), + &mut self.network, + ); + } + fn on_sampling_result(&mut self, requester: SamplingRequester, result: SamplingResult) { // TODO(das): How is a consumer of sampling results? 
// - Fork-choice for trailing DA diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 6669add4453..0f5fd6fb9f1 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -6,9 +6,11 @@ mod block_lookups; mod block_sidecar_coupling; pub mod manager; mod network_context; +mod peer_sampling; mod peer_sync_info; mod range_sync; -mod sampling; +#[cfg(test)] +mod tests; pub use lighthouse_network::service::api_types::SamplingId; pub use manager::{BatchProcessResult, SyncMessage}; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index b9f6d180c13..5f7778ffcc6 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -2,11 +2,11 @@ //! channel and stores a global RPC ID to perform requests. use self::custody::{ActiveCustodyRequest, Error as CustodyRequestError}; -use self::requests::{ActiveBlobsByRootRequest, ActiveBlocksByRootRequest}; pub use self::requests::{BlocksByRootSingleRequest, DataColumnsByRootSingleBlockRequest}; use super::block_sidecar_coupling::RangeBlockComponentsRequest; use super::manager::BlockProcessType; use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; +use super::SyncMessage; use crate::metrics; use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::service::NetworkMessage; @@ -15,18 +15,25 @@ use crate::sync::block_lookups::SingleLookupId; use crate::sync::network_context::requests::BlobsByRootSingleBlockRequest; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; +use custody::CustodyRequestResult; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest}; -use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, + OldBlocksByRangeRequestV1, OldBlocksByRangeRequestV2, +}; +use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError, RequestType}; use lighthouse_network::service::api_types::{ AppRequestId, CustodyId, CustodyRequester, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, }; -use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; +use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource}; use rand::seq::SliceRandom; use rand::thread_rng; -use requests::ActiveDataColumnsByRootRequest; pub use requests::LookupVerifyError; +use requests::{ + ActiveRequests, BlobsByRootRequestItems, BlocksByRootRequestItems, + DataColumnsByRootRequestItems, +}; use slog::{debug, error, warn}; use std::collections::hash_map::Entry; use std::collections::HashMap; @@ -69,6 +76,8 @@ pub enum RpcEvent { pub type RpcResponseResult = Result<(T, Duration), RpcResponseError>; +pub type CustodyByRootResult = Result<(DataColumnSidecarList, PeerGroup), RpcResponseError>; + #[derive(Debug)] pub enum RpcResponseError { RpcError(RPCError), @@ -136,6 +145,15 @@ impl PeerGroup { pub fn all(&self) -> impl Iterator + '_ { self.peers.keys() } + pub fn of_index(&self, index: usize) -> impl Iterator + '_ { + self.peers.iter().filter_map(move |(peer, indices)| { + if indices.contains(&index) { + Some(peer) + } else { + None + } + }) + } } /// Sequential ID that uniquely identifies ReqResp 
outgoing requests @@ -164,18 +182,17 @@ pub struct SyncNetworkContext { request_id: Id, /// A mapping of active BlocksByRoot requests, including both current slot and parent lookups. - blocks_by_root_requests: FnvHashMap, - + blocks_by_root_requests: + ActiveRequests>, /// A mapping of active BlobsByRoot requests, including both current slot and parent lookups. - blobs_by_root_requests: FnvHashMap>, + blobs_by_root_requests: ActiveRequests>, + /// A mapping of active DataColumnsByRoot requests + data_columns_by_root_requests: + ActiveRequests>, /// Mapping of active custody column requests for a block root custody_by_root_requests: FnvHashMap>, - /// A mapping of active DataColumnsByRoot requests - data_columns_by_root_requests: - FnvHashMap>, - /// BlocksByRange requests paired with BlobsByRange range_block_components_requests: FnvHashMap)>, @@ -223,9 +240,9 @@ impl SyncNetworkContext { network_send, execution_engine_state: EngineState::Online, // always assume `Online` at the start request_id: 1, - blocks_by_root_requests: <_>::default(), - blobs_by_root_requests: <_>::default(), - data_columns_by_root_requests: <_>::default(), + blocks_by_root_requests: ActiveRequests::new("blocks_by_root"), + blobs_by_root_requests: ActiveRequests::new("blobs_by_root"), + data_columns_by_root_requests: ActiveRequests::new("data_columns_by_root"), custody_by_root_requests: <_>::default(), range_block_components_requests: FnvHashMap::default(), network_beacon_processor, @@ -234,6 +251,11 @@ impl SyncNetworkContext { } } + pub fn send_sync_message(&mut self, sync_message: SyncMessage) { + self.network_beacon_processor + .send_sync_message(sync_message); + } + /// Returns the ids of all the requests made to the given peer_id. pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Vec { let failed_range_ids = @@ -249,34 +271,19 @@ impl SyncNetworkContext { let failed_block_ids = self .blocks_by_root_requests - .iter() - .filter_map(|(id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::SingleBlock { id: *id }) - } else { - None - } - }); + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| SyncRequestId::SingleBlock { id: *id }); let failed_blob_ids = self .blobs_by_root_requests - .iter() - .filter_map(|(id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::SingleBlob { id: *id }) - } else { - None - } - }); - let failed_data_column_by_root_ids = - self.data_columns_by_root_requests - .iter() - .filter_map(|(req_id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::DataColumnsByRoot(*req_id, request.requester)) - } else { - None - } - }); + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| SyncRequestId::SingleBlob { id: *id }); + let failed_data_column_by_root_ids = self + .data_columns_by_root_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::DataColumnsByRoot(*req_id)); failed_range_ids .chain(failed_block_ids) @@ -324,7 +331,7 @@ impl SyncNetworkContext { "head_slot" => %status_message.head_slot, ); - let request = Request::Status(status_message.clone()); + let request = RequestType::Status(status_message.clone()); let request_id = AppRequestId::Router; let _ = self.send_network_msg(NetworkMessage::SendRequest { peer_id, @@ -353,10 +360,26 @@ impl SyncNetworkContext { "epoch" => epoch, "peer" => %peer_id, ); + let rpc_request = match request { + BlocksByRangeRequest::V1(ref req) => { + RequestType::BlocksByRange(OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { + start_slot: 
req.start_slot, + count: req.count, + step: 1, + })) + } + BlocksByRangeRequest::V2(ref req) => { + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: req.start_slot, + count: req.count, + step: 1, + })) + } + }; self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlocksByRange(request.clone()), + request: rpc_request, request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -375,7 +398,7 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlobsByRange(BlobsByRangeRequest { + request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: *request.start_slot(), count: *request.count(), }), @@ -387,13 +410,13 @@ impl SyncNetworkContext { false }; - let (expects_custody_columns, num_of_custody_column_req) = + let (expects_columns, num_of_column_req) = if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { - let custody_indexes = self.network_globals().custody_columns.clone(); + let column_indexes = self.network_globals().sampling_columns.clone(); let mut num_of_custody_column_req = 0; for (peer_id, columns_by_range_request) in - self.make_columns_by_range_requests(request, &custody_indexes)? + self.make_columns_by_range_requests(request, &column_indexes)? { requested_peers.push(peer_id); @@ -409,7 +432,7 @@ impl SyncNetworkContext { self.send_network_msg(NetworkMessage::SendRequest { peer_id, - request: Request::DataColumnsByRange(columns_by_range_request), + request: RequestType::DataColumnsByRange(columns_by_range_request), request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -417,15 +440,15 @@ impl SyncNetworkContext { num_of_custody_column_req += 1; } - (Some(custody_indexes), Some(num_of_custody_column_req)) + (Some(column_indexes), Some(num_of_custody_column_req)) } else { (None, None) }; let info = RangeBlockComponentsRequest::new( expected_blobs, - expects_custody_columns, - num_of_custody_column_req, + expects_columns, + num_of_column_req, requested_peers, ); self.range_block_components_requests @@ -573,13 +596,19 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlocksByRoot(request.into_request(&self.chain.spec)), + request: RequestType::BlocksByRoot(request.into_request(&self.chain.spec)), request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; - self.blocks_by_root_requests - .insert(id, ActiveBlocksByRootRequest::new(request, peer_id)); + self.blocks_by_root_requests.insert( + id, + peer_id, + // true = enforce max_requests as returned for blocks_by_root. We always request a single + // block and the peer must have it. + true, + BlocksByRootRequestItems::new(request), + ); Ok(LookupRequestResult::RequestSent(req_id)) } @@ -595,49 +624,12 @@ impl SyncNetworkContext { lookup_id: SingleLookupId, peer_id: PeerId, block_root: Hash256, - downloaded_block: Option>>, + expected_blobs: usize, ) -> Result { - let Some(block) = downloaded_block.or_else(|| { - // If the block is already being processed or fully validated, retrieve how many blobs - // it expects. Consider any stage of the block. If the block root has been validated, we - // can assert that this is the correct value of `blob_kzg_commitments_count`. 
- match self.chain.get_block_process_status(&block_root) { - BlockProcessStatus::Unknown => None, - BlockProcessStatus::NotValidated(block) - | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), - } - }) else { - // Wait to download the block before downloading blobs. Then we can be sure that the - // block has data, so there's no need to do "blind" requests for all possible blobs and - // latter handle the case where if the peer sent no blobs, penalize. - // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing. - // - if `num_expected_blobs` returns Some = block is processed. - // - // Lookup sync event safety: Reaching this code means that a block is not in any pre-import - // cache nor in the request state of this lookup. Therefore, the block must either: (1) not - // be downloaded yet or (2) the block is already imported into the fork-choice. - // In case (1) the lookup must either successfully download the block or get dropped. - // In case (2) the block will be downloaded, processed, reach `BlockIsAlreadyKnown` and - // get dropped as completed. - return Ok(LookupRequestResult::Pending("waiting for block download")); - }; - let expected_blobs = block.num_expected_blobs(); - let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - - // Check if we are in deneb, before peerdas and inside da window - if !self.chain.should_fetch_blobs(block_epoch) { - return Ok(LookupRequestResult::NoRequestNeeded("blobs not required")); - } - - // No data required for this block - if expected_blobs == 0 { - return Ok(LookupRequestResult::NoRequestNeeded("no data")); - } - let imported_blob_indexes = self .chain .data_availability_checker - .imported_blob_indexes(&block_root) + .cached_blob_indexes(&block_root) .unwrap_or_default(); // Include only the blob indexes not yet imported (received through gossip) let indices = (0..expected_blobs as u64) @@ -671,13 +663,20 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlobsByRoot(request.clone().into_request(&self.chain.spec)), + request: RequestType::BlobsByRoot(request.clone().into_request(&self.chain.spec)), request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; - self.blobs_by_root_requests - .insert(id, ActiveBlobsByRootRequest::new(request, peer_id)); + self.blobs_by_root_requests.insert( + id, + peer_id, + // true = enforce max_requests are returned for blobs_by_root. We only issue requests for + // blocks after we know the block has data, and only request peers after they claim to + // have imported the block+blobs. 
+ true, + BlobsByRootRequestItems::new(request), + ); Ok(LookupRequestResult::RequestSent(req_id)) } @@ -688,8 +687,12 @@ impl SyncNetworkContext { requester: DataColumnsByRootRequester, peer_id: PeerId, request: DataColumnsByRootSingleBlockRequest, + expect_max_responses: bool, ) -> Result, &'static str> { - let req_id = DataColumnsByRootRequestId(self.next_id()); + let req_id = DataColumnsByRootRequestId { + id: self.next_id(), + requester, + }; debug!( self.log, "Sending DataColumnsByRoot Request"; @@ -703,13 +706,15 @@ impl SyncNetworkContext { self.send_network_msg(NetworkMessage::SendRequest { peer_id, - request: Request::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id, requester)), + request: RequestType::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), + request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id)), })?; self.data_columns_by_root_requests.insert( req_id, - ActiveDataColumnsByRootRequest::new(request, peer_id, requester), + peer_id, + expect_max_responses, + DataColumnsByRootRequestItems::new(request), ); Ok(LookupRequestResult::RequestSent(req_id)) @@ -723,45 +728,17 @@ impl SyncNetworkContext { &mut self, lookup_id: SingleLookupId, block_root: Hash256, - downloaded_block: Option>>, ) -> Result { - let Some(block) = - downloaded_block.or_else(|| match self.chain.get_block_process_status(&block_root) { - BlockProcessStatus::Unknown => None, - BlockProcessStatus::NotValidated(block) - | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), - }) - else { - // Wait to download the block before downloading columns. Then we can be sure that the - // block has data, so there's no need to do "blind" requests for all possible columns and - // latter handle the case where if the peer sent no columns, penalize. - // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing. - // - if `num_expected_blobs` returns Some = block is processed. - return Ok(LookupRequestResult::Pending("waiting for block download")); - }; - let expected_blobs = block.num_expected_blobs(); - let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - - // Check if we are into peerdas and inside da window - if !self.chain.should_fetch_custody_columns(block_epoch) { - return Ok(LookupRequestResult::NoRequestNeeded("columns not required")); - } - - // No data required for this block - if expected_blobs == 0 { - return Ok(LookupRequestResult::NoRequestNeeded("no data")); - } - let custody_indexes_imported = self .chain .data_availability_checker - .imported_custody_column_indexes(&block_root) + .cached_data_column_indexes(&block_root) .unwrap_or_default(); // Include only the blob indexes not yet imported (received through gossip) let custody_indexes_to_fetch = self .network_globals() - .custody_columns + .sampling_columns .clone() .into_iter() .filter(|index| !custody_indexes_imported.contains(index)) @@ -915,144 +892,102 @@ impl SyncNetworkContext { .insert(id, (sender_id, info)); } + /// Attempt to make progress on all custody_by_root requests. Some request may be stale waiting + /// for custody peers. Returns a Vec of results as zero or more requests may fail in this + /// attempt. 
+ pub fn continue_custody_by_root_requests( + &mut self, + ) -> Vec<(CustodyRequester, CustodyByRootResult)> { + let ids = self + .custody_by_root_requests + .keys() + .copied() + .collect::>(); + + // Need to collect ids and results in separate steps to re-borrow self. + ids.into_iter() + .filter_map(|id| { + let mut request = self + .custody_by_root_requests + .remove(&id) + .expect("key of hashmap"); + let result = request.continue_requests(self); + self.handle_custody_by_root_result(id, request, result) + .map(|result| (id, result)) + }) + .collect() + } + // Request handlers - pub fn on_single_block_response( + pub(crate) fn on_single_block_response( &mut self, - request_id: SingleLookupReqId, + id: SingleLookupReqId, peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>>> { - let Entry::Occupied(mut request) = self.blocks_by_root_requests.entry(request_id) else { - metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blocks_by_root"]); - return None; - }; - - let resp = match rpc_event { - RpcEvent::Response(block, seen_timestamp) => { - match request.get_mut().add_response(block) { - Ok(block) => Ok((block, seen_timestamp)), - Err(e) => { - // The request must be dropped after receiving an error. - request.remove(); - Err(e.into()) - } + let response = self.blocks_by_root_requests.on_response(id, rpc_event); + let response = response.map(|res| { + res.and_then(|(mut blocks, seen_timestamp)| { + // Enforce that exactly one chunk = one block is returned. ReqResp behavior limits the + // response count to at most 1. + match blocks.pop() { + Some(block) => Ok((block, seen_timestamp)), + // Should never happen, `blocks_by_root_requests` enforces that we receive at least + // 1 chunk. + None => Err(LookupVerifyError::NotEnoughResponsesReturned { actual: 0 }.into()), } - } - RpcEvent::StreamTermination => match request.remove().terminate() { - Ok(_) => return None, - Err(e) => Err(e.into()), - }, - RpcEvent::RPCError(e) => { - request.remove(); - Err(e.into()) - } - }; - - if let Err(RpcResponseError::VerifyError(e)) = &resp { + }) + }); + if let Some(Err(RpcResponseError::VerifyError(e))) = &response { self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } - Some(resp) + response } - pub fn on_single_blob_response( + pub(crate) fn on_single_blob_response( &mut self, - request_id: SingleLookupReqId, + id: SingleLookupReqId, peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>> { - let Entry::Occupied(mut request) = self.blobs_by_root_requests.entry(request_id) else { - metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blobs_by_root"]); - return None; - }; - - let resp = match rpc_event { - RpcEvent::Response(blob, seen_timestamp) => { - let request = request.get_mut(); - match request.add_response(blob) { - Ok(Some(blobs)) => to_fixed_blob_sidecar_list(blobs) - .map(|blobs| (blobs, seen_timestamp)) - .map_err(|e| (e.into(), request.resolve())), - Ok(None) => return None, - Err(e) => Err((e.into(), request.resolve())), - } - } - RpcEvent::StreamTermination => match request.remove().terminate() { - Ok(_) => return None, - // (err, false = not resolved) because terminate returns Ok() if resolved - Err(e) => Err((e.into(), false)), - }, - RpcEvent::RPCError(e) => Err((e.into(), request.remove().resolve())), - }; - - match resp { - Ok(resp) => Some(Ok(resp)), - // Track if this request has already returned some value downstream. Ensure that - // downstream code only receives a single Result per request. 
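
// [Editor's note] A generic, illustrative sketch (names not from this diff) of the
// remove-then-reinsert pattern `continue_custody_by_root_requests` relies on: each
// request is taken out of the map so that `&mut self` can be re-borrowed while the
// request is stepped, and only unfinished requests are put back.
fn continue_all<K, R>(
    map: &mut std::collections::HashMap<K, R>,
    mut step: impl FnMut(&mut R) -> bool, // true = request finished (success or failure)
) -> Vec<K>
where
    K: Copy + Eq + std::hash::Hash,
{
    let ids: Vec<K> = map.keys().copied().collect();
    let mut finished = vec![];
    for id in ids {
        // Removing the entry releases the borrow on `map` while we step the request.
        let mut request = map.remove(&id).expect("id was just collected");
        if step(&mut request) {
            finished.push(id);
        } else {
            map.insert(id, request); // still in progress, put it back
        }
    }
    finished
}
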
If the serving peer does - // multiple penalizable actions per request, downscore and return None. This allows to - // catch if a peer is returning more blobs than requested or if the excess blobs are - // invalid. - Err((e, resolved)) => { - if let RpcResponseError::VerifyError(e) = &e { - self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - if resolved { - None - } else { - Some(Err(e)) - } - } + let response = self.blobs_by_root_requests.on_response(id, rpc_event); + let response = response.map(|res| { + res.and_then( + |(blobs, seen_timestamp)| match to_fixed_blob_sidecar_list(blobs) { + Ok(blobs) => Ok((blobs, seen_timestamp)), + Err(e) => Err(e.into()), + }, + ) + }); + if let Some(Err(RpcResponseError::VerifyError(e))) = &response { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } + response } #[allow(clippy::type_complexity)] - pub fn on_data_columns_by_root_response( + pub(crate) fn on_data_columns_by_root_response( &mut self, id: DataColumnsByRootRequestId, - _peer_id: PeerId, + peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>>>> { - let Entry::Occupied(mut request) = self.data_columns_by_root_requests.entry(id) else { - return None; - }; - - let resp = match rpc_event { - RpcEvent::Response(data_column, seen_timestamp) => { - let request = request.get_mut(); - match request.add_response(data_column) { - Ok(Some(data_columns)) => Ok((data_columns, seen_timestamp)), - Ok(None) => return None, - Err(e) => Err((e.into(), request.resolve())), - } - } - RpcEvent::StreamTermination => match request.remove().terminate() { - Ok(_) => return None, - // (err, false = not resolved) because terminate returns Ok() if resolved - Err(e) => Err((e.into(), false)), - }, - RpcEvent::RPCError(e) => Err((e.into(), request.remove().resolve())), - }; + let resp = self + .data_columns_by_root_requests + .on_response(id, rpc_event); + self.report_rpc_response_errors(resp, peer_id) + } - match resp { - Ok(resp) => Some(Ok(resp)), - // Track if this request has already returned some value downstream. Ensure that - // downstream code only receives a single Result per request. If the serving peer does - // multiple penalizable actions per request, downscore and return None. This allows to - // catch if a peer is returning more columns than requested or if the excess blobs are - // invalid. - Err((e, resolved)) => { - if let RpcResponseError::VerifyError(_e) = &e { - // TODO(das): this is a bug, we should not penalise peer in this case. - // confirm this can be removed. - // self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - if resolved { - None - } else { - Some(Err(e)) - } - } + fn report_rpc_response_errors( + &mut self, + resp: Option>, + peer_id: PeerId, + ) -> Option> { + if let Some(Err(RpcResponseError::VerifyError(e))) = &resp { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } + resp } /// Insert a downloaded column into an active custody request. Then make progress on the @@ -1069,7 +1004,7 @@ impl SyncNetworkContext { req_id: DataColumnsByRootRequestId, peer_id: PeerId, resp: RpcResponseResult>>>, - ) -> Option, PeerGroup), RpcResponseError>> { + ) -> Option> { // Note: need to remove the request to borrow self again below. 
Otherwise we can't // do nested requests let Some(mut request) = self.custody_by_root_requests.remove(&id.requester) else { @@ -1078,28 +1013,35 @@ impl SyncNetworkContext { return None; }; - let result = request - .on_data_column_downloaded(peer_id, req_id, resp, self) + let result = request.on_data_column_downloaded(peer_id, req_id, resp, self); + + self.handle_custody_by_root_result(id.requester, request, result) + } + + fn handle_custody_by_root_result( + &mut self, + id: CustodyRequester, + request: ActiveCustodyRequest, + result: CustodyRequestResult, + ) -> Option> { + let result = result .map_err(RpcResponseError::CustodyRequestError) .transpose(); // Convert a result from internal format of `ActiveCustodyRequest` (error first to use ?) to // an Option first to use in an `if let Some() { act on result }` block. - if let Some(result) = result { - match result.as_ref() { - Ok((columns, peer_group)) => { - debug!(self.log, "Custody request success, removing"; "id" => ?id, "count" => columns.len(), "peers" => ?peer_group) - } - Err(e) => { - debug!(self.log, "Custody request failure, removing"; "id" => ?id, "error" => ?e) - } + match result.as_ref() { + Some(Ok((columns, peer_group))) => { + debug!(self.log, "Custody request success, removing"; "id" => ?id, "count" => columns.len(), "peers" => ?peer_group) + } + Some(Err(e)) => { + debug!(self.log, "Custody request failure, removing"; "id" => ?id, "error" => ?e) + } + None => { + self.custody_by_root_requests.insert(id, request); } - - Some(result) - } else { - self.custody_by_root_requests.insert(id.requester, request); - None } + result } pub fn send_block_for_processing( diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index dfe409f043d..e4bce3dafcd 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -9,7 +9,7 @@ use lighthouse_network::PeerId; use lru_cache::LRUTimeCache; use rand::Rng; use slog::{debug, warn}; -use std::time::Duration; +use std::time::{Duration, Instant}; use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use types::EthSpec; use types::{data_column_sidecar::ColumnIndex, DataColumnSidecar, Hash256}; @@ -17,6 +17,7 @@ use types::{data_column_sidecar::ColumnIndex, DataColumnSidecar, Hash256}; use super::{LookupRequestResult, PeerGroup, RpcResponseResult, SyncNetworkContext}; const FAILED_PEERS_CACHE_EXPIRY_SECONDS: u64 = 5; +const MAX_STALE_NO_PEERS_DURATION: Duration = Duration::from_secs(30); type DataColumnSidecarList = Vec>>; @@ -56,7 +57,7 @@ struct ActiveBatchColumnsRequest { indices: Vec, } -type CustodyRequestResult = Result, PeerGroup)>, Error>; +pub type CustodyRequestResult = Result, PeerGroup)>, Error>; impl ActiveCustodyRequest { pub(crate) fn new( @@ -221,13 +222,13 @@ impl ActiveCustodyRequest { // - which peer returned what to have PeerGroup attributability for (column_index, request) in self.column_requests.iter_mut() { - if request.is_awaiting_download() { + if let Some(wait_duration) = request.is_awaiting_download() { if request.download_failures > MAX_CUSTODY_COLUMN_DOWNLOAD_ATTEMPTS { return Err(Error::TooManyFailures); } - // TODO: When is a fork and only a subset of your peers know about a block, we should only - // query the peers on that fork. Should this case be handled? How to handle it? + // TODO(das): When is a fork and only a subset of your peers know about a block, we should + // only query the peers on that fork. 
Should this case be handled? How to handle it?
                let custodial_peers = cx.get_custodial_peers(*column_index);

                // TODO(das): cache this computation in a OneCell or similar to prevent having to
@@ -256,17 +257,20 @@ impl<T: BeaconChainTypes> ActiveCustodyRequest<T> {
                    .collect::<Vec<_>>();
                priorized_peers.sort_unstable();

-                let Some((_, _, _, peer_id)) = priorized_peers.first() else {
-                    // Do not tolerate not having custody peers, hard error.
-                    // TODO(das): we might implement some grace period. The request will pause for X
-                    // seconds expecting the peer manager to find peers before failing the request.
+                if let Some((_, _, _, peer_id)) = priorized_peers.first() {
+                    columns_to_request_by_peer
+                        .entry(*peer_id)
+                        .or_default()
+                        .push(*column_index);
+                } else if wait_duration > MAX_STALE_NO_PEERS_DURATION {
+                    // Allow the request to sit stale in the `NotStarted` state for at most
+                    // `MAX_STALE_NO_PEERS_DURATION`, else error and drop the request. Note that the
+                    // lookup will naturally retry when other peers send us attestations for
+                    // descendants of this unavailable lookup.
                    return Err(Error::NoPeers(*column_index));
-                };
-
-                columns_to_request_by_peer
-                    .entry(*peer_id)
-                    .or_default()
-                    .push(*column_index);
+                } else {
+                    // Do not issue requests if there is no custody peer on this column
+                }
            }
        }

@@ -279,6 +283,10 @@ impl<T: BeaconChainTypes> ActiveCustodyRequest<T> {
                    block_root: self.block_root,
                    indices: indices.clone(),
                },
+                // true = enforce that max_requests are returned for data_columns_by_root. We only
+                // issue requests for blocks after we know the block has data, and only request
+                // peers after they claim to have imported the block+columns and claim to be
+                // custodians
+                true,
            )
            .map_err(Error::SendFailed)?;

@@ -315,7 +323,7 @@ struct ColumnRequest<E: EthSpec> {

#[derive(Debug, Clone)]
enum Status<E: EthSpec> {
-    NotStarted,
+    NotStarted(Instant),
    Downloading(DataColumnsByRootRequestId),
    Downloaded(PeerId, Arc<DataColumnSidecar<E>>),
}

@@ -323,28 +331,28 @@ impl<E: EthSpec> ColumnRequest<E> {
    fn new() -> Self {
        Self {
-            status: Status::NotStarted,
+            status: Status::NotStarted(Instant::now()),
            download_failures: 0,
        }
    }

-    fn is_awaiting_download(&self) -> bool {
+    fn is_awaiting_download(&self) -> Option<Duration> {
        match self.status {
-            Status::NotStarted => true,
-            Status::Downloading { .. } | Status::Downloaded { .. } => false,
+            Status::NotStarted(start_time) => Some(start_time.elapsed()),
+            Status::Downloading { .. } | Status::Downloaded { .. } => None,
        }
    }

    fn is_downloaded(&self) -> bool {
        match self.status {
-            Status::NotStarted | Status::Downloading { .. } => false,
+            Status::NotStarted { .. } | Status::Downloading { .. } => false,
            Status::Downloaded { .. } => true,
        }
    }

    fn on_download_start(&mut self, req_id: DataColumnsByRootRequestId) -> Result<(), Error> {
        match &self.status {
-            Status::NotStarted => {
+            Status::NotStarted { ..
} => { self.status = Status::Downloading(req_id); Ok(()) } @@ -363,7 +371,7 @@ impl ColumnRequest { req_id, }); } - self.status = Status::NotStarted; + self.status = Status::NotStarted(Instant::now()); Ok(()) } other => Err(Error::BadState(format!( diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 0c2f59d143f..b9214bafcd7 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -1,23 +1,187 @@ +use std::{collections::hash_map::Entry, hash::Hash}; + +use beacon_chain::validator_monitor::timestamp_now; +use fnv::FnvHashMap; +use lighthouse_network::PeerId; use strum::IntoStaticStr; use types::Hash256; -pub use blobs_by_root::{ActiveBlobsByRootRequest, BlobsByRootSingleBlockRequest}; -pub use blocks_by_root::{ActiveBlocksByRootRequest, BlocksByRootSingleRequest}; +pub use blobs_by_root::{BlobsByRootRequestItems, BlobsByRootSingleBlockRequest}; +pub use blocks_by_root::{BlocksByRootRequestItems, BlocksByRootSingleRequest}; pub use data_columns_by_root::{ - ActiveDataColumnsByRootRequest, DataColumnsByRootSingleBlockRequest, + DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; +use crate::metrics; + +use super::{RpcEvent, RpcResponseResult}; + mod blobs_by_root; mod blocks_by_root; mod data_columns_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { - NoResponseReturned, - NotEnoughResponsesReturned { expected: usize, actual: usize }, + NotEnoughResponsesReturned { actual: usize }, TooManyResponses, UnrequestedBlockRoot(Hash256), UnrequestedIndex(u64), InvalidInclusionProof, DuplicateData, } + +/// Collection of active requests of a single ReqResp method, i.e. `blocks_by_root` +pub struct ActiveRequests { + requests: FnvHashMap>, + name: &'static str, +} + +/// Stateful container for a single active ReqResp request +struct ActiveRequest { + state: State, + peer_id: PeerId, + // Error if the request terminates before receiving max expected responses + expect_max_responses: bool, +} + +enum State { + Active(T), + CompletedEarly, + Errored, +} + +impl ActiveRequests { + pub fn new(name: &'static str) -> Self { + Self { + requests: <_>::default(), + name, + } + } + + pub fn insert(&mut self, id: K, peer_id: PeerId, expect_max_responses: bool, items: T) { + self.requests.insert( + id, + ActiveRequest { + state: State::Active(items), + peer_id, + expect_max_responses, + }, + ); + } + + /// Handle an `RpcEvent` for a specific request index by `id`. + /// + /// Lighthouse ReqResp protocol API promises to send 0 or more `RpcEvent::Response` chunks, + /// and EITHER a single `RpcEvent::RPCError` or RpcEvent::StreamTermination. + /// + /// Downstream code expects to receive a single `Result` value per request ID. However, + /// `add_item` may convert ReqResp success chunks into errors. This function handles the + /// multiple errors / stream termination internally ensuring that a single `Some` is + /// returned. + pub fn on_response( + &mut self, + id: K, + rpc_event: RpcEvent, + ) -> Option>> { + let Entry::Occupied(mut entry) = self.requests.entry(id) else { + metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &[self.name]); + return None; + }; + + match rpc_event { + // Handler of a success ReqResp chunk. Adds the item to the request accumulator. + // `ActiveRequestItems` validates the item before appending to its internal state. 
+            RpcEvent::Response(item, seen_timestamp) => {
+                let request = &mut entry.get_mut();
+                match &mut request.state {
+                    State::Active(items) => {
+                        match items.add(item) {
+                            // Received all the items we are expecting, return early, but keep the
+                            // request struct to handle the stream termination gracefully.
+                            Ok(true) => {
+                                let items = items.consume();
+                                request.state = State::CompletedEarly;
+                                Some(Ok((items, seen_timestamp)))
+                            }
+                            // Received item, but we are still expecting more
+                            Ok(false) => None,
+                            // Received an invalid item
+                            Err(e) => {
+                                request.state = State::Errored;
+                                Some(Err(e.into()))
+                            }
+                        }
+                    }
+                    // Should never happen, ReqResp network behaviour enforces a max count of chunks.
+                    // When `max_remaining_chunks <= 1` the inbound stream is terminated in
+                    // `rpc/handler.rs`. Handling this case adds complexity for no gain. Even if an
+                    // attacker could abuse this, there's no gain in sending garbage chunks that
+                    // will be ignored anyway.
+                    State::CompletedEarly => None,
+                    // Ignore items after errors. We may want to penalize repeated invalid chunks
+                    // for the same response. But that's an optimization to ban peers sending
+                    // invalid data faster, which we choose not to adopt for now.
+                    State::Errored => None,
+                }
+            }
+            RpcEvent::StreamTermination => {
+                // After stream termination we must forget about this request; there will be no
+                // more messages coming from the network.
+                let request = entry.remove();
+                match request.state {
+                    // Received a stream termination in a valid sequence, consume items
+                    State::Active(mut items) => {
+                        if request.expect_max_responses {
+                            Some(Err(LookupVerifyError::NotEnoughResponsesReturned {
+                                actual: items.consume().len(),
+                            }
+                            .into()))
+                        } else {
+                            Some(Ok((items.consume(), timestamp_now())))
+                        }
+                    }
+                    // Items already returned, ignore stream termination
+                    State::CompletedEarly => None,
+                    // Returned an error earlier, ignore stream termination
+                    State::Errored => None,
+                }
+            }
+            RpcEvent::RPCError(e) => {
+                // After an Error event from the network we must forget about this request as this
+                // may be the last message for this request.
+                match entry.remove().state {
+                    // Received error while request is still active, propagate error.
+                    State::Active(_) => Some(Err(e.into())),
+                    // Received error after completing the request, ignore the error. This is okay
+                    // because the network has already registered a downscore event if necessary for
+                    // this message.
+                    State::CompletedEarly => None,
+                    // Received a network error after a validity error. Okay to ignore, see above
+                    State::Errored => None,
+                }
+            }
+        }
+    }
+
+    pub fn active_requests_of_peer(&self, peer_id: &PeerId) -> Vec<&K> {
+        self.requests
+            .iter()
+            .filter(|(_, request)| &request.peer_id == peer_id)
+            .map(|(id, _)| id)
+            .collect()
+    }
+
+    pub fn len(&self) -> usize {
+        self.requests.len()
+    }
+}
+
+pub trait ActiveRequestItems {
+    type Item;
+
+    /// Add a new item into the accumulator. Returns true if all expected items have been received.
+    fn add(&mut self, item: Self::Item) -> Result<bool, LookupVerifyError>;
+
+    /// Return all accumulated items, consuming them.
+ fn consume(&mut self) -> Vec; +} diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs index cb2b1a42ec4..fefb27a5efc 100644 --- a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs @@ -1,8 +1,8 @@ -use lighthouse_network::{rpc::methods::BlobsByRootRequest, PeerId}; +use lighthouse_network::rpc::methods::BlobsByRootRequest; use std::sync::Arc; use types::{blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Clone)] pub struct BlobsByRootSingleBlockRequest { @@ -25,34 +25,27 @@ impl BlobsByRootSingleBlockRequest { } } -pub struct ActiveBlobsByRootRequest { +pub struct BlobsByRootRequestItems { request: BlobsByRootSingleBlockRequest, - blobs: Vec>>, - resolved: bool, - pub(crate) peer_id: PeerId, + items: Vec>>, } -impl ActiveBlobsByRootRequest { - pub fn new(request: BlobsByRootSingleBlockRequest, peer_id: PeerId) -> Self { +impl BlobsByRootRequestItems { + pub fn new(request: BlobsByRootSingleBlockRequest) -> Self { Self { request, - blobs: vec![], - resolved: false, - peer_id, + items: vec![], } } +} + +impl ActiveRequestItems for BlobsByRootRequestItems { + type Item = Arc>; /// Appends a chunk to this multi-item request. If all expected chunks are received, this /// method returns `Some`, resolving the request before the stream terminator. /// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - blob: Arc>, - ) -> Result>>>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, blob: Self::Item) -> Result { let block_root = blob.block_root(); if self.request.block_root != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); @@ -63,34 +56,16 @@ impl ActiveBlobsByRootRequest { if !self.request.indices.contains(&blob.index) { return Err(LookupVerifyError::UnrequestedIndex(blob.index)); } - if self.blobs.iter().any(|b| b.index == blob.index) { + if self.items.iter().any(|b| b.index == blob.index) { return Err(LookupVerifyError::DuplicateData); } - self.blobs.push(blob); - if self.blobs.len() >= self.request.indices.len() { - // All expected chunks received, return result early - self.resolved = true; - Ok(Some(std::mem::take(&mut self.blobs))) - } else { - Ok(None) - } - } + self.items.push(blob); - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NotEnoughResponsesReturned { - expected: self.request.indices.len(), - actual: self.blobs.len(), - }) - } + Ok(self.items.len() >= self.request.indices.len()) } - /// Mark request as resolved (= has returned something downstream) while marking this status as - /// true for future calls. 
- pub fn resolve(&mut self) -> bool { - std::mem::replace(&mut self.resolved, true) + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs index a15d4e39353..f3cdcbe714f 100644 --- a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs @@ -1,9 +1,9 @@ use beacon_chain::get_block_root; -use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; +use lighthouse_network::rpc::BlocksByRootRequest; use std::sync::Arc; use types::{ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Copy, Clone)] pub struct BlocksByRootSingleRequest(pub Hash256); @@ -14,47 +14,38 @@ impl BlocksByRootSingleRequest { } } -pub struct ActiveBlocksByRootRequest { +pub struct BlocksByRootRequestItems { request: BlocksByRootSingleRequest, - resolved: bool, - pub(crate) peer_id: PeerId, + items: Vec>>, } -impl ActiveBlocksByRootRequest { - pub fn new(request: BlocksByRootSingleRequest, peer_id: PeerId) -> Self { +impl BlocksByRootRequestItems { + pub fn new(request: BlocksByRootSingleRequest) -> Self { Self { request, - resolved: false, - peer_id, + items: vec![], } } +} + +impl ActiveRequestItems for BlocksByRootRequestItems { + type Item = Arc>; /// Append a response to the single chunk request. If the chunk is valid, the request is /// resolved immediately. /// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - block: Arc>, - ) -> Result>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, block: Self::Item) -> Result { let block_root = get_block_root(&block); if self.request.0 != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); } - // Valid data, blocks by root expects a single response - self.resolved = true; - Ok(block) + self.items.push(block); + // Always returns true, blocks by root expects a single response + Ok(true) } - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NoResponseReturned) - } + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs index a42ae7ca41f..1b8d46ff072 100644 --- a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs @@ -1,9 +1,8 @@ -use lighthouse_network::service::api_types::DataColumnsByRootRequester; -use lighthouse_network::{rpc::methods::DataColumnsByRootRequest, PeerId}; +use lighthouse_network::rpc::methods::DataColumnsByRootRequest; use std::sync::Arc; use types::{ChainSpec, DataColumnIdentifier, DataColumnSidecar, EthSpec, Hash256}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Clone)] pub struct DataColumnsByRootSingleBlockRequest { @@ -26,40 +25,27 @@ impl DataColumnsByRootSingleBlockRequest { } } -pub struct ActiveDataColumnsByRootRequest { +pub struct DataColumnsByRootRequestItems { request: DataColumnsByRootSingleBlockRequest, items: 
Vec>>, - resolved: bool, - pub(crate) peer_id: PeerId, - pub(crate) requester: DataColumnsByRootRequester, } -impl ActiveDataColumnsByRootRequest { - pub fn new( - request: DataColumnsByRootSingleBlockRequest, - peer_id: PeerId, - requester: DataColumnsByRootRequester, - ) -> Self { +impl DataColumnsByRootRequestItems { + pub fn new(request: DataColumnsByRootSingleBlockRequest) -> Self { Self { request, items: vec![], - resolved: false, - peer_id, - requester, } } +} + +impl ActiveRequestItems for DataColumnsByRootRequestItems { + type Item = Arc>; /// Appends a chunk to this multi-item request. If all expected chunks are received, this /// method returns `Some`, resolving the request before the stream terminator. /// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - data_column: Arc>, - ) -> Result>>>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, data_column: Self::Item) -> Result { let block_root = data_column.block_root(); if self.request.block_root != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); @@ -75,29 +61,11 @@ impl ActiveDataColumnsByRootRequest { } self.items.push(data_column); - if self.items.len() >= self.request.indices.len() { - // All expected chunks received, return result early - self.resolved = true; - Ok(Some(std::mem::take(&mut self.items))) - } else { - Ok(None) - } - } - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NotEnoughResponsesReturned { - expected: self.request.indices.len(), - actual: self.items.len(), - }) - } + Ok(self.items.len() >= self.request.indices.len()) } - /// Mark request as resolved (= has returned something downstream) while marking this status as - /// true for future calls. - pub fn resolve(&mut self) -> bool { - std::mem::replace(&mut self.resolved, true) + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs similarity index 85% rename from beacon_node/network/src/sync/sampling.rs rename to beacon_node/network/src/sync/peer_sampling.rs index 524fe86bee9..7e725f5df5c 100644 --- a/beacon_node/network/src/sync/sampling.rs +++ b/beacon_node/network/src/sync/peer_sampling.rs @@ -1,4 +1,6 @@ use self::request::ActiveColumnSampleRequest; +#[cfg(test)] +pub(crate) use self::request::Status; use super::network_context::{ DataColumnsByRootSingleBlockRequest, RpcResponseError, SyncNetworkContext, }; @@ -42,6 +44,18 @@ impl Sampling { self.requests.values().map(|r| r.block_root).collect() } + #[cfg(test)] + pub fn get_request_status( + &self, + block_root: Hash256, + index: &ColumnIndex, + ) -> Option { + let requester = SamplingRequester::ImportedBlock(block_root); + self.requests + .get(&requester) + .and_then(|req| req.get_request_status(index)) + } + /// Create a new sampling request for a known block /// /// ### Returns @@ -74,7 +88,11 @@ impl Sampling { } }; - debug!(self.log, "Created new sample request"; "id" => ?id); + debug!(self.log, + "Created new sample request"; + "id" => ?id, + "column_selection" => ?request.column_selection() + ); // TOOD(das): If a node has very little peers, continue_sampling() will attempt to find enough // to sample here, immediately failing the sampling request. 
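
// [Editor's note] A usage sketch of the `ActiveRequestItems` contract introduced by
// this diff (`u64` stands in for a real chunk type; `drive` is illustrative): `add`
// returns Ok(true) once every expected item has arrived, `consume` then drains the
// accumulator, and a verification error ends the request.
fn drive<I: ActiveRequestItems<Item = u64>>(items: &mut I, chunks: Vec<u64>) -> Option<Vec<u64>> {
    for chunk in chunks {
        match items.add(chunk) {
            Ok(true) => return Some(items.consume()), // all expected items received
            Ok(false) => continue,                    // still expecting more chunks
            Err(_) => return None,                    // invalid chunk; the request is dropped
        }
    }
    None // stream ended early; `on_response` maps this to NotEnoughResponsesReturned
}
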
There should be some grace
@@ -220,6 +238,20 @@ impl<T: BeaconChainTypes> ActiveSamplingRequest<T> {
        }
    }

+    #[cfg(test)]
+    pub fn get_request_status(&self, index: &ColumnIndex) -> Option<Status> {
+        self.column_requests.get(index).map(|req| req.status())
+    }
+
+    /// Return the current ordered list of columns that this request has to sample to succeed
+    pub(crate) fn column_selection(&self) -> Vec<ColumnIndex> {
+        self.column_shuffle
+            .iter()
+            .take(REQUIRED_SUCCESSES[0])
+            .copied()
+            .collect()
+    }
+
    /// Insert a downloaded column into an active sampling request. Then make progress on the
    /// entire request.
    ///
@@ -244,22 +276,35 @@ impl<T: BeaconChainTypes> ActiveSamplingRequest<T> {
            .column_indexes_by_sampling_request
            .get(&sampling_request_id)
        else {
-            error!(self.log, "Column indexes for the sampling request ID not found"; "sampling_request_id" => ?sampling_request_id);
+            error!(self.log,
+                "Column indexes for the sampling request ID not found";
+                "sampling_request_id" => ?sampling_request_id
+            );
            return Ok(None);
        };

        match resp {
            Ok((mut resp_data_columns, seen_timestamp)) => {
-                debug!(self.log, "Sample download success"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "count" => resp_data_columns.len());
+                let resp_column_indexes = resp_data_columns
+                    .iter()
+                    .map(|r| r.index)
+                    .collect::<Vec<_>>();
+                debug!(self.log,
+                    "Sample download success";
+                    "block_root" => %self.block_root,
+                    "column_indexes" => ?resp_column_indexes,
+                    "count" => resp_data_columns.len()
+                );
                metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::SUCCESS]);

                // Filter the data received in the response using the requested column indexes.
                let mut data_columns = vec![];
                for column_index in column_indexes {
                    let Some(request) = self.column_requests.get_mut(column_index) else {
-                        warn!(
-                            self.log,
-                            "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index
+                        warn!(self.log,
+                            "Active column sample request not found";
+                            "block_root" => %self.block_root,
+                            "column_index" => column_index
                        );
                        continue;
                    };
@@ -270,7 +315,11 @@ impl<T: BeaconChainTypes> ActiveSamplingRequest<T> {
                    else {
                        // Peer does not have the requested data.
                        // TODO(das) what to do?
-                        debug!(self.log, "Sampling peer claims to not have the data"; "block_root" => %self.block_root, "column_index" => column_index);
+                        debug!(self.log,
+                            "Sampling peer claims to not have the data";
+                            "block_root" => %self.block_root,
+                            "column_index" => column_index
+                        );
                        request.on_sampling_error()?;
                        continue;
                    };
@@ -283,15 +332,16 @@ impl<T: BeaconChainTypes> ActiveSamplingRequest<T> {
                        .iter()
                        .map(|d| d.index)
                        .collect::<Vec<_>>();
-                    debug!(
-                        self.log,
-                        "Received data that was not requested"; "block_root" => %self.block_root, "column_indexes" => ?resp_column_indexes
+                    debug!(self.log,
+                        "Received data that was not requested";
+                        "block_root" => %self.block_root,
+                        "column_indexes" => ?resp_column_indexes
                    );
                }

                // Handle the downloaded data columns.
                if data_columns.is_empty() {
-                    debug!(self.log,"Received empty response"; "block_root" => %self.block_root);
+                    debug!(self.log, "Received empty response"; "block_root" => %self.block_root);
                    self.column_indexes_by_sampling_request
                        .remove(&sampling_request_id);
                } else {
@@ -302,10 +352,18 @@ impl<T: BeaconChainTypes> ActiveSamplingRequest<T> {
                    // Peer has data column, send to verify
                    let Some(beacon_processor) = cx.beacon_processor_if_enabled() else {
                        // If processor is not available, error the entire sampling
-                        debug!(self.log, "Dropping sampling"; "block" => %self.block_root, "reason" => "beacon processor unavailable");
+                        debug!(self.log,
+                            "Dropping sampling";
+                            "block" => %self.block_root,
+                            "reason" => "beacon processor unavailable"
+                        );
                        return Err(SamplingError::ProcessorUnavailable);
                    };
-                    debug!(self.log, "Sending data_column for verification"; "block" => ?self.block_root, "column_indexes" => ?column_indexes);
+                    debug!(self.log,
+                        "Sending data_column for verification";
+                        "block" => ?self.block_root,
+                        "column_indexes" => ?column_indexes
+                    );
                    if let Err(e) = beacon_processor.send_rpc_validate_data_columns(
                        self.block_root,
                        data_columns,
@@ -316,22 +374,31 @@ impl<T: BeaconChainTypes> ActiveSamplingRequest<T> {
                        },
                    ) {
                        // TODO(das): Beacon processor is overloaded, what should we do?
-                        error!(self.log, "Dropping sampling"; "block" => %self.block_root, "reason" => e.to_string());
+                        error!(self.log,
+                            "Dropping sampling";
+                            "block" => %self.block_root,
+                            "reason" => e.to_string()
+                        );
                        return Err(SamplingError::SendFailed("beacon processor send failure"));
                    }
                }
            }
            Err(err) => {
-                debug!(self.log, "Sample download error"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "error" => ?err);
+                debug!(self.log, "Sample download error";
+                    "block_root" => %self.block_root,
+                    "column_indexes" => ?column_indexes,
+                    "error" => ?err
+                );
                metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::FAILURE]);

                // Error downloading, maybe penalize peer and retry again.
                // TODO(das) with different peer or different peer?
                for column_index in column_indexes {
                    let Some(request) = self.column_requests.get_mut(column_index) else {
-                        warn!(
-                            self.log,
-                            "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index
+                        warn!(self.log,
+                            "Active column sample request not found";
+                            "block_root" => %self.block_root,
+                            "column_index" => column_index
                        );
                        continue;
                    };
@@ -477,6 +544,10 @@ impl<T: BeaconChainTypes> ActiveSamplingRequest<T> {
                block_root: self.block_root,
                indices: column_indexes.clone(),
            },
+            // false = We issue requests to custodians who may or may not have received the
+            // samples yet. We don't have any signal (like an attestation or status message)
+            // that the custodian has received data.
+ false, ) .map_err(SamplingError::SendFailed)?; self.column_indexes_by_sampling_request @@ -522,8 +593,9 @@ mod request { peers_dont_have: HashSet, } + // Exposed only for testing assertions in lookup tests #[derive(Debug, Clone)] - enum Status { + pub(crate) enum Status { NoPeers, NotStarted, Sampling(PeerId), @@ -567,6 +639,11 @@ mod request { } } + #[cfg(test)] + pub(crate) fn status(&self) -> Status { + self.status.clone() + } + pub(crate) fn choose_peer( &mut self, cx: &SyncNetworkContext, diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index ed5946ada72..51d9d9da37f 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -8,9 +8,9 @@ use crate::sync::{network_context::SyncNetworkContext, BatchOperationOutcome, Ba use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use fnv::FnvHashMap; -use lighthouse_metrics::set_int_gauge; use lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerAction, PeerId}; +use metrics::set_int_gauge; use rand::seq::SliceRandom; use rand::Rng; use slog::{crit, debug, o, warn}; @@ -444,9 +444,9 @@ impl SyncingChain { self.request_batches(network)?; } } - } else if !self.good_peers_on_custody_subnets(self.processing_target, network) { + } else if !self.good_peers_on_sampling_subnets(self.processing_target, network) { // This is to handle the case where no batch was sent for the current processing - // target when there is no custody peers available. This is a valid state and should not + // target when there is no sampling peers available. This is a valid state and should not // return an error. return Ok(KeepChain); } else { @@ -1075,10 +1075,10 @@ impl SyncingChain { // check if we have the batch for our optimistic start. If not, request it first. // We wait for this batch before requesting any other batches. if let Some(epoch) = self.optimistic_start { - if !self.good_peers_on_custody_subnets(epoch, network) { + if !self.good_peers_on_sampling_subnets(epoch, network) { debug!( self.log, - "Waiting for peers to be available on custody column subnets" + "Waiting for peers to be available on sampling column subnets" ); return Ok(KeepChain); } @@ -1107,14 +1107,18 @@ impl SyncingChain { Ok(KeepChain) } - /// Checks all custody column subnets for peers. Returns `true` if there is at least one peer in - /// every custody column subnet. - fn good_peers_on_custody_subnets(&self, epoch: Epoch, network: &SyncNetworkContext) -> bool { + /// Checks all sampling column subnets for peers. Returns `true` if there is at least one peer in + /// every sampling column subnet. + fn good_peers_on_sampling_subnets( + &self, + epoch: Epoch, + network: &SyncNetworkContext, + ) -> bool { if network.chain.spec.is_peer_das_enabled_for_epoch(epoch) { - // Require peers on all custody column subnets before sending batches + // Require peers on all sampling column subnets before sending batches let peers_on_all_custody_subnets = network .network_globals() - .custody_subnets + .sampling_subnets .iter() .all(|subnet_id| { let peer_count = network @@ -1167,11 +1171,11 @@ impl SyncingChain { return None; } - // don't send batch requests until we have peers on custody subnets + // don't send batch requests until we have peers on sampling subnets // TODO(das): this is a workaround to avoid sending out excessive block requests because // block and data column requests are currently coupled. 
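
// [Editor's note] The sampling-subnet check above reduces to "every sampling column
// subnet has at least one usable peer". A standalone sketch, with `peers_on_subnet`
// standing in for the peer-DB lookup performed through `network` in the diff:
fn all_sampling_subnets_covered(
    sampling_subnets: &[u64],
    peers_on_subnet: impl Fn(u64) -> usize,
) -> bool {
    sampling_subnets
        .iter()
        .all(|subnet_id| peers_on_subnet(*subnet_id) > 0)
}
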
This can be removed once we find a // way to decouple the requests and do retries individually, see issue #6258. - if !self.good_peers_on_custody_subnets(self.to_be_downloaded, network) { + if !self.good_peers_on_sampling_subnets(self.to_be_downloaded, network) { debug!( self.log, "Waiting for peers to be available on custody column subnets" diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 28dea8e4b5e..0ef99838dee 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -51,8 +51,7 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::service::api_types::Id; -use lighthouse_network::PeerId; -use lighthouse_network::SyncInfo; +use lighthouse_network::{PeerId, SyncInfo}; use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; @@ -387,6 +386,7 @@ where #[cfg(test)] mod tests { use crate::network_beacon_processor::NetworkBeaconProcessor; + use crate::sync::SyncMessage; use crate::NetworkMessage; use super::*; @@ -399,7 +399,7 @@ mod tests { use beacon_processor::WorkEvent as BeaconWorkEvent; use lighthouse_network::service::api_types::SyncRequestId; use lighthouse_network::{ - rpc::StatusMessage, service::api_types::AppRequestId, NetworkGlobals, + rpc::StatusMessage, service::api_types::AppRequestId, NetworkConfig, NetworkGlobals, }; use slog::{o, Drain}; use slot_clock::TestingSlotClock; @@ -538,21 +538,20 @@ mod tests { } else { panic!("Should have sent a batch request to the peer") }; - let blob_req_id = match fork_name { - ForkName::Deneb | ForkName::Electra => { - if let Ok(NetworkMessage::SendRequest { - peer_id, - request: _, - request_id, - }) = self.network_rx.try_recv() - { - assert_eq!(&peer_id, expected_peer); - Some(request_id) - } else { - panic!("Should have sent a batch request to the peer") - } + let blob_req_id = if fork_name.deneb_enabled() { + if let Ok(NetworkMessage::SendRequest { + peer_id, + request: _, + request_id, + }) = self.network_rx.try_recv() + { + assert_eq!(&peer_id, expected_peer); + Some(request_id) + } else { + panic!("Should have sent a batch request to the peer") } - _ => None, + } else { + None }; (block_req_id, blob_req_id) } @@ -692,14 +691,18 @@ mod tests { log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); + let (sync_tx, _sync_rx) = mpsc::unbounded_channel::>(); + let network_config = Arc::new(NetworkConfig::default()); let globals = Arc::new(NetworkGlobals::new_test_globals( Vec::new(), &log, + network_config, chain.spec.clone(), )); let (network_beacon_processor, beacon_processor_rx) = NetworkBeaconProcessor::null_for_testing( globals.clone(), + sync_tx, chain.clone(), harness.runtime.task_executor.clone(), log.clone(), diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/tests/lookups.rs similarity index 91% rename from beacon_node/network/src/sync/block_lookups/tests.rs rename to beacon_node/network/src/sync/tests/lookups.rs index 5b4f17ac0dd..9f2c9ef66f0 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1,93 +1,50 @@ use crate::network_beacon_processor::NetworkBeaconProcessor; -use crate::sync::manager::{BlockProcessType, SyncManager}; -use crate::sync::sampling::SamplingConfig; -use 
crate::sync::{SamplingId, SyncMessage}; +use crate::sync::block_lookups::{ + BlockLookupSummary, PARENT_DEPTH_TOLERANCE, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, +}; +use crate::sync::{ + manager::{BlockProcessType, BlockProcessingResult, SyncManager}, + peer_sampling::SamplingConfig, + SamplingId, SyncMessage, +}; use crate::NetworkMessage; use std::sync::Arc; +use std::time::Duration; use super::*; use crate::sync::block_lookups::common::ResponseType; -use beacon_chain::blob_verification::GossipVerifiedBlob; -use beacon_chain::block_verification_types::BlockImportData; -use beacon_chain::builder::Witness; -use beacon_chain::data_availability_checker::Availability; -use beacon_chain::eth1_chain::CachingEth1Backend; -use beacon_chain::test_utils::{ - build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, - BeaconChainHarness, EphemeralHarnessType, NumBlobs, -}; -use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{ - AvailabilityPendingExecutedBlock, PayloadVerificationOutcome, PayloadVerificationStatus, + blob_verification::GossipVerifiedBlob, + block_verification_types::{AsBlock, BlockImportData}, + data_availability_checker::Availability, + test_utils::{ + build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, + BeaconChainHarness, EphemeralHarnessType, LoggerType, NumBlobs, + }, + validator_monitor::timestamp_now, + AvailabilityPendingExecutedBlock, AvailabilityProcessingStatus, BlockError, + PayloadVerificationOutcome, PayloadVerificationStatus, }; use beacon_processor::WorkEvent; -use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; -use lighthouse_network::service::api_types::{ - AppRequestId, DataColumnsByRootRequester, Id, SamplingRequester, SingleLookupReqId, - SyncRequestId, +use lighthouse_network::{ + rpc::{RPCError, RequestType, RpcErrorResponse}, + service::api_types::{ + AppRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, + SamplingRequester, SingleLookupReqId, SyncRequestId, + }, + types::SyncState, + NetworkConfig, NetworkGlobals, PeerId, }; -use lighthouse_network::types::SyncState; -use lighthouse_network::{NetworkGlobals, Request}; use slog::info; -use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; -use store::MemoryStore; +use slot_clock::{SlotClock, TestingSlotClock}; use tokio::sync::mpsc; -use types::data_column_sidecar::ColumnIndex; -use types::test_utils::TestRandom; use types::{ - test_utils::{SeedableRng, XorShiftRng}, - BlobSidecar, ForkName, MinimalEthSpec as E, SignedBeaconBlock, Slot, + data_column_sidecar::ColumnIndex, + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, Epoch, EthSpec, ForkName, + Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, }; -use types::{BeaconState, BeaconStateBase}; -use types::{DataColumnSidecar, Epoch}; - -type T = Witness, E, MemoryStore, MemoryStore>; - -/// This test utility enables integration testing of Lighthouse sync components. -/// -/// It covers the following: -/// 1. Sending `SyncMessage` to `SyncManager` to trigger `RangeSync`, `BackFillSync` and `BlockLookups` behaviours. -/// 2. Making assertions on `WorkEvent`s received from sync -/// 3. Making assertion on `NetworkMessage` received from sync (Outgoing RPC requests). -/// -/// The test utility covers testing the interactions from and to `SyncManager`. 
In diagram form: -/// +-----------------+ -/// | BeaconProcessor | -/// +---------+-------+ -/// ^ | -/// | | -/// WorkEvent | | SyncMsg -/// | | (Result) -/// | v -/// +--------+ +-----+-----------+ +----------------+ -/// | Router +----------->| SyncManager +------------>| NetworkService | -/// +--------+ SyncMsg +-----------------+ NetworkMsg +----------------+ -/// (RPC resp) | - RangeSync | (RPC req) -/// +-----------------+ -/// | - BackFillSync | -/// +-----------------+ -/// | - BlockLookups | -/// +-----------------+ -struct TestRig { - /// Receiver for `BeaconProcessor` events (e.g. block processing results). - beacon_processor_rx: mpsc::Receiver>, - beacon_processor_rx_queue: Vec>, - /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync) - network_rx: mpsc::UnboundedReceiver>, - /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) - network_rx_queue: Vec>, - /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. - sync_manager: SyncManager, - /// To manipulate sync state and peer connection status - network_globals: Arc>, - /// Beacon chain harness - harness: BeaconChainHarness>, - /// `rng` for generating test blocks and blobs. - rng: XorShiftRng, - fork_name: ForkName, - log: Logger, -} const D: Duration = Duration::new(0, 0); const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; @@ -102,8 +59,14 @@ struct TestRigConfig { impl TestRig { fn test_setup_with_config(config: Option) -> Self { - let enable_log = cfg!(feature = "test_logger"); - let log = build_log(slog::Level::Trace, enable_log); + let logger_type = if cfg!(feature = "test_logger") { + LoggerType::Test + } else if cfg!(feature = "ci_logger") { + LoggerType::CI + } else { + LoggerType::Null + }; + let log = build_log(slog::Level::Trace, logger_type); // Use `fork_from_env` logic to set correct fork epochs let mut spec = test_spec::(); @@ -116,7 +79,7 @@ impl TestRig { // Initialise a new beacon chain let harness = BeaconChainHarness::>::builder(E) - .spec(spec) + .spec(Arc::new(spec)) .logger(log.clone()) .deterministic_keypairs(1) .fresh_ephemeral_store() @@ -130,22 +93,24 @@ impl TestRig { let chain = harness.chain.clone(); let (network_tx, network_rx) = mpsc::unbounded_channel(); + let (sync_tx, sync_rx) = mpsc::unbounded_channel::>(); // TODO(das): make the generation of the ENR use the deterministic rng to have consistent // column assignments + let network_config = Arc::new(NetworkConfig::default()); let globals = Arc::new(NetworkGlobals::new_test_globals( Vec::new(), &log, + network_config, chain.spec.clone(), )); let (beacon_processor, beacon_processor_rx) = NetworkBeaconProcessor::null_for_testing( globals, + sync_tx, chain.clone(), harness.runtime.task_executor.clone(), log.clone(), ); - let (_sync_send, sync_recv) = mpsc::unbounded_channel::>(); - let fork_name = chain.spec.fork_name_at_slot::(chain.slot().unwrap()); // All current tests expect synced and EL online state @@ -159,13 +124,15 @@ impl TestRig { beacon_processor_rx_queue: vec![], network_rx, network_rx_queue: vec![], + sync_rx, rng, network_globals: beacon_processor.network_globals.clone(), sync_manager: SyncManager::new( chain, network_tx, beacon_processor.into(), - sync_recv, + // Pass empty recv not tied to any tx + mpsc::unbounded_channel().1, SamplingConfig::Custom { required_successes: vec![SAMPLING_REQUIRED_SUCCESSES], }, @@ -228,6 +195,13 @@ impl TestRig { self.send_sync_message(SyncMessage::SampleBlock(block_root, block_slot)) } + 
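
Worth a note on the rig construction above: `SyncManager::new` is handed `mpsc::unbounded_channel().1`, i.e. only the receiver half of a channel whose sender is dropped on the spot, while the real `sync_rx` stays on the rig and is drained manually via `drain_sync_rx` below. A minimal sketch of that pattern, assuming only tokio's mpsc API (the element type and values here are illustrative, not from the patch):

```rust
use tokio::sync::mpsc::{self, error::TryRecvError};

fn main() {
    // Keep only the receiver (`.1`); the sender half (`.0`) is dropped
    // immediately, so nothing can ever be sent on this channel.
    let mut rx = mpsc::unbounded_channel::<u32>().1;

    // With every sender gone and the buffer empty, try_recv reports
    // Disconnected rather than Empty, and it never blocks.
    assert!(matches!(rx.try_recv(), Err(TryRecvError::Disconnected)));
}
```
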
/// Drain all sync messages in the sync_rx attached to the beacon processor + fn drain_sync_rx(&mut self) { + while let Ok(sync_message) = self.sync_rx.try_recv() { + self.send_sync_message(sync_message); + } + } + fn rand_block(&mut self) -> SignedBeaconBlock { self.rand_block_and_blobs(NumBlobs::None).0 } @@ -284,6 +258,10 @@ impl TestRig { self.sync_manager.active_parent_lookups().len() } + fn active_range_sync_chain(&self) -> (RangeSyncType, Slot, Slot) { + self.sync_manager.get_range_sync_chains().unwrap().unwrap() + } + fn assert_single_lookups_count(&self, count: usize) { assert_eq!( self.active_single_lookups_count(), @@ -301,6 +279,13 @@ impl TestRig { ); } + fn expect_active_sampling(&mut self, block_root: &Hash256) { + assert!(self + .sync_manager + .active_sampling_requests() + .contains(block_root)); + } + fn expect_clean_finished_sampling(&mut self) { self.expect_empty_network(); self.expect_sampling_result_work(); @@ -616,7 +601,7 @@ impl TestRig { id, peer_id, RPCError::ErrorResponse( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "older than deneb".into(), ), ); @@ -713,10 +698,10 @@ impl TestRig { let first_dc = data_columns.first().unwrap(); let block_root = first_dc.block_root(); let sampling_request_id = match id.0 { - SyncRequestId::DataColumnsByRoot( - _, - _requester @ DataColumnsByRootRequester::Sampling(sampling_id), - ) => sampling_id.sampling_request_id, + SyncRequestId::DataColumnsByRoot(DataColumnsByRootRequestId { + requester: DataColumnsByRootRequester::Sampling(sampling_id), + .. + }) => sampling_id.sampling_request_id, _ => unreachable!(), }; self.complete_data_columns_by_root_request(id, data_columns); @@ -741,14 +726,15 @@ impl TestRig { data_columns: Vec>>, missing_components: bool, ) { - let lookup_id = - if let SyncRequestId::DataColumnsByRoot(_, DataColumnsByRootRequester::Custody(id)) = - ids.first().unwrap().0 - { - id.requester.0.lookup_id - } else { - panic!("not a custody requester") - }; + let lookup_id = if let SyncRequestId::DataColumnsByRoot(DataColumnsByRootRequestId { + requester: DataColumnsByRootRequester::Custody(id), + .. 
+ }) = ids.first().unwrap().0 + { + id.requester.0.lookup_id + } else { + panic!("not a custody requester") + }; let first_column = data_columns.first().cloned().unwrap(); @@ -892,7 +878,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::BlocksByRoot(request), + request: RequestType::BlocksByRoot(request), request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, @@ -912,7 +898,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::BlobsByRoot(request), + request: RequestType::BlobsByRoot(request), request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), } if request .blob_ids @@ -937,7 +923,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::BlocksByRoot(request), + request: RequestType::BlocksByRoot(request), request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, @@ -959,7 +945,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::BlobsByRoot(request), + request: RequestType::BlobsByRoot(request), request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), } if request .blob_ids @@ -987,7 +973,7 @@ impl TestRig { .pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::DataColumnsByRoot(request), + request: RequestType::DataColumnsByRoot(request), request_id: AppRequestId::Sync(id @ SyncRequestId::DataColumnsByRoot { .. }), } if request .data_column_ids @@ -1081,6 +1067,11 @@ impl TestRig { .unwrap_or_else(|e| panic!("Expected sampling result work: {e}")) } + fn expect_no_work_event(&mut self) { + self.drain_processor_rx(); + assert!(self.network_rx_queue.is_empty()); + } + fn expect_no_penalty_for(&mut self, peer_id: PeerId) { self.drain_network_rx(); let downscore_events = self @@ -1152,6 +1143,7 @@ impl TestRig { penalty_msg, expect_penalty_msg, "Unexpected penalty msg for {peer_id}" ); + self.log(&format!("Found expected penalty {penalty_msg}")); } pub fn expect_single_penalty(&mut self, peer_id: PeerId, expect_penalty_msg: &'static str) { @@ -1281,6 +1273,46 @@ impl TestRig { imported: false, }); } + + fn assert_sampling_request_ongoing(&self, block_root: Hash256, indices: &[ColumnIndex]) { + for index in indices { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + if !matches!(status, crate::sync::peer_sampling::Status::Sampling { .. }) { + panic!("expected {block_root} {index} request to be ongoing: {status:?}"); + } + } + } + + fn assert_sampling_request_nopeers(&self, block_root: Hash256, indices: &[ColumnIndex]) { + for index in indices { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + if !matches!(status, crate::sync::peer_sampling::Status::NoPeers { .. }) { + panic!("expected {block_root} {index} request to have no peers: {status:?}"); + } + } + } + + fn log_sampling_requests(&self, block_root: Hash256, indices: &[ColumnIndex]) { + let statuses = indices + .iter() + .map(|index| { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + (index, status) + }) + .collect::>(); + self.log(&format!( + "Sampling request status for {block_root}: {statuses:?}" + )); + } } #[test] @@ -1339,7 +1371,7 @@ fn test_single_block_lookup_empty_response() { // The peer does not have the block. It should be penalized. r.single_lookup_block_response(id, peer_id, None); - r.expect_penalty(peer_id, "NoResponseReturned"); + r.expect_penalty(peer_id, "NotEnoughResponsesReturned"); // it should be retried let id = r.expect_block_lookup_request(block_root); // Send the right block this time. @@ -1469,7 +1501,7 @@ fn test_parent_lookup_happy_path() { // Processing succeeds, now the rest of the chain should be sent for processing. rig.parent_block_processed( block_root, - BlockError::BlockIsAlreadyKnown(block_root).into(), + BlockError::DuplicateFullyImported(block_root).into(), ); rig.expect_parent_chain_process(); rig.parent_chain_processed_success(block_root, &[]); @@ -1665,7 +1697,18 @@ fn test_parent_lookup_too_deep_grow_ancestor() { ) } - rig.expect_penalty(peer_id, "chain_too_long"); + // Should create a new syncing chain + rig.drain_sync_rx(); + assert_eq!( + rig.active_range_sync_chain(), + ( + RangeSyncType::Head, + Slot::new(0), + Slot::new(PARENT_DEPTH_TOLERANCE as u64 - 1) + ) + ); + // Should not penalize the peer, but the network is not clear because of the blocks_by_range requests + rig.expect_no_penalty_for(peer_id); rig.assert_failed_chain(chain_hash); } @@ -1692,7 +1735,18 @@ fn test_parent_lookup_too_deep_grow_tip() { ); } - rig.expect_penalty(peer_id, "chain_too_long"); + // Should create a new syncing chain + rig.drain_sync_rx(); + assert_eq!( + rig.active_range_sync_chain(), + ( + RangeSyncType::Head, + Slot::new(0), + Slot::new(PARENT_DEPTH_TOLERANCE as u64 - 2) + ) + ); + // Should not penalize the peer, but the network is not clear because of the blocks_by_range requests + rig.expect_no_penalty_for(peer_id); rig.assert_failed_chain(tip.canonical_root()); } @@ -1837,7 +1891,7 @@ fn test_same_chain_race_condition() { rig.log(&format!("Block {i} was removed and is already known")); rig.parent_block_processed( chain_hash, - BlockError::BlockIsAlreadyKnown(block.canonical_root()).into(), + BlockError::DuplicateFullyImported(block.canonical_root()).into(), ) } else { rig.log(&format!("Block {i} ParentUnknown")); @@ -2014,6 +2068,77 @@ fn sampling_avoid_retrying_same_peer() { r.expect_empty_network(); } +#[test] +fn sampling_batch_requests() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + let _supernode = r.new_connected_supernode_peer(); + let (block, data_columns) = r.rand_block_and_data_columns(); + let block_root = block.canonical_root(); + r.trigger_sample_block(block_root, block.slot()); + + // Retrieve the sample request, which should be batched. + let (sync_request_id, column_indexes) = r + .expect_only_data_columns_by_root_requests(block_root, 1) + .pop() + .unwrap(); + assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); + r.assert_sampling_request_ongoing(block_root, &column_indexes); + + // Resolve the request.
+ r.complete_valid_sampling_column_requests( + vec![(sync_request_id, column_indexes.clone())], + data_columns, + ); + r.expect_clean_finished_sampling(); +} + +#[test] +fn sampling_batch_requests_not_enough_responses_returned() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + let _supernode = r.new_connected_supernode_peer(); + let (block, data_columns) = r.rand_block_and_data_columns(); + let block_root = block.canonical_root(); + r.trigger_sample_block(block_root, block.slot()); + + // Retrieve the sample request, which should be batched. + let (sync_request_id, column_indexes) = r + .expect_only_data_columns_by_root_requests(block_root, 1) + .pop() + .unwrap(); + assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); + + // The request status should be set to Sampling. + r.assert_sampling_request_ongoing(block_root, &column_indexes); + + // Split the indexes to simulate the case where the supernode doesn't have the requested column. + let (column_indexes_supernode_does_not_have, column_indexes_to_complete) = + column_indexes.split_at(1); + + // Complete the requests but only partially, so a NotEnoughResponsesReturned error occurs. + let data_columns_to_complete = data_columns + .iter() + .filter(|d| column_indexes_to_complete.contains(&d.index)) + .cloned() + .collect::>(); + r.complete_data_columns_by_root_request( + (sync_request_id, column_indexes.clone()), + &data_columns_to_complete, + ); + + // The request status should be set to NoPeers since the supernode, the only peer, returned not enough responses. + r.log_sampling_requests(block_root, &column_indexes); + r.assert_sampling_request_nopeers(block_root, column_indexes_supernode_does_not_have); + + // The sampling request stalls. + r.expect_empty_network(); + r.expect_no_work_event(); + r.expect_active_sampling(&block_root); +} + #[test] fn custody_lookup_happy_path() { let Some(mut r) = TestRig::test_setup_after_peerdas() else { @@ -2028,9 +2153,10 @@ fn custody_lookup_happy_path() { // Should not request blobs let id = r.expect_block_lookup_request(block.canonical_root()); r.complete_valid_block_request(id, block.into(), true); - let custody_column_count = spec.custody_requirement * spec.data_columns_per_subnet() as u64; + // for each slot we download `samples_per_slot` columns + let sample_column_count = spec.samples_per_slot * spec.data_columns_per_subnet() as u64; let custody_ids = - r.expect_only_data_columns_by_root_requests(block_root, custody_column_count as usize); + r.expect_only_data_columns_by_root_requests(block_root, sample_column_count as usize); r.complete_valid_custody_request(custody_ids, data_columns, false); r.expect_no_active_lookups(); } @@ -2449,7 +2575,7 @@ mod deneb_only { self.rig.single_blob_component_processed( self.blob_req_id.expect("blob request id").lookup_id, BlockProcessingResult::Err(BlockError::AvailabilityCheck( - AvailabilityCheckError::KzgVerificationFailed, + AvailabilityCheckError::InvalidBlobs(kzg::Error::KzgVerificationFailed), )), ); self.rig.assert_single_lookups_count(1); @@ -2550,11 +2676,6 @@ mod deneb_only { self.blobs.pop().expect("blobs"); self } - fn invalidate_blobs_too_many(mut self) -> Self { - let first_blob = self.blobs.first().expect("blob").clone(); - self.blobs.push(first_blob); - self - } fn expect_block_process(mut self) -> Self { self.rig.expect_block_process(ResponseType::Block); self @@ -2643,21 +2764,6 @@ mod deneb_only { .expect_no_block_request(); } - #[test] - fn 
single_block_response_then_too_many_blobs_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - tester - .block_response_triggering_process() - .invalidate_blobs_too_many() - .blobs_response() - .expect_penalty("TooManyResponses") - // Network context returns "download success" because the request has enough blobs + it - // downscores the peer for returning too many. - .expect_no_block_request(); - } - // Test peer returning block that has unknown parent, and a new lookup is created #[test] fn parent_block_unknown_parent() { @@ -2698,7 +2804,7 @@ mod deneb_only { }; tester .empty_block_response() - .expect_penalty("NoResponseReturned") + .expect_penalty("NotEnoughResponsesReturned") .expect_block_request() .expect_no_blobs_request() .block_response_and_expect_blob_request() diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs new file mode 100644 index 00000000000..47666b413c5 --- /dev/null +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -0,0 +1,67 @@ +use crate::sync::manager::SyncManager; +use crate::sync::range_sync::RangeSyncType; +use crate::sync::SyncMessage; +use crate::NetworkMessage; +use beacon_chain::builder::Witness; +use beacon_chain::eth1_chain::CachingEth1Backend; +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use beacon_processor::WorkEvent; +use lighthouse_network::NetworkGlobals; +use slog::Logger; +use slot_clock::ManualSlotClock; +use std::sync::Arc; +use store::MemoryStore; +use tokio::sync::mpsc; +use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E}; + +mod lookups; +mod range; + +type T = Witness, E, MemoryStore, MemoryStore>; + +/// This test utility enables integration testing of Lighthouse sync components. +/// +/// It covers the following: +/// 1. Sending `SyncMessage` to `SyncManager` to trigger `RangeSync`, `BackFillSync` and `BlockLookups` behaviours. +/// 2. Making assertions on `WorkEvent`s received from sync +/// 3. Making assertions on `NetworkMessage` received from sync (Outgoing RPC requests). +/// +/// The test utility covers testing the interactions from and to `SyncManager`. In diagram form: +/// +-----------------+ +/// | BeaconProcessor | +/// +---------+-------+ +/// ^ | +/// | | +/// WorkEvent | | SyncMsg +/// | | (Result) +/// | v +/// +--------+ +-----+-----------+ +----------------+ +/// | Router +----------->| SyncManager +------------>| NetworkService | +/// +--------+ SyncMsg +-----------------+ NetworkMsg +----------------+ +/// (RPC resp) | - RangeSync | (RPC req) +/// +-----------------+ +/// | - BackFillSync | +/// +-----------------+ +/// | - BlockLookups | +/// +-----------------+ +struct TestRig { + /// Receiver for `BeaconProcessor` events (e.g. block processing results). + beacon_processor_rx: mpsc::Receiver>, + beacon_processor_rx_queue: Vec>, + /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync) + network_rx: mpsc::UnboundedReceiver>, + /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) + network_rx_queue: Vec>, + /// Receiver for `SyncMessage` from the network + sync_rx: mpsc::UnboundedReceiver>, + /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. + sync_manager: SyncManager, + /// To manipulate sync state and peer connection status + network_globals: Arc>, + /// Beacon chain harness + harness: BeaconChainHarness>, + /// `rng` for generating test blocks and blobs.
+ rng: XorShiftRng, + fork_name: ForkName, + log: Logger, +} diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/beacon_node/network/src/sync/tests/range.rs @@ -0,0 +1 @@ + diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index cbf6284f2ae..5b48e3f0d88 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -7,7 +7,7 @@ edition = { workspace = true } [dependencies] derivative = { workspace = true } itertools = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } types = { workspace = true } state_processing = { workspace = true } @@ -25,4 +25,4 @@ tokio = { workspace = true } maplit = { workspace = true } [features] -portable = ["beacon_chain/portable"] \ No newline at end of file +portable = ["beacon_chain/portable"] diff --git a/beacon_node/operation_pool/src/attestation_id.rs b/beacon_node/operation_pool/src/attestation_id.rs deleted file mode 100644 index f0dc6536a54..00000000000 --- a/beacon_node/operation_pool/src/attestation_id.rs +++ /dev/null @@ -1,12 +0,0 @@ -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; - -/// Serialized `AttestationData` augmented with a domain to encode the fork info. -/// -/// [DEPRECATED] To be removed once all nodes have updated to schema v12. -#[derive( - PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode, Serialize, Deserialize, -)] -pub struct AttestationId { - v: Vec, -} diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs index 07fd72f02c5..cbab97e7199 100644 --- a/beacon_node/operation_pool/src/bls_to_execution_changes.rs +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -113,7 +113,7 @@ impl BlsToExecutionChanges { .validators() .get(validator_index as usize) .map_or(true, |validator| { - let prune = validator.has_eth1_withdrawal_credential(spec) + let prune = validator.has_execution_withdrawal_credential(spec) && head_block .message() .body() diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index c60480ef377..3a002bf8703 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1,5 +1,4 @@ mod attestation; -mod attestation_id; mod attestation_storage; mod attester_slashing; mod bls_to_execution_changes; @@ -585,7 +584,7 @@ impl OperationPool { && state .get_validator(address_change.as_inner().message.validator_index as usize) .map_or(false, |validator| { - !validator.has_eth1_withdrawal_credential(spec) + !validator.has_execution_withdrawal_credential(spec) }) }, |address_change| address_change.as_inner().clone(), @@ -801,7 +800,7 @@ mod release_tests { use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::{common::get_attesting_indices_from_state, VerifyOperation}; use std::collections::BTreeSet; - use std::sync::LazyLock; + use std::sync::{Arc, LazyLock}; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::*; @@ -816,7 +815,7 @@ mod release_tests { spec: Option, ) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(E::default()) - .spec_or_default(spec) + .spec_or_default(spec.map(Arc::new)) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() diff 
--git a/beacon_node/operation_pool/src/metrics.rs b/beacon_node/operation_pool/src/metrics.rs index e2a8b43ed17..14088688e5e 100644 --- a/beacon_node/operation_pool/src/metrics.rs +++ b/beacon_node/operation_pool/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static BUILD_REWARD_CACHE_TIME: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 4ae6a7386d2..620d0579292 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -401,15 +401,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("self-limiter") - .long("self-limiter") - .help("This flag is deprecated and has no effect.") - .hide(true) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("disable-self-limiter") .long("disable-self-limiter") @@ -525,16 +516,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("http-spec-fork") - .long("http-spec-fork") - .requires("enable_http") - .value_name("FORK") - .help("This flag is deprecated and has no effect.") - .hide(true) - .action(ArgAction::Set) - .display_order(0) - ) .arg( Arg::new("http-enable-tls") - .long("http-enable-tls") @@ -564,16 +545,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("http-allow-sync-stalled") - .long("http-allow-sync-stalled") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .requires("enable_http") - .help("This flag is deprecated and has no effect.") - .hide(true) - .display_order(0) - ) .arg( Arg::new("http-sse-capacity-multiplier") .long("http-sse-capacity-multiplier") @@ -659,7 +630,15 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - + .arg( + Arg::new("idontwant-message-size-threshold") + .long("idontwant-message-size-threshold") + .help("Specifies the minimum message size for which IDONTWANT messages are sent. \ This is an optimization strategy to avoid sending IDONTWANT messages for smaller messages.") + .action(ArgAction::Set) + .hide(true) + .display_order(0) + ) /* * Monitoring metrics */ @@ -1283,14 +1262,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("disable-lock-timeouts") - .long("disable-lock-timeouts") - .help("This flag is deprecated and has no effect.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("disable-proposer-reorgs") - .long("disable-proposer-reorgs") @@ -1503,14 +1474,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("always-prefer-builder-payload") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .long("always-prefer-builder-payload") - .help("This flag is deprecated and has no effect.") - .display_order(0) - ) .arg( Arg::new("invalid-gossip-verified-blocks-path") .action(ArgAction::Set) @@ -1522,14 +1485,6 @@ pub fn cli_app() -> Command { filling up their disks.") .display_order(0) ) - .arg( - Arg::new("progressive-balances") - .long("progressive-balances") - .value_name("MODE") - .help("Deprecated.
This optimisation is now the default and cannot be disabled.") - .action(ArgAction::Set) - .display_order(0) - ) .arg( Arg::new("beacon-processor-max-workers") .long("beacon-processor-max-workers") @@ -1591,14 +1546,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("disable-duplicate-warn-logs") - .long("disable-duplicate-warn-logs") - .help("This flag is deprecated and has no effect.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("beacon-node-backend") .long("beacon-node-backend") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 3c3f47f8f9f..194eb4f960f 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -152,14 +152,6 @@ pub fn get_config( client_config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.get_one::("http-spec-fork").is_some() { - warn!( - log, - "Ignoring --http-spec-fork"; - "info" => "this flag is deprecated and will be removed" - ); - } - if cli_args.get_flag("http-enable-tls") { client_config.http_api.tls_config = Some(TlsConfig { cert: cli_args @@ -175,14 +167,6 @@ pub fn get_config( }); } - if cli_args.get_flag("http-allow-sync-stalled") { - warn!( - log, - "Ignoring --http-allow-sync-stalled"; - "info" => "this flag is deprecated and will be removed" - ); - } - client_config.http_api.sse_capacity_multiplier = parse_required(cli_args, "http-sse-capacity-multiplier")?; @@ -362,14 +346,6 @@ pub fn get_config( .map(Duration::from_millis); } - if cli_args.get_flag("always-prefer-builder-payload") { - warn!( - log, - "Ignoring --always-prefer-builder-payload"; - "info" => "this flag is deprecated and will be removed" - ); - } - // Set config values from parse values. el_config.secret_file = Some(secret_file.clone()); el_config.execution_endpoint = Some(execution_endpoint.clone()); @@ -396,13 +372,15 @@ pub fn get_config( } // 4844 params - client_config.trusted_setup = context + if let Some(trusted_setup) = context .eth2_network_config .as_ref() - .and_then(|config| config.kzg_trusted_setup.as_ref()) - .map(|trusted_setup_bytes| serde_json::from_slice(trusted_setup_bytes)) + .map(|config| serde_json::from_slice(&config.kzg_trusted_setup)) .transpose() - .map_err(|e| format!("Unable to read trusted setup file: {}", e))?; + .map_err(|e| format!("Unable to read trusted setup file: {}", e))? 
+ { + client_config.trusted_setup = trusted_setup; + }; // Override default trusted setup file if required if let Some(trusted_setup_file_path) = cli_args.get_one::("trusted-setup-file-override") @@ -411,7 +389,7 @@ pub fn get_config( .map_err(|e| format!("Failed to open trusted setup file: {}", e))?; let trusted_setup: TrustedSetup = serde_json::from_reader(file) .map_err(|e| format!("Unable to read trusted setup file: {}", e))?; - client_config.trusted_setup = Some(trusted_setup); + client_config.trusted_setup = trusted_setup; } if let Some(freezer_dir) = cli_args.get_one::("freezer-dir") { @@ -791,14 +769,6 @@ pub fn get_config( .individual_tracking_threshold = count; } - if cli_args.get_flag("disable-lock-timeouts") { - warn!( - log, - "Ignoring --disable-lock-timeouts"; - "info" => "this flag is deprecated and will be removed" - ); - } - if cli_args.get_flag("disable-proposer-reorgs") { client_config.chain.re_org_head_threshold = None; client_config.chain.re_org_parent_threshold = None; @@ -898,14 +868,6 @@ pub fn get_config( client_config.network.invalid_block_storage = Some(path); } - if cli_args.get_one::("progressive-balances").is_some() { - warn!( - log, - "Progressive balances mode is deprecated"; - "info" => "please remove --progressive-balances" - ); - } - if let Some(max_workers) = clap_utils::parse_optional(cli_args, "beacon-processor-max-workers")? { client_config.beacon_processor.max_workers = max_workers; @@ -1491,6 +1453,20 @@ pub fn set_network_config( Some(Default::default()) } }; + + if let Some(idontwant_message_size_threshold) = + cli_args.get_one::("idontwant-message-size-threshold") + { + config.idontwant_message_size_threshold = idontwant_message_size_threshold + .parse::() + .map_err(|_| { + format!( + "Invalid idontwant message size threshold value passed: {}", + idontwant_message_size_threshold + ) + })?; + } + Ok(()) } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index e56e23c6dfe..db3ed75f03a 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -126,7 +126,7 @@ impl ProductionBeaconNode { let slasher = Arc::new( Slasher::open( slasher_config, - Arc::new(spec), + spec, log.new(slog::o!("service" => "slasher")), ) .map_err(|e| format!("Slasher open error: {:?}", e))?, @@ -181,7 +181,7 @@ impl ProductionBeaconNode { builder .build_beacon_chain()? - .network(&client_config.network) + .network(Arc::new(client_config.network)) .await? .notifier()? 
.http_metrics_config(client_config.http_metrics.clone()) diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index ff178592687..36a47d0358b 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -26,7 +26,7 @@ safe_arith = { workspace = true } state_processing = { workspace = true } slog = { workspace = true } serde = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } lru = { workspace = true } sloggers = { workspace = true } directory = { workspace = true } diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs index 75c24cd7682..1600ecfa1b5 100644 --- a/beacon_node/store/src/garbage_collection.rs +++ b/beacon_node/store/src/garbage_collection.rs @@ -23,7 +23,6 @@ where .try_fold(vec![], |mut ops, state_root| { let state_root = state_root?; ops.push(StoreOp::DeleteState(state_root, None)); - ops.push(StoreOp::DeleteStateTemporaryFlag(state_root)); Result::<_, Error>::Ok(ops) })?; @@ -31,7 +30,7 @@ where debug!( self.log, "Garbage collecting {} temporary states", - delete_ops.len() / 2 + delete_ops.len() ); self.do_atomically_with_block_and_blobs_cache(delete_ops)?; } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 75caadc6c7e..324edcbefdb 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -43,7 +43,6 @@ use std::path::Path; use std::sync::Arc; use std::time::Duration; use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; -use types::light_client_update::CurrentSyncCommitteeProofLen; use types::*; /// On-disk database that stores finalized states efficiently. @@ -81,7 +80,7 @@ pub struct HotColdDB, Cold: ItemStore> { /// LRU cache of replayed states. historic_state_cache: Mutex>>, /// Chain spec. - pub(crate) spec: ChainSpec, + pub(crate) spec: Arc, /// Logger. pub log: Logger, /// Mere vessel for E. @@ -193,7 +192,7 @@ pub enum HotColdDBError { impl HotColdDB, MemoryStore> { pub fn open_ephemeral( config: StoreConfig, - spec: ChainSpec, + spec: Arc, log: Logger, ) -> Result, MemoryStore>, Error> { Self::verify_config(&config)?; @@ -230,7 +229,7 @@ impl HotColdDB, BeaconNodeBackend> { blobs_db_path: &Path, migrate_schema: impl FnOnce(Arc, SchemaVersion, SchemaVersion) -> Result<(), Error>, config: StoreConfig, - spec: ChainSpec, + spec: Arc, log: Logger, ) -> Result, Error> { Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; @@ -628,15 +627,14 @@ impl, Cold: ItemStore> HotColdDB pub fn get_sync_committee_branch( &self, block_root: &Hash256, - ) -> Result>, Error> { + ) -> Result, Error> { let column = DBColumn::SyncCommitteeBranch; if let Some(bytes) = self .hot_db .get_bytes(column.into(), &block_root.as_ssz_bytes())? { - let sync_committee_branch: FixedVector = - FixedVector::from_ssz_bytes(&bytes)?; + let sync_committee_branch = Vec::::from_ssz_bytes(&bytes)?; return Ok(Some(sync_committee_branch)); } @@ -664,7 +662,7 @@ impl, Cold: ItemStore> HotColdDB pub fn store_sync_committee_branch( &self, block_root: Hash256, - sync_committee_branch: &FixedVector, + sync_committee_branch: &MerkleProof, ) -> Result<(), Error> { let column = DBColumn::SyncCommitteeBranch; self.hot_db.put_bytes( @@ -1184,9 +1182,18 @@ impl, Cold: ItemStore> HotColdDB } StoreOp::DeleteState(state_root, slot) => { - let column_name: &str = DBColumn::BeaconStateSummary.into(); + // Delete the hot state summary. 
+ let summary_column_name: &str = DBColumn::BeaconStateSummary.into(); key_value_batch.push(KeyValueStoreOp::DeleteKey( - column_name.to_owned(), + summary_column_name.to_owned(), + state_root.as_slice().to_vec(), + )); + + // Delete the state temporary flag (if any). Temporary flags are commonly + // created by the state advance routine. + let temporary_column_name: &str = DBColumn::BeaconStateTemporary.into(); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + temporary_column_name.to_owned(), state_root.as_slice().to_vec(), )); @@ -1903,7 +1910,7 @@ impl, Cold: ItemStore> HotColdDB } /// Get a reference to the `ChainSpec` used by the database. - pub fn get_chain_spec(&self) -> &ChainSpec { + pub fn get_chain_spec(&self) -> &Arc { &self.spec } diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index a7e0c09ed1f..71dc96d99e9 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -385,6 +385,7 @@ mod test { use beacon_chain::test_utils::BeaconChainHarness; use beacon_chain::types::{ChainSpec, MainnetEthSpec}; use sloggers::{null::NullLoggerBuilder, Build}; + use std::sync::Arc; use types::FixedBytesExtended; fn get_state() -> BeaconState { @@ -401,7 +402,8 @@ mod test { fn block_root_iter() { let log = NullLoggerBuilder.build().unwrap(); let store = - HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(); + HotColdDB::open_ephemeral(Config::default(), Arc::new(ChainSpec::minimal()), log) + .unwrap(); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let mut state_a: BeaconState = get_state(); @@ -449,7 +451,8 @@ mod test { fn state_root_iter() { let log = NullLoggerBuilder.build().unwrap(); let store = - HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(); + HotColdDB::open_ephemeral(Config::default(), Arc::new(ChainSpec::minimal()), log) + .unwrap(); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let mut state_a: BeaconState = get_state(); diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 2b70603068d..f278302e8cb 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::{set_gauge, try_create_int_gauge, *}; +pub use metrics::{set_gauge, try_create_int_gauge, *}; use directory::size_of_dir; use std::path::Path; diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 7fb0b2f4e70..86c97af0da1 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -54,13 +54,13 @@ * [Merge Migration](./merge-migration.md) * [Late Block Re-orgs](./late-block-re-orgs.md) * [Blobs](./advanced-blobs.md) -* [Built-In Documentation](./help_general.md) +* [Command Line Reference (CLI)](./help_general.md) * [Beacon Node](./help_bn.md) * [Validator Client](./help_vc.md) * [Validator Manager](./help_vm.md) * [Create](./help_vm_create.md) * [Import](./help_vm_import.md) - * [Move](./help_vm_move.md) + * [Move](./help_vm_move.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 6cb66859128..80eba7a0590 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -230,7 +230,6 @@ Example Response Body "TERMINAL_TOTAL_DIFFICULTY": "10790000", "TERMINAL_BLOCK_HASH": "0x0000000000000000000000000000000000000000000000000000000000000000", "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH": "18446744073709551615", -
"SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY": "128", "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT": "16384", "MIN_GENESIS_TIME": "1614588812", "GENESIS_FORK_VERSION": "0x00001020", @@ -263,7 +262,6 @@ Example Response Body "HYSTERESIS_QUOTIENT": "4", "HYSTERESIS_DOWNWARD_MULTIPLIER": "1", "HYSTERESIS_UPWARD_MULTIPLIER": "5", - "SAFE_SLOTS_TO_UPDATE_JUSTIFIED": "8", "MIN_DEPOSIT_AMOUNT": "1000000000", "MAX_EFFECTIVE_BALANCE": "32000000000", "EFFECTIVE_BALANCE_INCREMENT": "1000000000", diff --git a/book/src/cli.md b/book/src/cli.md deleted file mode 100644 index f9e7df07488..00000000000 --- a/book/src/cli.md +++ /dev/null @@ -1,55 +0,0 @@ -# Command-Line Interface (CLI) - -The `lighthouse` binary provides all necessary Ethereum consensus client functionality. It -has two primary sub-commands: - -- `$ lighthouse beacon_node`: the largest and most fundamental component which connects to - the p2p network, processes messages and tracks the head of the beacon - chain. -- `$ lighthouse validator_client`: a lightweight but important component which loads a validators private - key and signs messages using a `beacon_node` as a source-of-truth. - -There are also some ancillary binaries like `lcli` and `account_manager`, but -these are primarily for testing. - -> **Note:** documentation sometimes uses `$ lighthouse bn` and `$ lighthouse -> vc` instead of the long-form `beacon_node` and `validator_client`. These -> commands are valid on the CLI too. - -## Installation - -Typical users may install `lighthouse` to `CARGO_HOME` with `cargo install ---path lighthouse` from the root of the repository. See ["Configuring the -`PATH` environment variable"](https://www.rust-lang.org/tools/install) for more -information. - -For developers, we recommend building Lighthouse using the `$ cargo build --release ---bin lighthouse` command and executing binaries from the -`/target/release` directory. This is more ergonomic when -modifying and rebuilding regularly. - -## Documentation - -Each binary supports the `--help` flag, this is the best source of -documentation. - -```bash -lighthouse beacon_node --help -``` - -```bash -lighthouse validator_client --help -``` - -## Creating a new database/testnet - -Lighthouse should run out-of-the box and connect to the current testnet -maintained by Sigma Prime. - -However, for developers, testnets can be created by following the instructions -outlined in [testnets](./testnets.md). The steps listed here will create a -local database specified to a new testnet. - -## Resuming from an existing database - -Once a database/testnet has been created, it can be resumed by running `$ lighthouse bn`. diff --git a/book/src/developers.md b/book/src/developers.md index 244c935ac2f..d90708c5a97 100644 --- a/book/src/developers.md +++ b/book/src/developers.md @@ -20,6 +20,7 @@ Lighthouse currently uses the following ENR fields: ### Lighthouse Custom Fields Lighthouse is currently using the following custom ENR fields. + | Field | Description | | ---- | ---- | | `quic` | The UDP port on which the QUIC transport is listening on IPv4 | diff --git a/book/src/help_bn.md b/book/src/help_bn.md index d53e56eaf31..79377d73f0f 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -295,9 +295,6 @@ Options: which don't improve their payload after the first call, and high values are useful for ensuring the EL is given ample notice. Default: 1/3 of a slot. - --progressive-balances - Deprecated. This optimisation is now the default and cannot be - disabled. 
--proposer-reorg-cutoff Maximum delay after the start of the slot at which to propose a reorging block. Lower values can prevent failed reorgs by ensuring the @@ -332,14 +329,6 @@ Options: --quic-port6 The UDP port that quic will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to `port6` + 1 - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --self-limiter-protocols Enables the outbound rate limiter (requests made by this node).Rate limit quotas per protocol can be set in the form of @@ -393,27 +382,6 @@ Options: database. --target-peers The target number of peers. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --trusted-peers One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system. @@ -448,8 +416,6 @@ Flags: incompatible with data availability checks. Checkpoint syncing is the preferred method for syncing a node. Only use this flag when testing. DO NOT use on mainnet! - --always-prefer-builder-payload - This flag is deprecated and has no effect. --always-prepare-payload Send payload attributes with every fork choice update. This is intended for use by block builders, relays and developers. You should @@ -473,8 +439,6 @@ Flags: Explicitly disables syncing of deposit logs from the execution node. This overrides any previous option that depends on it. Useful if you intend to run a non-validating beacon node. - --disable-duplicate-warn-logs - This flag is deprecated and has no effect. --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. This @@ -482,8 +446,6 @@ Flags: boot. --disable-inbound-rate-limiter Disables the inbound rate limiter (requests received by this node). - --disable-lock-timeouts - This flag is deprecated and has no effect. 
--disable-log-timestamp If present, do not include timestamps in logging output. --disable-malloc-tuning diff --git a/book/src/help_general.md b/book/src/help_general.md index 1c2d1266d08..996b048d10a 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -1,4 +1,4 @@ -# Lighthouse General Commands +# Lighthouse CLI Reference ``` Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a @@ -77,39 +77,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir
Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. -V, --version Print version @@ -122,9 +93,6 @@ Flags: debugging specific memory allocation issues. -h, --help Prints help information - -l - DEPRECATED Enables environment logging giving access to sub-protocol - logs such as discv5 and libp2p --log-color Force outputting colors when emitting logs to the terminal. --logfile-compress diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 7f2cfab8e3a..2cfbfbc857a 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -118,14 +118,6 @@ Options: specify nodes that are used to send beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes. - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --secrets-dir The directory which contains the password to unlock the validator voting keypairs. Each password should be contained in a file where the @@ -140,27 +132,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. 
Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validator-registration-batch-size Defines the number of validators per validator/register_validator request sent to the BN. This value can be reduced to avoid timeouts @@ -177,6 +148,22 @@ Options: Default is unlimited. Flags: + --beacon-nodes-sync-tolerances + A comma-separated list of 3 values which sets the size of each sync + distance range when determining the health of each connected beacon + node. The first value determines the `Synced` range. If a connected + beacon node is synced to within this number of slots it is considered + 'Synced'. The second value determines the `Small` sync distance range. + This range starts immediately after the `Synced` range. The third + value determines the `Medium` sync distance range. This range starts + immediately after the `Small` range. Any sync distance value beyond + that is considered `Large`. For example, a value of `8,8,48` would + have ranges like the following: `Synced`: 0..=8 `Small`: 9..=16 + `Medium`: 17..=64 `Large`: 65.. These values are used to determine + what ordering beacon node fallbacks are used in. Generally, `Synced` + nodes are preferred over `Small` and so on. Nodes in the `Synced` + range will tie-break based on their ordering in `--beacon-nodes`. This + ensures the primary beacon node is prioritised. [default: 8,8,48] --builder-proposals If this flag is set, Lighthouse will query the Beacon Node for only block headers during proposals and will sign over headers. Useful for @@ -194,12 +181,6 @@ Flags: If present, do not configure the system allocator. Providing this flag will generally increase memory usage, it should only be provided when debugging specific memory allocation issues. - --disable-run-on-all - DEPRECATED. Use --broadcast. By default, Lighthouse publishes - attestation, sync committee subscriptions and proposer preparation - messages to all beacon nodes provided in the `--beacon-nodes flag`. - This option changes that behaviour such that these api calls only go - out to the first available and synced beacon node --disable-slashing-protection-web3signer Disable Lighthouse's slashing protection for all web3signer keys. This can reduce the I/O burden on the VC but is only safe if slashing @@ -264,8 +245,6 @@ Flags: --prefer-builder-proposals If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. - --produce-block-v3 - This flag is deprecated and is no longer in use. --stdin-inputs If present, read all user inputs from stdin instead of tty. --unencrypted-http-transport diff --git a/book/src/help_vm.md b/book/src/help_vm.md index f787985b215..9b6c5d4f3bd 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -69,39 +69,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. 
[possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. Flags: --disable-log-timestamp diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 1803bb534c6..2743117eae2 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -91,14 +91,6 @@ Options: If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --suggested-fee-recipient All created validators will use this value for the suggested fee recipient. Omit this flag to use the default value from the VC. @@ -106,34 +98,15 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. 
- --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. Flags: --disable-deposits When provided don't generate the deposits JSON file that is commonly used for submitting validator deposits via a web UI. Using this flag will save several seconds per validator if the user has an alternate - strategy for submitting deposits. + strategy for submitting deposits. If used, the + --force-bls-withdrawal-credentials flag is also required to ensure users + are aware that an --eth1-withdrawal-address is not set. --disable-log-timestamp If present, do not include timestamps in logging output. --disable-malloc-tuning diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index 0883139ad21..b4999d3fe31 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -50,39 +50,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty.
Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validators-file The path to a JSON file containing a list of validators to be imported to the validator client. This file is usually named "validators.json". diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index 12dd1e91402..99eee32c782 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -74,14 +74,6 @@ Options: If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --src-vc-token The file containing a token required by the source validator client. --src-vc-url @@ -95,27 +87,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validators The validators to be moved. Either a list of 0x-prefixed validator pubkeys or the keyword "all". diff --git a/book/src/redundancy.md b/book/src/redundancy.md index ee685a17cf7..daf0eb4a5b4 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -74,8 +74,7 @@ lighthouse bn \ Prior to v3.2.0 fallback beacon nodes also required the `--subscribe-all-subnets` and `--import-all-attestations` flags. These flags are no longer required as the validator client will now broadcast subscriptions to all connected beacon nodes by default. This broadcast behaviour -can be disabled using the `--broadcast none` flag for `lighthouse vc` (or `--disable-run-on-all` -[deprecated]). +can be disabled using the `--broadcast none` flag for `lighthouse vc`. 
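For illustration (this command is a sketch, not part of the diff above; the `--beacon-nodes` endpoints are placeholder values), a validator client running with fallback beacon nodes and broadcast disabled might be started like:

```bash
# Hypothetical example: two beacon nodes, with subscription broadcasting
# to all connected beacon nodes switched off via --broadcast none.
lighthouse vc \
  --beacon-nodes http://localhost:5052,http://192.168.0.101:5052 \
  --broadcast none
```

The individual broadcast modes are described in the "Broadcast modes" section that follows.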
### Broadcast modes diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index cba7399c9bf..a4b5f4dc1c4 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -1,6 +1,5 @@ //! A helper library for parsing values from `clap::ArgMatches`. -use alloy_primitives::U256 as Uint256; use clap::builder::styling::*; use clap::ArgMatches; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; @@ -30,38 +29,9 @@ pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result(cli_args, "terminal-total-difficulty-override")? - { - let stripped = string.replace(',', ""); - let terminal_total_difficulty = Uint256::from_str(&stripped).map_err(|e| { - format!( - "Could not parse --terminal-total-difficulty-override as decimal value: {:?}", - e - ) - })?; - - eth2_network_config.config.terminal_total_difficulty = terminal_total_difficulty; - } - - if let Some(hash) = parse_optional(cli_args, "terminal-block-hash-override")? { - eth2_network_config.config.terminal_block_hash = hash; - } - - if let Some(epoch) = parse_optional(cli_args, "terminal-block-hash-epoch-override")? { - eth2_network_config - .config - .terminal_block_hash_activation_epoch = epoch; - } - - if let Some(slots) = parse_optional(cli_args, "safe-slots-to-import-optimistically")? { - eth2_network_config - .config - .safe_slots_to_import_optimistically = slots; - } - Ok(eth2_network_config) } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 10b4755ba26..d23a4068f1b 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -29,6 +29,7 @@ store = { workspace = true } slashing_protection = { workspace = true } mediatype = "0.19.13" pretty_reqwest_error = { workspace = true } +derivative = { workspace = true } [dev-dependencies] tokio = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 2805d36b90c..522c6414eae 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -16,6 +16,7 @@ pub mod types; use self::mixin::{RequestAccept, ResponseOptional}; use self::types::{Error as ResponseError, *}; +use derivative::Derivative; use futures::Stream; use futures_util::StreamExt; use lighthouse_network::PeerId; @@ -117,7 +118,7 @@ impl fmt::Display for Error { /// A struct to define a variety of different timeouts for different validator tasks to ensure /// proper fallback behaviour. -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct Timeouts { pub attestation: Duration, pub attester_duties: Duration, @@ -154,13 +155,17 @@ impl Timeouts { /// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a /// Lighthouse Beacon Node HTTP server (`http_api`). 
-#[derive(Clone)] +#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq)] pub struct BeaconNodeHttpClient { + #[derivative(PartialEq = "ignore")] client: reqwest::Client, server: SensitiveUrl, timeouts: Timeouts, } +impl Eq for BeaconNodeHttpClient {} + impl fmt::Display for BeaconNodeHttpClient { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.server.fmt(f) diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 3925d2deda8..c187399ebd7 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1678,27 +1678,23 @@ impl FullBlockContents { bytes: &[u8], fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - .map(|block| FullBlockContents::Block(block)) - } - ForkName::Deneb | ForkName::Electra => { - let mut builder = ssz::SszDecoderBuilder::new(bytes); + if fork_name.deneb_enabled() { + let mut builder = ssz::SszDecoderBuilder::new(bytes); - builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; - builder.register_type::>()?; + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + builder.register_type::>()?; - let mut decoder = builder.build()?; - let block = decoder.decode_next_with(|bytes| { - BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - })?; - let kzg_proofs = decoder.decode_next()?; - let blobs = decoder.decode_next()?; + let mut decoder = builder.build()?; + let block = decoder + .decode_next_with(|bytes| BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name))?; + let kzg_proofs = decoder.decode_next()?; + let blobs = decoder.decode_next()?; - Ok(FullBlockContents::new(block, Some((kzg_proofs, blobs)))) - } + Ok(FullBlockContents::new(block, Some((kzg_proofs, blobs)))) + } else { + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + .map(|block| FullBlockContents::Block(block)) } } @@ -1738,15 +1734,14 @@ impl ForkVersionDeserialize for FullBlockContents { value: serde_json::value::Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - Ok(FullBlockContents::Block( - BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?, - )) - } - ForkName::Deneb | ForkName::Electra => Ok(FullBlockContents::BlockContents( + if fork_name.deneb_enabled() { + Ok(FullBlockContents::BlockContents( BlockContents::deserialize_by_fork::<'de, D>(value, fork_name)?, - )), + )) + } else { + Ok(FullBlockContents::Block( + BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?, + )) } } } @@ -1838,28 +1833,25 @@ impl PublishBlockRequest { /// SSZ decode with fork variant determined by `fork_name`. 
pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { + if fork_name.deneb_enabled() { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + builder.register_type::>()?; + + let mut decoder = builder.build()?; + let block = decoder.decode_next_with(|bytes| { SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - .map(|block| PublishBlockRequest::Block(Arc::new(block))) - } - ForkName::Deneb | ForkName::Electra => { - let mut builder = ssz::SszDecoderBuilder::new(bytes); - builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; - builder.register_type::>()?; - - let mut decoder = builder.build()?; - let block = decoder.decode_next_with(|bytes| { - SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - })?; - let kzg_proofs = decoder.decode_next()?; - let blobs = decoder.decode_next()?; - Ok(PublishBlockRequest::new( - Arc::new(block), - Some((kzg_proofs, blobs)), - )) - } + })?; + let kzg_proofs = decoder.decode_next()?; + let blobs = decoder.decode_next()?; + Ok(PublishBlockRequest::new( + Arc::new(block), + Some((kzg_proofs, blobs)), + )) + } else { + SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + .map(|block| PublishBlockRequest::Block(Arc::new(block))) } } @@ -1883,42 +1875,6 @@ impl PublishBlockRequest { } } -/// Converting from a `SignedBlindedBeaconBlock` into a full `SignedBlockContents`. -pub fn into_full_block_and_blobs( - blinded_block: SignedBlindedBeaconBlock, - maybe_full_payload_contents: Option>, -) -> Result, String> { - match maybe_full_payload_contents { - None => { - let signed_block = blinded_block - .try_into_full_block(None) - .ok_or("Failed to build full block with payload".to_string())?; - Ok(PublishBlockRequest::new(Arc::new(signed_block), None)) - } - // This variant implies a pre-deneb block - Some(FullPayloadContents::Payload(execution_payload)) => { - let signed_block = blinded_block - .try_into_full_block(Some(execution_payload)) - .ok_or("Failed to build full block with payload".to_string())?; - Ok(PublishBlockRequest::new(Arc::new(signed_block), None)) - } - // This variant implies a post-deneb block - Some(FullPayloadContents::PayloadAndBlobs(payload_and_blobs)) => { - let signed_block = blinded_block - .try_into_full_block(Some(payload_and_blobs.execution_payload)) - .ok_or("Failed to build full block with payload".to_string())?; - - Ok(PublishBlockRequest::new( - Arc::new(signed_block), - Some(( - payload_and_blobs.blobs_bundle.proofs, - payload_and_blobs.blobs_bundle.blobs, - )), - )) - } - } -} - impl TryFrom>> for PublishBlockRequest { type Error = &'static str; fn try_from(block: Arc>) -> Result { diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 9104db8f67d..cd5d7a8bd4d 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -7,6 +7,7 @@ use std::env; use std::path::PathBuf; +use std::sync::Arc; use types::{ChainSpec, EthSpecId}; pub use paste::paste; @@ -44,15 +45,12 @@ const CHIADO_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url #[derive(Debug, Clone)] pub struct Eth2Config { pub eth_spec_id: EthSpecId, - pub spec: ChainSpec, + pub spec: Arc, } impl Default for Eth2Config { fn default() -> Self { - Self { - eth_spec_id: EthSpecId::Minimal, - spec: ChainSpec::minimal(), - } + Self::minimal() } } @@ -60,21 +58,21 
@@ impl Eth2Config { pub fn mainnet() -> Self { Self { eth_spec_id: EthSpecId::Mainnet, - spec: ChainSpec::mainnet(), + spec: Arc::new(ChainSpec::mainnet()), } } pub fn minimal() -> Self { Self { eth_spec_id: EthSpecId::Minimal, - spec: ChainSpec::minimal(), + spec: Arc::new(ChainSpec::minimal()), } } pub fn gnosis() -> Self { Self { eth_spec_id: EthSpecId::Gnosis, - spec: ChainSpec::gnosis(), + spec: Arc::new(ChainSpec::gnosis()), } } } diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 4b34405e5b3..09cf2072d2f 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -28,3 +28,4 @@ sensitive_url = { workspace = true } slog = { workspace = true } logging = { workspace = true } bytes = { workspace = true } +kzg = { workspace = true } diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index 74fca4c5010..1eca01bbeef 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -140,4 +140,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 07bd21b35c2..500555a2694 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -123,4 +123,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index 67f1e5b6831..d67d77d3bea 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -127,4 +127,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index acf4d83f323..18591fecdcd 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -149,4 +149,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 8b84d870103..b08a6180bf0 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ 
-123,4 +123,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 472ac55ca09..3d0ffc5b9e8 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -14,6 +14,7 @@ use bytes::Bytes; use discv5::enr::{CombinedKey, Enr}; use eth2_config::{instantiate_hardcoded_nets, HardcodedNet}; +use kzg::trusted_setup::get_trusted_setup; use pretty_reqwest_error::PrettyReqwestError; use reqwest::{Client, Error}; use sensitive_url::SensitiveUrl; @@ -24,7 +25,7 @@ use std::io::{Read, Write}; use std::path::PathBuf; use std::str::FromStr; use std::time::Duration; -use types::{BeaconState, ChainSpec, Config, Epoch, EthSpec, EthSpecId, Hash256}; +use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId, Hash256}; use url::Url; pub use eth2_config::GenesisStateSource; @@ -43,26 +44,6 @@ instantiate_hardcoded_nets!(eth2_config); pub const DEFAULT_HARDCODED_NETWORK: &str = "mainnet"; -/// Contains the bytes from the trusted setup json. -/// The mainnet trusted setup is also reused in testnets. -/// -/// This is done to ensure that testnets also inherit the high security and -/// randomness of the mainnet kzg trusted setup ceremony. -/// -/// Note: The trusted setup for both mainnet and minimal presets are the same. -pub const TRUSTED_SETUP_BYTES: &[u8] = - include_bytes!("../built_in_network_configs/trusted_setup.json"); - -/// Returns `Some(TrustedSetup)` if the deneb fork epoch is set and `None` otherwise. -/// -/// Returns an error if the trusted setup parsing failed. -fn get_trusted_setup_from_config(config: &Config) -> Option> { - config - .deneb_fork_epoch - .filter(|epoch| epoch.value != Epoch::max_value()) - .map(|_| TRUSTED_SETUP_BYTES.to_vec()) -} - /// A simple slice-or-vec enum to avoid cloning the beacon state bytes in the /// binary whilst also supporting loading them from a file at runtime. #[derive(Clone, PartialEq, Debug)] @@ -104,7 +85,7 @@ pub struct Eth2NetworkConfig { pub genesis_state_source: GenesisStateSource, pub genesis_state_bytes: Option, pub config: Config, - pub kzg_trusted_setup: Option>, + pub kzg_trusted_setup: Vec, } impl Eth2NetworkConfig { @@ -122,7 +103,7 @@ impl Eth2NetworkConfig { fn from_hardcoded_net(net: &HardcodedNet) -> Result { let config: Config = serde_yaml::from_reader(net.config) .map_err(|e| format!("Unable to parse yaml config: {:?}", e))?; - let kzg_trusted_setup = get_trusted_setup_from_config(&config); + let kzg_trusted_setup = get_trusted_setup(); Ok(Self { deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block) .map_err(|e| format!("Unable to parse deploy block: {:?}", e))?, @@ -359,7 +340,7 @@ impl Eth2NetworkConfig { (None, GenesisStateSource::Unknown) }; - let kzg_trusted_setup = get_trusted_setup_from_config(&config); + let kzg_trusted_setup = get_trusted_setup(); Ok(Self { deposit_contract_deploy_block, @@ -577,7 +558,7 @@ mod tests { GenesisStateSource::Unknown }; // With Deneb enabled by default we must set a trusted setup here. 
- let kzg_trusted_setup = get_trusted_setup_from_config(&config).unwrap(); + let kzg_trusted_setup = get_trusted_setup(); let testnet = Eth2NetworkConfig { deposit_contract_deploy_block, @@ -588,7 +569,7 @@ mod tests { .map(Encode::as_ssz_bytes) .map(Into::into), config, - kzg_trusted_setup: Some(kzg_trusted_setup), + kzg_trusted_setup, }; testnet diff --git a/common/lighthouse_metrics/Cargo.toml b/common/lighthouse_metrics/Cargo.toml deleted file mode 100644 index fe966f4a9c6..00000000000 --- a/common/lighthouse_metrics/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "lighthouse_metrics" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -prometheus = "0.13.0" diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index cac6d073f25..73cbdf44d42 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -9,7 +9,7 @@ test_logger = [] # Print log output to stderr when running tests instead of drop [dependencies] chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index a4a1acabd48..4bb37392984 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -1,6 +1,4 @@ -use lighthouse_metrics::{ - inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult, -}; +use metrics::{inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult}; use slog::Logger; use slog_term::Decorator; use std::io::{Result, Write}; @@ -217,6 +215,19 @@ impl TimeLatch { } pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { + let mut tracing_log_path = PathBuf::new(); + + // Ensure that `tracing_log_path` only contains directories. + for p in base_tracing_log_path.iter() { + tracing_log_path = tracing_log_path.join(p); + if let Ok(metadata) = tracing_log_path.metadata() { + if !metadata.is_dir() { + tracing_log_path.pop(); + break; + } + } + } + let filter_layer = match tracing_subscriber::EnvFilter::try_from_default_env() .or_else(|_| tracing_subscriber::EnvFilter::try_new("warn")) { @@ -232,7 +243,7 @@ pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { .max_log_files(2) .filename_prefix("libp2p") .filename_suffix("log") - .build(base_tracing_log_path.clone()) + .build(tracing_log_path.clone()) else { eprintln!("Failed to initialize libp2p rolling file appender"); return; @@ -243,7 +254,7 @@ pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { .max_log_files(2) .filename_prefix("discv5") .filename_suffix("log") - .build(base_tracing_log_path.clone()) + .build(tracing_log_path) else { eprintln!("Failed to initialize discv5 rolling file appender"); return; diff --git a/common/logging/src/tracing_metrics_layer.rs b/common/logging/src/tracing_metrics_layer.rs index 89a1f4d1f16..5d272adbf59 100644 --- a/common/logging/src/tracing_metrics_layer.rs +++ b/common/logging/src/tracing_metrics_layer.rs @@ -1,6 +1,5 @@ //! Exposes [`MetricsLayer`]: A tracing layer that registers metrics of logging events. 
-use lighthouse_metrics as metrics; use std::sync::LazyLock; use tracing_log::NormalizeEvent; diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index b91e68c518e..79a07eed166 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } libc = "0.2.79" parking_lot = { workspace = true } tikv-jemalloc-ctl = { version = "0.6.0", optional = true, features = ["stats"] } diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index 41d8d28291d..30313d06723 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -4,7 +4,7 @@ //! https://www.gnu.org/software/libc/manual/html_node/The-GNU-Allocator.html //! //! These functions are generally only suitable for Linux systems. -use lighthouse_metrics::*; +use metrics::*; use parking_lot::Mutex; use std::env; use std::os::raw::c_int; @@ -38,60 +38,57 @@ pub static GLOBAL_LOCK: LazyLock> = LazyLock::new(|| <_>::default()); // Metrics for the malloc. For more information, see: // // https://man7.org/linux/man-pages/man3/mallinfo.3.html -pub static MALLINFO_ARENA: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_ARENA: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_arena", "The total amount of memory allocated by means other than mmap(2). \ This figure includes both in-use blocks and blocks on the free list.", ) }); -pub static MALLINFO_ORDBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_ORDBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_ordblks", "The number of ordinary (i.e., non-fastbin) free blocks.", ) }); -pub static MALLINFO_SMBLKS: LazyLock> = +pub static MALLINFO_SMBLKS: LazyLock> = LazyLock::new(|| try_create_int_gauge("mallinfo_smblks", "The number of fastbin free blocks.")); -pub static MALLINFO_HBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_HBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_hblks", "The number of blocks currently allocated using mmap.", ) }); -pub static MALLINFO_HBLKHD: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_HBLKHD: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_hblkhd", "The number of bytes in blocks currently allocated using mmap.", ) }); -pub static MALLINFO_FSMBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_FSMBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_fsmblks", "The total number of bytes in fastbin free blocks.", ) }); -pub static MALLINFO_UORDBLKS: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_uordblks", - "The total number of bytes used by in-use allocations.", - ) - }); -pub static MALLINFO_FORDBLKS: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_fordblks", - "The total number of bytes in free blocks.", - ) - }); -pub static MALLINFO_KEEPCOST: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_keepcost", - "The total amount of releasable free space at the top of the heap..", - ) - }); +pub static MALLINFO_UORDBLKS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_uordblks", + "The total number of bytes used by in-use allocations.", + ) +}); +pub static MALLINFO_FORDBLKS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_fordblks", + "The total number of bytes in free blocks.", + ) +}); +pub static MALLINFO_KEEPCOST: 
LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_keepcost", + "The total amount of releasable free space at the top of the heap.", + ) +}); /// Calls `mallinfo` and updates Prometheus metrics with the results. pub fn scrape_mallinfo_metrics() { diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs index a392a74e8f1..0e2e00cb0ef 100644 --- a/common/malloc_utils/src/jemalloc.rs +++ b/common/malloc_utils/src/jemalloc.rs @@ -7,7 +7,7 @@ //! //! A) `JEMALLOC_SYS_WITH_MALLOC_CONF` at compile-time. //! B) `_RJEM_MALLOC_CONF` at runtime. -use lighthouse_metrics::{set_gauge, try_create_int_gauge, IntGauge}; +use metrics::{set_gauge, try_create_int_gauge, IntGauge}; use std::sync::LazyLock; use tikv_jemalloc_ctl::{arenas, epoch, stats, Error}; @@ -15,22 +15,22 @@ use tikv_jemalloc_ctl::{arenas, epoch, stats, Error}; static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; // Metrics for jemalloc. -pub static NUM_ARENAS: LazyLock> = +pub static NUM_ARENAS: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_num_arenas", "The number of arenas in use")); -pub static BYTES_ALLOCATED: LazyLock> = LazyLock::new(|| { +pub static BYTES_ALLOCATED: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_allocated", "Equivalent to stats.allocated") }); -pub static BYTES_ACTIVE: LazyLock> = +pub static BYTES_ACTIVE: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_bytes_active", "Equivalent to stats.active")); -pub static BYTES_MAPPED: LazyLock> = +pub static BYTES_MAPPED: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_bytes_mapped", "Equivalent to stats.mapped")); -pub static BYTES_METADATA: LazyLock> = LazyLock::new(|| { +pub static BYTES_METADATA: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_metadata", "Equivalent to stats.metadata") }); -pub static BYTES_RESIDENT: LazyLock> = LazyLock::new(|| { +pub static BYTES_RESIDENT: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_resident", "Equivalent to stats.resident") }); -pub static BYTES_RETAINED: LazyLock> = LazyLock::new(|| { +pub static BYTES_RETAINED: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_retained", "Equivalent to stats.retained") }); diff --git a/common/metrics/Cargo.toml b/common/metrics/Cargo.toml new file mode 100644 index 00000000000..a7f4f4b967e --- /dev/null +++ b/common/metrics/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "metrics" +version = "0.2.0" +edition = { workspace = true } + +[dependencies] +prometheus = { workspace = true } diff --git a/common/lighthouse_metrics/src/lib.rs b/common/metrics/src/lib.rs similarity index 99% rename from common/lighthouse_metrics/src/lib.rs rename to common/metrics/src/lib.rs index 2a1e99defaf..1f2ac71aea5 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/metrics/src/lib.rs @@ -20,10 +20,10 @@ //! ## Example //! //! ```rust -//! use lighthouse_metrics::*; +//! use metrics::*; //! use std::sync::LazyLock; //! -//! // These metrics are "magically" linked to the global registry defined in `lighthouse_metrics`. +//! // These metrics are "magically" linked to the global registry defined in `metrics`. //! pub static RUN_COUNT: LazyLock> = LazyLock::new(|| try_create_int_counter( //! "runs_total",
"Total number of runs" diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 55f18edd526..2da32c307ee 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -14,7 +14,7 @@ eth2 = { workspace = true } serde_json = { workspace = true } serde = { workspace = true } lighthouse_version = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } slog = { workspace = true } store = { workspace = true } regex = { workspace = true } diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index e157d82c11b..2f6c820f562 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -1,5 +1,5 @@ use super::types::{BeaconProcessMetrics, ValidatorProcessMetrics}; -use lighthouse_metrics::{MetricFamily, MetricType}; +use metrics::{MetricFamily, MetricType}; use serde_json::json; use std::collections::HashMap; use std::path::Path; @@ -155,7 +155,7 @@ fn get_value(mf: &MetricFamily) -> Option { /// Collects all metrics and returns a `serde_json::Value` object with the required metrics /// from the metrics hashmap. pub fn gather_metrics(metrics_map: &HashMap) -> Option { - let metric_families = lighthouse_metrics::gather(); + let metric_families = metrics::gather(); let mut res = serde_json::Map::with_capacity(metrics_map.len()); for mf in metric_families.iter() { let metric_name = mf.get_name(); diff --git a/common/slot_clock/Cargo.toml b/common/slot_clock/Cargo.toml index 13bcf006a9e..c2f330cd507 100644 --- a/common/slot_clock/Cargo.toml +++ b/common/slot_clock/Cargo.toml @@ -6,5 +6,5 @@ edition = { workspace = true } [dependencies] types = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } diff --git a/common/slot_clock/src/metrics.rs b/common/slot_clock/src/metrics.rs index 24023c9ed75..ec95e90d4af 100644 --- a/common/slot_clock/src/metrics.rs +++ b/common/slot_clock/src/metrics.rs @@ -1,5 +1,5 @@ use crate::SlotClock; -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; use types::{EthSpec, Slot}; diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 7928d4a3c97..26bcd7b339c 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -4,11 +4,17 @@ version = "0.1.0" authors = ["Sigma Prime "] edition = { workspace = true } +[features] +default = ["slog"] +slog = ["dep:slog", "dep:sloggers", "dep:logging"] +tracing = ["dep:tracing"] + [dependencies] async-channel = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } +tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } +slog = { workspace = true, optional = true } futures = { workspace = true } -lighthouse_metrics = { workspace = true } -sloggers = { workspace = true } -logging = { workspace = true } +metrics = { workspace = true } +sloggers = { workspace = true, optional = true } +logging = { workspace = true, optional = true } +tracing = { workspace = true, optional = true } diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index d6edfd3121c..92ddb7c0be2 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -1,14 +1,20 @@ mod metrics; +#[cfg(not(feature = "tracing"))] pub mod test_utils; use futures::channel::mpsc::Sender; use futures::prelude::*; -use slog::{debug, o, trace}; use std::sync::Weak; use 
tokio::runtime::{Handle, Runtime}; pub use tokio::task::JoinHandle; +// Set up logging framework +#[cfg(not(feature = "tracing"))] +use slog::{debug, o}; +#[cfg(feature = "tracing")] +use tracing::debug; + /// Provides a reason when Lighthouse is shut down. #[derive(Copy, Clone, Debug, PartialEq)] pub enum ShutdownReason { @@ -79,7 +85,7 @@ pub struct TaskExecutor { /// /// The task must provide a reason for shutting down. signal_tx: Sender, - + #[cfg(not(feature = "tracing"))] log: slog::Logger, } @@ -94,18 +100,20 @@ impl TaskExecutor { pub fn new>( handle: T, exit: async_channel::Receiver<()>, - log: slog::Logger, + #[cfg(not(feature = "tracing"))] log: slog::Logger, signal_tx: Sender, ) -> Self { Self { handle_provider: handle.into(), exit, signal_tx, + #[cfg(not(feature = "tracing"))] log, } } /// Clones the task executor adding a service name. + #[cfg(not(feature = "tracing"))] pub fn clone_with_name(&self, service_name: String) -> Self { TaskExecutor { handle_provider: self.handle_provider.clone(), @@ -115,6 +123,16 @@ impl TaskExecutor { } } + /// Clones the task executor adding a service name. + #[cfg(feature = "tracing")] + pub fn clone(&self) -> Self { + TaskExecutor { + handle_provider: self.handle_provider.clone(), + exit: self.exit.clone(), + signal_tx: self.signal_tx.clone(), + } + } + /// A convenience wrapper for `Self::spawn` which ignores a `Result` as long as both `Ok`/`Err` /// are of type `()`. /// @@ -150,10 +168,13 @@ impl TaskExecutor { drop(timer); }); } else { + #[cfg(not(feature = "tracing"))] debug!( self.log, "Couldn't spawn monitor task. Runtime shutting down" - ) + ); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn monitor task. Runtime shutting down"); } } @@ -175,7 +196,7 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime. This function does not wrap the task in an `async-channel::Receiver` /// like [spawn](#method.spawn). /// The caller of this function is responsible for wrapping up the task with an `async-channel::Receiver` to - /// ensure that the task gets canceled appropriately. + /// ensure that the task gets cancelled appropriately. /// This function generates prometheus metrics on number of tasks and task duration. /// /// This is useful in cases where the future to be spawned needs to do additional cleanup work when @@ -197,7 +218,10 @@ impl TaskExecutor { if let Some(handle) = self.handle() { handle.spawn(future); } else { + #[cfg(not(feature = "tracing"))] debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. Runtime shutting down"); } } } @@ -215,7 +239,7 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime wrapped in an `async-channel::Receiver` returning an optional /// join handle to the future. - /// The task is canceled when the corresponding async-channel is dropped. + /// The task is cancelled when the corresponding async-channel is dropped. /// /// This function generates prometheus metrics on number of tasks and task duration. 
pub fn spawn_handle( @@ -224,6 +248,8 @@ impl TaskExecutor { name: &'static str, ) -> Option>> { let exit = self.exit(); + + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { @@ -234,12 +260,12 @@ impl TaskExecutor { Some(handle.spawn(async move { futures::pin_mut!(exit); let result = match future::select(Box::pin(task), exit).await { - future::Either::Left((value, _)) => { - trace!(log, "Async task completed"; "task" => name); - Some(value) - } + future::Either::Left((value, _)) => Some(value), future::Either::Right(_) => { + #[cfg(not(feature = "tracing"))] debug!(log, "Async task shutdown, exit received"; "task" => name); + #[cfg(feature = "tracing")] + debug!(task = name, "Async task shutdown, exit received"); None } }; @@ -247,7 +273,10 @@ impl TaskExecutor { result })) } else { - debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(not(feature = "tracing"))] + debug!(log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. Runtime shutting down"); None } } else { @@ -270,6 +299,7 @@ impl TaskExecutor { F: FnOnce() -> R + Send + 'static, R: Send + 'static, { + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); let timer = metrics::start_timer_vec(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]); @@ -278,19 +308,22 @@ impl TaskExecutor { let join_handle = if let Some(handle) = self.handle() { handle.spawn_blocking(task) } else { + #[cfg(not(feature = "tracing"))] debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. Runtime shutting down"); return None; }; let future = async move { let result = match join_handle.await { - Ok(result) => { - trace!(log, "Blocking task completed"; "task" => name); - Ok(result) - } - Err(e) => { - debug!(log, "Blocking task ended unexpectedly"; "error" => %e); - Err(e) + Ok(result) => Ok(result), + Err(error) => { + #[cfg(not(feature = "tracing"))] + debug!(log, "Blocking task ended unexpectedly"; "error" => %error); + #[cfg(feature = "tracing")] + debug!(%error, "Blocking task ended unexpectedly"); + Err(error) } }; drop(timer); @@ -321,32 +354,48 @@ impl TaskExecutor { ) -> Option { let timer = metrics::start_timer_vec(&metrics::BLOCK_ON_TASKS_HISTOGRAM, &[name]); metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); let handle = self.handle()?; let exit = self.exit(); - + #[cfg(not(feature = "tracing"))] debug!( log, "Starting block_on task"; "name" => name ); + #[cfg(feature = "tracing")] + debug!(name, "Starting block_on task"); + handle.block_on(async { let output = tokio::select! { output = future => { + #[cfg(not(feature = "tracing"))] debug!( log, "Completed block_on task"; "name" => name ); + #[cfg(feature = "tracing")] + debug!( + name, + "Completed block_on task" + ); Some(output) }, _ = exit => { + #[cfg(not(feature = "tracing"))] debug!( log, "Cancelled block_on task"; "name" => name, ); + #[cfg(feature = "tracing")] + debug!( + name, + "Cancelled block_on task" + ); None } }; @@ -376,6 +425,7 @@ impl TaskExecutor { } /// Returns a reference to the logger. 
+ #[cfg(not(feature = "tracing"))] pub fn log(&self) -> &slog::Logger { &self.log } diff --git a/common/task_executor/src/metrics.rs b/common/task_executor/src/metrics.rs index a40bfdf4e72..bd4d6a50b9e 100644 --- a/common/task_executor/src/metrics.rs +++ b/common/task_executor/src/metrics.rs @@ -1,5 +1,5 @@ /// Handles async task metrics -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static ASYNC_TASKS_COUNT: LazyLock> = LazyLock::new(|| { diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index 84f5ce5f189..a9407c392d9 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -17,6 +17,6 @@ serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } headers = "0.3.2" -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } serde_array_query = "0.1.0" bytes = { workspace = true } diff --git a/common/warp_utils/src/metrics.rs b/common/warp_utils/src/metrics.rs index 505d2775833..fabcf936507 100644 --- a/common/warp_utils/src/metrics.rs +++ b/common/warp_utils/src/metrics.rs @@ -1,5 +1,5 @@ use eth2::lighthouse::{ProcessHealth, SystemHealth}; -use lighthouse_metrics::*; +use metrics::*; use std::sync::LazyLock; pub static PROCESS_NUM_THREADS: LazyLock> = LazyLock::new(|| { diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 4a4f6e9086a..b32e0aa6656 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -12,7 +12,7 @@ state_processing = { workspace = true } proto_array = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } slog = { workspace = true } [dev-dependencies] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index ca59a6adfb6..85704042df4 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1300,43 +1300,6 @@ where } } - /// Returns `Ok(false)` if a block is not viable to be imported optimistically. - /// - /// ## Notes - /// - /// Equivalent to the function with the same name in the optimistic sync specs: - /// - /// https://github.com/ethereum/consensus-specs/blob/dev/sync/optimistic.md#helpers - pub fn is_optimistic_candidate_block( - &self, - current_slot: Slot, - block_slot: Slot, - block_parent_root: &Hash256, - spec: &ChainSpec, - ) -> Result> { - // If the block is sufficiently old, import it. - if block_slot + spec.safe_slots_to_import_optimistically <= current_slot { - return Ok(true); - } - - // If the parent block has execution enabled, always import the block. - // - // See: - // - // https://github.com/ethereum/consensus-specs/pull/2844 - if self - .proto_array - .get_block(block_parent_root) - .map_or(false, |parent| { - parent.execution_status.is_execution_enabled() - }) - { - return Ok(true); - } - - Ok(false) - } - /// Return the current finalized checkpoint. 
pub fn finalized_checkpoint(&self) -> Checkpoint { *self.fc_store.finalized_checkpoint() diff --git a/consensus/fork_choice/src/metrics.rs b/consensus/fork_choice/src/metrics.rs index eb0dbf435e3..b5cda2f5871 100644 --- a/consensus/fork_choice/src/metrics.rs +++ b/consensus/fork_choice/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; use types::EthSpec; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index b1ef833be0f..29265e34e4d 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -55,7 +55,7 @@ impl ForkChoiceTest { // Run fork choice tests against the latest fork. let spec = ForkName::latest().make_genesis_spec(ChainSpec::default()); let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec) + .spec(spec.into()) .chain_config(chain_config) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() @@ -256,36 +256,6 @@ impl ForkChoiceTest { self } - /// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - /// - /// If the chain is presently in an unsafe period, transition through it and the following safe - /// period. - /// - /// Note: the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` variable has been removed - /// from the fork choice spec in Q1 2023. We're still leaving references to - /// it in our tests because (a) it's easier and (b) it allows us to easily - /// test for the absence of that parameter. - pub fn move_to_next_unsafe_period(self) -> Self { - self.move_inside_safe_to_update() - .move_outside_safe_to_update() - } - - /// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - pub fn move_outside_safe_to_update(self) -> Self { - while is_safe_to_update(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - self.harness.advance_slot() - } - self - } - - /// Moves to the next slot that is *inside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - pub fn move_inside_safe_to_update(self) -> Self { - while !is_safe_to_update(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - self.harness.advance_slot() - } - self - } - /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts the block was applied successfully. @@ -516,10 +486,6 @@ impl ForkChoiceTest { } } -fn is_safe_to_update(slot: Slot, spec: &ChainSpec) -> bool { - slot % E::slots_per_epoch() < spec.safe_slots_to_update_justified -} - #[test] fn justified_and_finalized_blocks() { let tester = ForkChoiceTest::new(); @@ -536,15 +502,13 @@ fn justified_and_finalized_blocks() { assert!(fork_choice.get_finalized_block().is_ok()); } -/// - The new justified checkpoint descends from the current. -/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` +/// - The new justified checkpoint descends from the current. Near genesis. #[tokio::test] -async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { +async fn justified_checkpoint_updates_with_descendent_first_justification() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .await .unwrap() - .move_inside_safe_to_update() .assert_justified_epoch(0) .apply_blocks(1) .await @@ -552,77 +516,29 @@ async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { } /// - The new justified checkpoint descends from the current. 
-/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is **not** the first justification since genesis #[tokio::test] -async fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { +async fn justified_checkpoint_updates_with_descendent() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch <= 2) .await .unwrap() - .move_outside_safe_to_update() .assert_justified_epoch(2) .apply_blocks(1) .await .assert_justified_epoch(3); } -/// - The new justified checkpoint descends from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - This is the first justification since genesis -#[tokio::test] -async fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .move_to_next_unsafe_period() - .assert_justified_epoch(0) - .apply_blocks(1) - .await - .assert_justified_epoch(2); -} - -/// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - Finalized epoch has **not** increased. -#[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .apply_blocks(1) - .await - .move_inside_safe_to_update() - .assert_justified_epoch(2) - .apply_block_directly_to_fork_choice(|_, state| { - // The finalized checkpoint should not change. - state.finalized_checkpoint().epoch = Epoch::new(0); - - // The justified checkpoint has changed. - state.current_justified_checkpoint_mut().epoch = Epoch::new(3); - // The new block should **not** include the current justified block as an ancestor. - state.current_justified_checkpoint_mut().root = *state - .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) - .unwrap(); - }) - .await - .assert_justified_epoch(3); -} - /// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`. /// - Finalized epoch has **not** increased. #[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { +async fn justified_checkpoint_updates_with_non_descendent() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .await .unwrap() .apply_blocks(1) .await - .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { // The finalized checkpoint should not change. @@ -636,36 +552,6 @@ async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_wit .unwrap(); }) .await - // Now that `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` has been removed, the new - // block should have updated the justified checkpoint. - .assert_justified_epoch(3); -} - -/// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - Finalized epoch has increased. 
-#[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .apply_blocks(1) - .await - .move_to_next_unsafe_period() - .assert_justified_epoch(2) - .apply_block_directly_to_fork_choice(|_, state| { - // The finalized checkpoint should change. - state.finalized_checkpoint_mut().epoch = Epoch::new(1); - - // The justified checkpoint has changed. - state.current_justified_checkpoint_mut().epoch = Epoch::new(3); - // The new block should **not** include the current justified block as an ancestor. - state.current_justified_checkpoint_mut().root = *state - .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) - .unwrap(); - }) - .await .assert_justified_epoch(3); } diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 7b7c6eb0c48..b7f6ef7b2a9 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -25,7 +25,7 @@ ethereum_hashing = { workspace = true } int_to_bytes = { workspace = true } smallvec = { workspace = true } arbitrary = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } derivative = { workspace = true } test_random_derive = { path = "../../common/test_random_derive" } rand = { workspace = true } diff --git a/consensus/state_processing/src/common/update_progressive_balances_cache.rs b/consensus/state_processing/src/common/update_progressive_balances_cache.rs index af843b3acbc..101e8616835 100644 --- a/consensus/state_processing/src/common/update_progressive_balances_cache.rs +++ b/consensus/state_processing/src/common/update_progressive_balances_cache.rs @@ -4,7 +4,7 @@ use crate::metrics::{ PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, }; use crate::{BlockProcessingError, EpochProcessingError}; -use lighthouse_metrics::set_gauge; +use metrics::set_gauge; use types::{ is_progressive_balances_enabled, BeaconState, BeaconStateError, ChainSpec, Epoch, EpochTotalBalances, EthSpec, ParticipationFlags, ProgressiveBalancesCache, Validator, diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index b0eaf3422d3..0c176d4ab14 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -147,6 +147,8 @@ impl ConsensusContext { } } + #[allow(unknown_lints)] + #[allow(elided_named_lifetimes)] pub fn get_indexed_attestation<'a>( &'a mut self, state: &BeaconState, diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index e6fe483776f..b53dee96d93 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index e7655b453a8..f289b6e0817 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -579,8 +579,7 @@ pub fn get_expected_withdrawals( .get_execution_withdrawal_address(spec) .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, amount: balance.safe_sub( - validator - .get_validator_max_effective_balance(spec, state.fork_name_unchecked()), + 
validator.get_max_effective_balance(spec, state.fork_name_unchecked()), )?, }); withdrawal_index.safe_add_assign(1)?; diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 74166f67130..a53dc15126f 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -40,15 +40,13 @@ pub fn process_operations>( if state.fork_name_unchecked().electra_enabled() { state.update_pubkey_cache()?; - if let Some(deposit_requests) = block_body.execution_payload()?.deposit_requests()? { - process_deposit_requests(state, &deposit_requests, spec)?; - } - if let Some(withdrawal_requests) = block_body.execution_payload()?.withdrawal_requests()? { - process_withdrawal_requests(state, &withdrawal_requests, spec)?; - } - if let Some(consolidations) = block_body.execution_payload()?.consolidation_requests()? { - process_consolidation_requests(state, &consolidations, spec)?; - } + process_deposit_requests(state, &block_body.execution_requests()?.deposits, spec)?; + process_withdrawal_requests(state, &block_body.execution_requests()?.withdrawals, spec)?; + process_consolidation_requests( + state, + &block_body.execution_requests()?.consolidations, + spec, + )?; } Ok(()) @@ -477,50 +475,13 @@ pub fn apply_deposit( return Ok(()); } - let new_validator_index = state.validators().len(); - - // [Modified in Electra:EIP7251] - let (effective_balance, state_balance) = if state.fork_name_unchecked() >= ForkName::Electra - { - (0, 0) - } else { - ( - std::cmp::min( - amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, - ), - amount, - ) - }; - // Create a new validator. - let validator = Validator { - pubkey: deposit_data.pubkey, - withdrawal_credentials: deposit_data.withdrawal_credentials, - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance, - slashed: false, - }; - state.validators_mut().push(validator)?; - state.balances_mut().push(state_balance)?; - - // Altair or later initializations. - if let Ok(previous_epoch_participation) = state.previous_epoch_participation_mut() { - previous_epoch_participation.push(ParticipationFlags::default())?; - } - if let Ok(current_epoch_participation) = state.current_epoch_participation_mut() { - current_epoch_participation.push(ParticipationFlags::default())?; - } - if let Ok(inactivity_scores) = state.inactivity_scores_mut() { - inactivity_scores.push(0)?; - } + state.add_validator_to_registry(&deposit_data, spec)?; + let new_validator_index = state.validators().len().safe_sub(1)? 
as u64; // [New in Electra:EIP7251] if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { pending_balance_deposits.push(PendingBalanceDeposit { - index: new_validator_index as u64, + index: new_validator_index, amount, })?; } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index f8b354d92df..c59449634ac 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -12,7 +12,7 @@ use crate::{ }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use ssz_types::Bitfield; -use std::sync::LazyLock; +use std::sync::{Arc, LazyLock}; use test_utils::generate_deterministic_keypairs; use types::*; @@ -1017,6 +1017,7 @@ async fn fork_spanning_exit() { spec.altair_fork_epoch = Some(Epoch::new(2)); spec.bellatrix_fork_epoch = Some(Epoch::new(4)); spec.shard_committee_period = 0; + let spec = Arc::new(spec); let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 51f45b87e80..fcb480a37cf 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -82,6 +82,7 @@ struct RewardsAndPenaltiesContext { struct SlashingsContext { adjusted_total_slashing_balance: u64, target_withdrawable_epoch: Epoch, + penalty_per_effective_balance_increment: u64, } struct PendingBalanceDepositsContext { @@ -775,9 +776,16 @@ impl SlashingsContext { .current_epoch .safe_add(E::EpochsPerSlashingsVector::to_u64().safe_div(2)?)?; + let penalty_per_effective_balance_increment = adjusted_total_slashing_balance.safe_div( + state_ctxt + .total_active_balance + .safe_div(spec.effective_balance_increment)?, + )?; + Ok(Self { adjusted_total_slashing_balance, target_withdrawable_epoch, + penalty_per_effective_balance_increment, }) } } @@ -792,14 +800,20 @@ fn process_single_slashing( if validator.slashed && slashings_ctxt.target_withdrawable_epoch == validator.withdrawable_epoch { let increment = spec.effective_balance_increment; - let penalty_numerator = validator - .effective_balance - .safe_div(increment)? - .safe_mul(slashings_ctxt.adjusted_total_slashing_balance)?; - let penalty = penalty_numerator - .safe_div(state_ctxt.total_active_balance)? - .safe_mul(increment)?; - + let penalty = if state_ctxt.fork_name.electra_enabled() { + let effective_balance_increments = validator.effective_balance.safe_div(increment)?; + slashings_ctxt + .penalty_per_effective_balance_increment + .safe_mul(effective_balance_increments)? + } else { + let penalty_numerator = validator + .effective_balance + .safe_div(increment)? + .safe_mul(slashings_ctxt.adjusted_total_slashing_balance)?; + penalty_numerator + .safe_div(state_ctxt.total_active_balance)? + .safe_mul(increment)? + }; *balance.make_mut()? = balance.saturating_sub(penalty); } Ok(()) @@ -1022,8 +1036,7 @@ fn process_single_effective_balance_update( ) -> Result<(), Error> { // Use the higher effective balance limit if post-Electra and compounding withdrawal credentials // are set. 
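// Aside (hedged sketch, not part of the diff): the two penalty formulas the
// slashing hunk above switches between, with illustrative names and the
// `safe_arith` plumbing elided. Post-Electra, the quotient
// `adjusted_total_slashing_balance / (total_active_balance / increment)` is
// computed once when the `SlashingsContext` is built and reused for every
// slashed validator; the two expressions are algebraically close but can
// round differently under integer division.
fn penalty_pre_electra(eb: u64, adjusted_total: u64, total_active: u64, increment: u64) -> u64 {
    ((eb / increment) * adjusted_total / total_active) * increment
}
fn penalty_electra(eb: u64, penalty_per_increment: u64, increment: u64) -> u64 {
    (eb / increment) * penalty_per_increment
}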
- let effective_balance_limit = - validator.get_validator_max_effective_balance(spec, state_ctxt.fork_name); + let effective_balance_limit = validator.get_max_effective_balance(spec, state_ctxt.fork_name); let old_effective_balance = validator.effective_balance; let new_effective_balance = if balance.safe_add(eb_ctxt.downward_threshold)? diff --git a/consensus/state_processing/src/per_epoch_processing/tests.rs b/consensus/state_processing/src/per_epoch_processing/tests.rs index 8c240548b04..b93ede248ca 100644 --- a/consensus/state_processing/src/per_epoch_processing/tests.rs +++ b/consensus/state_processing/src/per_epoch_processing/tests.rs @@ -45,6 +45,7 @@ mod release_tests { per_slot_processing::per_slot_processing, EpochProcessingError, SlotProcessingError, }; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; + use std::sync::Arc; use types::{Epoch, ForkName, InconsistentFork, MainnetEthSpec}; #[tokio::test] @@ -56,7 +57,7 @@ mod release_tests { let altair_state = { let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.clone()) + .spec(Arc::new(spec.clone())) .deterministic_keypairs(8) .fresh_ephemeral_store() .build(); @@ -116,7 +117,7 @@ mod release_tests { let base_state = { let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.clone()) + .spec(Arc::new(spec.clone())) .deterministic_keypairs(8) .fresh_ephemeral_store() .build(); diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index aff0225edd4..dac83e7553f 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -18,4 +18,3 @@ fixed_bytes = { workspace = true } [features] arbitrary = ["alloy-primitives/arbitrary"] -getrandom = ["alloy-primitives/getrandom"] diff --git a/consensus/swap_or_not_shuffle/src/shuffle_list.rs b/consensus/swap_or_not_shuffle/src/shuffle_list.rs index b49a26cc373..3e93974fe0f 100644 --- a/consensus/swap_or_not_shuffle/src/shuffle_list.rs +++ b/consensus/swap_or_not_shuffle/src/shuffle_list.rs @@ -45,7 +45,7 @@ impl Buf { /// Hash the entire buffer. 
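// Aside (illustrative, not part of the diff): `hash_fixed` in `ethereum_hashing`
// returns a fixed-size `[u8; 32]`, so the hunk just below can rely on the
// infallible `From<[u8; 32]>` impl for `Hash256` instead of `from_slice`, which
// re-checks the slice length at runtime and panics on a mismatch.
fn digest_to_hash256(digest: [u8; 32]) -> alloy_primitives::B256 {
    // Array size is known at compile time: no length check, no panic path.
    alloy_primitives::B256::from(digest)
}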
fn hash(&self) -> Hash256 { - Hash256::from_slice(&hash_fixed(&self.0)) + Hash256::from(hash_fixed(&self.0)) } } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index c1559a407cf..21a15fc5174 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -9,7 +9,7 @@ name = "benches" harness = false [dependencies] -alloy-primitives = { workspace = true, features = ["rlp", "getrandom"] } +alloy-primitives = { workspace = true } merkle_proof = { workspace = true } bls = { workspace = true, features = ["arbitrary"] } kzg = { workspace = true } diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index effc6a21068..0c8bf36c813 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -78,7 +78,7 @@ fn all_benches(c: &mut Criterion) { || (bytes.clone(), spec.clone()), |(bytes, spec)| { let state: BeaconState = - BeaconState::from_ssz_bytes(&bytes, &spec).expect("should decode"); + BeaconState::from_ssz_bytes(bytes, spec).expect("should decode"); black_box(state) }, BatchSize::SmallInput, diff --git a/consensus/types/presets/gnosis/phase0.yaml b/consensus/types/presets/gnosis/phase0.yaml index 87c73e6fb7a..48129cb47ea 100644 --- a/consensus/types/presets/gnosis/phase0.yaml +++ b/consensus/types/presets/gnosis/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/consensus/types/presets/mainnet/phase0.yaml b/consensus/types/presets/mainnet/phase0.yaml index 89bb97d6a87..02bc96c8cdb 100644 --- a/consensus/types/presets/mainnet/phase0.yaml +++ b/consensus/types/presets/mainnet/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/consensus/types/presets/minimal/phase0.yaml b/consensus/types/presets/minimal/phase0.yaml index c9c81325f1b..1f756031421 100644 --- a/consensus/types/presets/minimal/phase0.yaml +++ b/consensus/types/presets/minimal/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**1 (= 1) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 4a6816c024d..a2983035138 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -670,6 +670,7 @@ impl> BeaconBlockElectra graffiti: Graffiti::default(), execution_payload: Payload::Electra::default(), blob_kzg_commitments: VariableList::empty(), + execution_requests: ExecutionRequests::default(), }, } } @@ -700,6 +701,7 @@ impl> EmptyBlock for BeaconBlockElec execution_payload: Payload::Electra::default(), bls_to_execution_changes: VariableList::empty(), blob_kzg_commitments: VariableList::empty(), + execution_requests: ExecutionRequests::default(), }, } } diff --git 
a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 305ef105445..c81e7bcde93 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -114,6 +114,8 @@ pub struct BeaconBlockBody = FullPay VariableList, #[superstruct(only(Deneb, Electra))] pub blob_kzg_commitments: KzgCommitments, + #[superstruct(only(Electra))] + pub execution_requests: ExecutionRequests, #[superstruct(only(Base, Altair))] #[metastruct(exclude_from(fields))] #[ssz(skip_serializing, skip_deserializing)] @@ -662,6 +664,7 @@ impl From>> execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, + execution_requests, } = body; ( @@ -680,6 +683,7 @@ impl From>> }, bls_to_execution_changes, blob_kzg_commitments: blob_kzg_commitments.clone(), + execution_requests, }, Some(execution_payload), ) @@ -818,6 +822,7 @@ impl BeaconBlockBodyElectra> { execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, + execution_requests, } = self; BeaconBlockBodyElectra { @@ -835,6 +840,7 @@ impl BeaconBlockBodyElectra> { }, bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), + execution_requests: execution_requests.clone(), } } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a08f6d720c7..f214991d516 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1548,6 +1548,35 @@ impl BeaconState { .ok_or(Error::UnknownValidator(validator_index)) } + pub fn add_validator_to_registry( + &mut self, + deposit_data: &DepositData, + spec: &ChainSpec, + ) -> Result<(), Error> { + let fork = self.fork_name_unchecked(); + let amount = if fork.electra_enabled() { + 0 + } else { + deposit_data.amount + }; + self.validators_mut() + .push(Validator::from_deposit(deposit_data, amount, fork, spec))?; + self.balances_mut().push(amount)?; + + // Altair or later initializations. + if let Ok(previous_epoch_participation) = self.previous_epoch_participation_mut() { + previous_epoch_participation.push(ParticipationFlags::default())?; + } + if let Ok(current_epoch_participation) = self.current_epoch_participation_mut() { + current_epoch_participation.push(ParticipationFlags::default())?; + } + if let Ok(inactivity_scores) = self.inactivity_scores_mut() { + inactivity_scores.push(0)?; + } + + Ok(()) + } + /// Safe copy-on-write accessor for the `validators` list. pub fn get_validator_cow( &mut self, @@ -2131,7 +2160,7 @@ impl BeaconState { let max_effective_balance = self .validators() .get(validator_index) - .map(|validator| validator.get_validator_max_effective_balance(spec, current_fork)) + .map(|validator| validator.get_max_effective_balance(spec, current_fork)) .ok_or(Error::UnknownValidator(validator_index))?; Ok(std::cmp::min( *self @@ -2477,33 +2506,64 @@ impl BeaconState { Ok(()) } - pub fn compute_merkle_proof(&self, generalized_index: usize) -> Result, Error> { - // 1. Convert generalized index to field index. - let field_index = match generalized_index { + pub fn compute_current_sync_committee_proof(&self) -> Result, Error> { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. 
Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + let field_index = if self.fork_name_unchecked().electra_enabled() { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + } else { light_client_update::CURRENT_SYNC_COMMITTEE_INDEX - | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { - // Sync committees are top-level fields, subtract off the generalized indices - // for the internal nodes. Result should be 22 or 23, the field offset of the committee - // in the `BeaconState`: - // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate - generalized_index - .checked_sub(self.num_fields_pow2()) - .ok_or(Error::IndexNotSupported(generalized_index))? - } - light_client_update::FINALIZED_ROOT_INDEX => { - // Finalized root is the right child of `finalized_checkpoint`, divide by two to get - // the generalized index of `state.finalized_checkpoint`. - let finalized_checkpoint_generalized_index = generalized_index / 2; - // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches - // position of `finalized_checkpoint` in `BeaconState`. - finalized_checkpoint_generalized_index - .checked_sub(self.num_fields_pow2()) - .ok_or(Error::IndexNotSupported(generalized_index))? - } - _ => return Err(Error::IndexNotSupported(generalized_index)), }; + let leaves = self.get_beacon_state_leaves(); + self.generate_proof(field_index, &leaves) + } - // 2. Get all `BeaconState` leaves. + pub fn compute_next_sync_committee_proof(&self) -> Result, Error> { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + let field_index = if self.fork_name_unchecked().electra_enabled() { + light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + } else { + light_client_update::NEXT_SYNC_COMMITTEE_INDEX + }; + let leaves = self.get_beacon_state_leaves(); + self.generate_proof(field_index, &leaves) + } + + pub fn compute_finalized_root_proof(&self) -> Result, Error> { + // Finalized root is the right child of `finalized_checkpoint`, divide by two to get + // the generalized index of `state.finalized_checkpoint`. + let field_index = if self.fork_name_unchecked().electra_enabled() { + // Index should be 169/2 - 64 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState` + light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + } else { + // Index should be 105/2 - 32 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState` + light_client_update::FINALIZED_ROOT_INDEX + }; + let leaves = self.get_beacon_state_leaves(); + let mut proof = self.generate_proof(field_index, &leaves)?; + proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); + Ok(proof) + } + + fn generate_proof( + &self, + field_index: usize, + leaves: &[Hash256], + ) -> Result, Error> { + let depth = self.num_fields_pow2().ilog2() as usize; + let tree = merkle_proof::MerkleTree::create(leaves, depth); + let (_, proof) = tree.generate_proof(field_index, depth)?; + Ok(proof) + } + + fn get_beacon_state_leaves(&self) -> Vec { let mut leaves = vec![]; #[allow(clippy::arithmetic_side_effects)] match self { @@ -2539,18 +2599,7 @@ impl BeaconState { } }; - // 3. 
Make deposit tree. - // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). - let depth = light_client_update::CURRENT_SYNC_COMMITTEE_PROOF_LEN; - let tree = merkle_proof::MerkleTree::create(&leaves, depth); - let (_, mut proof) = tree.generate_proof(field_index, depth)?; - - // 4. If we're proving the finalized root, patch in the finalized epoch to complete the proof. - if generalized_index == light_client_update::FINALIZED_ROOT_INDEX { - proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); - } - - Ok(proof) + leaves } } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index e31427121ec..1c4effb4aec 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -26,7 +26,6 @@ pub enum Domain { SyncCommittee, ContributionAndProof, SyncCommitteeSelectionProof, - Consolidation, ApplicationMask(ApplicationDomain), } @@ -111,12 +110,10 @@ pub struct ChainSpec { pub(crate) domain_voluntary_exit: u32, pub(crate) domain_selection_proof: u32, pub(crate) domain_aggregate_and_proof: u32, - pub(crate) domain_consolidation: u32, /* * Fork choice */ - pub safe_slots_to_update_justified: u64, pub proposer_score_boost: Option, pub reorg_head_weight_threshold: Option, pub reorg_parent_weight_threshold: Option, @@ -159,7 +156,6 @@ pub struct ChainSpec { pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, pub terminal_block_hash_activation_epoch: Epoch, - pub safe_slots_to_import_optimistically: u64, /* * Capella hard fork params @@ -198,6 +194,7 @@ pub struct ChainSpec { pub custody_requirement: u64, pub data_column_sidecar_subnet_count: u64, pub number_of_columns: usize, + pub samples_per_slot: u64, /* * Networking @@ -478,7 +475,6 @@ impl ChainSpec { Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(), Domain::BlsToExecutionChange => self.domain_bls_to_execution_change, - Domain::Consolidation => self.domain_consolidation, } } @@ -703,12 +699,10 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, - domain_consolidation: 0x0B, /* * Fork choice */ - safe_slots_to_update_justified: 8, proposer_score_boost: Some(40), reorg_head_weight_threshold: Some(20), reorg_parent_weight_threshold: Some(160), @@ -759,7 +753,6 @@ impl ChainSpec { .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), - safe_slots_to_import_optimistically: 128u64, /* * Capella hard fork params @@ -811,6 +804,7 @@ impl ChainSpec { custody_requirement: 4, data_column_sidecar_subnet_count: 128, number_of_columns: 128, + samples_per_slot: 8, /* * Network specific @@ -888,7 +882,6 @@ impl ChainSpec { inactivity_penalty_quotient: u64::checked_pow(2, 25).expect("pow does not overflow"), min_slashing_penalty_quotient: 64, proportional_slashing_multiplier: 2, - safe_slots_to_update_justified: 2, // Altair epochs_per_sync_committee_period: Epoch::new(8), altair_fork_version: [0x01, 0x00, 0x00, 0x01], @@ -1024,12 +1017,10 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, - domain_consolidation: 0x0B, /* * Fork choice */ - safe_slots_to_update_justified: 8, proposer_score_boost: Some(40), reorg_head_weight_threshold: Some(20), reorg_parent_weight_threshold: Some(160), @@ -1080,7 +1071,6 @@ impl 
ChainSpec { .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), - safe_slots_to_import_optimistically: 128u64, /* * Capella hard fork params @@ -1132,6 +1122,7 @@ impl ChainSpec { custody_requirement: 4, data_column_sidecar_subnet_count: 128, number_of_columns: 128, + samples_per_slot: 8, /* * Network specific */ @@ -1214,9 +1205,6 @@ pub struct Config { pub terminal_block_hash: ExecutionBlockHash, #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, - #[serde(default = "default_safe_slots_to_import_optimistically")] - #[serde(with = "serde_utils::quoted_u64")] - pub safe_slots_to_import_optimistically: u64, #[serde(with = "serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, @@ -1382,6 +1370,9 @@ pub struct Config { #[serde(default = "default_number_of_columns")] #[serde(with = "serde_utils::quoted_u64")] number_of_columns: u64, + #[serde(default = "default_samples_per_slot")] + #[serde(with = "serde_utils::quoted_u64")] + samples_per_slot: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1424,10 +1415,6 @@ fn default_terminal_block_hash_activation_epoch() -> Epoch { Epoch::new(u64::MAX) } -fn default_safe_slots_to_import_optimistically() -> u64 { - 128u64 -} - fn default_subnets_per_node() -> u8 { 2u8 } @@ -1521,17 +1508,21 @@ const fn default_maximum_gossip_clock_disparity_millis() -> u64 { } const fn default_custody_requirement() -> u64 { - 1 + 4 } const fn default_data_column_sidecar_subnet_count() -> u64 { - 32 + 128 } const fn default_number_of_columns() -> u64 { 128 } +const fn default_samples_per_slot() -> u64 { + 8 +} + fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { let max_request_blocks = max_request_blocks as usize; RuntimeVariableList::::from_vec( @@ -1644,7 +1635,6 @@ impl Config { terminal_total_difficulty: spec.terminal_total_difficulty, terminal_block_hash: spec.terminal_block_hash, terminal_block_hash_activation_epoch: spec.terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically: spec.safe_slots_to_import_optimistically, min_genesis_active_validator_count: spec.min_genesis_active_validator_count, min_genesis_time: spec.min_genesis_time, @@ -1727,6 +1717,7 @@ impl Config { custody_requirement: spec.custody_requirement, data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, number_of_columns: spec.number_of_columns as u64, + samples_per_slot: spec.samples_per_slot, } } @@ -1745,7 +1736,6 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically, min_genesis_active_validator_count, min_genesis_time, genesis_fork_version, @@ -1802,6 +1792,7 @@ impl Config { custody_requirement, data_column_sidecar_subnet_count, number_of_columns, + samples_per_slot, } = self; if preset_base != E::spec_name().to_string().as_str() { @@ -1844,7 +1835,6 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically, gossip_max_size, min_epochs_for_block_requests, max_chunk_size, @@ -1881,6 +1871,7 @@ impl Config { custody_requirement, data_column_sidecar_subnet_count, number_of_columns: number_of_columns as usize, + samples_per_slot, ..chain_spec.clone() }) @@ -1946,7 +1937,6 @@ mod tests { &spec, ); test_domain(Domain::SyncCommittee, spec.domain_sync_committee, &spec); - 
test_domain(Domain::Consolidation, spec.domain_consolidation, &spec); // The builder domain index is zero let builder_domain_pre_mask = [0; 4]; @@ -2096,7 +2086,6 @@ mod yaml_tests { #TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638911 #TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000001 #TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551614 - #SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY: 2 MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 MIN_GENESIS_TIME: 1606824000 GENESIS_FORK_VERSION: 0x00000000 @@ -2125,6 +2114,7 @@ mod yaml_tests { CUSTODY_REQUIREMENT: 1 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 + SAMPLES_PER_SLOT: 8 "#; let chain_spec: Config = serde_yaml::from_str(spec).unwrap(); @@ -2144,7 +2134,6 @@ mod yaml_tests { check_default!(terminal_total_difficulty); check_default!(terminal_block_hash); check_default!(terminal_block_hash_activation_epoch); - check_default!(safe_slots_to_import_optimistically); check_default!(bellatrix_fork_version); check_default!(gossip_max_size); check_default!(min_epochs_for_block_requests); diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 110392d4b77..c80d678b2a3 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -41,6 +41,7 @@ pub struct ConfigAndPreset { } impl ConfigAndPreset { + // DEPRECATED: the `fork_name` argument is never used, we should remove it. pub fn from_chain_spec(spec: &ChainSpec, fork_name: Option) -> Self { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); @@ -126,7 +127,6 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "compounding_withdrawal_prefix".to_uppercase() => u8_hex(spec.compounding_withdrawal_prefix_byte), "unset_deposit_requests_start_index".to_uppercase() => spec.unset_deposit_requests_start_index.to_string().into(), "full_exit_request_amount".to_uppercase() => spec.full_exit_request_amount.to_string().into(), - "domain_consolidation".to_uppercase()=> u32_hex(spec.domain_consolidation), } } diff --git a/consensus/types/src/consolidation_request.rs b/consensus/types/src/consolidation_request.rs index b21f34e7bba..e2df0bb9726 100644 --- a/consensus/types/src/consolidation_request.rs +++ b/consensus/types/src/consolidation_request.rs @@ -1,5 +1,6 @@ use crate::{test_utils::TestRandom, Address, PublicKeyBytes, SignedRoot}; use serde::{Deserialize, Serialize}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -24,6 +25,18 @@ pub struct ConsolidationRequest { pub target_pubkey: PublicKeyBytes, } +impl ConsolidationRequest { + pub fn max_size() -> usize { + Self { + source_address: Address::repeat_byte(0), + source_pubkey: PublicKeyBytes::empty(), + target_pubkey: PublicKeyBytes::empty(), + } + .as_ssz_bytes() + .len() + } +} + impl SignedRoot for ConsolidationRequest {} #[cfg(test)] diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 90c05aea1f7..57251e319a4 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -161,6 +161,7 @@ pub enum DataColumnSidecarError { DataColumnIndexOutOfBounds, KzgCommitmentInclusionProofOutOfBounds, KzgError(KzgError), + KzgNotInitialized, MissingBlobSidecars, PreDeneb, SszError(SszError), diff --git a/consensus/types/src/deposit_request.rs 
b/consensus/types/src/deposit_request.rs index f6ddf8b63a8..7af949fef3a 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit_request.rs @@ -1,6 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Hash256, PublicKeyBytes, Signature}; use serde::{Deserialize, Serialize}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -29,6 +30,20 @@ pub struct DepositRequest { pub index: u64, } +impl DepositRequest { + pub fn max_size() -> usize { + Self { + pubkey: PublicKeyBytes::empty(), + withdrawal_credentials: Hash256::ZERO, + amount: 0, + signature: Signature::empty(), + index: 0, + } + .as_ssz_bytes() + .len() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 4d41d568308..9f16b676a6a 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -13,12 +13,6 @@ pub type Transactions = VariableList< >; pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; -pub type DepositRequests = - VariableList::MaxDepositRequestsPerPayload>; -pub type WithdrawalRequests = - VariableList::MaxWithdrawalRequestsPerPayload>; -pub type ConsolidationRequests = - VariableList::MaxConsolidationRequestsPerPayload>; #[superstruct( variants(Bellatrix, Capella, Deneb, Electra), @@ -96,13 +90,6 @@ pub struct ExecutionPayload { #[superstruct(only(Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, - #[superstruct(only(Electra))] - pub deposit_requests: VariableList, - #[superstruct(only(Electra))] - pub withdrawal_requests: VariableList, - #[superstruct(only(Electra))] - pub consolidation_requests: - VariableList, } impl<'a, E: EthSpec> ExecutionPayloadRef<'a, E> { diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 90dd8c54e21..e9690435f1f 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -86,12 +86,6 @@ pub struct ExecutionPayloadHeader { #[superstruct(only(Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, - #[superstruct(only(Electra), partial_getter(copy))] - pub deposit_requests_root: Hash256, - #[superstruct(only(Electra), partial_getter(copy))] - pub withdrawal_requests_root: Hash256, - #[superstruct(only(Electra), partial_getter(copy))] - pub consolidation_requests_root: Hash256, } impl ExecutionPayloadHeader { @@ -214,9 +208,6 @@ impl ExecutionPayloadHeaderDeneb { withdrawals_root: self.withdrawals_root, blob_gas_used: self.blob_gas_used, excess_blob_gas: self.excess_blob_gas, - deposit_requests_root: Hash256::zero(), - withdrawal_requests_root: Hash256::zero(), - consolidation_requests_root: Hash256::zero(), } } } @@ -308,9 +299,6 @@ impl<'a, E: EthSpec> From<&'a ExecutionPayloadElectra> for ExecutionPayloadHe withdrawals_root: payload.withdrawals.tree_hash_root(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - deposit_requests_root: payload.deposit_requests.tree_hash_root(), - withdrawal_requests_root: payload.withdrawal_requests.tree_hash_root(), - consolidation_requests_root: payload.consolidation_requests.tree_hash_root(), } } } diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs new file mode 100644 index 00000000000..778260dd841 
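// Aside (hedged sketch, not part of the diff): the `max_size` helpers added to
// `ConsolidationRequest` and `DepositRequest` above share one trick. Every
// field of these requests is fixed-length under SSZ, so encoding a single
// dummy instance yields the maximum (indeed, the only) encoded length. A
// hypothetical generic version of the same pattern:
fn max_ssz_size_of<T: ssz::Encode + Default>() -> usize {
    // Only sound for types whose SSZ encoding contains no variable-length fields.
    T::default().as_ssz_bytes().len()
}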
--- /dev/null
+++ b/consensus/types/src/execution_requests.rs
@@ -0,0 +1,59 @@
+use crate::test_utils::TestRandom;
+use crate::{ConsolidationRequest, DepositRequest, EthSpec, WithdrawalRequest};
+use alloy_primitives::Bytes;
+use derivative::Derivative;
+use serde::{Deserialize, Serialize};
+use ssz::Encode;
+use ssz_derive::{Decode, Encode};
+use ssz_types::VariableList;
+use test_random_derive::TestRandom;
+use tree_hash_derive::TreeHash;
+
+pub type DepositRequests<E> =
+    VariableList<DepositRequest, <E as EthSpec>::MaxDepositRequestsPerPayload>;
+pub type WithdrawalRequests<E> =
+    VariableList<WithdrawalRequest, <E as EthSpec>::MaxWithdrawalRequestsPerPayload>;
+pub type ConsolidationRequests<E> =
+    VariableList<ConsolidationRequest, <E as EthSpec>::MaxConsolidationRequestsPerPayload>;
+
+#[derive(
+    arbitrary::Arbitrary,
+    Debug,
+    Derivative,
+    Default,
+    Clone,
+    Serialize,
+    Deserialize,
+    Encode,
+    Decode,
+    TreeHash,
+    TestRandom,
+)]
+#[serde(bound = "E: EthSpec")]
+#[arbitrary(bound = "E: EthSpec")]
+#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))]
+pub struct ExecutionRequests<E: EthSpec> {
+    pub deposits: DepositRequests<E>,
+    pub withdrawals: WithdrawalRequests<E>,
+    pub consolidations: ConsolidationRequests<E>,
+}
+
+impl<E: EthSpec> ExecutionRequests<E> {
+    /// Returns the encoding according to EIP-7685 to send
+    /// to the execution layer over the engine api.
+    pub fn get_execution_requests_list(&self) -> Vec<Bytes> {
+        let deposit_bytes = Bytes::from(self.deposits.as_ssz_bytes());
+        let withdrawal_bytes = Bytes::from(self.withdrawals.as_ssz_bytes());
+        let consolidation_bytes = Bytes::from(self.consolidations.as_ssz_bytes());
+        vec![deposit_bytes, withdrawal_bytes, consolidation_bytes]
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::MainnetEthSpec;
+
+    use super::*;
+
+    ssz_and_tree_hash_tests!(ExecutionRequests<MainnetEthSpec>);
+}
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index 281a84d8592..eff52378342 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -81,6 +81,7 @@ pub mod slot_epoch_macros;
 pub mod activation_queue;
 pub mod config_and_preset;
 pub mod execution_block_header;
+pub mod execution_requests;
 pub mod fork_context;
 pub mod participation_flags;
 pub mod payload;
@@ -169,6 +170,7 @@ pub use crate::execution_payload_header::{
     ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef,
     ExecutionPayloadHeaderRefMut,
 };
+pub use crate::execution_requests::ExecutionRequests;
 pub use crate::fork::Fork;
 pub use crate::fork_context::ForkContext;
 pub use crate::fork_data::ForkData;
@@ -198,7 +200,7 @@ pub use crate::light_client_optimistic_update::{
 };
 pub use crate::light_client_update::{
     Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair,
-    LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra,
+    LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, MerkleProof,
 };
 pub use crate::participation_flags::ParticipationFlags;
 pub use crate::payload::{
diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs
index 7c716e6bb2d..21a7e5416f2 100644
--- a/consensus/types/src/light_client_bootstrap.rs
+++ b/consensus/types/src/light_client_bootstrap.rs
@@ -57,7 +57,16 @@ pub struct LightClientBootstrap<E: EthSpec> {
     /// The `SyncCommittee` used in the requested period.
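// Aside (hedged usage sketch, not part of the diff): how the
// `ExecutionRequests::get_execution_requests_list` method introduced above is
// expected to be consumed. EIP-7685 fixes the order of the opaque byte lists
// handed to the execution layer: deposits, then withdrawals, then consolidations.
fn encode_requests_for_engine_api<E: EthSpec>(requests: &ExecutionRequests<E>) -> Vec<Bytes> {
    // Index 0: SSZ(deposits), index 1: SSZ(withdrawals), index 2: SSZ(consolidations).
    requests.get_execution_requests_list()
}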
pub current_sync_committee: Arc>, /// Merkle proof for sync committee + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "current_sync_committee_branch_altair") + )] pub current_sync_committee_branch: FixedVector, + #[superstruct( + only(Electra), + partial_getter(rename = "current_sync_committee_branch_electra") + )] + pub current_sync_committee_branch: FixedVector, } impl LightClientBootstrap { @@ -115,7 +124,7 @@ impl LightClientBootstrap { pub fn new( block: &SignedBlindedBeaconBlock, current_sync_committee: Arc>, - current_sync_committee_branch: FixedVector, + current_sync_committee_branch: Vec, chain_spec: &ChainSpec, ) -> Result { let light_client_bootstrap = match block @@ -126,22 +135,22 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), }; @@ -155,9 +164,7 @@ impl LightClientBootstrap { ) -> Result { let mut header = beacon_state.latest_block_header().clone(); header.state_root = beacon_state.update_tree_hash_cache()?; - let current_sync_committee_branch = - FixedVector::new(beacon_state.compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?)?; - + let current_sync_committee_branch = beacon_state.compute_current_sync_committee_proof()?; let current_sync_committee = beacon_state.current_sync_committee()?.clone(); let light_client_bootstrap = match block @@ -168,22 +175,22 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), }; @@ -196,21 +203,42 @@ impl 
ForkVersionDeserialize for LightClientBootstrap { value: Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base => Err(serde::de::Error::custom(format!( + if fork_name.altair_enabled() { + Ok(serde_json::from_value::>(value) + .map_err(serde::de::Error::custom))? + } else { + Err(serde::de::Error::custom(format!( "LightClientBootstrap failed to deserialize: unsupported fork '{}'", fork_name - ))), - _ => Ok(serde_json::from_value::>(value) - .map_err(serde::de::Error::custom))?, + ))) } } } #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientBootstrapAltair, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientBootstrapCapella, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapCapella); + } - ssz_tests!(LightClientBootstrapDeneb); + #[cfg(test)] + mod deneb { + use crate::{LightClientBootstrapDeneb, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientBootstrapElectra, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapElectra); + } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index dc7561f5fcc..ba2f2083cd9 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -63,8 +63,13 @@ pub struct LightClientFinalityUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. - #[test_random(default)] + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "finality_branch_altair") + )] pub finality_branch: FixedVector, + #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + pub finality_branch: FixedVector, /// current sync aggregate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -75,7 +80,7 @@ impl LightClientFinalityUpdate { pub fn new( attested_block: &SignedBlindedBeaconBlock, finalized_block: &SignedBlindedBeaconBlock, - finality_branch: FixedVector, + finality_branch: Vec, sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, @@ -92,7 +97,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderAltair::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }) @@ -104,7 +109,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderCapella::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -115,7 +120,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderDeneb::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -126,7 +131,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderElectra::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -212,23 +217,42 @@ impl ForkVersionDeserialize for LightClientFinalityUpdate { value: Value, fork_name: ForkName, ) -> Result { - match fork_name { - 
ForkName::Base => Err(serde::de::Error::custom(format!( + if fork_name.altair_enabled() { + serde_json::from_value::>(value) + .map_err(serde::de::Error::custom) + } else { + Err(serde::de::Error::custom(format!( "LightClientFinalityUpdate failed to deserialize: unsupported fork '{}'", fork_name - ))), - _ => Ok( - serde_json::from_value::>(value) - .map_err(serde::de::Error::custom), - )?, + ))) } } } #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientFinalityUpdateAltair, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientFinalityUpdateCapella, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateCapella); + } - ssz_tests!(LightClientFinalityUpdateDeneb); + #[cfg(test)] + mod deneb { + use crate::{LightClientFinalityUpdateDeneb, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientFinalityUpdateElectra, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateElectra); + } } diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index c0de114b357..52800f18ac2 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -129,11 +129,10 @@ impl LightClientHeader { } pub fn ssz_max_var_len_for_fork(fork_name: ForkName) -> usize { - match fork_name { - ForkName::Base | ForkName::Altair => 0, - ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { - ExecutionPayloadHeader::::ssz_max_var_len_for_fork(fork_name) - } + if fork_name.capella_enabled() { + ExecutionPayloadHeader::::ssz_max_var_len_for_fork(fork_name) + } else { + 0 } } } @@ -308,3 +307,31 @@ impl ForkVersionDeserialize for LightClientHeader { } } } + +#[cfg(test)] +mod tests { + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientHeaderAltair, MainnetEthSpec}; + ssz_tests!(LightClientHeaderAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientHeaderCapella, MainnetEthSpec}; + ssz_tests!(LightClientHeaderCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientHeaderDeneb, MainnetEthSpec}; + ssz_tests!(LightClientHeaderDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientHeaderElectra, MainnetEthSpec}; + ssz_tests!(LightClientHeaderElectra); + } +} diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 3cae31edf80..209388af87b 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -198,23 +198,44 @@ impl ForkVersionDeserialize for LightClientOptimisticUpdate { value: Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base => Err(serde::de::Error::custom(format!( - "LightClientOptimisticUpdate failed to deserialize: unsupported fork '{}'", - fork_name - ))), - _ => Ok( + if fork_name.altair_enabled() { + Ok( serde_json::from_value::>(value) .map_err(serde::de::Error::custom), - )?, + )? 
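// Aside (hedged sketch of the pattern this diff applies across the light-client
// types; the generic helper below is hypothetical, not code from the PR):
// gating on `fork_name.altair_enabled()` instead of matching `ForkName::Base`
// explicitly means future fork variants deserialize here without new match arms.
fn deserialize_post_altair<T: serde::de::DeserializeOwned>(
    value: serde_json::Value,
    fork_name: ForkName,
) -> Result<T, serde_json::Error> {
    if fork_name.altair_enabled() {
        serde_json::from_value(value)
    } else {
        Err(serde::de::Error::custom(format!(
            "unsupported fork '{}'",
            fork_name
        )))
    }
}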
+ } else { + Err(serde::de::Error::custom(format!( + "LightClientOptimisticUpdate failed to deserialize: unsupported fork '{}'", + fork_name + ))) } } } #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientOptimisticUpdateAltair, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateAltair); + } - ssz_tests!(LightClientOptimisticUpdateDeneb); + #[cfg(test)] + mod capella { + use crate::{LightClientOptimisticUpdateCapella, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientOptimisticUpdateDeneb, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientOptimisticUpdateElectra, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateElectra); + } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 3b48a68df31..a7ddf8eb314 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,5 +1,6 @@ use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; use crate::light_client_header::LightClientHeaderElectra; +use crate::LightClientHeader; use crate::{ beacon_state, test_utils::TestRandom, ChainSpec, Epoch, ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, @@ -10,10 +11,10 @@ use safe_arith::ArithError; use safe_arith::SafeArith; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; -use ssz::Decode; +use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; -use ssz_types::typenum::{U4, U5, U6}; +use ssz_types::typenum::{U4, U5, U6, U7}; use std::sync::Arc; use superstruct::superstruct; use test_random_derive::TestRandom; @@ -24,20 +25,39 @@ pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; pub const EXECUTION_PAYLOAD_INDEX: usize = 25; +pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; +pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; +pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; + pub type FinalizedRootProofLen = U6; pub type CurrentSyncCommitteeProofLen = U5; pub type ExecutionPayloadProofLen = U4; - pub type NextSyncCommitteeProofLen = U5; +pub type FinalizedRootProofLenElectra = U7; +pub type CurrentSyncCommitteeProofLenElectra = U6; +pub type NextSyncCommitteeProofLenElectra = U6; + pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; +pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; + +pub type MerkleProof = Vec; +// Max light client updates by range request limits +// spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration +pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; + type FinalityBranch = FixedVector; +type FinalityBranchElectra = FixedVector; type NextSyncCommitteeBranch = FixedVector; +type NextSyncCommitteeBranchElectra = FixedVector; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), @@ -119,8 +139,17 @@ pub struct 
LightClientUpdate { pub attested_header: LightClientHeaderElectra, /// The `SyncCommittee` used in the next period. pub next_sync_committee: Arc>, - /// Merkle proof for next sync committee + // Merkle proof for next sync committee + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "next_sync_committee_branch_altair") + )] pub next_sync_committee_branch: NextSyncCommitteeBranch, + #[superstruct( + only(Electra), + partial_getter(rename = "next_sync_committee_branch_electra") + )] + pub next_sync_committee_branch: NextSyncCommitteeBranchElectra, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). #[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -131,7 +160,13 @@ pub struct LightClientUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "finality_branch_altair") + )] pub finality_branch: FinalityBranch, + #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + pub finality_branch: FinalityBranchElectra, /// current sync aggreggate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -160,8 +195,8 @@ impl LightClientUpdate { sync_aggregate: &SyncAggregate, block_slot: Slot, next_sync_committee: Arc>, - next_sync_committee_branch: FixedVector, - finality_branch: FixedVector, + next_sync_committee_branch: Vec, + finality_branch: Vec, attested_block: &SignedBlindedBeaconBlock, finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, @@ -184,9 +219,9 @@ impl LightClientUpdate { Self::Altair(LightClientUpdateAltair { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -204,9 +239,9 @@ impl LightClientUpdate { Self::Capella(LightClientUpdateCapella { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -224,9 +259,9 @@ impl LightClientUpdate { Self::Deneb(LightClientUpdateDeneb { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -244,9 +279,9 @@ impl LightClientUpdate { Self::Electra(LightClientUpdateElectra { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -386,23 +421,54 @@ impl LightClientUpdate { return Ok(new.signature_slot() < self.signature_slot()); } - fn is_next_sync_committee_branch_empty(&self) -> bool { - for index in self.next_sync_committee_branch().iter() { - if *index != Hash256::default() { - return false; - } + fn 
is_next_sync_committee_branch_empty<'a>(&'a self) -> bool { + map_light_client_update_ref!(&'a _, self.to_ref(), |update, cons| { + cons(update); + is_empty_branch(update.next_sync_committee_branch.as_ref()) + }) + } + + pub fn is_finality_branch_empty<'a>(&'a self) -> bool { + map_light_client_update_ref!(&'a _, self.to_ref(), |update, cons| { + cons(update); + is_empty_branch(update.finality_branch.as_ref()) + }) + } + + // A `LightClientUpdate` has two `LightClientHeader`s + // Spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientupdate + #[allow(clippy::arithmetic_side_effects)] + pub fn ssz_max_len_for_fork(fork_name: ForkName) -> usize { + let fixed_len = match fork_name { + ForkName::Base | ForkName::Bellatrix => 0, + ForkName::Altair => as Encode>::ssz_fixed_len(), + ForkName::Capella => as Encode>::ssz_fixed_len(), + ForkName::Deneb => as Encode>::ssz_fixed_len(), + ForkName::Electra => as Encode>::ssz_fixed_len(), + }; + fixed_len + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) + } + + pub fn map_with_fork_name(&self, func: F) -> R + where + F: Fn(ForkName) -> R, + { + match self { + Self::Altair(_) => func(ForkName::Altair), + Self::Capella(_) => func(ForkName::Capella), + Self::Deneb(_) => func(ForkName::Deneb), + Self::Electra(_) => func(ForkName::Electra), } - true } +} - pub fn is_finality_branch_empty(&self) -> bool { - for index in self.finality_branch().iter() { - if *index != Hash256::default() { - return false; - } +fn is_empty_branch(branch: &[Hash256]) -> bool { + for index in branch.iter() { + if *index != Hash256::default() { + return false; } - true } + true } fn compute_sync_committee_period_at_slot( @@ -416,16 +482,53 @@ fn compute_sync_committee_period_at_slot( #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; use ssz_types::typenum::Unsigned; - ssz_tests!(LightClientUpdateDeneb); + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateAltair); + } + + #[cfg(test)] + mod capella { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateCapella); + } + + #[cfg(test)] + mod deneb { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateElectra); + } #[test] fn finalized_root_params() { assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32) <= FINALIZED_ROOT_INDEX); assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32 + 1) > FINALIZED_ROOT_INDEX); assert_eq!(FinalizedRootProofLen::to_usize(), FINALIZED_ROOT_PROOF_LEN); + + assert!( + 2usize.pow(FINALIZED_ROOT_PROOF_LEN_ELECTRA as u32) <= FINALIZED_ROOT_INDEX_ELECTRA + ); + assert!( + 2usize.pow(FINALIZED_ROOT_PROOF_LEN_ELECTRA as u32 + 1) > FINALIZED_ROOT_INDEX_ELECTRA + ); + assert_eq!( + FinalizedRootProofLenElectra::to_usize(), + FINALIZED_ROOT_PROOF_LEN_ELECTRA + ); } #[test] @@ -440,6 +543,19 @@ mod tests { CurrentSyncCommitteeProofLen::to_usize(), CURRENT_SYNC_COMMITTEE_PROOF_LEN ); + + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32) + <= CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32 + 1) + > CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert_eq!( + CurrentSyncCommitteeProofLenElectra::to_usize(), + CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA + ); } #[test] @@ -450,5 +566,18 
@@ mod tests { NextSyncCommitteeProofLen::to_usize(), NEXT_SYNC_COMMITTEE_PROOF_LEN ); + + assert!( + 2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32) + <= NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert!( + 2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32 + 1) + > NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert_eq!( + NextSyncCommitteeProofLenElectra::to_usize(), + NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA + ); } } diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index cee8b8cc219..80a70c171f5 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -39,18 +39,6 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + /// fork-specific fields fn withdrawals_root(&self) -> Result; fn blob_gas_used(&self) -> Result; - fn withdrawal_requests( - &self, - ) -> Result>, Error>; - fn deposit_requests( - &self, - ) -> Result>, Error>; - fn consolidation_requests( - &self, - ) -> Result< - Option>, - Error, - >; /// Is this a default payload with 0x0 roots for transactions and withdrawals? fn is_default_with_zero_roots(&self) -> bool; @@ -290,51 +278,6 @@ impl ExecPayload for FullPayload { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - match self { - FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { - Err(Error::IncorrectStateVariant) - } - FullPayload::Electra(inner) => { - Ok(Some(inner.execution_payload.withdrawal_requests.clone())) - } - } - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - match self { - FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { - Err(Error::IncorrectStateVariant) - } - FullPayload::Electra(inner) => { - Ok(Some(inner.execution_payload.deposit_requests.clone())) - } - } - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - match self { - FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { - Err(Error::IncorrectStateVariant) - } - FullPayload::Electra(inner) => { - Ok(Some(inner.execution_payload.consolidation_requests.clone())) - } - } - } - fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -467,51 +410,6 @@ impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - match self { - FullPayloadRef::Bellatrix(_) - | FullPayloadRef::Capella(_) - | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant), - FullPayloadRef::Electra(inner) => { - Ok(Some(inner.execution_payload.withdrawal_requests.clone())) - } - } - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - match self { - FullPayloadRef::Bellatrix(_) - | FullPayloadRef::Capella(_) - | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant), - FullPayloadRef::Electra(inner) => { - Ok(Some(inner.execution_payload.deposit_requests.clone())) - } - } - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - match self { - FullPayloadRef::Bellatrix(_) - | FullPayloadRef::Capella(_) - | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant), - FullPayloadRef::Electra(inner) => { - Ok(Some(inner.execution_payload.consolidation_requests.clone())) - } - } - } - fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self, move |payload, 
cons| { cons(payload); @@ -692,30 +590,6 @@ impl ExecPayload for BlindedPayload { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - Ok(None) - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - Ok(None) - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - Ok(None) - } - fn is_default_with_zero_roots(&self) -> bool { self.to_ref().is_default_with_zero_roots() } @@ -817,30 +691,6 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - Ok(None) - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - Ok(None) - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - Ok(None) - } - fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_blinded_payload_ref!(&'b _, self, move |payload, cons| { cons(payload); @@ -867,10 +717,7 @@ macro_rules! impl_exec_payload_common { $is_default_with_empty_roots:block, $f:block, $g:block, - $h:block, - $i:block, - $j:block, - $k:block) => { + $h:block) => { impl ExecPayload for $wrapper_type { fn block_type() -> BlockType { BlockType::$block_type_variant @@ -933,30 +780,6 @@ macro_rules! impl_exec_payload_common { let h = $h; h(self) } - - fn withdrawal_requests( - &self, - ) -> Result< - Option>, - Error, - > { - let i = $i; - i(self) - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - let j = $j; - j(self) - } - - fn consolidation_requests( - &self, - ) -> Result::MaxConsolidationRequestsPerPayload>>, Error> { - let k = $k; - k(self) - } } impl From<$wrapped_type> for $wrapper_type { @@ -1002,10 +825,7 @@ macro_rules! impl_exec_payload_for_fork { wrapper_ref_type.blob_gas_used() }; c - }, - { |_| { Ok(None) } }, - { |_| { Ok(None) } }, - { |_| { Ok(None) } } + } ); impl TryInto<$wrapper_type_header> for BlindedPayload { @@ -1092,47 +912,6 @@ macro_rules! 
impl_exec_payload_for_fork { wrapper_ref_type.blob_gas_used() }; c - }, - { - let c: for<'a> fn( - &'a $wrapper_type_full, - ) -> Result< - Option>, - Error, - > = |payload: &$wrapper_type_full| { - let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); - wrapper_ref_type.withdrawal_requests() - }; - c - }, - { - let c: for<'a> fn( - &'a $wrapper_type_full, - ) -> Result< - Option>, - Error, - > = |payload: &$wrapper_type_full| { - let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); - wrapper_ref_type.deposit_requests() - }; - c - }, - { - let c: for<'a> fn( - &'a $wrapper_type_full, - ) -> Result< - Option< - VariableList< - ConsolidationRequest, - ::MaxConsolidationRequestsPerPayload, - >, - >, - Error, - > = |payload: &$wrapper_type_full| { - let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); - wrapper_ref_type.consolidation_requests() - }; - c } ); diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 2c576ed332c..435a74bdc35 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -27,8 +27,6 @@ pub struct BasePreset { #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_upward_multiplier: u64, #[serde(with = "serde_utils::quoted_u64")] - pub safe_slots_to_update_justified: u64, - #[serde(with = "serde_utils::quoted_u64")] pub min_deposit_amount: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_effective_balance: u64, @@ -90,7 +88,6 @@ impl BasePreset { hysteresis_quotient: spec.hysteresis_quotient, hysteresis_downward_multiplier: spec.hysteresis_downward_multiplier, hysteresis_upward_multiplier: spec.hysteresis_upward_multiplier, - safe_slots_to_update_justified: spec.safe_slots_to_update_justified, min_deposit_amount: spec.min_deposit_amount, max_effective_balance: spec.max_effective_balance, effective_balance_increment: spec.effective_balance_increment, diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 4d3279a7f77..b52adcfe412 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -498,6 +498,7 @@ impl SignedBeaconBlockElectra> { execution_payload: BlindedPayloadElectra { .. 
}, bls_to_execution_changes, + blob_kzg_commitments, + execution_requests, + }, }, signature, @@ -521,6 +522,7 @@ impl SignedBeaconBlockElectra> { execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, + blob_kzg_commitments, + execution_requests, diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 3c6037e23e3..8cf118eea59 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, DepositData, Epoch, + EthSpec, FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -35,6 +35,34 @@ pub struct Validator { } impl Validator { + #[allow(clippy::arithmetic_side_effects)] + pub fn from_deposit( + deposit_data: &DepositData, + amount: u64, + fork_name: ForkName, + spec: &ChainSpec, + ) -> Self { + let mut validator = Validator { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: 0, + slashed: false, + }; + + let max_effective_balance = validator.get_max_effective_balance(spec, fork_name); + // Safe math is unnecessary here since `spec.effective_balance_increment` is never zero. + validator.effective_balance = std::cmp::min( + amount - (amount % spec.effective_balance_increment), + max_effective_balance, + ); + + validator + } + /// Returns `true` if the validator is considered active at some epoch. pub fn is_active_at(&self, epoch: Epoch) -> bool { self.activation_epoch <= epoch && epoch < self.exit_epoch @@ -236,7 +264,7 @@ impl Validator { spec: &ChainSpec, current_fork: ForkName, ) -> bool { - let max_effective_balance = self.get_validator_max_effective_balance(spec, current_fork); + let max_effective_balance = self.get_max_effective_balance(spec, current_fork); let has_max_effective_balance = self.effective_balance == max_effective_balance; let has_excess_balance = balance > max_effective_balance; self.has_execution_withdrawal_credential(spec) @@ -251,11 +279,7 @@ impl Validator { } /// Returns the max effective balance for a validator in gwei.
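The `from_deposit` change above applies the spec's initial effective-balance rule: round the deposit down to a multiple of `EFFECTIVE_BALANCE_INCREMENT`, then cap it at the validator's maximum. A self-contained sketch with assumed mainnet constants (1 ETH increment, 32 ETH cap; neither value is taken from this diff):

```rust
// Worked example of the rounding in `Validator::from_deposit` (sketch only).
fn main() {
    let increment: u64 = 1_000_000_000; // assumed EFFECTIVE_BALANCE_INCREMENT (1 ETH in Gwei)
    let max_effective_balance: u64 = 32_000_000_000; // assumed 32 ETH cap

    // A 33.5 ETH deposit: rounded down to 33 ETH, then capped at 32 ETH.
    let amount: u64 = 33_500_000_000;
    let effective = std::cmp::min(amount - (amount % increment), max_effective_balance);
    assert_eq!(effective, 32_000_000_000);

    // A 31.7 ETH deposit: rounded down to 31 ETH, cap not reached.
    let amount: u64 = 31_700_000_000;
    let effective = std::cmp::min(amount - (amount % increment), max_effective_balance);
    assert_eq!(effective, 31_000_000_000);
}
```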
- pub fn get_validator_max_effective_balance( - &self, - spec: &ChainSpec, - current_fork: ForkName, - ) -> u64 { + pub fn get_max_effective_balance(&self, spec: &ChainSpec, current_fork: ForkName) -> u64 { if current_fork >= ForkName::Electra { if self.has_compounding_withdrawal_credential(spec) { spec.max_effective_balance_electra @@ -273,7 +297,7 @@ impl Validator { spec: &ChainSpec, current_fork: ForkName, ) -> u64 { - let max_effective_balance = self.get_validator_max_effective_balance(spec, current_fork); + let max_effective_balance = self.get_max_effective_balance(spec, current_fork); std::cmp::min(validator_balance, max_effective_balance) } } diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 4c7c16757ed..153506f47aa 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -41,12 +41,11 @@ impl VoluntaryExit { spec: &ChainSpec, ) -> SignedVoluntaryExit { let fork_name = spec.fork_name_at_epoch(self.epoch); - let fork_version = match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - spec.fork_version_for_name(fork_name) - } + let fork_version = if fork_name.deneb_enabled() { // EIP-7044 - ForkName::Deneb | ForkName::Electra => spec.fork_version_for_name(ForkName::Capella), + spec.fork_version_for_name(ForkName::Capella) + } else { + spec.fork_version_for_name(fork_name) }; let domain = spec.compute_domain(Domain::VoluntaryExit, fork_version, genesis_validators_root); diff --git a/consensus/types/src/withdrawal_request.rs b/consensus/types/src/withdrawal_request.rs index b6db0efb26d..1296426ac05 100644 --- a/consensus/types/src/withdrawal_request.rs +++ b/consensus/types/src/withdrawal_request.rs @@ -1,6 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Address, PublicKeyBytes}; use serde::{Deserialize, Serialize}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -27,6 +28,18 @@ pub struct WithdrawalRequest { pub amount: u64, } +impl WithdrawalRequest { + pub fn max_size() -> usize { + Self { + source_address: Address::repeat_byte(0), + validator_pubkey: PublicKeyBytes::empty(), + amount: 0, + } + .as_ssz_bytes() + .len() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index e940fe2e20c..ce55f83639b 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -18,11 +18,11 @@ hex = { workspace = true } ethereum_hashing = { workspace = true } c-kzg = { workspace = true } rust_eth_kzg = { workspace = true } +serde_json = { workspace = true } [dev-dependencies] criterion = { workspace = true } serde_json = { workspace = true } -eth2_network_config = { workspace = true } [[bench]] name = "benchmark" diff --git a/crypto/kzg/benches/benchmark.rs b/crypto/kzg/benches/benchmark.rs index 35e370cd0fd..234e624698e 100644 --- a/crypto/kzg/benches/benchmark.rs +++ b/crypto/kzg/benches/benchmark.rs @@ -1,15 +1,14 @@ use c_kzg::KzgSettings; use criterion::{criterion_group, criterion_main, Criterion}; -use eth2_network_config::TRUSTED_SETUP_BYTES; -use kzg::TrustedSetup; +use kzg::{trusted_setup::get_trusted_setup, TrustedSetup}; use rust_eth_kzg::{DASContext, TrustedSetup as PeerDASTrustedSetup}; pub fn bench_init_context(c: &mut Criterion) { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| 
format!("Unable to read trusted setup file: {}", e)) .expect("should have trusted setup"); - c.bench_function(&format!("Initialize context rust_eth_kzg"), |b| { + c.bench_function("Initialize context rust_eth_kzg", |b| { b.iter(|| { let trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); DASContext::new( @@ -20,11 +19,12 @@ pub fn bench_init_context(c: &mut Criterion) { ) }) }); - c.bench_function(&format!("Initialize context c-kzg (4844)"), |b| { + c.bench_function("Initialize context c-kzg (4844)", |b| { b.iter(|| { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) - .map_err(|e| format!("Unable to read trusted setup file: {}", e)) - .expect("should have trusted setup"); + let trusted_setup: TrustedSetup = + serde_json::from_reader(get_trusted_setup().as_slice()) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); KzgSettings::load_trusted_setup(&trusted_setup.g1_points(), &trusted_setup.g2_points()) .unwrap() }) diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index ebe93934fd7..348ed785af0 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -1,6 +1,6 @@ mod kzg_commitment; mod kzg_proof; -mod trusted_setup; +pub mod trusted_setup; use rust_eth_kzg::{CellIndex, DASContext}; use std::fmt::Debug; @@ -51,18 +51,41 @@ impl From for Error { #[derive(Debug)] pub struct Kzg { trusted_setup: KzgSettings, - context: Option, + context: DASContext, } impl Kzg { + pub fn new_from_trusted_setup_no_precomp(trusted_setup: TrustedSetup) -> Result { + let peerdas_trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); + + let context = DASContext::new(&peerdas_trusted_setup, rust_eth_kzg::UsePrecomp::No); + + Ok(Self { + trusted_setup: KzgSettings::load_trusted_setup( + &trusted_setup.g1_points(), + &trusted_setup.g2_points(), + )?, + context, + }) + } + /// Load the kzg trusted setup parameters from a vec of G1 and G2 points. pub fn new_from_trusted_setup(trusted_setup: TrustedSetup) -> Result { + let peerdas_trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); + + let context = DASContext::new( + &peerdas_trusted_setup, + rust_eth_kzg::UsePrecomp::Yes { + width: rust_eth_kzg::constants::RECOMMENDED_PRECOMP_WIDTH, + }, + ); + Ok(Self { trusted_setup: KzgSettings::load_trusted_setup( &trusted_setup.g1_points(), &trusted_setup.g2_points(), )?, - context: None, + context, }) } @@ -88,12 +111,12 @@ impl Kzg { &trusted_setup.g1_points(), &trusted_setup.g2_points(), )?, - context: Some(context), + context, }) } - fn context(&self) -> Result<&DASContext, Error> { - self.context.as_ref().ok_or(Error::DASContextUninitialized) + fn context(&self) -> &DASContext { + &self.context } /// Compute the kzg proof given a blob and its kzg commitment. @@ -200,7 +223,7 @@ impl Kzg { blob: KzgBlobRef<'_>, ) -> Result { let (cells, proofs) = self - .context()? + .context() .compute_cells_and_kzg_proofs(blob) .map_err(Error::PeerDASKZG)?; @@ -226,7 +249,7 @@ impl Kzg { .iter() .map(|commitment| commitment.as_ref()) .collect(); - let verification_result = self.context()?.verify_cell_kzg_proof_batch( + let verification_result = self.context().verify_cell_kzg_proof_batch( commitments.to_vec(), columns, cells.to_vec(), @@ -247,7 +270,7 @@ impl Kzg { cells: &[CellRef<'_>], ) -> Result { let (cells, proofs) = self - .context()? 
+ .context() .recover_cells_and_kzg_proofs(cell_ids.to_vec(), cells.to_vec()) .map_err(Error::PeerDASKZG)?; diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs index 6ddc33df5ab..f788be265a9 100644 --- a/crypto/kzg/src/trusted_setup.rs +++ b/crypto/kzg/src/trusted_setup.rs @@ -5,6 +5,12 @@ use serde::{ Deserialize, Serialize, }; +pub const TRUSTED_SETUP_BYTES: &[u8] = include_bytes!("../trusted_setup.json"); + +pub fn get_trusted_setup() -> Vec { + TRUSTED_SETUP_BYTES.into() +} + /// Wrapper over a BLS G1 point's byte representation. #[derive(Debug, Clone, PartialEq)] struct G1Point([u8; BYTES_PER_G1_POINT]); diff --git a/common/eth2_network_config/built_in_network_configs/trusted_setup.json b/crypto/kzg/trusted_setup.json similarity index 100% rename from common/eth2_network_config/built_in_network_configs/trusted_setup.json rename to crypto/kzg/trusted_setup.json diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index e30ac1f7653..cc61db8af21 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -315,7 +315,7 @@ pub fn migrate_db( runtime_context: &RuntimeContext, log: Logger, ) -> Result<(), Error> { - let spec = &runtime_context.eth2_config.spec; + let spec = runtime_context.eth2_config.spec.clone(); let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); let blobs_path = client_config.get_blobs_db_path(); @@ -348,7 +348,7 @@ pub fn migrate_db( from, to, log, - spec, + &spec, ) } diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index ec3bb5b9edb..94d95a0d1c4 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -103,7 +103,7 @@ pub fn run( network_config: Eth2NetworkConfig, matches: &ArgMatches, ) -> Result<(), String> { - let spec = &network_config.chain_spec::()?; + let spec = Arc::new(network_config.chain_spec::()?); let executor = env.core_context().executor; /* @@ -137,13 +137,15 @@ pub fn run( (Some(pre_state_path), Some(block_path), None) => { info!("Block path: {:?}", block_path); info!("Pre-state path: {:?}", pre_state_path); - let pre_state = load_from_ssz_with(&pre_state_path, spec, BeaconState::from_ssz_bytes)?; - let block = load_from_ssz_with(&block_path, spec, SignedBeaconBlock::from_ssz_bytes)?; + let pre_state = + load_from_ssz_with(&pre_state_path, &spec, BeaconState::from_ssz_bytes)?; + let block = load_from_ssz_with(&block_path, &spec, SignedBeaconBlock::from_ssz_bytes)?; (pre_state, None, block) } (None, None, Some(beacon_url)) => { let block_id: BlockId = parse_required(matches, "block-id")?; let client = BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(HTTP_TIMEOUT)); + let inner_spec = spec.clone(); executor .handle() .ok_or("shutdown in progress")? @@ -155,7 +157,7 @@ pub fn run( .ok_or_else(|| format!("Unable to locate block at {:?}", block_id))? 
.data; - if block.slot() == spec.genesis_slot { + if block.slot() == inner_spec.genesis_slot { return Err("Cannot run on the genesis block".to_string()); } @@ -215,7 +217,7 @@ pub fn run( if config.exclude_cache_builds { pre_state - .build_all_caches(spec) + .build_all_caches(&spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; let state_root = pre_state .update_tree_hash_cache() @@ -251,7 +253,7 @@ pub fn run( &config, &validator_pubkey_cache, &mut saved_ctxt, - spec, + &spec, )?; let duration = Instant::now().duration_since(start); diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 967bd047935..b7fbb531bed 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -54,7 +54,7 @@ clap_utils = { workspace = true } eth2_network_config = { workspace = true } lighthouse_version = { workspace = true } account_utils = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index aa2caa23507..89d759d6629 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -333,7 +333,7 @@ impl EnvironmentBuilder { eth2_network_config: Eth2NetworkConfig, ) -> Result { // Create a new chain spec from the default configuration. - self.eth2_config.spec = eth2_network_config.chain_spec::()?; + self.eth2_config.spec = eth2_network_config.chain_spec::()?.into(); self.eth2_network_config = Some(eth2_network_config); Ok(self) diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 84e8274f06e..34e42a61f67 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -102,4 +102,5 @@ ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index e865fbd272e..e33e4cb9b81 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -115,16 +115,6 @@ fn main() { .global(true) .display_order(0), ) - .arg( - Arg::new("env_log") - .short('l') - .help( - "DEPRECATED Enables environment logging giving access to sub-protocol logs such as discv5 and libp2p", - ) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("logfile") .long("logfile") @@ -333,57 +323,43 @@ fn main() { Arg::new("terminal-total-difficulty-override") .long("terminal-total-difficulty-override") .value_name("INTEGER") - .help("Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. \ - Accepts a 256-bit decimal integer (not a hex value). \ - This flag should only be used if the user has a clear understanding that \ - the broad Ethereum community has elected to override the terminal difficulty. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("terminal-block-hash-override") .long("terminal-block-hash-override") .value_name("TERMINAL_BLOCK_HASH") - .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. 
\ - This flag should only be used if the user has a clear understanding that \ - the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .requires("terminal-block-hash-epoch-override") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("terminal-block-hash-epoch-override") .long("terminal-block-hash-epoch-override") .value_name("EPOCH") - .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH \ - parameter. This flag should only be used if the user has a clear understanding \ - that the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .requires("terminal-block-hash-override") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("safe-slots-to-import-optimistically") .long("safe-slots-to-import-optimistically") .value_name("INTEGER") - .help("Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY \ - parameter. This flag should only be used if the user has a clear understanding \ - that the broad Ethereum community has elected to override this parameter in the event \ - of an attack at the PoS transition block. Incorrect use of this flag can cause your \ - node to possibly accept an invalid chain or sync more slowly. Be extremely careful with \ - this flag.") + .help("DEPRECATED") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("genesis-state-url") @@ -626,20 +602,6 @@ fn run( })); } - let mut tracing_log_path: Option = clap_utils::parse_optional(matches, "logfile")?; - - if tracing_log_path.is_none() { - tracing_log_path = Some( - parse_path_or_default(matches, "datadir")? - .join(DEFAULT_BEACON_NODE_DIR) - .join("logs"), - ) - } - - let path = tracing_log_path.clone().unwrap(); - - logging::create_tracing_layer(path); - // Allow Prometheus to export the time at which the process was started. metrics::expose_process_start_time(&log); @@ -655,6 +617,20 @@ fn run( ); } + // Warn for DEPRECATED global flags. This code should be removed when we finish deleting these + // flags. + let deprecated_flags = [ + "terminal-total-difficulty-override", + "terminal-block-hash-override", + "terminal-block-hash-epoch-override", + "safe-slots-to-import-optimistically", + ]; + for flag in deprecated_flags { + if matches.get_one::(flag).is_some() { + slog::warn!(log, "The {} flag is deprecated and does nothing", flag); + } + } + // Note: the current code technically allows for starting a beacon node _and_ a validator // client at the same time. // @@ -724,6 +700,21 @@ fn run( return Ok(()); } + let mut tracing_log_path: Option = + clap_utils::parse_optional(matches, "logfile")?; + + if tracing_log_path.is_none() { + tracing_log_path = Some( + parse_path_or_default(matches, "datadir")? 
+ .join(DEFAULT_BEACON_NODE_DIR) + .join("logs"), + ) + } + + let path = tracing_log_path.clone().unwrap(); + + logging::create_tracing_layer(path); + executor.clone().spawn( async move { if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { diff --git a/lighthouse/src/metrics.rs b/lighthouse/src/metrics.rs index 0002b43e7b9..30e0120582a 100644 --- a/lighthouse/src/metrics.rs +++ b/lighthouse/src/metrics.rs @@ -1,5 +1,5 @@ -pub use lighthouse_metrics::*; use lighthouse_version::VERSION; +pub use metrics::*; use slog::{error, Logger}; use std::sync::LazyLock; use std::time::{SystemTime, UNIX_EPOCH}; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4899779ad88..2d2e1b8be2e 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -22,7 +22,7 @@ use std::string::ToString; use std::time::Duration; use tempfile::TempDir; use types::non_zero_usize::new_non_zero_usize; -use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, Hash256, MainnetEthSpec}; +use types::{Address, Checkpoint, Epoch, Hash256, MainnetEthSpec}; use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -160,13 +160,6 @@ fn max_skip_slots_flag() { .with_config(|config| assert_eq!(config.chain.import_max_skip_slots, Some(10))); } -#[test] -fn disable_lock_timeouts_flag() { - CommandLineTest::new() - .flag("disable-lock-timeouts", None) - .run_with_zero_port(); -} - #[test] fn shuffling_cache_default() { CommandLineTest::new() @@ -750,61 +743,30 @@ fn jwt_optional_flags() { fn jwt_optional_alias_flags() { run_jwt_optional_flags_test("jwt-secrets", "jwt-id", "jwt-version"); } +// DEPRECATED. This flag is deprecated but should not cause a crash. #[test] fn terminal_total_difficulty_override_flag() { - use beacon_node::beacon_chain::types::Uint256; CommandLineTest::new() .flag("terminal-total-difficulty-override", Some("1337424242")) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!(spec.terminal_total_difficulty, Uint256::from(1337424242)) - }); + .run_with_zero_port(); } +// DEPRECATED. This flag is deprecated but should not cause a crash. #[test] fn terminal_block_hash_and_activation_epoch_override_flags() { CommandLineTest::new() .flag("terminal-block-hash-epoch-override", Some("1337")) - .flag( - "terminal-block-hash-override", - Some("0x4242424242424242424242424242424242424242424242424242424242424242"), - ) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!( - spec.terminal_block_hash, - ExecutionBlockHash::from_str( - "0x4242424242424242424242424242424242424242424242424242424242424242" - ) - .unwrap() - ); - assert_eq!(spec.terminal_block_hash_activation_epoch, 1337); - }); -} -#[test] -#[should_panic] -fn terminal_block_hash_missing_activation_epoch() { - CommandLineTest::new() .flag( "terminal-block-hash-override", Some("0x4242424242424242424242424242424242424242424242424242424242424242"), ) .run_with_zero_port(); } -#[test] -#[should_panic] -fn epoch_override_missing_terminal_block_hash() { - CommandLineTest::new() - .flag("terminal-block-hash-epoch-override", Some("1337")) - .run_with_zero_port(); -} +// DEPRECATED. This flag is deprecated but should not cause a crash. 
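The relocated logging setup above derives a default tracing log directory from the datadir when `--logfile` is absent. A minimal sketch of that fallback, assuming `DEFAULT_BEACON_NODE_DIR` is `"beacon"` (an assumption, not confirmed by this diff):

```rust
use std::path::PathBuf;

// Hypothetical stand-in for the flag parsing and defaulting done in `run()`.
fn resolve_tracing_log_dir(logfile: Option<PathBuf>, datadir: PathBuf) -> PathBuf {
    // Fall back to <datadir>/beacon/logs when --logfile is not given.
    logfile.unwrap_or_else(|| datadir.join("beacon").join("logs"))
}

fn main() {
    let dir = resolve_tracing_log_dir(None, PathBuf::from("/home/user/.lighthouse"));
    assert_eq!(dir, PathBuf::from("/home/user/.lighthouse/beacon/logs"));
}
```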
#[test] fn safe_slots_to_import_optimistically_flag() { CommandLineTest::new() .flag("safe-slots-to-import-optimistically", Some("421337")) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!(spec.safe_slots_to_import_optimistically, 421337) - }); + .run_with_zero_port(); } // Tests for Network flags. @@ -1613,19 +1575,6 @@ fn http_port_flag() { .run() .with_config(|config| assert_eq!(config.http_api.listen_port, port1)); } -#[test] -fn empty_self_limiter_flag() { - // Test that empty rate limiter is accepted using the default rate limiting configurations. - CommandLineTest::new() - .flag("self-limiter", None) - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.network.outbound_rate_limiter_config, - Some(lighthouse_network::rpc::config::OutboundRateLimiterConfig::default()) - ) - }); -} #[test] fn empty_inbound_rate_limiter_flag() { @@ -1668,14 +1617,6 @@ fn http_allow_origin_all_flag() { .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); } -#[test] -fn http_allow_sync_stalled_flag() { - CommandLineTest::new() - .flag("http", None) - .flag("http-allow-sync-stalled", None) - .run_with_zero_port(); -} - #[test] fn http_enable_beacon_processor() { CommandLineTest::new() @@ -1714,22 +1655,6 @@ fn http_tls_flags() { }); } -#[test] -fn http_spec_fork_default() { - CommandLineTest::new() - .flag("http", None) - .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.spec_fork_name, None)); -} - -#[test] -fn http_spec_fork_override() { - CommandLineTest::new() - .flag("http", None) - .flag("http-spec-fork", Some("altair")) - .run_with_zero_port(); -} - // Tests for Metrics flags. #[test] fn metrics_flag() { @@ -2632,14 +2557,6 @@ fn invalid_gossip_verified_blocks_path() { }); } -#[test] -fn progressive_balances_checked() { - // Flag is deprecated but supplying it should not crash until we remove it completely. - CommandLineTest::new() - .flag("progressive-balances", Some("checked")) - .run_with_zero_port(); -} - #[test] fn beacon_processor() { CommandLineTest::new() diff --git a/lighthouse/tests/exec.rs b/lighthouse/tests/exec.rs index 9d6453908c8..5379912c131 100644 --- a/lighthouse/tests/exec.rs +++ b/lighthouse/tests/exec.rs @@ -140,11 +140,6 @@ impl CompletedTest { func(&self.config); } - pub fn with_spec(self, func: F) { - let spec = ChainSpec::from_config::(&self.chain_config).unwrap(); - func(spec); - } - pub fn with_config_and_dir(self, func: F) { func(&self.config, &self.dir); } diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index cb16ca4792c..147a371f0eb 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -1,4 +1,6 @@ -use validator_client::{config::DEFAULT_WEB3SIGNER_KEEP_ALIVE, ApiTopic, Config}; +use validator_client::{ + config::DEFAULT_WEB3SIGNER_KEEP_ALIVE, ApiTopic, BeaconNodeSyncDistanceTiers, Config, +}; use crate::exec::CommandLineTestExec; use bls::{Keypair, PublicKeyBytes}; @@ -12,7 +14,7 @@ use std::str::FromStr; use std::string::ToString; use std::time::Duration; use tempfile::TempDir; -use types::Address; +use types::{Address, Slot}; /// Returns the `lighthouse validator_client` command. fn base_cmd() -> Command { @@ -424,13 +426,6 @@ fn no_doppelganger_protection_flag() { .with_config(|config| assert!(!config.enable_doppelganger_protection)); } -#[test] -fn produce_block_v3_flag() { - // The flag is DEPRECATED but providing it should not trigger an error. 
- // We can delete this test when deleting the flag entirely. - CommandLineTest::new().flag("produce-block-v3", None).run(); -} - #[test] fn no_gas_limit_flag() { CommandLineTest::new() @@ -512,24 +507,6 @@ fn monitoring_endpoint() { }); } -#[test] -fn disable_run_on_all_flag() { - CommandLineTest::new() - .flag("disable-run-on-all", None) - .run() - .with_config(|config| { - assert_eq!(config.broadcast_topics, vec![]); - }); - // --broadcast flag takes precedence - CommandLineTest::new() - .flag("disable-run-on-all", None) - .flag("broadcast", Some("attestations")) - .run() - .with_config(|config| { - assert_eq!(config.broadcast_topics, vec![ApiTopic::Attestations]); - }); -} - #[test] fn no_broadcast_flag() { CommandLineTest::new().run().with_config(|config| { @@ -572,6 +549,33 @@ fn broadcast_flag() { }); } +/// Tests for validator fallback flags. +#[test] +fn beacon_nodes_sync_tolerances_flag_default() { + CommandLineTest::new().run().with_config(|config| { + assert_eq!( + config.beacon_node_fallback.sync_tolerances, + BeaconNodeSyncDistanceTiers::default() + ) + }); +} +#[test] +fn beacon_nodes_sync_tolerances_flag() { + CommandLineTest::new() + .flag("beacon-nodes-sync-tolerances", Some("4,4,4")) + .run() + .with_config(|config| { + assert_eq!( + config.beacon_node_fallback.sync_tolerances, + BeaconNodeSyncDistanceTiers { + synced: Slot::new(4), + small: Slot::new(8), + medium: Slot::new(12), + } + ); + }); +} + #[test] #[should_panic(expected = "Unknown API topic")] fn wrong_broadcast_flag() { @@ -595,16 +599,6 @@ fn disable_latency_measurement_service() { assert!(!config.enable_latency_measurement_service); }); } -#[test] -fn latency_measurement_service() { - // This flag is DEPRECATED so has no effect, but should still be accepted. - CommandLineTest::new() - .flag("latency-measurement-service", Some("false")) - .run() - .with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); -} #[test] fn validator_registration_batch_size() { diff --git a/scripts/cli.sh b/scripts/cli.sh index 6ca019b39e9..ef4ed158ad8 100755 --- a/scripts/cli.sh +++ b/scripts/cli.sh @@ -16,7 +16,7 @@ write_to_file() { printf "# %s\n\n\`\`\`\n%s\n\`\`\`" "$program" "$cmd" > "$file" # Adjust the width of the help text and append to the end of file - sed -i -e '$a\'$'\n''\n''' "$file" + printf "\n\n%s\n" "" >> "$file" } CMD=./target/release/lighthouse @@ -40,7 +40,7 @@ vm_import=./help_vm_import.md vm_move=./help_vm_move.md # create .md files -write_to_file "$general_cli" "$general" "Lighthouse General Commands" +write_to_file "$general_cli" "$general" "Lighthouse CLI Reference" write_to_file "$bn_cli" "$bn" "Beacon Node" write_to_file "$vc_cli" "$vc" "Validator Client" write_to_file "$vm_cli" "$vm" "Validator Manager" diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 0275cb217f8..ca701eb7e91 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -9,7 +9,7 @@ This setup can be useful for testing and development. 1. Install [Kurtosis](https://docs.kurtosis.com/install/). Verify that Kurtosis has been successfully installed by running `kurtosis version` which should display the version. -1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `sudo apt install yq -y`. +1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `snap install yq`. 
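A quick way to confirm both prerequisites are installed and on `PATH` (each command should print a version string):

```bash
kurtosis version
yq --version
```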
## Starting the testnet @@ -82,4 +82,4 @@ The script comes with some CLI options, which can be viewed with `./start_local_ ```bash ./start_local_testnet.sh -b false -``` \ No newline at end of file +``` diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index f90132764e4..1f156886931 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -7,7 +7,7 @@ set -Eeuo pipefail SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" ENCLAVE_NAME=local-testnet NETWORK_PARAMS_FILE=$SCRIPT_DIR/network_params.yaml -ETHEREUM_PKG_VERSION=4.2.0 +ETHEREUM_PKG_VERSION=main BUILD_IMAGE=true BUILDER_PROPOSALS=false diff --git a/scripts/local_testnet/stop_local_testnet.sh b/scripts/local_testnet/stop_local_testnet.sh index 5500f8d5a04..6af1989e9ff 100755 --- a/scripts/local_testnet/stop_local_testnet.sh +++ b/scripts/local_testnet/stop_local_testnet.sh @@ -12,4 +12,5 @@ kurtosis enclave dump $ENCLAVE_NAME $LOGS_SUBDIR echo "Local testnet logs stored to $LOGS_SUBDIR." kurtosis enclave rm -f $ENCLAVE_NAME +kurtosis engine stop echo "Local testnet stopped." diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index d74b0ac062a..56a023df0bb 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -18,7 +18,7 @@ derivative = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } filesystem = { workspace = true } lru = { workspace = true } parking_lot = { workspace = true } @@ -37,7 +37,7 @@ mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", rev = lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } -redb = { version = "2.1", optional = true } +redb = { version = "2.1.4", optional = true } [dev-dependencies] maplit = { workspace = true } diff --git a/slasher/src/database/redb_impl.rs b/slasher/src/database/redb_impl.rs index 6c5b62a44fd..12bef711484 100644 --- a/slasher/src/database/redb_impl.rs +++ b/slasher/src/database/redb_impl.rs @@ -164,13 +164,9 @@ impl<'env> Cursor<'env> { let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(&self.db.table_name); let table = self.txn.open_table(table_definition)?; - let first = table - .iter()? - .next() - .map(|x| x.map(|(key, _)| key.value().to_vec())); + let first = table.first()?.map(|(key, _)| key.value().to_vec()); if let Some(owned_key) = first { - let owned_key = owned_key?; self.current_key = Some(Cow::from(owned_key)); Ok(self.current_key.clone()) } else { @@ -182,13 +178,9 @@ impl<'env> Cursor<'env> { let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(&self.db.table_name); let table = self.txn.open_table(table_definition)?; - let last = table - .iter()? 
- .next_back() - .map(|x| x.map(|(key, _)| key.value().to_vec())); + let last = table.last()?.map(|(key, _)| key.value().to_vec()); if let Some(owned_key) = last { - let owned_key = owned_key?; self.current_key = Some(Cow::from(owned_key)); return Ok(self.current_key.clone()); } diff --git a/slasher/src/metrics.rs b/slasher/src/metrics.rs index 2e49bd4aeba..cfeec2d74ed 100644 --- a/slasher/src/metrics.rs +++ b/slasher/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static SLASHER_DATABASE_SIZE: LazyLock> = LazyLock::new(|| { diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 0aa5f1d38db..390711079f4 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-alpha.5 +TESTS_TAG := v1.5.0-alpha.6 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 9495047e7f9..dacca204c19 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -25,6 +25,8 @@ # Intentionally omitted, as per https://github.com/sigp/lighthouse/issues/1835 "tests/.*/.*/ssz_static/Eth1Block/", "tests/.*/.*/ssz_static/PowBlock/", + # We no longer implement merge logic. + "tests/.*/bellatrix/fork_choice/on_merge_block", # light_client "tests/.*/.*/light_client/single_merkle_proof", "tests/.*/.*/light_client/sync", @@ -46,11 +48,6 @@ "tests/.*/eip6110", "tests/.*/whisk", "tests/.*/eip7594", - # TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - "tests/.*/electra/ssz_static/LightClientUpdate", - "tests/.*/electra/ssz_static/LightClientFinalityUpdate", - "tests/.*/electra/ssz_static/LightClientBootstrap", - "tests/.*/electra/merkle_proof", ] diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 2a2cc067e58..8d933a6fcd5 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -350,11 +350,12 @@ impl Case for ForkChoiceTest { /// A testing rig used to execute a test case. 
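The cursor changes above use redb's `first()`/`last()` table accessors instead of manually advancing an iterator. A standalone sketch of the same call shape (the table name and error plumbing here are hypothetical, not from this diff):

```rust
use redb::{Database, ReadableTable, TableDefinition};

const TABLE: TableDefinition<&[u8], &[u8]> = TableDefinition::new("example");

// Fetch the smallest key in the table, if any.
fn first_key(db: &Database) -> Result<Option<Vec<u8>>, redb::Error> {
    let txn = db.begin_read()?;
    let table = txn.open_table(TABLE)?;
    Ok(table.first()?.map(|(key, _)| key.value().to_vec()))
}
```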
struct Tester { harness: BeaconChainHarness>, - spec: ChainSpec, + spec: Arc, } impl Tester { pub fn new(case: &ForkChoiceTest, spec: ChainSpec) -> Result { + let spec = Arc::new(spec); let genesis_time = case.anchor_state.genesis_time(); if case.anchor_state.slot() != spec.genesis_slot { @@ -504,8 +505,8 @@ impl Tester { } Err(_) => GossipVerifiedBlob::__assumed_valid(blob_sidecar), }; - let result = - self.block_on_dangerous(self.harness.chain.process_gossip_blob(blob))?; + let result = self + .block_on_dangerous(self.harness.chain.process_gossip_blob(blob, || Ok(())))?; if valid { assert!(result.is_ok()); } diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs index f9b3009fded..3dc955bdcc8 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::validate_blob; -use eth2_network_config::TRUSTED_SETUP_BYTES; +use kzg::trusted_setup::get_trusted_setup; use kzg::{Cell, Error as KzgError, Kzg, KzgCommitment, KzgProof, TrustedSetup}; use serde::Deserialize; use std::marker::PhantomData; @@ -10,7 +10,7 @@ use std::sync::LazyLock; use types::Blob; static KZG: LazyLock> = LazyLock::new(|| { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| Error::InternalError(format!("Failed to initialize trusted setup: {:?}", e))) .expect("failed to initialize trusted setup"); let kzg = Kzg::new_from_trusted_setup_das_enabled(trusted_setup) diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index b68bbdc5d39..49c07197848 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -3,8 +3,8 @@ use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use tree_hash::Hash256; use types::{ - BeaconBlockBody, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconState, FixedVector, - FullPayload, Unsigned, + light_client_update, BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, + BeaconBlockBodyElectra, BeaconState, FixedVector, FullPayload, Unsigned, }; #[derive(Debug, Clone, Deserialize)] @@ -22,13 +22,13 @@ pub struct MerkleProof { #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] -pub struct MerkleProofValidity { +pub struct BeaconStateMerkleProofValidity { pub metadata: Option, pub state: BeaconState, pub merkle_proof: MerkleProof, } -impl LoadCase for MerkleProofValidity { +impl LoadCase for BeaconStateMerkleProofValidity { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); let state = ssz_decode_state(&path.join("object.ssz_snappy"), spec)?; @@ -49,11 +49,30 @@ impl LoadCase for MerkleProofValidity { } } -impl Case for MerkleProofValidity { +impl Case for BeaconStateMerkleProofValidity { fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let mut state = self.state.clone(); state.update_tree_hash_cache().unwrap(); - let Ok(proof) = state.compute_merkle_proof(self.merkle_proof.leaf_index) else { + + let proof = match self.merkle_proof.leaf_index { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + | 
light_client_update::CURRENT_SYNC_COMMITTEE_INDEX => { + state.compute_current_sync_committee_proof() + } + light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { + state.compute_next_sync_committee_proof() + } + light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + | light_client_update::FINALIZED_ROOT_INDEX => state.compute_finalized_root_proof(), + _ => { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof, invalid index".to_string(), + )); + } + }; + + let Ok(proof) = proof else { return Err(Error::FailedToParseTest( "Could not retrieve merkle proof".to_string(), )); @@ -198,3 +217,81 @@ impl Case for KzgInclusionMerkleProofValidity { } } } + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct BeaconBlockBodyMerkleProofValidity { + pub metadata: Option, + pub block_body: BeaconBlockBody>, + pub merkle_proof: MerkleProof, +} + +impl LoadCase for BeaconBlockBodyMerkleProofValidity { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let block_body: BeaconBlockBody> = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Bellatrix => { + return Err(Error::InternalError(format!( + "Beacon block body merkle proof validity test skipped for {:?}", + fork_name + ))) + } + ForkName::Capella => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? + .into() + } + ForkName::Deneb => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))?.into() + } + ForkName::Electra => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? + .into() + } + }; + let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; + // Metadata does not exist in these tests but it is left like this just in case. + let meta_path = path.join("meta.yaml"); + let metadata = if meta_path.exists() { + Some(yaml_decode_file(&meta_path)?) 
+ } else { + None + }; + Ok(Self { + metadata, + block_body, + merkle_proof, + }) + } +} + +impl Case for BeaconBlockBodyMerkleProofValidity { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let binding = self.block_body.clone(); + let block_body = binding.to_ref(); + let Ok(proof) = block_body.block_body_merkle_proof(self.merkle_proof.leaf_index) else { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof".to_string(), + )); + }; + let proof_len = proof.len(); + let branch_len = self.merkle_proof.branch.len(); + if proof_len != branch_len { + return Err(Error::NotEqual(format!( + "Branches not equal in length computed: {}, expected {}", + proof_len, branch_len + ))); + } + + for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) { + let expected_leaf = self.merkle_proof.branch[i]; + if *proof_leaf != expected_leaf { + return Err(Error::NotEqual(format!( + "Leaves not equal in merkle proof computed: {}, expected: {}", + hex::encode(proof_leaf), + hex::encode(expected_leaf) + ))); + } + } + + Ok(()) + } +} diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 24184441047..54ca52447f4 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -270,7 +270,7 @@ impl Operation for SyncAggregate { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name != ForkName::Base + fork_name.altair_enabled() } fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index dacaba1dcab..f4a09de32cb 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -627,8 +627,8 @@ impl Handler for ForkChoiceHandler { } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - // Merge block tests are only enabled for Bellatrix. - if self.handler_name == "on_merge_block" && fork_name != ForkName::Bellatrix { + // We no longer run on_merge_block tests since removing merge support. + if self.handler_name == "on_merge_block" { return false; } @@ -921,10 +921,10 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler { #[derive(Derivative)] #[derivative(Default(bound = ""))] -pub struct MerkleProofValidityHandler(PhantomData); +pub struct BeaconStateMerkleProofValidityHandler(PhantomData); -impl Handler for MerkleProofValidityHandler { - type Case = cases::MerkleProofValidity; +impl Handler for BeaconStateMerkleProofValidityHandler { + type Case = cases::BeaconStateMerkleProofValidity; fn config_name() -> &'static str { E::name() } @@ -935,15 +935,11 @@ impl Handler for MerkleProofValidityHandler { } fn handler_name(&self) -> String { - "single_merkle_proof".into() + "single_merkle_proof/BeaconState".into() } - fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool { - // Test is skipped due to some changes in the Capella light client - // spec.
- // - // https://github.com/sigp/lighthouse/issues/4022 - false + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name.altair_enabled() } } @@ -968,7 +964,31 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { // Enabled in Deneb - fork_name == ForkName::Deneb + fork_name.deneb_enabled() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct BeaconBlockBodyMerkleProofValidityHandler(PhantomData); + +impl Handler for BeaconBlockBodyMerkleProofValidityHandler { + type Case = cases::BeaconBlockBodyMerkleProofValidity; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "light_client" + } + + fn handler_name(&self) -> String { + "single_merkle_proof/BeaconBlockBody".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name.capella_enabled() } } @@ -993,8 +1013,7 @@ impl Handler for LightClientUpdateHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Enabled in Altair - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - fork_name != ForkName::Base && fork_name != ForkName::Electra + fork_name.altair_enabled() } } diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 49de073d6ae..a9322e5dd5e 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -80,6 +80,7 @@ type_name_generic!(ExecutionPayloadHeaderBellatrix, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderDeneb, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderElectra, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionRequests); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index a677736d519..3f802d89447 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -396,11 +396,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only() - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only() - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only() + .run(); } // LightClientHeader has no internal indicator of which fork it is for, so we test it separately. @@ -476,13 +475,12 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::deneb_only( ) .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only( - // ) - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only( - // ) - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); } // LightClientUpdate has no internal indicator of which fork it is for, so we test it separately. 
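Several of the gating changes in this diff replace explicit fork lists or equality checks (e.g. `fork_name != ForkName::Base`) with ordered predicates such as `altair_enabled()` and `deneb_enabled()`, so later forks stay enabled by default. A toy model of the idea (the real `ForkName` lives in `consensus/types`):

```rust
// Minimal sketch: variant declaration order drives the derived comparison.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
enum Fork {
    Base,
    Altair,
    Bellatrix,
    Capella,
    Deneb,
    Electra,
}

impl Fork {
    fn altair_enabled(self) -> bool {
        self >= Fork::Altair
    }
    fn deneb_enabled(self) -> bool {
        self >= Fork::Deneb
    }
}

fn main() {
    // A later fork (Electra) is automatically covered by the predicate.
    assert!(Fork::Electra.altair_enabled());
    assert!(!Fork::Capella.deneb_enabled());
}
```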
@@ -506,13 +504,12 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only( - // ) - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only( - // ) - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); } #[test] @@ -679,6 +676,14 @@ mod ssz_static { SszStaticHandler::::electra_and_later().run(); SszStaticHandler::::electra_and_later().run(); } + + #[test] + fn execution_requests() { + SszStaticHandler::, MainnetEthSpec>::electra_and_later() + .run(); + SszStaticHandler::, MinimalEthSpec>::electra_and_later() + .run(); + } } #[test] @@ -818,12 +823,6 @@ fn fork_choice_on_block() { ForkChoiceHandler::::new("on_block").run(); } -#[test] -fn fork_choice_on_merge_block() { - ForkChoiceHandler::::new("on_merge_block").run(); - ForkChoiceHandler::::new("on_merge_block").run(); -} - #[test] fn fork_choice_ex_ante() { ForkChoiceHandler::::new("ex_ante").run(); @@ -920,8 +919,13 @@ fn kzg_recover_cells_and_proofs() { } #[test] -fn merkle_proof_validity() { - MerkleProofValidityHandler::::default().run(); +fn beacon_state_merkle_proof_validity() { + BeaconStateMerkleProofValidityHandler::::default().run(); +} + +#[test] +fn beacon_block_body_merkle_proof_validity() { + BeaconBlockBodyMerkleProofValidityHandler::::default().run(); } #[test] diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index f8769b10e21..7772523284a 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -19,3 +19,4 @@ rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } eth2_network_config = { workspace = true } serde_json = { workspace = true } +kzg = { workspace = true } diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 16badaffc2d..e1cef95cd32 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -11,6 +11,7 @@ use node_test_rig::{ }; use rayon::prelude::*; use std::cmp::max; +use std::sync::Arc; use std::time::Duration; use tokio::time::sleep; use types::{Epoch, EthSpec, MinimalEthSpec}; @@ -98,7 +99,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { .multi_threaded_tokio_runtime()? 
.build()?; - let spec = &mut env.eth2_config.spec; + let mut spec = (*env.eth2_config.spec).clone(); let total_validator_count = validators_per_node * node_count; let genesis_delay = GENESIS_DELAY; @@ -117,6 +118,8 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH)); spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH)); //spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH)); + let spec = Arc::new(spec); + env.eth2_config.spec = spec.clone(); let slot_duration = Duration::from_secs(spec.seconds_per_slot); let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 73984aadad7..3859257fb75 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -10,6 +10,7 @@ use node_test_rig::{ }; use rayon::prelude::*; use std::cmp::max; +use std::sync::Arc; use std::time::Duration; use tokio::time::sleep; use types::{Epoch, EthSpec, MinimalEthSpec}; @@ -28,7 +29,7 @@ const DENEB_FORK_EPOCH: u64 = 2; // This has potential to block CI so it should be set conservatively enough that spurious failures // don't become very common, but not so conservatively that regressions to the fallback mechanism // cannot be detected. -const ACCEPTABLE_FALLBACK_ATTESTATION_HIT_PERCENTAGE: f64 = 85.0; +const ACCEPTABLE_FALLBACK_ATTESTATION_HIT_PERCENTAGE: f64 = 95.0; const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; @@ -105,7 +106,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { .multi_threaded_tokio_runtime()? .build()?; - let spec = &mut env.eth2_config.spec; + let mut spec = (*env.eth2_config.spec).clone(); let total_validator_count = validators_per_vc * vc_count; let node_count = vc_count * bns_per_vc; @@ -122,6 +123,8 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH)); spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH)); //spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH)); + let spec = Arc::new(spec); + env.eth2_config.spec = spec.clone(); let slot_duration = Duration::from_secs(spec.seconds_per_slot); let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index faf3246e0d7..7b9327a7aaa 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -1,5 +1,5 @@ use crate::checks::epoch_delay; -use eth2_network_config::TRUSTED_SETUP_BYTES; +use kzg::trusted_setup::get_trusted_setup; use node_test_rig::{ environment::RuntimeContext, eth2::{types::StateId, BeaconNodeHttpClient}, @@ -46,8 +46,8 @@ fn default_client_config(network_params: LocalNetworkParams, genesis_time: u64) beacon_config.chain.enable_light_client_server = true; beacon_config.http_api.enable_light_client_server = true; beacon_config.chain.optimistic_finalized_sync = false; - beacon_config.trusted_setup = - serde_json::from_reader(TRUSTED_SETUP_BYTES).expect("Trusted setup bytes should be valid"); + beacon_config.trusted_setup = serde_json::from_reader(get_trusted_setup().as_slice()) + .expect("Trusted setup bytes should be valid"); let el_config = execution_layer::Config { execution_endpoint: Some( diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index f6ee01a4ba1..3a039d3c803 
100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -317,7 +317,7 @@ mod tests { validator_definitions: Vec, slashing_protection_config: SlashingProtectionConfig, using_web3signer: bool, - spec: ChainSpec, + spec: Arc, ) -> Self { let log = test_logger(); let validator_dir = TempDir::new().unwrap(); @@ -408,7 +408,7 @@ mod tests { pub async fn new( network: &str, slashing_protection_config: SlashingProtectionConfig, - spec: ChainSpec, + spec: Arc, listen_port: u16, ) -> Self { let signer_rig = @@ -575,7 +575,7 @@ mod tests { /// Test all the "base" (phase 0) types. async fn test_base_types(network: &str, listen_port: u16) { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); - let spec = &network_config.chain_spec::().unwrap(); + let spec = Arc::new(network_config.chain_spec::().unwrap()); TestingRig::new( network, @@ -591,13 +591,16 @@ mod tests { .unwrap() }) .await - .assert_signatures_match("beacon_block_base", |pubkey, validator_store| async move { - let block = BeaconBlock::Base(BeaconBlockBase::empty(spec)); - let block_slot = block.slot(); - validator_store - .sign_block(pubkey, block, block_slot) - .await - .unwrap() + .assert_signatures_match("beacon_block_base", |pubkey, validator_store| { + let spec = spec.clone(); + async move { + let block = BeaconBlock::Base(BeaconBlockBase::empty(&spec)); + let block_slot = block.slot(); + validator_store + .sign_block(pubkey, block, block_slot) + .await + .unwrap() + } }) .await .assert_signatures_match("attestation", |pubkey, validator_store| async move { @@ -645,7 +648,7 @@ mod tests { /// Test all the Altair types. async fn test_altair_types(network: &str, listen_port: u16) { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); - let spec = &network_config.chain_spec::().unwrap(); + let spec = Arc::new(network_config.chain_spec::().unwrap()); let altair_fork_slot = spec .altair_fork_epoch .unwrap() @@ -658,17 +661,17 @@ mod tests { listen_port, ) .await - .assert_signatures_match( - "beacon_block_altair", - |pubkey, validator_store| async move { - let mut altair_block = BeaconBlockAltair::empty(spec); + .assert_signatures_match("beacon_block_altair", |pubkey, validator_store| { + let spec = spec.clone(); + async move { + let mut altair_block = BeaconBlockAltair::empty(&spec); altair_block.slot = altair_fork_slot; validator_store .sign_block(pubkey, BeaconBlock::Altair(altair_block), altair_fork_slot) .await .unwrap() - }, - ) + } + }) .await .assert_signatures_match( "sync_selection_proof", @@ -728,7 +731,7 @@ mod tests { /// Test all the Bellatrix types. 
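The web3signer test changes above move `spec` behind an `Arc` and clone it outside each `async move` block, so the closure remains `Fn` and can be invoked repeatedly without moving the original handle. A reduced sketch of the pattern (assumes tokio; a `Vec<u8>` stands in for `ChainSpec`):

```rust
use std::future::Future;
use std::sync::Arc;

// Invoke the closure twice to show it is reusable: each call clones its own Arc.
async fn call_twice<F, Fut>(f: F)
where
    F: Fn() -> Fut,
    Fut: Future<Output = ()>,
{
    f().await;
    f().await;
}

#[tokio::main]
async fn main() {
    let spec = Arc::new(vec![1u8, 2, 3]); // stand-in for Arc<ChainSpec>
    call_twice(|| {
        let spec = spec.clone(); // clone outside the async block
        async move {
            assert_eq!(spec.len(), 3);
        }
    })
    .await;
}
```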
async fn test_bellatrix_types(network: &str, listen_port: u16) { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); - let spec = &network_config.chain_spec::().unwrap(); + let spec = Arc::new(network_config.chain_spec::().unwrap()); let bellatrix_fork_slot = spec .bellatrix_fork_epoch .unwrap() @@ -741,10 +744,10 @@ mod tests { listen_port, ) .await - .assert_signatures_match( - "beacon_block_bellatrix", - |pubkey, validator_store| async move { - let mut bellatrix_block = BeaconBlockBellatrix::empty(spec); + .assert_signatures_match("beacon_block_bellatrix", |pubkey, validator_store| { + let spec = spec.clone(); + async move { + let mut bellatrix_block = BeaconBlockBellatrix::empty(&spec); bellatrix_block.slot = bellatrix_fork_slot; validator_store .sign_block( @@ -754,8 +757,8 @@ mod tests { ) .await .unwrap() - }, - ) + } + }) .await; } @@ -767,7 +770,7 @@ mod tests { let network = "mainnet"; let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); - let spec = &network_config.chain_spec::().unwrap(); + let spec = Arc::new(network_config.chain_spec::().unwrap()); let bellatrix_fork_slot = spec .bellatrix_fork_epoch .unwrap() @@ -805,7 +808,7 @@ mod tests { }; let first_block = || { - let mut bellatrix_block = BeaconBlockBellatrix::empty(spec); + let mut bellatrix_block = BeaconBlockBellatrix::empty(&spec); bellatrix_block.slot = bellatrix_fork_slot; BeaconBlock::Bellatrix(bellatrix_block) }; diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index bff40b41d5f..86825a9ee3b 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -10,7 +10,6 @@ path = "src/lib.rs" [dev-dependencies] tokio = { workspace = true } -itertools = { workspace = true } [dependencies] tree_hash = { workspace = true } @@ -49,7 +48,7 @@ ethereum_serde_utils = { workspace = true } libsecp256k1 = { workspace = true } ring = { workspace = true } rand = { workspace = true, features = ["small_rng"] } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } monitoring_api = { workspace = true } sensitive_url = { workspace = true } task_executor = { workspace = true } @@ -60,4 +59,5 @@ sysinfo = { workspace = true } system_health = { path = "../common/system_health" } logging = { workspace = true } strum = { workspace = true } +itertools = { workspace = true } fdlimit = "0.3.0" diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index 30fe508a2c2..5363f36f665 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -1,9 +1,8 @@ -use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use crate::{ duties_service::{DutiesService, DutyAndProof}, http_metrics::metrics, validator_store::{Error as ValidatorStoreError, ValidatorStore}, - OfflineOnFailure, }; use environment::RuntimeContext; use futures::future::join_all; @@ -339,21 +338,17 @@ impl AttestationService { let attestation_data = self .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::ATTESTATIONS_HTTP_GET], - ); - beacon_node - .get_validator_attestation_data(slot, committee_index) - .await - .map_err(|e| format!("Failed to produce attestation data: {:?}", e)) - .map(|result| result.data) - }, - ) + .first_success(|beacon_node| 
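The recurring change in these tests is a borrow-checker fix: an `async move` closure can no longer hold `&ChainSpec` once the spec is shared, so each closure clones an `Arc` handle before moving it into the future. A self-contained sketch of the pattern (the `run_twice` helper and the `tokio` dependency are illustrative, not part of this diff):

    use std::future::Future;
    use std::sync::Arc;

    // Calls `f` twice, so `f` must be `Fn` rather than `FnOnce`.
    async fn run_twice<F, Fut>(f: F)
    where
        F: Fn(u64) -> Fut,
        Fut: Future<Output = ()>,
    {
        f(1).await;
        f(2).await;
    }

    #[tokio::main]
    async fn main() {
        let spec = Arc::new(String::from("mainnet spec"));
        run_twice(|slot| {
            // Clone the Arc *outside* `async move`: the future then owns its
            // own handle and the closure can be called again.
            let spec = spec.clone();
            async move {
                println!("signing at slot {slot} with {spec}");
            }
        })
        .await;
    }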
diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml
index bff40b41d5f..86825a9ee3b 100644
--- a/validator_client/Cargo.toml
+++ b/validator_client/Cargo.toml
@@ -10,7 +10,6 @@ path = "src/lib.rs"

 [dev-dependencies]
 tokio = { workspace = true }
-itertools = { workspace = true }

 [dependencies]
 tree_hash = { workspace = true }
@@ -49,7 +48,7 @@ ethereum_serde_utils = { workspace = true }
 libsecp256k1 = { workspace = true }
 ring = { workspace = true }
 rand = { workspace = true, features = ["small_rng"] }
-lighthouse_metrics = { workspace = true }
+metrics = { workspace = true }
 monitoring_api = { workspace = true }
 sensitive_url = { workspace = true }
 task_executor = { workspace = true }
@@ -60,4 +59,5 @@ sysinfo = { workspace = true }
 system_health = { path = "../common/system_health" }
 logging = { workspace = true }
 strum = { workspace = true }
+itertools = { workspace = true }
 fdlimit = "0.3.0"
diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs
index 30fe508a2c2..5363f36f665 100644
--- a/validator_client/src/attestation_service.rs
+++ b/validator_client/src/attestation_service.rs
@@ -1,9 +1,8 @@
-use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced};
+use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback};
 use crate::{
     duties_service::{DutiesService, DutyAndProof},
     http_metrics::metrics,
     validator_store::{Error as ValidatorStoreError, ValidatorStore},
-    OfflineOnFailure,
 };
 use environment::RuntimeContext;
 use futures::future::join_all;
@@ -339,21 +338,17 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
         let attestation_data = self
             .beacon_nodes
-            .first_success(
-                RequireSynced::No,
-                OfflineOnFailure::Yes,
-                |beacon_node| async move {
-                    let _timer = metrics::start_timer_vec(
-                        &metrics::ATTESTATION_SERVICE_TIMES,
-                        &[metrics::ATTESTATIONS_HTTP_GET],
-                    );
-                    beacon_node
-                        .get_validator_attestation_data(slot, committee_index)
-                        .await
-                        .map_err(|e| format!("Failed to produce attestation data: {:?}", e))
-                        .map(|result| result.data)
-                },
-            )
+            .first_success(|beacon_node| async move {
+                let _timer = metrics::start_timer_vec(
+                    &metrics::ATTESTATION_SERVICE_TIMES,
+                    &[metrics::ATTESTATIONS_HTTP_GET],
+                );
+                beacon_node
+                    .get_validator_attestation_data(slot, committee_index)
+                    .await
+                    .map_err(|e| format!("Failed to produce attestation data: {:?}", e))
+                    .map(|result| result.data)
+            })
             .await
             .map_err(|e| e.to_string())?;
@@ -458,26 +453,21 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
         // Post the attestations to the BN.
         match self
             .beacon_nodes
-            .request(
-                RequireSynced::No,
-                OfflineOnFailure::Yes,
-                ApiTopic::Attestations,
-                |beacon_node| async move {
-                    let _timer = metrics::start_timer_vec(
-                        &metrics::ATTESTATION_SERVICE_TIMES,
-                        &[metrics::ATTESTATIONS_HTTP_POST],
-                    );
-                    if fork_name.electra_enabled() {
-                        beacon_node
-                            .post_beacon_pool_attestations_v2(attestations, fork_name)
-                            .await
-                    } else {
-                        beacon_node
-                            .post_beacon_pool_attestations_v1(attestations)
-                            .await
-                    }
-                },
-            )
+            .request(ApiTopic::Attestations, |beacon_node| async move {
+                let _timer = metrics::start_timer_vec(
+                    &metrics::ATTESTATION_SERVICE_TIMES,
+                    &[metrics::ATTESTATIONS_HTTP_POST],
+                );
+                if fork_name.electra_enabled() {
+                    beacon_node
+                        .post_beacon_pool_attestations_v2(attestations, fork_name)
+                        .await
+                } else {
+                    beacon_node
+                        .post_beacon_pool_attestations_v1(attestations)
+                        .await
+                }
+            })
             .await
         {
             Ok(()) => info!(
@@ -540,46 +530,38 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
         let aggregated_attestation = &self
             .beacon_nodes
-            .first_success(
-                RequireSynced::No,
-                OfflineOnFailure::Yes,
-                |beacon_node| async move {
-                    let _timer = metrics::start_timer_vec(
-                        &metrics::ATTESTATION_SERVICE_TIMES,
-                        &[metrics::AGGREGATES_HTTP_GET],
-                    );
-                    if fork_name.electra_enabled() {
-                        beacon_node
-                            .get_validator_aggregate_attestation_v2(
-                                attestation_data.slot,
-                                attestation_data.tree_hash_root(),
-                                committee_index,
-                            )
-                            .await
-                            .map_err(|e| {
-                                format!("Failed to produce an aggregate attestation: {:?}", e)
-                            })?
-                            .ok_or_else(|| {
-                                format!("No aggregate available for {:?}", attestation_data)
-                            })
-                            .map(|result| result.data)
-                    } else {
-                        beacon_node
-                            .get_validator_aggregate_attestation_v1(
-                                attestation_data.slot,
-                                attestation_data.tree_hash_root(),
-                            )
-                            .await
-                            .map_err(|e| {
-                                format!("Failed to produce an aggregate attestation: {:?}", e)
-                            })?
-                            .ok_or_else(|| {
-                                format!("No aggregate available for {:?}", attestation_data)
-                            })
-                            .map(|result| result.data)
-                    }
-                },
-            )
+            .first_success(|beacon_node| async move {
+                let _timer = metrics::start_timer_vec(
+                    &metrics::ATTESTATION_SERVICE_TIMES,
+                    &[metrics::AGGREGATES_HTTP_GET],
+                );
+                if fork_name.electra_enabled() {
+                    beacon_node
+                        .get_validator_aggregate_attestation_v2(
+                            attestation_data.slot,
+                            attestation_data.tree_hash_root(),
+                            committee_index,
+                        )
+                        .await
+                        .map_err(|e| {
+                            format!("Failed to produce an aggregate attestation: {:?}", e)
+                        })?
+                        .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data))
+                        .map(|result| result.data)
+                } else {
+                    beacon_node
+                        .get_validator_aggregate_attestation_v1(
+                            attestation_data.slot,
+                            attestation_data.tree_hash_root(),
+                        )
+                        .await
+                        .map_err(|e| {
+                            format!("Failed to produce an aggregate attestation: {:?}", e)
+                        })?
+                        .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data))
+                        .map(|result| result.data)
+                }
+            })
             .await
             .map_err(|e| e.to_string())?;
@@ -637,30 +619,26 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
         let signed_aggregate_and_proofs_slice = signed_aggregate_and_proofs.as_slice();
         match self
             .beacon_nodes
-            .first_success(
-                RequireSynced::No,
-                OfflineOnFailure::Yes,
-                |beacon_node| async move {
-                    let _timer = metrics::start_timer_vec(
-                        &metrics::ATTESTATION_SERVICE_TIMES,
-                        &[metrics::AGGREGATES_HTTP_POST],
-                    );
-                    if fork_name.electra_enabled() {
-                        beacon_node
-                            .post_validator_aggregate_and_proof_v2(
-                                signed_aggregate_and_proofs_slice,
-                                fork_name,
-                            )
-                            .await
-                    } else {
-                        beacon_node
-                            .post_validator_aggregate_and_proof_v1(
-                                signed_aggregate_and_proofs_slice,
-                            )
-                            .await
-                    }
-                },
-            )
+            .first_success(|beacon_node| async move {
+                let _timer = metrics::start_timer_vec(
+                    &metrics::ATTESTATION_SERVICE_TIMES,
+                    &[metrics::AGGREGATES_HTTP_POST],
+                );
+                if fork_name.electra_enabled() {
+                    beacon_node
+                        .post_validator_aggregate_and_proof_v2(
+                            signed_aggregate_and_proofs_slice,
+                            fork_name,
+                        )
+                        .await
+                } else {
+                    beacon_node
+                        .post_validator_aggregate_and_proof_v1(
+                            signed_aggregate_and_proofs_slice,
+                        )
+                        .await
+                }
+            })
             .await
         {
             Ok(()) => {
diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs
index 58d7f9d8eef..e5fe419983a 100644
--- a/validator_client/src/beacon_node_fallback.rs
+++ b/validator_client/src/beacon_node_fallback.rs
@@ -2,14 +2,19 @@
 //! "fallback" behaviour; it will try a request on all of the nodes until one or none of them
 //! succeed.

-use crate::check_synced::check_synced;
+use crate::beacon_node_health::{
+    BeaconNodeHealth, BeaconNodeSyncDistanceTiers, ExecutionEngineHealth, IsOptimistic,
+    SyncDistanceTier,
+};
+use crate::check_synced::check_node_health;
 use crate::http_metrics::metrics::{inc_counter_vec, ENDPOINT_ERRORS, ENDPOINT_REQUESTS};
 use environment::RuntimeContext;
 use eth2::BeaconNodeHttpClient;
 use futures::future;
-use serde::{Deserialize, Serialize};
-use slog::{debug, error, info, warn, Logger};
+use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer};
+use slog::{debug, error, warn, Logger};
 use slot_clock::SlotClock;
+use std::cmp::Ordering;
 use std::fmt;
 use std::fmt::Debug;
 use std::future::Future;
@@ -18,7 +23,7 @@ use std::sync::Arc;
 use std::time::{Duration, Instant};
 use strum::{EnumString, EnumVariantNames};
 use tokio::{sync::RwLock, time::sleep};
-use types::{ChainSpec, Config, EthSpec};
+use types::{ChainSpec, Config as ConfigSpec, EthSpec, Slot};

 /// Message emitted when the VC detects the BN is using a different spec.
 const UPDATE_REQUIRED_LOG_HINT: &str = "this VC or the remote BN may need updating";
@@ -32,6 +37,16 @@ const UPDATE_REQUIRED_LOG_HINT: &str = "this VC or the remote BN may need updati
 /// having the correct nodes up and running prior to the start of the slot.
 const SLOT_LOOKAHEAD: Duration = Duration::from_secs(2);

+/// If the beacon node slot_clock is within 1 slot, this is deemed acceptable. Otherwise the node
+/// will be marked as CandidateError::TimeDiscrepancy.
+const FUTURE_SLOT_TOLERANCE: Slot = Slot::new(1);
+
+// Configuration for the Beacon Node fallback.
+#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize)]
+pub struct Config {
+    pub sync_tolerances: BeaconNodeSyncDistanceTiers,
+}
+
 /// Indicates a measurement of latency between the VC and a BN.
 pub struct LatencyMeasurement {
     /// An identifier for the beacon node (e.g. the URL).
@@ -76,34 +91,8 @@ pub fn start_fallback_updater_service<T: SlotClock + 'static, E: EthSpec>(
     Ok(())
 }

-/// Indicates if a beacon node must be synced before some action is performed on it.
-#[derive(PartialEq, Clone, Copy)]
-pub enum RequireSynced {
-    Yes,
-    No,
-}
-
-/// Indicates if a beacon node should be set to `Offline` if a request fails.
-#[derive(PartialEq, Clone, Copy)]
-pub enum OfflineOnFailure {
-    Yes,
-    No,
-}
-
-impl PartialEq<bool> for RequireSynced {
-    fn eq(&self, other: &bool) -> bool {
-        if *other {
-            *self == RequireSynced::Yes
-        } else {
-            *self == RequireSynced::No
-        }
-    }
-}
-
 #[derive(Debug)]
 pub enum Error<T> {
-    /// The node was unavailable and we didn't attempt to contact it.
-    Unavailable(CandidateError),
     /// We attempted to contact the node but it failed.
     RequestFailed(T),
 }

@@ -112,7 +101,6 @@ impl<T> Error<T> {
     pub fn request_failure(&self) -> Option<&T> {
         match self {
             Error::RequestFailed(e) => Some(e),
-            _ => None,
         }
     }
 }

@@ -141,106 +129,159 @@ impl<T: Debug> Errors<T> {
 }

 /// Reasons why a candidate might not be ready.
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, PartialEq, Deserialize, Serialize)]
 pub enum CandidateError {
+    PreGenesis,
     Uninitialized,
     Offline,
     Incompatible,
-    NotSynced,
+    TimeDiscrepancy,
+}
+
+impl std::fmt::Display for CandidateError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            CandidateError::PreGenesis => write!(f, "PreGenesis"),
+            CandidateError::Uninitialized => write!(f, "Uninitialized"),
+            CandidateError::Offline => write!(f, "Offline"),
+            CandidateError::Incompatible => write!(f, "Incompatible"),
+            CandidateError::TimeDiscrepancy => write!(f, "TimeDiscrepancy"),
+        }
+    }
+}
+
+#[derive(Debug, Clone, Deserialize)]
+pub struct CandidateInfo {
+    pub index: usize,
+    pub endpoint: String,
+    pub health: Result<BeaconNodeHealth, CandidateError>,
+}
+
+impl Serialize for CandidateInfo {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut state = serializer.serialize_struct("CandidateInfo", 2)?;
+
+        state.serialize_field("index", &self.index)?;
+        state.serialize_field("endpoint", &self.endpoint)?;
+
+        // Serialize either the health or the error field based on the Result
+        match &self.health {
+            Ok(health) => {
+                state.serialize_field("health", health)?;
+            }
+            Err(e) => {
+                state.serialize_field("error", &e.to_string())?;
+            }
+        }
+
+        state.end()
+    }
 }
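A hedged sketch of what this hand-rolled impl produces (assuming `serde_json`; the endpoint value is illustrative): a healthy candidate carries a `health` object, while an unhealthy one flattens the error into a string via the `Display` impl above.

    // Hypothetical usage: serializing notifier info for the HTTP API.
    let info = CandidateInfo {
        index: 1,
        endpoint: "http://localhost:5052/".to_string(),
        health: Err(CandidateError::Offline),
    };
    // The error branch renders as a plain string field:
    // {"index":1,"endpoint":"http://localhost:5052/","error":"Offline"}
    println!("{}", serde_json::to_string(&info).unwrap());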
 /// Represents a `BeaconNodeHttpClient` inside a `BeaconNodeFallback` that may or may not be used
 /// for a query.
+#[derive(Clone, Debug)]
 pub struct CandidateBeaconNode<E> {
-    beacon_node: BeaconNodeHttpClient,
-    status: RwLock<Result<(), CandidateError>>,
+    pub index: usize,
+    pub beacon_node: BeaconNodeHttpClient,
+    pub health: Arc<RwLock<Result<BeaconNodeHealth, CandidateError>>>,
     _phantom: PhantomData<E>,
 }

+impl<E> PartialEq for CandidateBeaconNode<E> {
+    fn eq(&self, other: &Self) -> bool {
+        self.index == other.index && self.beacon_node == other.beacon_node
+    }
+}
+
+impl<E> Eq for CandidateBeaconNode<E> {}
+
 impl<E: EthSpec> CandidateBeaconNode<E> {
     /// Instantiate a new node.
-    pub fn new(beacon_node: BeaconNodeHttpClient) -> Self {
+    pub fn new(beacon_node: BeaconNodeHttpClient, index: usize) -> Self {
         Self {
+            index,
             beacon_node,
-            status: RwLock::new(Err(CandidateError::Uninitialized)),
+            health: Arc::new(RwLock::new(Err(CandidateError::Uninitialized))),
             _phantom: PhantomData,
         }
     }

-    /// Returns the status of `self`.
-    ///
-    /// If `RequiredSynced::No`, any `NotSynced` node will be ignored and mapped to `Ok(())`.
-    pub async fn status(&self, synced: RequireSynced) -> Result<(), CandidateError> {
-        match *self.status.read().await {
-            Err(CandidateError::NotSynced) if synced == false => Ok(()),
-            other => other,
-        }
-    }
-
-    /// Indicate that `self` is offline.
-    pub async fn set_offline(&self) {
-        *self.status.write().await = Err(CandidateError::Offline)
+    /// Returns the health of `self`.
+    pub async fn health(&self) -> Result<BeaconNodeHealth, CandidateError> {
+        *self.health.read().await
     }

-    /// Perform some queries against the node to determine if it is a good candidate, updating
-    /// `self.status` and returning that result.
-    pub async fn refresh_status<T: SlotClock>(
+    pub async fn refresh_health<T: SlotClock>(
         &self,
+        distance_tiers: &BeaconNodeSyncDistanceTiers,
         slot_clock: Option<&T>,
         spec: &ChainSpec,
         log: &Logger,
     ) -> Result<(), CandidateError> {
-        let previous_status = self.status(RequireSynced::Yes).await;
-        let was_offline = matches!(previous_status, Err(CandidateError::Offline));
-
-        let new_status = if let Err(e) = self.is_online(was_offline, log).await {
-            Err(e)
-        } else if let Err(e) = self.is_compatible(spec, log).await {
-            Err(e)
-        } else if let Err(e) = self.is_synced(slot_clock, log).await {
-            Err(e)
-        } else {
-            Ok(())
-        };
-
-        // In case of concurrent use, the latest value will always be used. It's possible that a
-        // long time out might over-ride a recent successful response, leading to a falsely-offline
-        // status. I deem this edge-case acceptable in return for the concurrency benefits of not
-        // holding a write-lock whilst we check the online status of the node.
-        *self.status.write().await = new_status;
-
-        new_status
-    }
+        if let Err(e) = self.is_compatible(spec, log).await {
+            *self.health.write().await = Err(e);
+            return Err(e);
+        }

-    /// Checks if the node is reachable.
-    async fn is_online(&self, was_offline: bool, log: &Logger) -> Result<(), CandidateError> {
-        let result = self
-            .beacon_node
-            .get_node_version()
-            .await
-            .map(|body| body.data.version);
-
-        match result {
-            Ok(version) => {
-                if was_offline {
-                    info!(
-                        log,
-                        "Connected to beacon node";
-                        "version" => version,
-                        "endpoint" => %self.beacon_node,
+        if let Some(slot_clock) = slot_clock {
+            match check_node_health(&self.beacon_node, log).await {
+                Ok((head, is_optimistic, el_offline)) => {
+                    let Some(slot_clock_head) = slot_clock.now() else {
+                        let e = match slot_clock.is_prior_to_genesis() {
+                            Some(true) => CandidateError::PreGenesis,
+                            _ => CandidateError::Uninitialized,
+                        };
+                        *self.health.write().await = Err(e);
+                        return Err(e);
+                    };
+
+                    if head > slot_clock_head + FUTURE_SLOT_TOLERANCE {
+                        let e = CandidateError::TimeDiscrepancy;
+                        *self.health.write().await = Err(e);
+                        return Err(e);
+                    }
+                    let sync_distance = slot_clock_head.saturating_sub(head);
+
+                    // Currently ExecutionEngineHealth is solely determined by online status.
+                    let execution_status = if el_offline {
+                        ExecutionEngineHealth::Unhealthy
+                    } else {
+                        ExecutionEngineHealth::Healthy
+                    };
+
+                    let optimistic_status = if is_optimistic {
+                        IsOptimistic::Yes
+                    } else {
+                        IsOptimistic::No
+                    };
+
+                    let new_health = BeaconNodeHealth::from_status(
+                        self.index,
+                        sync_distance,
+                        head,
+                        optimistic_status,
+                        execution_status,
+                        distance_tiers,
                     );
+
+                    *self.health.write().await = Ok(new_health);
+                    Ok(())
+                }
+                Err(e) => {
+                    // Set the health as `Err` which is sorted last in the list.
+                    *self.health.write().await = Err(e);
+                    Err(e)
                 }
-                Ok(())
-            }
-            Err(e) => {
-                warn!(
-                    log,
-                    "Offline beacon node";
-                    "error" => %e,
-                    "endpoint" => %self.beacon_node,
-                );
-                Err(CandidateError::Offline)
             }
+        } else {
+            // Slot clock will only be `None` at startup.
+            let e = CandidateError::Uninitialized;
+            *self.health.write().await = Err(e);
+            Err(e)
         }
     }

@@ -248,7 +289,7 @@ impl<E: EthSpec> CandidateBeaconNode<E> {
     async fn is_compatible(&self, spec: &ChainSpec, log: &Logger) -> Result<(), CandidateError> {
         let config = self
             .beacon_node
-            .get_config_spec::<Config>()
+            .get_config_spec::<ConfigSpec>()
             .await
             .map_err(|e| {
                 error!(
@@ -324,42 +365,33 @@ impl<E: EthSpec> CandidateBeaconNode<E> {
         Ok(())
     }
-
-    /// Checks if the beacon node is synced.
-    async fn is_synced<T: SlotClock>(
-        &self,
-        slot_clock: Option<&T>,
-        log: &Logger,
-    ) -> Result<(), CandidateError> {
-        if let Some(slot_clock) = slot_clock {
-            check_synced(&self.beacon_node, slot_clock, Some(log)).await
-        } else {
-            // Skip this check if we don't supply a slot clock.
-            Ok(())
-        }
-    }
 }

 /// A collection of `CandidateBeaconNode` that can be used to perform requests with "fallback"
 /// behaviour, where the failure of one candidate results in the next candidate receiving an
 /// identical query.
+#[derive(Clone, Debug)]
 pub struct BeaconNodeFallback<T, E> {
-    candidates: Vec<CandidateBeaconNode<E>>,
+    pub candidates: Arc<RwLock<Vec<CandidateBeaconNode<E>>>>,
+    distance_tiers: BeaconNodeSyncDistanceTiers,
     slot_clock: Option<T>,
     broadcast_topics: Vec<ApiTopic>,
-    spec: ChainSpec,
+    spec: Arc<ChainSpec>,
     log: Logger,
 }

 impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> {
     pub fn new(
         candidates: Vec<CandidateBeaconNode<E>>,
+        config: Config,
         broadcast_topics: Vec<ApiTopic>,
-        spec: ChainSpec,
+        spec: Arc<ChainSpec>,
         log: Logger,
     ) -> Self {
+        let distance_tiers = config.sync_tolerances;
         Self {
-            candidates,
+            candidates: Arc::new(RwLock::new(candidates)),
+            distance_tiers,
             slot_clock: None,
             broadcast_topics,
             spec,
@@ -377,41 +409,56 @@ impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> {
     }

     /// The count of candidates, regardless of their state.
-    pub fn num_total(&self) -> usize {
-        self.candidates.len()
+    pub async fn num_total(&self) -> usize {
+        self.candidates.read().await.len()
     }

-    /// The count of synced and ready candidates.
-    pub async fn num_synced(&self) -> usize {
+    /// The count of candidates that are online and compatible, but not necessarily synced.
+    pub async fn num_available(&self) -> usize {
         let mut n = 0;
-        for candidate in &self.candidates {
-            if candidate.status(RequireSynced::Yes).await.is_ok() {
-                n += 1
+        for candidate in self.candidates.read().await.iter() {
+            match candidate.health().await {
+                Ok(_) | Err(CandidateError::Uninitialized) => n += 1,
+                Err(_) => continue,
             }
         }
         n
     }

-    /// The count of synced and ready fallbacks excluding the primary beacon node candidate.
-    pub async fn num_synced_fallback(&self) -> usize {
-        let mut n = 0;
-        for candidate in self.candidates.iter().skip(1) {
-            if candidate.status(RequireSynced::Yes).await.is_ok() {
-                n += 1
+    // Returns all data required by the VC notifier.
+    pub async fn get_notifier_info(&self) -> (Vec<CandidateInfo>, usize, usize) {
+        let candidates = self.candidates.read().await;
+
+        let mut candidate_info = Vec::with_capacity(candidates.len());
+        let mut num_available = 0;
+        let mut num_synced = 0;
+
+        for candidate in candidates.iter() {
+            let health = candidate.health().await;
+
+            match health {
+                Ok(health) => {
+                    if self
+                        .distance_tiers
+                        .compute_distance_tier(health.health_tier.sync_distance)
+                        == SyncDistanceTier::Synced
+                    {
+                        num_synced += 1;
+                    }
+                    num_available += 1;
+                }
+                Err(CandidateError::Uninitialized) => num_available += 1,
+                Err(_) => (),
             }
-        }
-        n
-    }

-    /// The count of candidates that are online and compatible, but not necessarily synced.
-    pub async fn num_available(&self) -> usize {
-        let mut n = 0;
-        for candidate in &self.candidates {
-            if candidate.status(RequireSynced::No).await.is_ok() {
-                n += 1
-            }
+            candidate_info.push(CandidateInfo {
+                index: candidate.index,
+                endpoint: candidate.beacon_node.to_string(),
+                health,
+            });
         }
-        n
+
+        (candidate_info, num_available, num_synced)
     }

     /// Loop through ALL candidates in `self.candidates` and update their sync status.
@@ -420,26 +467,54 @@ impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> {
     /// low quality responses. To route around this it's best to poll all connected beacon nodes.
     /// A previous implementation of this function polled only the unavailable BNs.
     pub async fn update_all_candidates(&self) {
-        let futures = self
-            .candidates
-            .iter()
-            .map(|candidate| {
-                candidate.refresh_status(self.slot_clock.as_ref(), &self.spec, &self.log)
-            })
-            .collect::<Vec<_>>();
+        // Clone the vec, so we release the read lock immediately.
+        // `candidate.health` is behind an Arc, so this would still allow us to mutate the values.
+        let candidates = self.candidates.read().await.clone();
+        let mut futures = Vec::with_capacity(candidates.len());
+        let mut nodes = Vec::with_capacity(candidates.len());
+
+        for candidate in candidates.iter() {
+            futures.push(candidate.refresh_health(
+                &self.distance_tiers,
+                self.slot_clock.as_ref(),
+                &self.spec,
+                &self.log,
+            ));
+            nodes.push(candidate.beacon_node.to_string());
+        }

-        // run all updates concurrently and ignore errors
-        let _ = future::join_all(futures).await;
+        // Run all updates concurrently.
+        let future_results = future::join_all(futures).await;
+        let results = future_results.iter().zip(nodes);
+
+        for (result, node) in results {
+            if let Err(e) = result {
+                if *e != CandidateError::PreGenesis {
+                    warn!(
+                        self.log,
+                        "A connected beacon node errored during routine health check";
+                        "error" => ?e,
+                        "endpoint" => node,
+                    );
+                }
+            }
+        }
+
+        drop(candidates);
+
+        let mut candidates = self.candidates.write().await;
+        sort_nodes_by_health(&mut candidates).await;
     }

     /// Concurrently send a request to all candidates (regardless of
     /// offline/online) status and attempt to collect a rough reading on the
     /// latency between the VC and candidate.
     pub async fn measure_latency(&self) -> Vec<LatencyMeasurement> {
-        let futures: Vec<_> = self
-            .candidates
-            .iter()
-            .map(|candidate| async {
+        let candidates = self.candidates.read().await;
+        let futures: Vec<_> = candidates
+            .clone()
+            .into_iter()
+            .map(|candidate| async move {
                 let beacon_node_id = candidate.beacon_node.to_string();
                 // The `node/version` endpoint is used since I imagine it would
                 // require the least processing in the BN and therefore measure
@@ -456,6 +531,7 @@ impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> {
                 (beacon_node_id, response_instant)
             })
             .collect();
+        drop(candidates);

         let request_instant = Instant::now();
@@ -475,225 +551,120 @@ impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> {
     /// Run `func` against each candidate in `self`, returning immediately if a result is found.
     /// Otherwise, return all the errors encountered along the way.
-    ///
-    /// First this function will try all nodes with a suitable status. If no candidates are suitable
-    /// or all the requests fail, it will try updating the status of all unsuitable nodes and
-    /// re-running `func` again.
-    pub async fn first_success<'a, F, O, Err, R>(
-        &'a self,
-        require_synced: RequireSynced,
-        offline_on_failure: OfflineOnFailure,
-        func: F,
-    ) -> Result<O, Errors<Err>>
+    pub async fn first_success<F, O, Err, R>(&self, func: F) -> Result<O, Errors<Err>>
     where
-        F: Fn(&'a BeaconNodeHttpClient) -> R,
+        F: Fn(BeaconNodeHttpClient) -> R,
         R: Future<Output = Result<O, Err>>,
         Err: Debug,
     {
         let mut errors = vec![];
-        let mut to_retry = vec![];
-        let mut retry_unsynced = vec![];
-        let log = &self.log.clone();
+
+        // First pass: try `func` on all candidates. Candidate order has already been set in
+        // `update_all_candidates`. This ensures the most suitable node is always tried first.
+        let candidates = self.candidates.read().await;
+        let mut futures = vec![];

         // Run `func` using a `candidate`, returning the value or capturing errors.
-        //
-        // We use a macro instead of a closure here since it is not trivial to move `func` into a
-        // closure.
-        macro_rules! try_func {
-            ($candidate: ident) => {{
-                inc_counter_vec(&ENDPOINT_REQUESTS, &[$candidate.beacon_node.as_ref()]);
-
-                // There exists a race condition where `func` may be called when the candidate is
-                // actually not ready. We deem this an acceptable inefficiency.
-                match func(&$candidate.beacon_node).await {
-                    Ok(val) => return Ok(val),
-                    Err(e) => {
-                        debug!(
-                            log,
-                            "Request to beacon node failed";
-                            "node" => $candidate.beacon_node.to_string(),
-                            "error" => ?e,
-                        );
-                        // If we have an error on this function, make the client as not-ready.
-                        //
-                        // There exists a race condition where the candidate may have been marked
-                        // as ready between the `func` call and now. We deem this an acceptable
-                        // inefficiency.
-                        if matches!(offline_on_failure, OfflineOnFailure::Yes) {
-                            $candidate.set_offline().await;
-                        }
-                        errors.push(($candidate.beacon_node.to_string(), Error::RequestFailed(e)));
-                        inc_counter_vec(&ENDPOINT_ERRORS, &[$candidate.beacon_node.as_ref()]);
-                    }
-                }
-            }};
+        for candidate in candidates.iter() {
+            futures.push(Self::run_on_candidate(
+                candidate.beacon_node.clone(),
+                &func,
+                &self.log,
+            ));
         }
+        drop(candidates);

-        // First pass: try `func` on all synced and ready candidates.
-        //
-        // This ensures that we always choose a synced node if it is available.
-        for candidate in &self.candidates {
-            match candidate.status(RequireSynced::Yes).await {
-                Err(e @ CandidateError::NotSynced) if require_synced == false => {
-                    // This client is unsynced we will try it after trying all synced clients
-                    retry_unsynced.push(candidate);
-                    errors.push((candidate.beacon_node.to_string(), Error::Unavailable(e)));
-                }
-                Err(e) => {
-                    // This client was not ready on the first pass, we might try it again later.
-                    to_retry.push(candidate);
-                    errors.push((candidate.beacon_node.to_string(), Error::Unavailable(e)));
-                }
-                _ => try_func!(candidate),
+        for future in futures {
+            match future.await {
+                Ok(val) => return Ok(val),
+                Err(e) => errors.push(e),
             }
         }

-        // Second pass: try `func` on ready unsynced candidates. This only runs if we permit
-        // unsynced candidates.
-        //
-        // Due to async race-conditions, it is possible that we will send a request to a candidate
-        // that has been set to an offline/unready status. This is acceptable.
-        if require_synced == false {
-            for candidate in retry_unsynced {
-                try_func!(candidate);
-            }
-        }
+        // Second pass. No candidates returned successfully. Try again with the same order.
+        // This will duplicate errors.
+        let candidates = self.candidates.read().await;
+        let mut futures = vec![];

-        // Third pass: try again, attempting to make non-ready clients become ready.
-        for candidate in to_retry {
-            // If the candidate hasn't luckily transferred into the correct state in the meantime,
-            // force an update of the state.
-            let new_status = match candidate.status(require_synced).await {
-                Ok(()) => Ok(()),
-                Err(_) => {
-                    candidate
-                        .refresh_status(self.slot_clock.as_ref(), &self.spec, &self.log)
-                        .await
-                }
-            };
+        // Run `func` using a `candidate`, returning the value or capturing errors.
+        for candidate in candidates.iter() {
+            futures.push(Self::run_on_candidate(
+                candidate.beacon_node.clone(),
+                &func,
+                &self.log,
+            ));
+        }
+        drop(candidates);

-            match new_status {
-                Ok(()) => try_func!(candidate),
-                Err(CandidateError::NotSynced) if require_synced == false => try_func!(candidate),
-                Err(e) => {
-                    errors.push((candidate.beacon_node.to_string(), Error::Unavailable(e)));
-                }
+        for future in futures {
+            match future.await {
+                Ok(val) => return Ok(val),
+                Err(e) => errors.push(e),
             }
         }

-        // There were no candidates already ready and we were unable to make any of them ready.
+        // No candidates returned successfully.
         Err(Errors(errors))
     }

+    /// Run the future `func` on `candidate` while reporting metrics.
+    async fn run_on_candidate<F, O, Err, R>(
+        candidate: BeaconNodeHttpClient,
+        func: F,
+        log: &Logger,
+    ) -> Result<O, (String, Error<Err>)>
+    where
+        F: Fn(BeaconNodeHttpClient) -> R,
+        R: Future<Output = Result<O, Err>>,
+        Err: Debug,
+    {
+        inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.as_ref()]);
+
+        // There exists a race condition where `func` may be called when the candidate is
+        // actually not ready. We deem this an acceptable inefficiency.
+        match func(candidate.clone()).await {
+            Ok(val) => Ok(val),
+            Err(e) => {
+                debug!(
+                    log,
+                    "Request to beacon node failed";
+                    "node" => %candidate,
+                    "error" => ?e,
+                );
+                inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.as_ref()]);
+                Err((candidate.to_string(), Error::RequestFailed(e)))
+            }
+        }
+    }
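For orientation, the new call shape from a consumer's perspective: the closure takes an owned client and returns a future, while ordering, metrics, and error collection live inside `first_success`. A condensed, hedged sketch (the endpoint call and error type are illustrative; `get_node_version` is the same call the removed `is_online` check used):

    let version = beacon_nodes
        .first_success(|beacon_node| async move {
            beacon_node
                .get_node_version()
                .await
                .map(|body| body.data.version)
                .map_err(|e| format!("{e:?}"))
        })
        .await
        .map_err(|e| e.to_string())?;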
     /// Run `func` against all candidates in `self`, collecting the result of `func` against each
     /// candidate.
     ///
-    /// First this function will try all nodes with a suitable status. If no candidates are suitable
-    /// it will try updating the status of all unsuitable nodes and re-running `func` again.
-    ///
     /// Note: This function returns `Ok(())` if `func` returned successfully on all beacon nodes.
     /// It returns a list of errors along with the beacon node id that failed for `func`.
     /// Since this ignores the actual result of `func`, this function should only be used for beacon
     /// node calls whose results we do not care about, only that they completed successfully.
-    pub async fn broadcast<'a, F, O, Err, R>(
-        &'a self,
-        require_synced: RequireSynced,
-        offline_on_failure: OfflineOnFailure,
-        func: F,
-    ) -> Result<(), Errors<Err>>
+    pub async fn broadcast<F, O, Err, R>(&self, func: F) -> Result<(), Errors<Err>>
     where
-        F: Fn(&'a BeaconNodeHttpClient) -> R,
+        F: Fn(BeaconNodeHttpClient) -> R,
         R: Future<Output = Result<O, Err>>,
+        Err: Debug,
     {
-        let mut to_retry = vec![];
-        let mut retry_unsynced = vec![];
+        // Run `func` on all candidates.
+        let candidates = self.candidates.read().await;
+        let mut futures = vec![];

         // Run `func` using a `candidate`, returning the value or capturing errors.
-        let run_on_candidate = |candidate: &'a CandidateBeaconNode<E>| async {
-            inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.beacon_node.as_ref()]);
-
-            // There exists a race condition where `func` may be called when the candidate is
-            // actually not ready. We deem this an acceptable inefficiency.
-            match func(&candidate.beacon_node).await {
-                Ok(val) => Ok(val),
-                Err(e) => {
-                    // If we have an error on this function, mark the client as not-ready.
-                    //
-                    // There exists a race condition where the candidate may have been marked
-                    // as ready between the `func` call and now. We deem this an acceptable
-                    // inefficiency.
-                    if matches!(offline_on_failure, OfflineOnFailure::Yes) {
-                        candidate.set_offline().await;
-                    }
-                    inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.beacon_node.as_ref()]);
-                    Err((candidate.beacon_node.to_string(), Error::RequestFailed(e)))
-                }
-            }
-        };
-
-        // First pass: try `func` on all synced and ready candidates.
-        //
-        // This ensures that we always choose a synced node if it is available.
-        let mut first_batch_futures = vec![];
-        for candidate in &self.candidates {
-            match candidate.status(RequireSynced::Yes).await {
-                Ok(_) => {
-                    first_batch_futures.push(run_on_candidate(candidate));
-                }
-                Err(CandidateError::NotSynced) if require_synced == false => {
-                    // This client is unsynced we will try it after trying all synced clients
-                    retry_unsynced.push(candidate);
-                }
-                Err(_) => {
-                    // This client was not ready on the first pass, we might try it again later.
-                    to_retry.push(candidate);
-                }
-            }
-        }
-        let first_batch_results = futures::future::join_all(first_batch_futures).await;
-
-        // Second pass: try `func` on ready unsynced candidates. This only runs if we permit
-        // unsynced candidates.
-        //
-        // Due to async race-conditions, it is possible that we will send a request to a candidate
-        // that has been set to an offline/unready status. This is acceptable.
-        let second_batch_results = if require_synced == false {
-            futures::future::join_all(retry_unsynced.into_iter().map(run_on_candidate)).await
-        } else {
-            vec![]
-        };
-
-        // Third pass: try again, attempting to make non-ready clients become ready.
-        let mut third_batch_futures = vec![];
-        let mut third_batch_results = vec![];
-        for candidate in to_retry {
-            // If the candidate hasn't luckily transferred into the correct state in the meantime,
-            // force an update of the state.
-            let new_status = match candidate.status(require_synced).await {
-                Ok(()) => Ok(()),
-                Err(_) => {
-                    candidate
-                        .refresh_status(self.slot_clock.as_ref(), &self.spec, &self.log)
-                        .await
-                }
-            };
-
-            match new_status {
-                Ok(()) => third_batch_futures.push(run_on_candidate(candidate)),
-                Err(CandidateError::NotSynced) if require_synced == false => {
-                    third_batch_futures.push(run_on_candidate(candidate))
-                }
-                Err(e) => third_batch_results.push(Err((
-                    candidate.beacon_node.to_string(),
-                    Error::Unavailable(e),
-                ))),
-            }
+        for candidate in candidates.iter() {
+            futures.push(Self::run_on_candidate(
+                candidate.beacon_node.clone(),
+                &func,
+                &self.log,
+            ));
         }
-        third_batch_results.extend(futures::future::join_all(third_batch_futures).await);
+        drop(candidates);

-        let mut results = first_batch_results;
-        results.extend(second_batch_results);
-        results.extend(third_batch_results);
+        let results = future::join_all(futures).await;

         let errors: Vec<_> = results.into_iter().filter_map(|res| res.err()).collect();

@@ -706,29 +677,47 @@ impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> {

     /// Call `func` on first beacon node that returns success or on all beacon nodes
     /// depending on the `topic` and configuration.
-    pub async fn request<'a, F, Err, R>(
-        &'a self,
-        require_synced: RequireSynced,
-        offline_on_failure: OfflineOnFailure,
-        topic: ApiTopic,
-        func: F,
-    ) -> Result<(), Errors<Err>>
+    pub async fn request<F, Err, R>(&self, topic: ApiTopic, func: F) -> Result<(), Errors<Err>>
     where
-        F: Fn(&'a BeaconNodeHttpClient) -> R,
+        F: Fn(BeaconNodeHttpClient) -> R,
         R: Future<Output = Result<(), Err>>,
         Err: Debug,
     {
         if self.broadcast_topics.contains(&topic) {
-            self.broadcast(require_synced, offline_on_failure, func)
-                .await
+            self.broadcast(func).await
         } else {
-            self.first_success(require_synced, offline_on_failure, func)
-                .await?;
+            self.first_success(func).await?;
             Ok(())
         }
     }
 }

+/// Helper functions to allow sorting candidate nodes by health.
+async fn sort_nodes_by_health<E: EthSpec>(nodes: &mut Vec<CandidateBeaconNode<E>>) {
+    // Fetch all health values.
+    let health_results: Vec<Result<BeaconNodeHealth, CandidateError>> =
+        future::join_all(nodes.iter().map(|node| node.health())).await;
+
+    // Pair health results with their indices.
+    let mut indices_with_health: Vec<(usize, Result<BeaconNodeHealth, CandidateError>)> =
+        health_results.into_iter().enumerate().collect();
+
+    // Sort indices based on their health.
+    indices_with_health.sort_by(|a, b| match (&a.1, &b.1) {
+        (Ok(health_a), Ok(health_b)) => health_a.cmp(health_b),
+        (Err(_), Ok(_)) => Ordering::Greater,
+        (Ok(_), Err(_)) => Ordering::Less,
+        (Err(_), Err(_)) => Ordering::Equal,
+    });
+
+    // Reorder candidates based on the sorted indices.
+    let sorted_nodes: Vec<CandidateBeaconNode<E>> = indices_with_health
+        .into_iter()
+        .map(|(index, _)| nodes[index].clone())
+        .collect();
+    *nodes = sorted_nodes;
+}
+
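The comparator's shape is worth seeing in isolation: `Ok` health sorts before `Err`, and `Ok` values defer to `BeaconNodeHealth`'s `Ord` (tier first, `user_index` as the tie-break). A minimal runnable sketch with integers standing in for health tiers:

    use std::cmp::Ordering;

    fn main() {
        // (node index, health): Ok(tier) beats Err(_), lower tiers come first.
        let mut nodes: Vec<(usize, Result<u8, &str>)> =
            vec![(0, Err("Offline")), (1, Ok(4)), (2, Ok(1))];
        nodes.sort_by(|a, b| match (&a.1, &b.1) {
            (Ok(a), Ok(b)) => a.cmp(b),
            (Err(_), Ok(_)) => Ordering::Greater,
            (Ok(_), Err(_)) => Ordering::Less,
            (Err(_), Err(_)) => Ordering::Equal,
        });
        // Healthiest first, offline node last.
        assert_eq!(nodes.iter().map(|n| n.0).collect::<Vec<_>>(), vec![2, 1, 0]);
    }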
 /// Serves as a cue for `BeaconNodeFallback` to tell which requests need to be broadcasted.
 #[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, EnumString, EnumVariantNames)]
 #[strum(serialize_all = "kebab-case")]
@@ -747,10 +736,16 @@ impl ApiTopic {
 }

 #[cfg(test)]
-mod test {
+mod tests {
     use super::*;
+    use crate::beacon_node_health::BeaconNodeHealthTier;
+    use crate::SensitiveUrl;
+    use eth2::Timeouts;
     use std::str::FromStr;
     use strum::VariantNames;
+    use types::{MainnetEthSpec, Slot};
+
+    type E = MainnetEthSpec;

     #[test]
     fn api_topic_all() {
@@ -761,4 +756,115 @@ mod test {
             .map(|topic| ApiTopic::from_str(topic).unwrap())
             .eq(all.into_iter()));
     }
+
+    #[tokio::test]
+    async fn check_candidate_order() {
+        // These fields are irrelevant for sorting. They are set to arbitrary values.
+        let head = Slot::new(99);
+        let optimistic_status = IsOptimistic::No;
+        let execution_status = ExecutionEngineHealth::Healthy;
+
+        fn new_candidate(index: usize) -> CandidateBeaconNode<E> {
+            let beacon_node = BeaconNodeHttpClient::new(
+                SensitiveUrl::parse(&format!("http://example_{index}.com")).unwrap(),
+                Timeouts::set_all(Duration::from_secs(index as u64)),
+            );
+            CandidateBeaconNode::new(beacon_node, index)
+        }
+
+        let candidate_1 = new_candidate(1);
+        let expected_candidate_1 = new_candidate(1);
+        let candidate_2 = new_candidate(2);
+        let expected_candidate_2 = new_candidate(2);
+        let candidate_3 = new_candidate(3);
+        let expected_candidate_3 = new_candidate(3);
+        let candidate_4 = new_candidate(4);
+        let expected_candidate_4 = new_candidate(4);
+        let candidate_5 = new_candidate(5);
+        let expected_candidate_5 = new_candidate(5);
+        let candidate_6 = new_candidate(6);
+        let expected_candidate_6 = new_candidate(6);
+
+        let synced = SyncDistanceTier::Synced;
+        let small = SyncDistanceTier::Small;
+
+        // Despite `health_1` having a larger sync distance, it is inside the `synced` range which
+        // does not tie-break on sync distance and so will tie-break on `user_index` instead.
+        let health_1 = BeaconNodeHealth {
+            user_index: 1,
+            head,
+            optimistic_status,
+            execution_status,
+            health_tier: BeaconNodeHealthTier::new(1, Slot::new(2), synced),
+        };
+        let health_2 = BeaconNodeHealth {
+            user_index: 2,
+            head,
+            optimistic_status,
+            execution_status,
+            health_tier: BeaconNodeHealthTier::new(2, Slot::new(1), synced),
+        };
+
+        // `health_3` and `health_4` have the same health tier and sync distance so should
+        // tie-break on `user_index`.
+        let health_3 = BeaconNodeHealth {
+            user_index: 3,
+            head,
+            optimistic_status,
+            execution_status,
+            health_tier: BeaconNodeHealthTier::new(3, Slot::new(9), small),
+        };
+        let health_4 = BeaconNodeHealth {
+            user_index: 4,
+            head,
+            optimistic_status,
+            execution_status,
+            health_tier: BeaconNodeHealthTier::new(3, Slot::new(9), small),
+        };
+
+        // `health_5` has a smaller sync distance and is outside the `synced` range so should be
+        // sorted first. Note the values of `user_index`.
+        let health_5 = BeaconNodeHealth {
+            user_index: 6,
+            head,
+            optimistic_status,
+            execution_status,
+            health_tier: BeaconNodeHealthTier::new(4, Slot::new(9), small),
+        };
+        let health_6 = BeaconNodeHealth {
+            user_index: 5,
+            head,
+            optimistic_status,
+            execution_status,
+            health_tier: BeaconNodeHealthTier::new(4, Slot::new(10), small),
+        };
+
+        *candidate_1.health.write().await = Ok(health_1);
+        *candidate_2.health.write().await = Ok(health_2);
+        *candidate_3.health.write().await = Ok(health_3);
+        *candidate_4.health.write().await = Ok(health_4);
+        *candidate_5.health.write().await = Ok(health_5);
+        *candidate_6.health.write().await = Ok(health_6);
+
+        let mut candidates = vec![
+            candidate_3,
+            candidate_6,
+            candidate_5,
+            candidate_1,
+            candidate_4,
+            candidate_2,
+        ];
+        let expected_candidates = vec![
+            expected_candidate_1,
+            expected_candidate_2,
+            expected_candidate_3,
+            expected_candidate_4,
+            expected_candidate_5,
+            expected_candidate_6,
+        ];
+
+        sort_nodes_by_health(&mut candidates).await;
+
+        assert_eq!(candidates, expected_candidates);
+    }
 }
diff --git a/validator_client/src/beacon_node_health.rs b/validator_client/src/beacon_node_health.rs
new file mode 100644
index 00000000000..1783bb312cf
--- /dev/null
+++ b/validator_client/src/beacon_node_health.rs
@@ -0,0 +1,420 @@
+use itertools::Itertools;
+use serde::{Deserialize, Serialize};
+use std::cmp::Ordering;
+use std::fmt::{Debug, Display, Formatter};
+use std::str::FromStr;
+use types::Slot;
+
+/// Sync distances between 0 and DEFAULT_SYNC_TOLERANCE are considered `synced`.
+/// Sync distance tiers are determined by the different modifiers.
+///
+/// The default range is the following:
+/// Synced: 0..=8
+/// Small: 9..=16
+/// Medium: 17..=64
+/// Large: 65..
+const DEFAULT_SYNC_TOLERANCE: Slot = Slot::new(8);
+const DEFAULT_SMALL_SYNC_DISTANCE_MODIFIER: Slot = Slot::new(8);
+const DEFAULT_MEDIUM_SYNC_DISTANCE_MODIFIER: Slot = Slot::new(48);
+
+type HealthTier = u8;
+type SyncDistance = Slot;
+
+/// Helpful enum which is used when pattern matching to determine health tier.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
+pub enum SyncDistanceTier {
+    Synced,
+    Small,
+    Medium,
+    Large,
+}
+
+/// Contains the different sync distance tiers which are determined at runtime by the
+/// `beacon-nodes-sync-tolerances` flag.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
+pub struct BeaconNodeSyncDistanceTiers {
+    pub synced: SyncDistance,
+    pub small: SyncDistance,
+    pub medium: SyncDistance,
+}
+
+impl Default for BeaconNodeSyncDistanceTiers {
+    fn default() -> Self {
+        Self {
+            synced: DEFAULT_SYNC_TOLERANCE,
+            small: DEFAULT_SYNC_TOLERANCE + DEFAULT_SMALL_SYNC_DISTANCE_MODIFIER,
+            medium: DEFAULT_SYNC_TOLERANCE
+                + DEFAULT_SMALL_SYNC_DISTANCE_MODIFIER
+                + DEFAULT_MEDIUM_SYNC_DISTANCE_MODIFIER,
+        }
+    }
+}
+
+impl FromStr for BeaconNodeSyncDistanceTiers {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let values: (u64, u64, u64) = s
+            .split(',')
+            .map(|s| {
+                s.parse()
+                    .map_err(|e| format!("Invalid sync distance modifier: {e:?}"))
+            })
+            .collect::<Result<Vec<u64>, _>>()?
+            .into_iter()
+            .collect_tuple()
+            .ok_or("Invalid number of sync distance modifiers".to_string())?;
+
+        Ok(BeaconNodeSyncDistanceTiers {
+            synced: Slot::new(values.0),
+            small: Slot::new(values.0 + values.1),
+            medium: Slot::new(values.0 + values.1 + values.2),
+        })
+    }
+}
+
+impl BeaconNodeSyncDistanceTiers {
+    /// Takes a given sync distance and determines its tier based on the `sync_tolerance` defined by
+    /// the CLI.
+    pub fn compute_distance_tier(&self, distance: SyncDistance) -> SyncDistanceTier {
+        if distance <= self.synced {
+            SyncDistanceTier::Synced
+        } else if distance <= self.small {
+            SyncDistanceTier::Small
+        } else if distance <= self.medium {
+            SyncDistanceTier::Medium
+        } else {
+            SyncDistanceTier::Large
+        }
+    }
+}
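With the defaults above, classification behaves as sketched below (a hedged restatement that mirrors the `sync_tolerance_default` and `sync_tolerance_from_str` tests later in this file, assuming the types above plus `std::str::FromStr` are in scope):

    let tiers = BeaconNodeSyncDistanceTiers::default();
    assert_eq!(tiers.compute_distance_tier(Slot::new(8)), SyncDistanceTier::Synced);
    assert_eq!(tiers.compute_distance_tier(Slot::new(9)), SyncDistanceTier::Small);
    assert_eq!(tiers.compute_distance_tier(Slot::new(65)), SyncDistanceTier::Large);

    // CLI form is "synced,small-modifier,medium-modifier"; modifiers are cumulative.
    let custom = BeaconNodeSyncDistanceTiers::from_str("4,4,4").unwrap();
    assert_eq!(custom.compute_distance_tier(Slot::new(5)), SyncDistanceTier::Small);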
+
+/// Execution Node health metrics.
+///
+/// Currently only considers `el_offline`.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
+pub enum ExecutionEngineHealth {
+    Healthy,
+    Unhealthy,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
+pub enum IsOptimistic {
+    Yes,
+    No,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
+pub struct BeaconNodeHealthTier {
+    pub tier: HealthTier,
+    pub sync_distance: SyncDistance,
+    pub distance_tier: SyncDistanceTier,
+}
+
+impl Display for BeaconNodeHealthTier {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "Tier{}({})", self.tier, self.sync_distance)
+    }
+}
+
+impl Ord for BeaconNodeHealthTier {
+    fn cmp(&self, other: &Self) -> Ordering {
+        let ordering = self.tier.cmp(&other.tier);
+        if ordering == Ordering::Equal {
+            if self.distance_tier == SyncDistanceTier::Synced {
+                // Don't tie-break on sync distance in these cases.
+                // This ensures validator clients don't artificially prefer one node.
+                ordering
+            } else {
+                self.sync_distance.cmp(&other.sync_distance)
+            }
+        } else {
+            ordering
+        }
+    }
+}
+
+impl PartialOrd for BeaconNodeHealthTier {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl BeaconNodeHealthTier {
+    pub fn new(
+        tier: HealthTier,
+        sync_distance: SyncDistance,
+        distance_tier: SyncDistanceTier,
+    ) -> Self {
+        Self {
+            tier,
+            sync_distance,
+            distance_tier,
+        }
+    }
+}
+
+/// Beacon Node Health metrics.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
+pub struct BeaconNodeHealth {
+    // The index of the Beacon Node. This should correspond with its position in the
+    // `--beacon-nodes` list. Note that the `user_index` field is used to tie-break nodes with the
+    // same health so that nodes with a lower index are preferred.
+    pub user_index: usize,
+    // The slot number of the head.
+    pub head: Slot,
+    // Whether the node is optimistically synced.
+    pub optimistic_status: IsOptimistic,
+    // The status of the nodes connected Execution Engine.
+    pub execution_status: ExecutionEngineHealth,
+    // The overall health tier of the Beacon Node. Used to rank the nodes for the purposes of
+    // fallbacks.
+    pub health_tier: BeaconNodeHealthTier,
+}
+
+impl Ord for BeaconNodeHealth {
+    fn cmp(&self, other: &Self) -> Ordering {
+        let ordering = self.health_tier.cmp(&other.health_tier);
+        if ordering == Ordering::Equal {
+            // Tie-break node health by `user_index`.
+            self.user_index.cmp(&other.user_index)
+        } else {
+            ordering
+        }
+    }
+}
+
+impl PartialOrd for BeaconNodeHealth {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl BeaconNodeHealth {
+    pub fn from_status(
+        user_index: usize,
+        sync_distance: Slot,
+        head: Slot,
+        optimistic_status: IsOptimistic,
+        execution_status: ExecutionEngineHealth,
+        distance_tiers: &BeaconNodeSyncDistanceTiers,
+    ) -> Self {
+        let health_tier = BeaconNodeHealth::compute_health_tier(
+            sync_distance,
+            optimistic_status,
+            execution_status,
+            distance_tiers,
+        );
+
+        Self {
+            user_index,
+            head,
+            optimistic_status,
+            execution_status,
+            health_tier,
+        }
+    }
+
+    pub fn get_index(&self) -> usize {
+        self.user_index
+    }
+
+    pub fn get_health_tier(&self) -> BeaconNodeHealthTier {
+        self.health_tier
+    }
+
+    fn compute_health_tier(
+        sync_distance: SyncDistance,
+        optimistic_status: IsOptimistic,
+        execution_status: ExecutionEngineHealth,
+        sync_distance_tiers: &BeaconNodeSyncDistanceTiers,
+    ) -> BeaconNodeHealthTier {
+        let sync_distance_tier = sync_distance_tiers.compute_distance_tier(sync_distance);
+        let health = (sync_distance_tier, optimistic_status, execution_status);
+
+        match health {
+            (SyncDistanceTier::Synced, IsOptimistic::No, ExecutionEngineHealth::Healthy) => {
+                BeaconNodeHealthTier::new(1, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Small, IsOptimistic::No, ExecutionEngineHealth::Healthy) => {
+                BeaconNodeHealthTier::new(2, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Synced, IsOptimistic::No, ExecutionEngineHealth::Unhealthy) => {
+                BeaconNodeHealthTier::new(3, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Medium, IsOptimistic::No, ExecutionEngineHealth::Healthy) => {
+                BeaconNodeHealthTier::new(4, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Synced, IsOptimistic::Yes, ExecutionEngineHealth::Healthy) => {
+                BeaconNodeHealthTier::new(5, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Synced, IsOptimistic::Yes, ExecutionEngineHealth::Unhealthy) => {
+                BeaconNodeHealthTier::new(6, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Small, IsOptimistic::No, ExecutionEngineHealth::Unhealthy) => {
+                BeaconNodeHealthTier::new(7, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Small, IsOptimistic::Yes, ExecutionEngineHealth::Healthy) => {
+                BeaconNodeHealthTier::new(8, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Small, IsOptimistic::Yes, ExecutionEngineHealth::Unhealthy) => {
+                BeaconNodeHealthTier::new(9, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Large, IsOptimistic::No, ExecutionEngineHealth::Healthy) => {
+                BeaconNodeHealthTier::new(10, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Medium, IsOptimistic::No, ExecutionEngineHealth::Unhealthy) => {
+                BeaconNodeHealthTier::new(11, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Medium, IsOptimistic::Yes, ExecutionEngineHealth::Healthy) => {
+                BeaconNodeHealthTier::new(12, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Medium, IsOptimistic::Yes, ExecutionEngineHealth::Unhealthy) => {
+                BeaconNodeHealthTier::new(13, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Large, IsOptimistic::No, ExecutionEngineHealth::Unhealthy) => {
+                BeaconNodeHealthTier::new(14, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Large, IsOptimistic::Yes, ExecutionEngineHealth::Healthy) => {
+                BeaconNodeHealthTier::new(15, sync_distance, sync_distance_tier)
+            }
+            (SyncDistanceTier::Large, IsOptimistic::Yes, ExecutionEngineHealth::Unhealthy) => {
+                BeaconNodeHealthTier::new(16, sync_distance, sync_distance_tier)
+            }
+        }
+    }
+}
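Putting the pieces together, a node's health is a pure function of its observed status. A hedged construction sketch (the values are arbitrary; argument order follows `from_status` above):

    let tiers = BeaconNodeSyncDistanceTiers::default();
    let health = BeaconNodeHealth::from_status(
        0,                              // position in --beacon-nodes, used as tie-break
        Slot::new(2),                   // sync distance: within the Synced range
        Slot::new(100),                 // observed head slot
        IsOptimistic::No,
        ExecutionEngineHealth::Healthy,
        &tiers,
    );
    // Synced, non-optimistic, healthy EE maps to the best tier.
    assert_eq!(health.get_health_tier().tier, 1);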
+
+#[cfg(test)]
+mod tests {
+    use super::ExecutionEngineHealth::{Healthy, Unhealthy};
+    use super::{
+        BeaconNodeHealth, BeaconNodeHealthTier, BeaconNodeSyncDistanceTiers, IsOptimistic,
+        SyncDistanceTier,
+    };
+    use crate::beacon_node_fallback::Config;
+    use std::str::FromStr;
+    use types::Slot;
+
+    #[test]
+    fn all_possible_health_tiers() {
+        let config = Config::default();
+        let beacon_node_sync_distance_tiers = config.sync_tolerances;
+
+        let mut health_vec = vec![];
+
+        for head_slot in 0..=64 {
+            for optimistic_status in &[IsOptimistic::No, IsOptimistic::Yes] {
+                for ee_health in &[Healthy, Unhealthy] {
+                    let health = BeaconNodeHealth::from_status(
+                        0,
+                        Slot::new(0),
+                        Slot::new(head_slot),
+                        *optimistic_status,
+                        *ee_health,
+                        &beacon_node_sync_distance_tiers,
+                    );
+                    health_vec.push(health);
+                }
+            }
+        }
+
+        for health in health_vec {
+            let health_tier = health.get_health_tier();
+            let tier = health_tier.tier;
+            let distance = health_tier.sync_distance;
+
+            let distance_tier = beacon_node_sync_distance_tiers.compute_distance_tier(distance);
+
+            // Check sync distance.
+            if [1, 3, 5, 6].contains(&tier) {
+                assert!(distance_tier == SyncDistanceTier::Synced)
+            } else if [2, 7, 8, 9].contains(&tier) {
+                assert!(distance_tier == SyncDistanceTier::Small);
+            } else if [4, 11, 12, 13].contains(&tier) {
+                assert!(distance_tier == SyncDistanceTier::Medium);
+            } else {
+                assert!(distance_tier == SyncDistanceTier::Large);
+            }
+
+            // Check optimistic status.
+            if [1, 2, 3, 4, 7, 10, 11, 14].contains(&tier) {
+                assert_eq!(health.optimistic_status, IsOptimistic::No);
+            } else {
+                assert_eq!(health.optimistic_status, IsOptimistic::Yes);
+            }
+
+            // Check execution health.
+            if [3, 6, 7, 9, 11, 13, 14, 16].contains(&tier) {
+                assert_eq!(health.execution_status, Unhealthy);
+            } else {
+                assert_eq!(health.execution_status, Healthy);
+            }
+        }
+    }
+
+    fn new_distance_tier(
+        distance: u64,
+        distance_tiers: &BeaconNodeSyncDistanceTiers,
+    ) -> BeaconNodeHealthTier {
+        BeaconNodeHealth::compute_health_tier(
+            Slot::new(distance),
+            IsOptimistic::No,
+            Healthy,
+            distance_tiers,
+        )
+    }
+
+    #[test]
+    fn sync_tolerance_default() {
+        let distance_tiers = BeaconNodeSyncDistanceTiers::default();
+
+        let synced_low = new_distance_tier(0, &distance_tiers);
+        let synced_high = new_distance_tier(8, &distance_tiers);
+
+        let small_low = new_distance_tier(9, &distance_tiers);
+        let small_high = new_distance_tier(16, &distance_tiers);
+
+        let medium_low = new_distance_tier(17, &distance_tiers);
+        let medium_high = new_distance_tier(64, &distance_tiers);
+        let large = new_distance_tier(65, &distance_tiers);
+
+        assert_eq!(synced_low.tier, 1);
+        assert_eq!(synced_high.tier, 1);
+        assert_eq!(small_low.tier, 2);
+        assert_eq!(small_high.tier, 2);
+        assert_eq!(medium_low.tier, 4);
+        assert_eq!(medium_high.tier, 4);
+        assert_eq!(large.tier, 10);
+    }
+
+    #[test]
+    fn sync_tolerance_from_str() {
+        // String should set the tiers as:
+        // synced: 0..=4
+        // small: 5..=8
+        // medium: 9..=12
+        // large: 13..
+
+        let distance_tiers = BeaconNodeSyncDistanceTiers::from_str("4,4,4").unwrap();
+
+        let synced_low = new_distance_tier(0, &distance_tiers);
+        let synced_high = new_distance_tier(4, &distance_tiers);
+
+        let small_low = new_distance_tier(5, &distance_tiers);
+        let small_high = new_distance_tier(8, &distance_tiers);
+
+        let medium_low = new_distance_tier(9, &distance_tiers);
+        let medium_high = new_distance_tier(12, &distance_tiers);
+
+        let large = new_distance_tier(13, &distance_tiers);
+
+        assert_eq!(synced_low.tier, 1);
+        assert_eq!(synced_high.tier, 1);
+        assert_eq!(small_low.tier, 2);
+        assert_eq!(small_high.tier, 2);
+        assert_eq!(medium_low.tier, 4);
+        assert_eq!(medium_high.tier, 4);
+        assert_eq!(large.tier, 10);
+    }
+}
diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs
index af11d82eb53..9903324cade 100644
--- a/validator_client/src/block_service.rs
+++ b/validator_client/src/block_service.rs
@@ -1,9 +1,8 @@
 use crate::beacon_node_fallback::{Error as FallbackError, Errors};
 use crate::{
-    beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced},
+    beacon_node_fallback::{ApiTopic, BeaconNodeFallback},
     determine_graffiti,
     graffiti_file::GraffitiFile,
-    OfflineOnFailure,
 };
 use crate::{
     http_metrics::metrics,
@@ -141,26 +140,16 @@ pub struct ProposerFallback<T, E> {
 impl<T: SlotClock + 'static, E: EthSpec> ProposerFallback<T, E> {
     // Try `func` on `self.proposer_nodes` first. If that doesn't work, try `self.beacon_nodes`.
-    pub async fn request_proposers_first<'a, F, Err, R>(
-        &'a self,
-        require_synced: RequireSynced,
-        offline_on_failure: OfflineOnFailure,
-        func: F,
-    ) -> Result<(), Errors<Err>>
+    pub async fn request_proposers_first<F, Err, R>(&self, func: F) -> Result<(), Errors<Err>>
     where
-        F: Fn(&'a BeaconNodeHttpClient) -> R + Clone,
+        F: Fn(BeaconNodeHttpClient) -> R + Clone,
         R: Future<Output = Result<(), Err>>,
         Err: Debug,
     {
         // If there are proposer nodes, try calling `func` on them and return early if they are successful.
         if let Some(proposer_nodes) = &self.proposer_nodes {
             if proposer_nodes
-                .request(
-                    require_synced,
-                    offline_on_failure,
-                    ApiTopic::Blocks,
-                    func.clone(),
-                )
+                .request(ApiTopic::Blocks, func.clone())
                 .await
                 .is_ok()
             {
                 return Ok(());
             }
         }

         // If the proposer nodes failed, try on the non-proposer nodes.
-        self.beacon_nodes
-            .request(require_synced, offline_on_failure, ApiTopic::Blocks, func)
-            .await
+        self.beacon_nodes.request(ApiTopic::Blocks, func).await
     }

     // Try `func` on `self.beacon_nodes` first. If that doesn't work, try `self.proposer_nodes`.
-    pub async fn request_proposers_last<'a, F, O, Err, R>(
-        &'a self,
-        require_synced: RequireSynced,
-        offline_on_failure: OfflineOnFailure,
-        func: F,
-    ) -> Result<O, Errors<Err>>
+    pub async fn request_proposers_last<F, O, Err, R>(&self, func: F) -> Result<O, Errors<Err>>
     where
-        F: Fn(&'a BeaconNodeHttpClient) -> R + Clone,
+        F: Fn(BeaconNodeHttpClient) -> R + Clone,
         R: Future<Output = Result<O, Err>>,
         Err: Debug,
     {
         // Try running `func` on the non-proposer beacon nodes.
-        let beacon_nodes_result = self
-            .beacon_nodes
-            .first_success(require_synced, offline_on_failure, func.clone())
-            .await;
+        let beacon_nodes_result = self.beacon_nodes.first_success(func.clone()).await;

         match (beacon_nodes_result, &self.proposer_nodes) {
             // The non-proposer node call succeed, return the result.
@@ -198,11 +177,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ProposerFallback<T, E> {
             // The non-proposer node call failed, but we don't have any proposer nodes. Return an error.
             (Err(e), None) => Err(e),
             // The non-proposer node call failed, try the same call on the proposer nodes.
-            (Err(_), Some(proposer_nodes)) => {
-                proposer_nodes
-                    .first_success(require_synced, offline_on_failure, func)
-                    .await
-            }
+            (Err(_), Some(proposer_nodes)) => proposer_nodes.first_success(func).await,
         }
     }
 }

@@ -211,8 +186,8 @@ pub struct Inner<T, E: EthSpec> {
     validator_store: Arc<ValidatorStore<T, E>>,
     slot_clock: Arc<T>,
-    beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
-    proposer_nodes: Option<Arc<BeaconNodeFallback<T, E>>>,
+    pub(crate) beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
+    pub(crate) proposer_nodes: Option<Arc<BeaconNodeFallback<T, E>>>,
     context: RuntimeContext<E>,
     graffiti: Option<Graffiti>,
     graffiti_file: Option<GraffitiFile>,
@@ -418,14 +393,10 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
                 // protect them from DoS attacks and they're most likely to successfully
                 // publish a block.
                 proposer_fallback
-                    .request_proposers_first(
-                        RequireSynced::No,
-                        OfflineOnFailure::Yes,
-                        |beacon_node| async {
-                            self.publish_signed_block_contents(&signed_block, beacon_node)
-                                .await
-                        },
-                    )
+                    .request_proposers_first(|beacon_node| async {
+                        self.publish_signed_block_contents(&signed_block, beacon_node)
+                            .await
+                    })
                     .await?;

                 info!(
@@ -503,32 +474,28 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
         // Try the proposer nodes last, since it's likely that they don't have a
         // great view of attestations on the network.
         let unsigned_block = proposer_fallback
-            .request_proposers_last(
-                RequireSynced::No,
-                OfflineOnFailure::Yes,
-                |beacon_node| async move {
-                    let _get_timer = metrics::start_timer_vec(
-                        &metrics::BLOCK_SERVICE_TIMES,
-                        &[metrics::BEACON_BLOCK_HTTP_GET],
-                    );
-                    Self::get_validator_block(
-                        beacon_node,
-                        slot,
-                        randao_reveal_ref,
-                        graffiti,
-                        proposer_index,
-                        builder_boost_factor,
-                        log,
-                    )
-                    .await
-                    .map_err(|e| {
-                        BlockError::Recoverable(format!(
-                            "Error from beacon node when producing block: {:?}",
-                            e
-                        ))
-                    })
-                },
-            )
+            .request_proposers_last(|beacon_node| async move {
+                let _get_timer = metrics::start_timer_vec(
+                    &metrics::BLOCK_SERVICE_TIMES,
+                    &[metrics::BEACON_BLOCK_HTTP_GET],
+                );
+                Self::get_validator_block(
+                    &beacon_node,
+                    slot,
+                    randao_reveal_ref,
+                    graffiti,
+                    proposer_index,
+                    builder_boost_factor,
+                    log,
+                )
+                .await
+                .map_err(|e| {
+                    BlockError::Recoverable(format!(
+                        "Error from beacon node when producing block: {:?}",
+                        e
+                    ))
+                })
+            })
             .await?;

         self_ref
@@ -547,7 +514,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
     async fn publish_signed_block_contents(
         &self,
         signed_block: &SignedBlock<E>,
-        beacon_node: &BeaconNodeHttpClient,
+        beacon_node: BeaconNodeHttpClient,
     ) -> Result<(), BlockError> {
         let log = self.context.log();
         let slot = signed_block.slot();
@@ -558,7 +525,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
                     &[metrics::BEACON_BLOCK_HTTP_POST],
                 );
                 beacon_node
-                    .post_beacon_blocks(signed_block)
+                    .post_beacon_blocks_v2_ssz(signed_block, None)
                    .await
                    .or_else(|e| handle_block_post_error(e, slot, log))?
             }
@@ -568,7 +535,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
                     &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST],
                 );
                 beacon_node
-                    .post_beacon_blinded_blocks(signed_block)
+                    .post_beacon_blinded_blocks_v2_ssz(signed_block, None)
                    .await
                    .or_else(|e| handle_block_post_error(e, slot, log))?
             }
diff --git a/validator_client/src/check_synced.rs b/validator_client/src/check_synced.rs
index 6437682512d..2e9a62ff65a 100644
--- a/validator_client/src/check_synced.rs
+++ b/validator_client/src/check_synced.rs
@@ -1,80 +1,27 @@
 use crate::beacon_node_fallback::CandidateError;
-use eth2::BeaconNodeHttpClient;
-use slog::{debug, error, warn, Logger};
-use slot_clock::SlotClock;
+use eth2::{types::Slot, BeaconNodeHttpClient};
+use slog::{warn, Logger};

-/// A distance in slots.
-const SYNC_TOLERANCE: u64 = 4; - -/// Returns -/// -/// `Ok(())` if the beacon node is synced and ready for action, -/// `Err(CandidateError::Offline)` if the beacon node is unreachable, -/// `Err(CandidateError::NotSynced)` if the beacon node indicates that it is syncing **AND** -/// it is more than `SYNC_TOLERANCE` behind the highest -/// known slot. -/// -/// The second condition means that even if the beacon node thinks that it's syncing, we'll still -/// try to use it if it's close enough to the head. -pub async fn check_synced<T: SlotClock>( +pub async fn check_node_health( beacon_node: &BeaconNodeHttpClient, - slot_clock: &T, - log_opt: Option<&Logger>, -) -> Result<(), CandidateError> { + log: &Logger, +) -> Result<(Slot, bool, bool), CandidateError> { let resp = match beacon_node.get_node_syncing().await { Ok(resp) => resp, Err(e) => { - if let Some(log) = log_opt { - warn!( - log, - "Unable to connect to beacon node"; - "error" => %e - ) - } - - return Err(CandidateError::Offline); - } - }; - - let bn_is_synced = !resp.data.is_syncing || (resp.data.sync_distance.as_u64() < SYNC_TOLERANCE); - let is_synced = bn_is_synced && !resp.data.el_offline; - - if let Some(log) = log_opt { - if !is_synced { - debug!( - log, - "Beacon node sync status"; - "status" => format!("{:?}", resp), - ); - warn!( log, - "Beacon node is not synced"; - "sync_distance" => resp.data.sync_distance.as_u64(), - "head_slot" => resp.data.head_slot.as_u64(), - "endpoint" => %beacon_node, - "el_offline" => resp.data.el_offline, + "Unable to connect to beacon node"; + "error" => %e ); - } - if let Some(local_slot) = slot_clock.now() { - let remote_slot = resp.data.head_slot + resp.data.sync_distance; - if remote_slot + 1 < local_slot || local_slot + 1 < remote_slot { - error!( - log, - "Time discrepancy with beacon node"; - "msg" => "check the system time on this host and the beacon node", - "beacon_node_slot" => remote_slot, - "local_slot" => local_slot, - "endpoint" => %beacon_node, - ); - } + return Err(CandidateError::Offline); } - } + }; - if is_synced { - Ok(()) - } else { - Err(CandidateError::NotSynced) - } + Ok(( + resp.data.head_slot, + resp.data.is_optimistic, + resp.data.el_offline, + )) } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index f84260a9243..209876f07b0 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -39,20 +39,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - // TODO remove this flag in a future release - .arg( - Arg::new("disable-run-on-all") - .long("disable-run-on-all") - .value_name("DISABLE_RUN_ON_ALL") - .help("DEPRECATED. Use --broadcast. \ - By default, Lighthouse publishes attestation, sync committee subscriptions \ - and proposer preparation messages to all beacon nodes provided in the \ - `--beacon-nodes` flag.
This option changes that behaviour such that these \ - api calls only go out to the first available and synced beacon node") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("broadcast") .long("broadcast") @@ -167,14 +153,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("produce-block-v3") - .long("produce-block-v3") - .help("This flag is deprecated and is no longer in use.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("distributed") .long("distributed") @@ -403,15 +381,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("latency-measurement-service") - .long("latency-measurement-service") - .help("DEPRECATED") - .action(ArgAction::Set) - .help_heading(FLAG_HEADER) - .display_order(0) - .hide(true) - ) .arg( Arg::new("validator-registration-batch-size") .long("validator-registration-batch-size") @@ -444,6 +413,33 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) + .arg( + Arg::new("beacon-nodes-sync-tolerances") + .long("beacon-nodes-sync-tolerances") + .value_name("SYNC_TOLERANCES") + .help("A comma-separated list of 3 values which sets the size of each sync distance range when \ + determining the health of each connected beacon node. \ + The first value determines the `Synced` range. \ + If a connected beacon node is synced to within this number of slots it is considered 'Synced'. \ + The second value determines the `Small` sync distance range. \ + This range starts immediately after the `Synced` range. \ + The third value determines the `Medium` sync distance range. \ + This range starts immediately after the `Small` range. \ + Any sync distance value beyond that is considered `Large`. \ + For example, a value of `8,8,48` would have ranges like the following: \ + `Synced`: 0..=8 \ + `Small`: 9..=16 \ + `Medium`: 17..=64 \ + `Large`: 65.. \ + These values are used to determine the order in which beacon node fallbacks are used. \ + Generally, `Synced` nodes are preferred over `Small` and so on. \ + Nodes in the `Synced` range will tie-break based on their ordering in `--beacon-nodes`. \ + This ensures the primary beacon node is prioritised. \ + [default: 8,8,48]") + .action(ArgAction::Set) + .help_heading(FLAG_HEADER) + .display_order(0) + ) .arg( Arg::new("disable-slashing-protection-web3signer") .long("disable-slashing-protection-web3signer") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 204c5b8b6cc..f42ed551463 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,6 +1,8 @@ use crate::beacon_node_fallback::ApiTopic; use crate::graffiti_file::GraffitiFile; -use crate::{http_api, http_metrics}; +use crate::{ + beacon_node_fallback, beacon_node_health::BeaconNodeSyncDistanceTiers, http_api, http_metrics, +}; use clap::ArgMatches; use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, parse_optional, parse_required}; use directory::{ @@ -14,6 +16,7 @@ use slog::{info, warn, Logger}; use std::fs; use std::net::IpAddr; use std::path::PathBuf; +use std::str::FromStr; use std::time::Duration; use types::{Address, GRAFFITI_BYTES_LEN}; @@ -21,7 +24,7 @@ pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; pub const DEFAULT_WEB3SIGNER_KEEP_ALIVE: Option<Duration> = Some(Duration::from_secs(20)); /// Stores the core configuration for this validator instance.
-#[derive(Clone, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct Config { /// The data directory, which stores all validator databases pub validator_dir: PathBuf, @@ -52,6 +55,8 @@ pub struct Config { pub http_api: http_api::Config, /// Configuration for the HTTP REST API. pub http_metrics: http_metrics::Config, + /// Configuration for the Beacon Node fallback. + pub beacon_node_fallback: beacon_node_fallback::Config, /// Configuration for sending metrics to a remote explorer endpoint. pub monitoring_api: Option<monitoring_api::Config>, /// If true, enable functionality that monitors the network for attestations or proposals from @@ -117,6 +122,7 @@ impl Default for Config { fee_recipient: None, http_api: <_>::default(), http_metrics: <_>::default(), + beacon_node_fallback: <_>::default(), monitoring_api: None, enable_doppelganger_protection: false, enable_high_validator_count_metrics: false, @@ -238,14 +244,6 @@ impl Config { config.distributed = true; } - if cli_args.get_flag("disable-run-on-all") { - warn!( - log, - "The --disable-run-on-all flag is deprecated"; - "msg" => "please use --broadcast instead" - ); - config.broadcast_topics = vec![]; - } if let Some(broadcast_topics) = cli_args.get_one::<String>("broadcast") { config.broadcast_topics = broadcast_topics .split(',') @@ -258,6 +256,16 @@ impl Config { .collect::<Result<_, _>>()?; } + /* + * Beacon node fallback + */ + if let Some(sync_tolerance) = cli_args.get_one::<String>("beacon-nodes-sync-tolerances") { + config.beacon_node_fallback.sync_tolerances = + BeaconNodeSyncDistanceTiers::from_str(sync_tolerance)?; + } else { + config.beacon_node_fallback.sync_tolerances = BeaconNodeSyncDistanceTiers::default(); + } + /* * Web3 signer */ @@ -381,14 +389,6 @@ impl Config { config.prefer_builder_proposals = true; } - if cli_args.get_flag("produce-block-v3") { - warn!( - log, - "produce-block-v3 flag"; - "note" => "deprecated flag has no effect and should be removed" - ); - } - config.gas_limit = cli_args .get_one::<String>("gas-limit") .map(|gas_limit| { @@ -413,17 +413,6 @@ impl Config { config.enable_latency_measurement_service = !cli_args.get_flag("disable-latency-measurement-service"); - if cli_args - .get_one::<String>("latency-measurement-service") - .is_some() - { - warn!( - log, - "latency-measurement-service flag"; - "note" => "deprecated flag has no effect and should be removed" - ); - } - config.validator_registration_batch_size = parse_required(cli_args, "validator-registration-batch-size")?; if config.validator_registration_batch_size == 0 { diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs index 2c8eca85601..1d552cc5ad9 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/src/doppelganger_service.rs @@ -29,9 +29,8 @@ //! //! Doppelganger protection is a best-effort, last-line-of-defence mitigation. Do not rely upon it. -use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::BeaconNodeFallback; use crate::validator_store::ValidatorStore; -use crate::OfflineOnFailure; use environment::RuntimeContext; use eth2::types::LivenessResponseData; use parking_lot::RwLock; @@ -175,12 +174,11 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( } else { // Request the previous epoch liveness state from the beacon node.
beacon_nodes - .first_success( - RequireSynced::Yes, - OfflineOnFailure::Yes, - |beacon_node| async { + .first_success(|beacon_node| { + let validator_indices_ref = &validator_indices; + async move { beacon_node - .post_validator_liveness_epoch(previous_epoch, &validator_indices) + .post_validator_liveness_epoch(previous_epoch, validator_indices_ref) .await .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) .map(|result| { @@ -194,8 +192,8 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( }) .collect() }) - }, - ) + } + }) .await .unwrap_or_else(|e| { crit!( @@ -212,12 +210,11 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( // Request the current epoch liveness state from the beacon node. let current_epoch_responses = beacon_nodes - .first_success( - RequireSynced::Yes, - OfflineOnFailure::Yes, - |beacon_node| async { + .first_success(|beacon_node| { + let validator_indices_ref = &validator_indices; + async move { beacon_node - .post_validator_liveness_epoch(current_epoch, &validator_indices) + .post_validator_liveness_epoch(current_epoch, validator_indices_ref) .await .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) .map(|result| { @@ -231,8 +228,8 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( }) .collect() }) - }, - ) + } + }) .await .unwrap_or_else(|e| { crit!( diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index faa157a8592..cf8d4997920 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -8,7 +8,7 @@ pub mod sync; -use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, OfflineOnFailure, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use crate::http_metrics::metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY}; use crate::{ block_service::BlockServiceNotification, @@ -229,7 +229,7 @@ pub struct DutiesService { /// The runtime for spawning tasks. pub context: RuntimeContext<E>, /// The current chain spec. - pub spec: ChainSpec, + pub spec: Arc<ChainSpec>, /// Whether we permit large validator counts in the metrics. pub enable_high_validator_count_metrics: bool, /// If this validator is running in distributed mode. @@ -517,22 +517,18 @@ async fn poll_validator_indices( // Query the remote BN to resolve a pubkey to a validator index.
let download_result = duties_service .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::VALIDATOR_ID_HTTP_GET], - ); - beacon_node - .get_beacon_states_validator_id( - StateId::Head, - &ValidatorId::PublicKey(pubkey), - ) - .await - }, - ) + .first_success(|beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::VALIDATOR_ID_HTTP_GET], + ); + beacon_node + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(pubkey), + ) + .await + }) .await; let fee_recipient = duties_service @@ -744,20 +740,15 @@ async fn poll_beacon_attesters( let subscriptions_ref = &subscriptions; let subscription_result = duties_service .beacon_nodes - .request( - RequireSynced::No, - OfflineOnFailure::Yes, - ApiTopic::Subscriptions, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::SUBSCRIPTIONS_HTTP_POST], - ); - beacon_node - .post_validator_beacon_committee_subscriptions(subscriptions_ref) - .await - }, - ) + .request(ApiTopic::Subscriptions, |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::SUBSCRIPTIONS_HTTP_POST], + ); + beacon_node + .post_validator_beacon_committee_subscriptions(subscriptions_ref) + .await + }) .await; if subscription_result.as_ref().is_ok() { debug!( @@ -769,7 +760,7 @@ async fn poll_beacon_attesters( subscription_slots.record_successful_subscription_at(current_slot); } } else if let Err(e) = subscription_result { - if e.num_errors() < duties_service.beacon_nodes.num_total() { + if e.num_errors() < duties_service.beacon_nodes.num_total().await { warn!( log, "Some subscriptions failed"; @@ -1037,19 +1028,15 @@ async fn post_validator_duties_attester( ) -> Result>, Error> { duties_service .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::ATTESTER_DUTIES_HTTP_POST], - ); - beacon_node - .post_validator_duties_attester(epoch, validator_indices) - .await - }, - ) + .first_success(|beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::ATTESTER_DUTIES_HTTP_POST], + ); + beacon_node + .post_validator_duties_attester(epoch, validator_indices) + .await + }) .await .map_err(|e| Error::FailedToDownloadAttesters(e.to_string())) } @@ -1273,19 +1260,15 @@ async fn poll_beacon_proposers( if !local_pubkeys.is_empty() { let download_result = duties_service .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::PROPOSER_DUTIES_HTTP_GET], - ); - beacon_node - .get_validator_duties_proposer(current_epoch) - .await - }, - ) + .first_success(|beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::PROPOSER_DUTIES_HTTP_GET], + ); + beacon_node + .get_validator_duties_proposer(current_epoch) + .await + }) .await; match download_result { diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index 3618b47146f..0bd99dc638b 100644 --- a/validator_client/src/duties_service/sync.rs +++ 
b/validator_client/src/duties_service/sync.rs @@ -1,4 +1,3 @@ -use crate::beacon_node_fallback::{OfflineOnFailure, RequireSynced}; use crate::{ doppelganger_service::DoppelgangerStatus, duties_service::{DutiesService, Error}, @@ -442,19 +441,15 @@ pub async fn poll_sync_committee_duties_for_period for Error { pub struct Context { pub task_executor: TaskExecutor, pub api_secret: ApiSecret, + pub block_service: Option<BlockService<T, E>>, pub validator_store: Option<Arc<ValidatorStore<T, E>>>, pub validator_dir: Option<PathBuf>, pub secrets_dir: Option<PathBuf>, pub graffiti_file: Option<GraffitiFile>, pub graffiti_flag: Option<Graffiti>, - pub spec: ChainSpec, + pub spec: Arc<ChainSpec>, pub config: Config, pub log: Logger, pub sse_logging_components: Option<SSELoggingComponents>, @@ -169,6 +171,17 @@ pub fn serve( } }; + let inner_block_service = ctx.block_service.clone(); + let block_service_filter = warp::any() + .map(move || inner_block_service.clone()) + .and_then(|block_service: Option<_>| async move { + block_service.ok_or_else(|| { + warp_utils::reject::custom_not_found( + "block service is not initialized.".to_string(), + ) + }) + }); + let inner_validator_store = ctx.validator_store.clone(); let validator_store_filter = warp::any() .map(move || inner_validator_store.clone()) @@ -217,7 +230,7 @@ pub fn serve( let inner_slot_clock = ctx.slot_clock.clone(); let slot_clock_filter = warp::any().map(move || inner_slot_clock.clone()); - let inner_spec = Arc::new(ctx.spec.clone()); + let inner_spec = ctx.spec.clone(); let spec_filter = warp::any().map(move || inner_spec.clone()); let api_token_path_inner = api_token_path.clone(); @@ -398,6 +411,40 @@ pub fn serve( }, ); + // GET lighthouse/ui/fallback_health + let get_lighthouse_ui_fallback_health = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("fallback_health")) + .and(warp::path::end()) + .and(block_service_filter.clone()) + .then(|block_filter: BlockService<T, E>| async move { + let mut result: HashMap<String, Vec<CandidateInfo>> = HashMap::new(); + + let mut beacon_nodes = Vec::new(); + for node in &*block_filter.beacon_nodes.candidates.read().await { + beacon_nodes.push(CandidateInfo { + index: node.index, + endpoint: node.beacon_node.to_string(), + health: *node.health.read().await, + }); + } + result.insert("beacon_nodes".to_string(), beacon_nodes); + + if let Some(proposer_nodes_list) = &block_filter.proposer_nodes { + let mut proposer_nodes = Vec::new(); + for node in &*proposer_nodes_list.candidates.read().await { + proposer_nodes.push(CandidateInfo { + index: node.index, + endpoint: node.beacon_node.to_string(), + health: *node.health.read().await, + }); + } + result.insert("proposer_nodes".to_string(), proposer_nodes); + } + + blocking_json_task(move || Ok(api_types::GenericResponse::from(result))).await + }); + // POST lighthouse/validators/ let post_validators = warp::path("lighthouse") .and(warp::path("validators")) @@ -1253,6 +1300,7 @@ pub fn serve( .or(get_lighthouse_validators_pubkey) .or(get_lighthouse_ui_health) .or(get_lighthouse_ui_graffiti) + .or(get_lighthouse_ui_fallback_health) .or(get_fee_recipient) .or(get_gas_limit) .or(get_graffiti) diff --git a/validator_client/src/http_api/test_utils.rs b/validator_client/src/http_api/test_utils.rs index 8bb56e87a32..119c611553e 100644 --- a/validator_client/src/http_api/test_utils.rs +++ b/validator_client/src/http_api/test_utils.rs @@ -96,7 +96,7 @@ impl ApiTester { ..Default::default() }; - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); let slashing_protection =
SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); @@ -110,7 +110,7 @@ impl ApiTester { initialized_validators, slashing_protection, Hash256::repeat_byte(42), - spec, + spec.clone(), Some(Arc::new(DoppelgangerService::new(log.clone()))), slot_clock.clone(), &config, @@ -127,12 +127,13 @@ impl ApiTester { let context = Arc::new(Context { task_executor: test_runtime.task_executor.clone(), api_secret, + block_service: None, validator_dir: Some(validator_dir.path().into()), secrets_dir: Some(secrets_dir.path().into()), validator_store: Some(validator_store.clone()), graffiti_file: None, graffiti_flag: Some(Graffiti::default()), - spec: E::default_spec(), + spec, config: http_config, log, sse_logging_components: None, diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index ce1937d4379..ba3b7f685b9 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -80,7 +80,7 @@ impl ApiTester { config.validator_dir = validator_dir.path().into(); config.secrets_dir = secrets_dir.path().into(); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); @@ -115,12 +115,13 @@ impl ApiTester { let context = Arc::new(Context { task_executor: test_runtime.task_executor.clone(), api_secret, + block_service: None, validator_dir: Some(validator_dir.path().into()), secrets_dir: Some(secrets_dir.path().into()), validator_store: Some(validator_store.clone()), graffiti_file: None, graffiti_flag: Some(Graffiti::default()), - spec: E::default_spec(), + spec: E::default_spec().into(), config: HttpConfig { enabled: true, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 8bc569c67a2..57e1080fd9b 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -38,7 +38,7 @@ pub const SUBSCRIPTIONS: &str = "subscriptions"; pub const LOCAL_KEYSTORE: &str = "local_keystore"; pub const WEB3SIGNER: &str = "web3signer"; -pub use lighthouse_metrics::*; +pub use metrics::*; pub static GENESIS_DISTANCE: LazyLock> = LazyLock::new(|| { try_create_int_gauge( @@ -316,9 +316,7 @@ pub fn gather_prometheus_metrics( warp_utils::metrics::scrape_health_metrics(); - encoder - .encode(&lighthouse_metrics::gather(), &mut buffer) - .unwrap(); + encoder.encode(&metrics::gather(), &mut buffer).unwrap(); String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index c94115e5ec5..0ef9a6a13d0 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -16,8 +16,8 @@ use account_utils::{ ZeroizeString, }; use eth2_keystore::Keystore; -use lighthouse_metrics::set_gauge; use lockfile::{Lockfile, LockfileError}; +use metrics::set_gauge; use parking_lot::{MappedMutexGuard, Mutex, MutexGuard}; use reqwest::{Certificate, Client, Error as ReqwestError, Identity}; use slog::{debug, error, info, warn, Logger}; diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index dff50582dfe..05ec1e53aa7 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -1,5 +1,6 @@ mod attestation_service; mod 
beacon_node_fallback; +mod beacon_node_health; mod block_service; mod check_synced; mod cli; @@ -20,17 +21,17 @@ pub mod initialized_validators; pub mod validator_store; pub use beacon_node_fallback::ApiTopic; +pub use beacon_node_health::BeaconNodeSyncDistanceTiers; pub use cli::cli_app; pub use config::Config; use initialized_validators::InitializedValidators; -use lighthouse_metrics::set_gauge; +use metrics::set_gauge; use monitoring_api::{MonitoringHttpClient, ProcessType}; use sensitive_url::SensitiveUrl; pub use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use crate::beacon_node_fallback::{ - start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, OfflineOnFailure, - RequireSynced, + start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, }; use crate::doppelganger_service::DoppelgangerService; use crate::graffiti_file::GraffitiFile; @@ -364,15 +365,21 @@ impl ProductionValidatorClient { .collect::<Result<Vec<_>, String>>()?; let num_nodes = beacon_nodes.len(); + // User order of `beacon_nodes` is preserved, so `index` corresponds to the position of + // the node in `--beacon-nodes`. let candidates = beacon_nodes .into_iter() - .map(CandidateBeaconNode::new) + .enumerate() + .map(|(index, node)| CandidateBeaconNode::new(node, index)) .collect(); let proposer_nodes_num = proposer_nodes.len(); + // User order of `proposer_nodes` is preserved, so `index` corresponds to the position of + // the node in `--proposer-nodes`. let proposer_candidates = proposer_nodes .into_iter() - .map(CandidateBeaconNode::new) + .enumerate() + .map(|(index, node)| CandidateBeaconNode::new(node, index)) .collect(); // Set the count for beacon node fallbacks excluding the primary beacon node. @@ -394,6 +401,7 @@ impl ProductionValidatorClient { let mut beacon_nodes: BeaconNodeFallback<_, E> = BeaconNodeFallback::new( candidates, + config.beacon_node_fallback, config.broadcast_topics.clone(), context.eth2_config.spec.clone(), log.clone(), @@ -401,6 +409,7 @@ impl ProductionValidatorClient { let mut proposer_nodes: BeaconNodeFallback<_, E> = BeaconNodeFallback::new( proposer_candidates, + config.beacon_node_fallback, config.broadcast_topics.clone(), context.eth2_config.spec.clone(), log.clone(), @@ -563,6 +572,7 @@ impl ProductionValidatorClient { let ctx = Arc::new(http_api::Context { task_executor: self.context.executor.clone(), api_secret, + block_service: Some(self.block_service.clone()), validator_store: Some(self.validator_store.clone()), validator_dir: Some(self.config.validator_dir.clone()), secrets_dir: Some(self.config.secrets_dir.clone()), @@ -655,10 +665,10 @@ impl ProductionValidatorClient { proposer_nodes.update_all_candidates().await; let num_available = beacon_nodes.num_available().await; - let num_total = beacon_nodes.num_total(); + let num_total = beacon_nodes.num_total().await; let proposer_available = proposer_nodes.num_available().await; - let proposer_total = proposer_nodes.num_total(); + let proposer_total = proposer_nodes.num_total().await; if proposer_total > 0 && proposer_available == 0 { warn!( @@ -704,11 +714,7 @@ async fn init_from_beacon_node( let genesis = loop { match beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |node| async move { node.get_beacon_genesis().await }, - ) + .first_success(|node| async move { node.get_beacon_genesis().await }) .await { Ok(genesis) => break genesis.data, @@ -795,11 +801,7 @@ async fn poll_whilst_waiting_for_genesis( ) -> Result<(), String> { loop { match beacon_nodes -
.first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { beacon_node.get_lighthouse_staking().await }, - ) + .first_success(|beacon_node| async move { beacon_node.get_lighthouse_staking().await }) .await { Ok(is_staking) => { diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index 819201978f8..cda13a5e63c 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -1,7 +1,7 @@ use crate::http_metrics; use crate::{DutiesService, ProductionValidatorClient}; -use lighthouse_metrics::set_gauge; -use slog::{error, info, Logger}; +use metrics::set_gauge; +use slog::{debug, error, info, Logger}; use slot_clock::SlotClock; use tokio::time::{sleep, Duration}; use types::EthSpec; @@ -39,25 +39,32 @@ async fn notify( duties_service: &DutiesService, log: &Logger, ) { - let num_available = duties_service.beacon_nodes.num_available().await; + let (candidate_info, num_available, num_synced) = + duties_service.beacon_nodes.get_notifier_info().await; + let num_total = candidate_info.len(); + let num_synced_fallback = num_synced.saturating_sub(1); + set_gauge( &http_metrics::metrics::AVAILABLE_BEACON_NODES_COUNT, num_available as i64, ); - let num_synced = duties_service.beacon_nodes.num_synced().await; set_gauge( &http_metrics::metrics::SYNCED_BEACON_NODES_COUNT, num_synced as i64, ); - let num_total = duties_service.beacon_nodes.num_total(); set_gauge( &http_metrics::metrics::TOTAL_BEACON_NODES_COUNT, num_total as i64, ); if num_synced > 0 { + let primary = candidate_info + .first() + .map(|candidate| candidate.endpoint.as_str()) + .unwrap_or("None"); info!( log, "Connected to beacon node(s)"; + "primary" => primary, "total" => num_total, "available" => num_available, "synced" => num_synced, @@ -71,13 +78,36 @@ async fn notify( "synced" => num_synced, ) } - let num_synced_fallback = duties_service.beacon_nodes.num_synced_fallback().await; if num_synced_fallback > 0 { set_gauge(&http_metrics::metrics::ETH2_FALLBACK_CONNECTED, 1); } else { set_gauge(&http_metrics::metrics::ETH2_FALLBACK_CONNECTED, 0); } + for info in candidate_info { + if let Ok(health) = info.health { + debug!( + log, + "Beacon node info"; + "status" => "Connected", + "index" => info.index, + "endpoint" => info.endpoint, + "head_slot" => %health.head, + "is_optimistic" => ?health.optimistic_status, + "execution_engine_status" => ?health.execution_status, + "health_tier" => %health.health_tier, + ); + } else { + debug!( + log, + "Beacon node info"; + "status" => "Disconnected", + "index" => info.index, + "endpoint" => info.endpoint, + ); + } + } + if let Some(slot) = duties_service.slot_clock.now() { let epoch = slot.epoch(E::slots_per_epoch()); diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 474f9f47609..010c651c25d 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -1,6 +1,5 @@ -use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use crate::validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}; -use crate::OfflineOnFailure; use bls::PublicKeyBytes; use environment::RuntimeContext; use parking_lot::RwLock; @@ -342,16 +341,11 @@ impl PreparationService { let preparation_entries = preparation_data.as_slice(); match self .beacon_nodes - .request( - RequireSynced::No, - OfflineOnFailure::Yes, - 
ApiTopic::Subscriptions, - |beacon_node| async move { - beacon_node - .post_validator_prepare_beacon_proposer(preparation_entries) - .await - }, - ) + .request(ApiTopic::Subscriptions, |beacon_node| async move { + beacon_node + .post_validator_prepare_beacon_proposer(preparation_entries) + .await + }) .await { Ok(()) => debug!( @@ -477,13 +471,9 @@ impl PreparationService { for batch in signed.chunks(self.validator_registration_batch_size) { match self .beacon_nodes - .broadcast( - RequireSynced::No, - OfflineOnFailure::No, - |beacon_node| async move { - beacon_node.post_validator_register_validator(batch).await - }, - ) + .broadcast(|beacon_node| async move { + beacon_node.post_validator_register_validator(batch).await + }) .await { Ok(()) => info!( diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index f7abb3855a3..5c02998e3fc 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -1,8 +1,7 @@ -use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use crate::{ duties_service::DutiesService, validator_store::{Error as ValidatorStoreError, ValidatorStore}, - OfflineOnFailure, }; use environment::RuntimeContext; use eth2::types::BlockId; @@ -180,8 +179,6 @@ impl SyncCommitteeService { let response = self .beacon_nodes .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, |beacon_node| async move { match beacon_node.get_beacon_blocks_root(BlockId::Head).await { Ok(Some(block)) if block.execution_optimistic == Some(false) => { @@ -299,16 +296,11 @@ impl SyncCommitteeService { .collect::<Vec<_>>(); self.beacon_nodes - .request( - RequireSynced::No, - OfflineOnFailure::Yes, - ApiTopic::SyncCommittee, - |beacon_node| async move { - beacon_node - .post_beacon_pool_sync_committee_signatures(committee_signatures) - .await - }, - ) + .request(ApiTopic::SyncCommittee, |beacon_node| async move { + beacon_node + .post_beacon_pool_sync_committee_signatures(committee_signatures) + .await + }) .await .map_err(|e| { error!( @@ -371,21 +363,17 @@ impl SyncCommitteeService { let contribution = &self .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let sync_contribution_data = SyncContributionData { - slot, - beacon_block_root, - subcommittee_index: subnet_id.into(), - }; + .first_success(|beacon_node| async move { + let sync_contribution_data = SyncContributionData { + slot, + beacon_block_root, + subcommittee_index: subnet_id.into(), + }; - beacon_node - .get_validator_sync_committee_contribution::<E>(&sync_contribution_data) - .await - }, - ) + beacon_node + .get_validator_sync_committee_contribution::<E>(&sync_contribution_data) + .await + }) .await .map_err(|e| { crit!( @@ -453,15 +441,11 @@ impl SyncCommitteeService { // Publish to the beacon node.
self.beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - beacon_node - .post_validator_contribution_and_proofs(signed_contributions) - .await - }, - ) + .first_success(|beacon_node| async move { + beacon_node + .post_validator_contribution_and_proofs(signed_contributions) + .await + }) .await .map_err(|e| { error!( @@ -595,16 +579,11 @@ impl SyncCommitteeService { if let Err(e) = self .beacon_nodes - .request( - RequireSynced::No, - OfflineOnFailure::Yes, - ApiTopic::Subscriptions, - |beacon_node| async move { - beacon_node - .post_validator_sync_committee_subscriptions(subscriptions_slice) - .await - }, - ) + .request(ApiTopic::Subscriptions, |beacon_node| async move { + beacon_node + .post_validator_sync_committee_subscriptions(subscriptions_slice) + .await + }) .await { error!( diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 8a9e125936e..af59ad98924 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -19,8 +19,8 @@ use task_executor::TaskExecutor; use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, - Domain, Epoch, EthSpec, Fork, ForkName, Graffiti, Hash256, PublicKeyBytes, SelectionProof, - Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, + Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, PublicKeyBytes, SelectionProof, Signature, + SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit, @@ -85,7 +85,7 @@ impl ValidatorStore { validators: InitializedValidators, slashing_protection: SlashingDatabase, genesis_validators_root: Hash256, - spec: ChainSpec, + spec: Arc, doppelganger_service: Option>, slot_clock: T, config: &Config, @@ -97,7 +97,7 @@ impl ValidatorStore { slashing_protection, slashing_protection_last_prune: Arc::new(Mutex::new(Epoch::new(0))), genesis_validators_root, - spec: Arc::new(spec), + spec, log, doppelganger_service, slot_clock, @@ -353,17 +353,9 @@ impl ValidatorStore { fn signing_context(&self, domain: Domain, signing_epoch: Epoch) -> SigningContext { if domain == Domain::VoluntaryExit { - match self.spec.fork_name_at_epoch(signing_epoch) { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - SigningContext { - domain, - epoch: signing_epoch, - fork: self.fork(signing_epoch), - genesis_validators_root: self.genesis_validators_root, - } - } + if self.spec.fork_name_at_epoch(signing_epoch).deneb_enabled() { // EIP-7044 - ForkName::Deneb | ForkName::Electra => SigningContext { + SigningContext { domain, epoch: signing_epoch, fork: Fork { @@ -372,7 +364,14 @@ impl ValidatorStore { epoch: signing_epoch, }, genesis_validators_root: self.genesis_validators_root, - }, + } + } else { + SigningContext { + domain, + epoch: signing_epoch, + fork: self.fork(signing_epoch), + genesis_validators_root: self.genesis_validators_root, + } } } else { SigningContext { diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index d06fce1d094..37a6040a9b0 100644 --- a/validator_manager/src/create_validators.rs +++ 
b/validator_manager/src/create_validators.rs @@ -112,7 +112,9 @@ pub fn cli_app() -> Command { "When provided don't generate the deposits JSON file that is \ commonly used for submitting validator deposits via a web UI. \ Using this flag will save several seconds per validator if the \ - user has an alternate strategy for submitting deposits.", + user has an alternate strategy for submitting deposits. \ + If used, the --force-bls-withdrawal-credentials flag is also required \ + to ensure users are aware that an --eth1-withdrawal-address is not set.", ) .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER)
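For reference, the tier arithmetic exercised by the `4,4,4` test at the top of this section and described in the `--beacon-nodes-sync-tolerances` help text can be sketched as below. This is a minimal, hypothetical illustration, not the PR's implementation: the names `SyncDistanceTier` and `distance_tier` are invented here, and only the range boundaries mirror the source.

#[derive(Debug, PartialEq)]
enum SyncDistanceTier {
    Synced,
    Small,
    Medium,
    Large,
}

// With tolerances `a,b,c`, a node whose head is `distance` slots behind falls into:
// Synced: 0..=a, Small: a+1..=a+b, Medium: a+b+1..=a+b+c, Large: anything beyond.
fn distance_tier(distance: u64, (a, b, c): (u64, u64, u64)) -> SyncDistanceTier {
    if distance <= a {
        SyncDistanceTier::Synced
    } else if distance <= a + b {
        SyncDistanceTier::Small
    } else if distance <= a + b + c {
        SyncDistanceTier::Medium
    } else {
        SyncDistanceTier::Large
    }
}

fn main() {
    // Mirrors the `4,4,4` test above: 0..=4 Synced, 5..=8 Small, 9..=12 Medium, 13+ Large.
    assert_eq!(distance_tier(4, (4, 4, 4)), SyncDistanceTier::Synced);
    assert_eq!(distance_tier(8, (4, 4, 4)), SyncDistanceTier::Small);
    assert_eq!(distance_tier(12, (4, 4, 4)), SyncDistanceTier::Medium);
    assert_eq!(distance_tier(13, (4, 4, 4)), SyncDistanceTier::Large);
}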