diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index d1a8c9f6144..cd45bd6d98f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -80,7 +80,7 @@ jobs: - name: Set `make` command for lighthouse if: startsWith(matrix.binary, 'lighthouse') run: | - echo "MAKE_CMD=build-${{ matrix.cpu_arch }}-portable" >> $GITHUB_ENV + echo "MAKE_CMD=build-${{ matrix.cpu_arch }}" >> $GITHUB_ENV - name: Set `make` command for lcli if: startsWith(matrix.binary, 'lcli') @@ -124,9 +124,6 @@ jobs: push: true tags: | ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }} - ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-dev - ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-modern - ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-modern-dev - name: Build and push (lcli) if: startsWith(matrix.binary, 'lcli') diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 86f99b53e10..f1ec2e46551 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -78,13 +78,13 @@ jobs: if: matrix.arch == 'aarch64-unknown-linux-gnu' run: | cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-aarch64-portable + env CROSS_PROFILE=${{ matrix.profile }} make build-aarch64 - name: Build Lighthouse for x86_64-unknown-linux-gnu if: matrix.arch == 'x86_64-unknown-linux-gnu' run: | cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-x86_64-portable + env CROSS_PROFILE=${{ matrix.profile }} make build-x86_64 - name: Move cross-compiled binary if: contains(matrix.arch, 'unknown-linux-gnu') @@ -113,7 +113,7 @@ jobs: echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab lighthouse-${{ 
needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz for ext in "tar.gz" "tar.gz.asc";\ do for f in *.$ext;\ - do cp $f "../${f%.$ext}-portable.$ext";\ + do cp $f "../${f%.$ext}.$ext";\ done;\ done mv *tar.gz* .. @@ -144,14 +144,6 @@ jobs: path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz compression-level: 0 - - name: Upload artifact (copy) - if: startsWith(matrix.arch, 'x86_64-windows') != true - uses: actions/upload-artifact@v4 - with: - name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz - path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz - compression-level: 0 - - name: Upload signature uses: actions/upload-artifact@v4 with: @@ -159,14 +151,6 @@ jobs: path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc compression-level: 0 - - name: Upload signature (copy) - if: startsWith(matrix.arch, 'x86_64-windows') != true - uses: actions/upload-artifact@v4 - with: - name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz.asc - path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz.asc - compression-level: 0 - draft-release: name: Draft Release needs: [build, extract-version] @@ -253,13 +237,9 @@ jobs: | System | Architecture | Binary | PGP Signature | |:---:|:---:|:---:|:---| | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz.asc) | - | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-apple-darwin-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ 
env.VERSION }}-x86_64-apple-darwin-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-apple-darwin-portable.tar.gz.asc) | | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz.asc) | - | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz.asc) | | | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz.asc) | - | | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz.asc) | | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION 
}}-x86_64-windows.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz.asc) | - | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz.asc) | | | | | | | **System** | **Option** | - | **Resource** | | | Docker | [${{ env.VERSION }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}/tags?page=1&ordering=last_updated&name=${{ env.VERSION }}) | [${{ env.IMAGE_NAME }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}) | diff --git a/Cargo.lock b/Cargo.lock index 9afb3635f12..d76b1987806 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -72,6 +72,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + [[package]] name = "adler32" version = "1.2.0" @@ -178,9 +184,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-consensus" -version = "0.2.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58047cc851e58c26224521d1ecda466e3d746ebca0274cd5427aa660a88c353" +checksum = "4177d135789e282e925092be8939d421b701c6d92c0a16679faa659d9166289d" dependencies = [ "alloy-eips", "alloy-primitives", @@ -188,15 +194,38 @@ dependencies = [ "c-kzg", ] +[[package]] +name = "alloy-eip2930" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +dependencies = [ + "alloy-primitives", + "alloy-rlp", +] + +[[package]] +name = "alloy-eip7702" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37d319bb544ca6caeab58c39cea8921c55d924d4f68f2c60f24f914673f9a74a" +dependencies = [ + "alloy-primitives", + "alloy-rlp", +] + [[package]] name = "alloy-eips" -version = "0.2.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a3e14fa0d152d00bd8daf605eb74ad397efb0f54bd7155585823dddb4401e" +checksum = "499ee14d296a133d142efd215eb36bf96124829fe91cf8f5d4e5ccdd381eae00" dependencies = [ + "alloy-eip2930", + "alloy-eip7702", "alloy-primitives", "alloy-rlp", "c-kzg", + "derive_more 1.0.0", "once_cell", "serde", "sha2 0.10.8", @@ -204,20 +233,24 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.7" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb3ead547f4532bc8af961649942f0b9c16ee9226e26caa3f38420651cc0bf4" +checksum = "a767e59c86900dd7c3ce3ecef04f3ace5ac9631ee150beb8b7d22f7fa3bbb2d7" dependencies = [ "alloy-rlp", + "arbitrary", "bytes", "cfg-if", "const-hex", - "derive_more", + "derive_arbitrary", + "derive_more 0.99.18", + "getrandom", "hex-literal", "itoa", "k256 0.13.3", "keccak-asm", "proptest", + "proptest-derive", "rand", "ruint", "serde", @@ -226,9 +259,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a43b18702501396fa9bcdeecd533bc85fac75150d308fc0f6800a01e6234a003" +checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -237,13 +270,13 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.7" +version = "0.3.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83524c1f6162fcb5b0decf775498a125066c86dda6066ed609531b0e912f85a" +checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -269,9 +302,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.14" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", @@ -284,33 +317,33 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +checksum = 
"5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -380,7 +413,7 @@ dependencies = [ "num-bigint", "num-traits", "paste", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "zeroize", ] @@ -484,15 +517,15 @@ checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "asn1-rs" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" +checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048" dependencies = [ "asn1-rs-derive", "asn1-rs-impl", @@ -506,13 +539,13 @@ dependencies = [ [[package]] name = "asn1-rs-derive" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" +checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", "synstructure", ] @@ -524,7 +557,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -546,9 +579,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.3" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" +checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ "async-lock", "cfg-if", @@ -557,10 +590,10 @@ 
dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.34", + "rustix 0.38.35", "slab", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -576,13 +609,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -593,20 +626,7 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ "futures", "pharos", - "rustc_version 0.4.0", -] - -[[package]] -name = "asynchronous-codec" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" -dependencies = [ - "bytes", - "futures-sink", - "futures-util", - "memchr", - "pin-project-lite", + "rustc_version 0.4.1", ] [[package]] @@ -652,7 +672,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -726,7 +746,7 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.7.4", "object", "rustc-demangle", ] @@ -777,10 +797,11 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" name = "beacon_chain" version = "0.2.0" dependencies = [ + "alloy-primitives", "bitvec 1.0.1", "bls", + "criterion", "derivative", - "environment", "eth1", "eth2", "eth2_network_config", @@ -835,8 +856,9 @@ dependencies = [ [[package]] name = "beacon_node" -version = "5.2.1" +version = "5.3.0" dependencies = [ + "account_utils", "beacon_chain", "clap", "clap_utils", @@ -910,9 +932,9 @@ dependencies = [ "proc-macro2", "quote", "regex", - "rustc-hash", + "rustc-hash 
1.1.0", "shlex", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -1004,14 +1026,16 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" name = "bls" version = "0.2.0" dependencies = [ + "alloy-primitives", "arbitrary", "blst", - "ethereum-types 0.14.1", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", + "fixed_bytes", "hex", "rand", + "safe_arith", "serde", "tree_hash", "zeroize", @@ -1029,6 +1053,22 @@ dependencies = [ "zeroize", ] +[[package]] +name = "blstrs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" +dependencies = [ + "blst", + "byte-slice-cast", + "ff 0.13.0", + "group 0.13.0", + "pairing", + "rand_core", + "serde", + "subtle", +] + [[package]] name = "bollard-stubs" version = "1.42.0-rc.3" @@ -1041,7 +1081,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "5.2.1" +version = "5.3.0" dependencies = [ "beacon_node", "clap", @@ -1107,9 +1147,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.1" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" dependencies = [ "serde", ] @@ -1137,23 +1177,24 @@ dependencies = [ [[package]] name = "c-kzg" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf100c4cea8f207e883ff91ca886d621d8a166cb04971dfaa9bb8fd99ed95df" +checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" dependencies = [ "blst", "cc", "glob", "hex", "libc", + "once_cell", "serde", ] [[package]] name = "camino" -version = "1.1.7" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" dependencies = [ "serde", ] @@ -1189,12 +1230,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.6" +version = "1.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aba8f4e9906c7ce3c73463f62a7f0c65183ada1a2d47e397cc8810827f9694f" +checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -1214,9 +1256,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfg_aliases" -version = "0.1.1" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chacha20" @@ -1314,9 +1356,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.10" +version = "4.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6b81fb3c84f5563d509c59b5a48d935f689e993afa90fe39047f05adef9142" +checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" dependencies = [ "clap_builder", "clap_derive", @@ -1324,9 +1366,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.10" +version = "4.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca6706fd5224857d9ac5eb9355f6683563cc0541c7cd9d014043b57cbec78ac" +checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" dependencies = [ "anstream", "anstyle", @@ -1337,30 +1379,30 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.8" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2bac35c6dafb060fd4d275d9a4ffae97917c13a6327903a8be2153cd964f7085" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] name = "clap_lex" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "clap_utils" version = "0.1.0" dependencies = [ + "alloy-primitives", "clap", "dirs", "eth2_network_config", - "ethereum-types 0.14.1", "ethereum_ssz", "hex", "serde", @@ -1411,18 +1453,18 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.50" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" dependencies = [ "cc", ] [[package]] name = "colorchoice" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] name = "compare_fields" @@ -1492,9 +1534,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core2" @@ -1507,13 +1549,65 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" dependencies = [ "libc", ] +[[package]] +name = "crate_crypto_internal_eth_kzg_bls12_381" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23be5253f1bd7fd411721a58712308c4747d0a41d040bbf8ebb78d52909a480" +dependencies = [ + "blst", + "blstrs", + "ff 0.13.0", + "group 0.13.0", + "pairing", + "subtle", +] + +[[package]] +name = "crate_crypto_internal_eth_kzg_erasure_codes" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2067ce20ef380ff33a93ce0af62bea22d35531b7f3586224d8d5176ec6cf578" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", + "crate_crypto_internal_eth_kzg_polynomial", +] + +[[package]] +name = "crate_crypto_internal_eth_kzg_maybe_rayon" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "558f50324ff016e5fe93113c78a72776d790d52f244ae9602a8013a67a189b66" + +[[package]] +name = "crate_crypto_internal_eth_kzg_polynomial" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e051c4f5aa5696bd7c504930485436ec62bf14f30a4c2d78504f3f8ec6a3daf" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", +] + +[[package]] +name = "crate_crypto_kzg_multi_open_fk20" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66ed6bf8993d9f3b361da4ed38f067503e08c0b948af0d6f4bb941dd647c0f2c" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", + "crate_crypto_internal_eth_kzg_maybe_rayon", + "crate_crypto_internal_eth_kzg_polynomial", + "hex", + "sha2 0.10.8", +] + [[package]] name = "crc32fast" version = "1.4.2" @@ -1683,12 +1777,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.4" +version = "3.4.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345" +checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" dependencies = [ - "nix 0.28.0", - "windows-sys 0.52.0", + "nix 0.29.0", + "windows-sys 0.59.0", ] [[package]] @@ -1702,7 +1796,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "subtle", "zeroize", ] @@ -1715,7 +1809,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -1763,7 +1857,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -1785,7 +1879,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -1950,7 +2044,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -1962,15 +2056,35 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", - "syn 2.0.72", + "rustc_version 0.4.1", + "syn 2.0.77", +] + +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] name = "diesel" -version = "2.2.2" +version = "2.2.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf97ee7261bb708fa3402fa9c17a54b70e90e3cb98afb3dc8999d5512cb03f94" +checksum = "65e13bab2796f412722112327f3e575601a3e9cdcbe426f0d30dbf43f3f5dc71" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -1982,15 +2096,15 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ff2be1e7312c858b2ef974f5c7089833ae57b5311b334b30923af58e5718d8" +checksum = "e7f2c3de51e2ba6bf2a648285696137aaf0f5f487bcbea93972fe8a364e131a4" dependencies = [ "diesel_table_macro_syntax", "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -2010,7 +2124,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -2101,7 +2215,7 @@ dependencies = [ "hex", "hkdf", "lazy_static", - "libp2p", + "libp2p 0.53.2", "lru", "more-asserts", "parking_lot 0.11.2", @@ -2123,7 +2237,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -2137,7 +2251,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -2148,9 +2262,9 @@ checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" [[package]] name = "dunce" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] name = "ecdsa" @@ -2207,13 +2321,13 @@ dependencies = [ name = "ef_tests" version = "0.2.0" dependencies = [ + "alloy-primitives", "beacon_chain", "bls", 
"compare_fields", "compare_fields_derive", "derivative", "eth2_network_config", - "ethereum-types 0.14.1", "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", @@ -2317,7 +2431,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -2411,6 +2525,7 @@ dependencies = [ "execution_layer", "futures", "lighthouse_metrics", + "logging", "merkle_proof", "parking_lot 0.12.3", "sensitive_url", @@ -2666,23 +2781,22 @@ dependencies = [ [[package]] name = "ethereum_hashing" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea7b408432c13f71af01197b1d3d0069c48a27bfcfbe72a81fc346e47f6defb" +checksum = "c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" dependencies = [ "cpufeatures", - "lazy_static", "ring 0.17.8", "sha2 0.10.8", ] [[package]] name = "ethereum_serde_utils" -version = "0.5.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de4d5951468846963c24e8744c133d44f39dff2cd3a233f6be22b370d08a524f" +checksum = "70cbccfccf81d67bff0ab36e591fa536c8a935b078a7b0e58c1d00d418332fc9" dependencies = [ - "ethereum-types 0.14.1", + "alloy-primitives", "hex", "serde", "serde_derive", @@ -2691,25 +2805,25 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.5.4" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d3627f83d8b87b432a5fad9934b4565260722a141a2c40f371f8080adec9425" +checksum = "1e999563461faea0ab9bc0024e5e66adcee35881f3d5062f52f31a4070fe1522" dependencies = [ - "ethereum-types 0.14.1", - "itertools 0.10.5", + "alloy-primitives", + "itertools 0.13.0", "smallvec", ] [[package]] name = "ethereum_ssz_derive" -version = "0.5.4" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eccd5378ec34a07edd3d9b48088cbc63309d0367d14ba10b0cdb1d1791080ea" +checksum = 
"f3deae99c8e74829a00ba7a92d49055732b3c1f093f2ccfa3cbc621679b6fa91" dependencies = [ - "darling 0.13.4", + "darling 0.20.10", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.77", ] [[package]] @@ -2895,12 +3009,12 @@ dependencies = [ "arc-swap", "builder_client", "bytes", - "environment", "eth2", "eth2_network_config", "ethereum_serde_utils", "ethereum_ssz", "ethers-core", + "fixed_bytes", "fork_choice", "hash-db", "hash256-std-hasher", @@ -2910,6 +3024,7 @@ dependencies = [ "kzg", "lighthouse_metrics", "lighthouse_version", + "logging", "lru", "parking_lot 0.12.3", "pretty_reqwest_error", @@ -2960,9 +3075,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fastrlp" @@ -3001,6 +3116,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ + "bitvec 1.0.1", "rand_core", "subtle", ] @@ -3024,7 +3140,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38e2275cc4e4fc009b0669731a1e5ab7ebf11f469eaede2bab9309a5b4d6057f" dependencies = [ "memoffset", - "rustc_version 0.4.0", + "rustc_version 0.4.1", ] [[package]] @@ -3053,22 +3169,29 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ - "arbitrary", "byteorder", "rand", "rustc-hex", "static_assertions", ] +[[package]] +name = "fixed_bytes" +version = "0.1.0" +dependencies = [ + "alloy-primitives", + "safe_arith", +] + [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.33" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide", + "miniz_oxide 0.8.0", ] [[package]] @@ -3099,6 +3222,7 @@ dependencies = [ "beacon_chain", "ethereum_ssz", "ethereum_ssz_derive", + "lighthouse_metrics", "proto_array", "slog", "state_processing", @@ -3215,7 +3339,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -3373,7 +3497,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -3387,7 +3511,7 @@ name = "gossipsub" version = "0.5.0" dependencies = [ "async-channel", - "asynchronous-codec 0.7.0", + "asynchronous-codec", "base64 0.21.7", "byteorder", "bytes", @@ -3399,10 +3523,10 @@ dependencies = [ "getrandom", "hashlink 0.9.1", "hex_fmt", - "libp2p", + "libp2p 0.54.1", "prometheus-client", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "quickcheck", "rand", "regex", @@ -3431,7 +3555,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", + "rand", "rand_core", + "rand_xorshift", "subtle", ] @@ -3447,7 +3573,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.2.6", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -3584,6 +3710,9 @@ name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] [[package]] name = "hex-literal" @@ -3768,7 +3897,6 @@ dependencies = [ "bs58 0.4.0", 
"bytes", "directory", - "environment", "eth1", "eth2", "ethereum_serde_utils", @@ -3786,6 +3914,7 @@ dependencies = [ "operation_pool", "parking_lot 0.12.3", "proto_array", + "rand", "safe_arith", "sensitive_url", "serde", @@ -3810,10 +3939,10 @@ name = "http_metrics" version = "0.1.0" dependencies = [ "beacon_chain", - "environment", "lighthouse_metrics", "lighthouse_network", "lighthouse_version", + "logging", "malloc_utils", "reqwest", "serde", @@ -3916,9 +4045,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" dependencies = [ "bytes", "futures-util", @@ -4100,9 +4229,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.6" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown 0.14.5", @@ -4178,20 +4307,20 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.4.0", "libc", "windows-sys 0.52.0", ] [[package]] name = "is_terminal_polyfill" -version = "1.70.0" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +checksum = 
"7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" @@ -4211,6 +4340,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -4259,22 +4397,23 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] [[package]] name = "jsonwebtoken" -version = "8.3.0" +version = "9.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" dependencies = [ "base64 0.21.7", - "pem 1.1.1", - "ring 0.16.20", + "js-sys", + "pem", + "ring 0.17.8", "serde", "serde_json", "simple_asn1", @@ -4318,9 +4457,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47a3633291834c4fbebf8673acbc1b04ec9d151418ff9b8e26dcd79129928758" +checksum = "422fbc7ff2f2f5bdffeb07718e5a5324dca72b0c9293d50df4026652385e3314" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -4342,13 +4481,17 @@ version = "0.1.0" dependencies = [ "arbitrary", "c-kzg", + "criterion", "derivative", + "eth2_network_config", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "hex", + "rust_eth_kzg", "serde", + "serde_json", "tree_hash", ] @@ -4369,7 +4512,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" 
[[package]] name = "lcli" -version = "5.2.1" +version = "5.3.0" dependencies = [ "account_utils", "beacon_chain", @@ -4394,6 +4537,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "sloggers", "snap", "state_processing", "store", @@ -4427,9 +4571,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.155" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libflate" @@ -4478,7 +4622,7 @@ source = "git+https://github.com/sigp/libmdbx-rs?rev=e6ff4b9377c1619bcf0bfdf52be dependencies = [ "bitflags 1.3.2", "byteorder", - "derive_more", + "derive_more 0.99.18", "indexmap 1.9.3", "libc", "mdbx-sys", @@ -4498,9 +4642,31 @@ dependencies = [ "futures-timer", "getrandom", "instant", - "libp2p-allow-block-list", - "libp2p-connection-limits", - "libp2p-core", + "libp2p-allow-block-list 0.3.0", + "libp2p-connection-limits 0.3.1", + "libp2p-core 0.41.3", + "libp2p-identity", + "libp2p-swarm 0.44.2", + "multiaddr", + "pin-project", + "rw-stream-sink", + "thiserror", +] + +[[package]] +name = "libp2p" +version = "0.54.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" +dependencies = [ + "bytes", + "either", + "futures", + "futures-timer", + "getrandom", + "libp2p-allow-block-list 0.4.0", + "libp2p-connection-limits 0.4.0", + "libp2p-core 0.42.0", "libp2p-dns", "libp2p-identify", "libp2p-identity", @@ -4509,7 +4675,7 @@ dependencies = [ "libp2p-noise", "libp2p-plaintext", "libp2p-quic", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "libp2p-tcp", "libp2p-upnp", "libp2p-yamux", @@ -4525,9 +4691,21 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6" 
dependencies = [ - "libp2p-core", + "libp2p-core 0.41.3", + "libp2p-identity", + "libp2p-swarm 0.44.2", + "void", +] + +[[package]] +name = "libp2p-allow-block-list" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" +dependencies = [ + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "void", ] @@ -4537,9 +4715,21 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7cd50a78ccfada14de94cbacd3ce4b0138157f376870f13d3a8422cd075b4fd" dependencies = [ - "libp2p-core", + "libp2p-core 0.41.3", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.44.2", + "void", +] + +[[package]] +name = "libp2p-connection-limits" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" +dependencies = [ + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm 0.45.1", "void", ] @@ -4571,16 +4761,44 @@ dependencies = [ "web-time", ] +[[package]] +name = "libp2p-core" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-identity", + "multiaddr", + "multihash", + "multistream-select", + "once_cell", + "parking_lot 0.12.3", + "pin-project", + "quick-protobuf", + "rand", + "rw-stream-sink", + "smallvec", + "thiserror", + "tracing", + "unsigned-varint 0.8.0", + "void", + "web-time", +] + [[package]] name = "libp2p-dns" -version = "0.41.1" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d17cbcf7160ff35c3e8e560de4a068fe9d6cb777ea72840e48eb76ff9576c4b6" +checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" dependencies = [ 
"async-trait", "futures", "hickory-resolver", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "parking_lot 0.12.3", "smallvec", @@ -4589,21 +4807,21 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.44.2" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5d635ebea5ca0c3c3e77d414ae9b67eccf2a822be06091b9c1a0d13029a1e2f" +checksum = "1711b004a273be4f30202778856368683bd9a83c4c7dcc8f848847606831a4e3" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "either", "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "lru", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "smallvec", "thiserror", "tracing", @@ -4635,17 +4853,17 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.45.1" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49007d9a339b3e1d7eeebc4d67c05dbf23d300b7d091193ec2d3f26802d7faf2" +checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" dependencies = [ "data-encoding", "futures", "hickory-proto", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "rand", "smallvec", "socket2 0.5.7", @@ -4656,50 +4874,50 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.14.1" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdac91ae4f291046a3b2660c039a2830c931f84df2ee227989af92f7692d3357" +checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" dependencies = [ "futures", - "instant", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identify", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "pin-project", "prometheus-client", + "web-time", ] [[package]] name = "libp2p-mplex" -version = "0.41.0" +version 
= "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e895765e27e30217b25f7cb7ac4686dad1ff80bf2fdeffd1d898566900a924" +checksum = "41187ab8f6c835ad864edf94224f666f636ee2d270601422c1441f739e0abccc" dependencies = [ - "asynchronous-codec 0.6.2", + "asynchronous-codec", "bytes", "futures", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "nohash-hasher", "parking_lot 0.12.3", "rand", "smallvec", "tracing", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", ] [[package]] name = "libp2p-noise" -version = "0.44.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecd0545ce077f6ea5434bcb76e8d0fe942693b4380aaad0d34a358c2bd05793" +checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "curve25519-dalek", "futures", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "multiaddr", "multihash", @@ -4717,31 +4935,31 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.41.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67330af40b67217e746d42551913cfb7ad04c74fa300fb329660a56318590b3f" +checksum = "5b63d926c6be56a2489e0e7316b17fe95a70bc5c4f3e85740bb3e67c0f3c6a44" dependencies = [ - "asynchronous-codec 0.6.2", + "asynchronous-codec", "bytes", "futures", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "quick-protobuf", - "quick-protobuf-codec 0.2.0", + "quick-protobuf-codec", "tracing", ] [[package]] name = "libp2p-quic" -version = "0.10.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c67296ad4e092e23f92aea3d2bdb6f24eab79c0929ed816dfb460ea2f4567d2b" +checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" dependencies = [ "bytes", "futures", "futures-timer", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", 
"libp2p-identity", "libp2p-tls", "parking_lot 0.12.3", @@ -4766,7 +4984,28 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core", + "libp2p-core 0.41.3", + "libp2p-identity", + "lru", + "multistream-select", + "once_cell", + "rand", + "smallvec", + "tracing", + "void", +] + +[[package]] +name = "libp2p-swarm" +version = "0.45.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-swarm-derive", "lru", @@ -4777,31 +5016,32 @@ dependencies = [ "tokio", "tracing", "void", + "web-time", ] [[package]] name = "libp2p-swarm-derive" -version = "0.34.2" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5daceb9dd908417b6dfcfe8e94098bc4aac54500c282e78120b885dadc09b999" +checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] name = "libp2p-tcp" -version = "0.41.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b2460fc2748919adff99ecbc1aab296e4579e41f374fb164149bd2c9e529d4c" +checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" dependencies = [ "futures", "futures-timer", "if-watch", "libc", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "socket2 0.5.7", "tokio", @@ -4810,13 +5050,13 @@ dependencies = [ [[package]] name = "libp2p-tls" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b7b831e55ce2aa6c354e6861a85fdd4dd0a2b97d5e276fabac0e4810a71776" +checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", - "libp2p-core", + "libp2p-core 0.42.0", 
"libp2p-identity", "rcgen", "ring 0.17.8", @@ -4829,15 +5069,15 @@ dependencies = [ [[package]] name = "libp2p-upnp" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccf04b0e3ff3de52d07d5fd6c3b061d0e7f908ffc683c32d9638caedce86fc8" +checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core", - "libp2p-swarm", + "libp2p-core 0.42.0", + "libp2p-swarm 0.45.1", "tokio", "tracing", "void", @@ -4845,13 +5085,13 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.45.1" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "200cbe50349a44760927d50b431d77bed79b9c0a3959de1af8d24a63434b71e5" +checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" dependencies = [ "either", "futures", - "libp2p-core", + "libp2p-core 0.42.0", "thiserror", "tracing", "yamux 0.12.1", @@ -4929,9 +5169,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.18" +version = "1.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" dependencies = [ "cc", "pkg-config", @@ -4940,7 +5180,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "5.2.1" +version = "5.3.0" dependencies = [ "account_manager", "account_utils", @@ -4990,6 +5230,7 @@ dependencies = [ name = "lighthouse_network" version = "0.2.0" dependencies = [ + "alloy-primitives", "async-channel", "bytes", "delay_map", @@ -5005,7 +5246,7 @@ dependencies = [ "gossipsub", "hex", "itertools 0.10.5", - "libp2p", + "libp2p 0.54.1", "libp2p-mplex", "lighthouse_metrics", "lighthouse_version", @@ -5135,9 +5376,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.3" +version = "0.12.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" dependencies = [ "hashbrown 0.14.5", ] @@ -5258,8 +5499,9 @@ dependencies = [ name = "merkle_proof" version = "0.2.0" dependencies = [ - "ethereum-types 0.14.1", + "alloy-primitives", "ethereum_hashing", + "fixed_bytes", "quickcheck", "quickcheck_macros", "safe_arith", @@ -5295,7 +5537,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd01039851e82f8799046eabbb354056283fb265c8ec0996af940f4e85a380ff" dependencies = [ "serde", - "toml 0.8.15", + "toml 0.8.19", ] [[package]] @@ -5311,17 +5553,17 @@ dependencies = [ [[package]] name = "milhouse" -version = "0.1.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3826d3602a3674b07e080ce1982350e454ec253d73f156bd927ac1b652293f4d" +checksum = "f68e33f98199224d1073f7c1468ea6abfea30736306fb79c7181a881e97ea32f" dependencies = [ + "alloy-primitives", "arbitrary", "derivative", - "ethereum-types 0.14.1", "ethereum_hashing", "ethereum_ssz", "ethereum_ssz_derive", - "itertools 0.10.5", + "itertools 0.13.0", "parking_lot 0.12.3", "rayon", "serde", @@ -5363,11 +5605,20 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +dependencies = [ + "adler2", +] + [[package]] name = "mio" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi 0.3.9", "libc", @@ -5546,6 +5797,7 @@ dependencies = [ name = "network" version = "0.2.0" dependencies = [ 
+ "alloy-primitives", "anyhow", "async-channel", "beacon_chain", @@ -5600,9 +5852,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ "bitflags 2.6.0", "cfg-if", @@ -5737,18 +5989,18 @@ dependencies = [ [[package]] name = "object" -version = "0.36.2" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f203fa8daa7bb185f760ae12bd8e097f63d17041dcdcaf675ac54cdf863170e" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] [[package]] name = "oid-registry" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" +checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9" dependencies = [ "asn1-rs", ] @@ -5826,7 +6078,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -5897,6 +6149,15 @@ dependencies = [ "sha2 0.10.8", ] +[[package]] +name = "pairing" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" +dependencies = [ + "group 0.13.0", +] + [[package]] name = "parity-scale-codec" version = "2.3.1" @@ -5943,7 +6204,7 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2", "quote", "syn 1.0.109", @@ -6041,15 +6302,6 @@ dependencies = [ 
"sha2 0.10.8", ] -[[package]] -name = "pem" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" -dependencies = [ - "base64 0.13.1", -] - [[package]] name = "pem" version = "3.0.4" @@ -6093,7 +6345,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ "futures", - "rustc_version 0.4.0", + "rustc_version 0.4.1", ] [[package]] @@ -6131,7 +6383,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -6208,17 +6460,17 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.2" +version = "3.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3ed00ed3fbf728b5816498ecd316d1716eecaced9c0c8d2c5a6740ca214985b" +checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.34", + "rustix 0.38.35", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6293,9 +6545,12 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "pq-sys" @@ -6362,11 +6617,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = 
"8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.21.1", + "toml_edit 0.22.20", ] [[package]] @@ -6428,7 +6683,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -6451,6 +6706,17 @@ dependencies = [ "unarray", ] +[[package]] +name = "proptest-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "proto_array" version = "0.2.0" @@ -6478,7 +6744,7 @@ checksum = "5e617cc9058daa5e1fe5a0d23ed745773a5ee354111dad1ec0235b0cc16b6730" dependencies = [ "cfg-if", "darwin-libproc", - "derive_more", + "derive_more 0.99.18", "glob", "mach2", "nix 0.24.3", @@ -6498,32 +6764,18 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-protobuf" version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +source = "git+https://github.com/sigp/quick-protobuf.git?rev=681f413312404ab6e51f0b46f39b0075c6f4ebfd#681f413312404ab6e51f0b46f39b0075c6f4ebfd" dependencies = [ "byteorder", ] -[[package]] -name = "quick-protobuf-codec" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" -dependencies = [ - "asynchronous-codec 0.6.2", - "bytes", - "quick-protobuf", - "thiserror", - "unsigned-varint 0.7.2", -] - [[package]] name = "quick-protobuf-codec" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" dependencies = [ - "asynchronous-codec 0.7.0", + 
"asynchronous-codec", "bytes", "quick-protobuf", "thiserror", @@ -6554,17 +6806,18 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.2" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" +checksum = "a2d2fb862b7ba45e615c1429def928f2e15f815bdf933b27a2d3824e224c1f46" dependencies = [ "bytes", "futures-io", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash", + "rustc-hash 2.0.0", "rustls 0.23.12", + "socket2 0.5.7", "thiserror", "tokio", "tracing", @@ -6572,14 +6825,14 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.3" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddf517c03a109db8100448a4be38d498df8a210a99fe0e1b9eaf39e78c640efe" +checksum = "ea0a9b3a42929fad8a7c3de7f86ce0814cfa893328157672680e9fb1145549c5" dependencies = [ "bytes", "rand", "ring 0.17.8", - "rustc-hash", + "rustc-hash 2.0.0", "rustls 0.23.12", "slab", "thiserror", @@ -6596,14 +6849,15 @@ dependencies = [ "libc", "once_cell", "socket2 0.5.7", + "tracing", "windows-sys 0.52.0", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -6706,7 +6960,7 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ - "pem 3.0.4", + "pem", "ring 0.16.20", "time", "yasna", @@ -6714,9 +6968,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6dd20d3cdeb9c7d2366a0b16b93b35b75aec15309fbeb7ce477138c9f68c8c0" 
+checksum = "58323dc32ea52a8ae105ff94bc0460c5d906307533ba3401aa63db3cbe491fe5" dependencies = [ "libc", ] @@ -6750,9 +7004,9 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", @@ -6761,9 +7015,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", @@ -6978,6 +7232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" dependencies = [ "alloy-rlp", + "arbitrary", "ark-ff 0.3.0", "ark-ff 0.4.2", "bytes", @@ -7015,6 +7270,20 @@ dependencies = [ "smallvec", ] +[[package]] +name = "rust_eth_kzg" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3291fd0d9c629a56537d74bbc1e7bcaf5be610f2f7b55af85c4fea843c6aeca3" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", + "crate_crypto_internal_eth_kzg_erasure_codes", + "crate_crypto_kzg_multi_open_fk20", + "hex", + "serde", + "serde_json", +] + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -7027,6 +7296,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" + [[package]] 
name = "rustc-hex" version = "2.1.0" @@ -7044,9 +7319,9 @@ dependencies = [ [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver 1.0.23", ] @@ -7076,9 +7351,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "a85d50532239da68e9addb745ba38ff4612a242c1c7ceea689c4bc7c2f43c36f" dependencies = [ "bitflags 2.6.0", "errno", @@ -7108,7 +7383,7 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki 0.102.7", "subtle", "zeroize", ] @@ -7122,7 +7397,7 @@ dependencies = [ "once_cell", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki 0.102.7", "subtle", "zeroize", ] @@ -7138,9 +7413,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.2" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ "base64 0.22.1", "rustls-pki-types", @@ -7148,9 +7423,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" @@ -7164,9 +7439,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.6" +version = "0.102.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -7237,7 +7512,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "cfg-if", - "derive_more", + "derive_more 0.99.18", "parity-scale-codec 3.6.12", "scale-info-derive", ] @@ -7248,7 +7523,7 @@ version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2", "quote", "syn 1.0.109", @@ -7400,9 +7675,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.204" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" dependencies = [ "serde_derive", ] @@ -7419,22 +7694,23 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] name = "serde_json" -version = "1.0.120" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" +checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -7457,14 
+7733,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] name = "serde_spanned" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" dependencies = [ "serde", ] @@ -7509,7 +7785,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.5.0", "itoa", "ryu", "serde", @@ -7575,9 +7851,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9b57fd861253bff08bb1919e995f90ba8f4889de2726091c8876f3a4e823b40" +checksum = "57d79b758b7cb2085612b11a235055e485605a5103faccdd633f35bd7aee69dd" dependencies = [ "cc", "cfg-if", @@ -7877,7 +8153,7 @@ dependencies = [ "curve25519-dalek", "rand_core", "ring 0.17.8", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "sha2 0.10.8", "subtle", ] @@ -7936,15 +8212,15 @@ dependencies = [ [[package]] name = "ssz_types" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "625b20de2d4b3891e6972f4ce5061cb11bd52b3479270c4b177c134b571194a9" +checksum = "35e0719d2b86ac738a55ae71a8429f52aa2741da988f1fd0975b4cc610fd1e08" dependencies = [ "arbitrary", "derivative", "ethereum_serde_utils", "ethereum_ssz", - "itertools 0.10.5", + "itertools 0.13.0", "serde", "serde_derive", "smallvec", @@ -8095,9 +8371,10 @@ dependencies = [ name = "swap_or_not_shuffle" version = "0.2.0" dependencies = [ + "alloy-primitives", "criterion", - "ethereum-types 0.14.1", "ethereum_hashing", + "fixed_bytes", ] 
[[package]] @@ -8113,9 +8390,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.72" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", @@ -8142,7 +8419,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -8232,14 +8509,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand", - "rustix 0.38.34", - "windows-sys 0.52.0", + "once_cell", + "rustix 0.38.35", + "windows-sys 0.59.0", ] [[package]] @@ -8268,7 +8546,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.34", + "rustix 0.38.35", "windows-sys 0.48.0", ] @@ -8322,7 +8600,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -8397,7 +8675,7 @@ dependencies = [ "once_cell", "pbkdf2 0.11.0", "rand", - "rustc-hash", + "rustc-hash 1.1.0", "sha2 0.10.8", "thiserror", "unicode-normalization", @@ -8441,9 +8719,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.1" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d040ac2b29ab03b09d4129c2f5bbd012a3ac2f79d38ff506a4bf8dd34b0eac8a" +checksum = 
"e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", @@ -8474,7 +8752,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -8572,21 +8850,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.15" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac2caab0bf757388c6c0ae23b3293fdb463fee59434529014f85e3263b995c28" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.16", + "toml_edit 0.22.20", ] [[package]] name = "toml_datetime" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] @@ -8597,33 +8875,22 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.5.0", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.21.1" +version = "0.22.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" dependencies = [ - "indexmap 2.2.6", - "toml_datetime", - "winnow 0.5.40", -] - -[[package]] -name = "toml_edit" -version = "0.22.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "278f3d518e152219c994ce877758516bca5e118eaed6996192a774fb9fbf0788" -dependencies = [ - "indexmap 2.2.6", + "indexmap 2.5.0", "serde", "serde_spanned", 
"toml_datetime", - "winnow 0.6.15", + "winnow 0.6.18", ] [[package]] @@ -8644,15 +8911,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -8686,7 +8953,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -8759,24 +9026,25 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134d6b24a5b829f30b5ee7de05ba7384557f5f6b00e29409cdf2392f93201bfa" +checksum = "373495c23db675a5192de8b610395e1bec324d596f9e6111192ce903dc11403a" dependencies = [ - "ethereum-types 0.14.1", + "alloy-primitives", "ethereum_hashing", "smallvec", ] [[package]] name = "tree_hash_derive" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce7bccc538359a213436af7bc95804bdbf1c2a21d80e22953cbe9e096837ff1" +checksum = "b0857056ca4eb5de8c417309be42bcff6017b47e86fbaddde609b4633f66061e" dependencies = [ - "darling 0.13.4", + "darling 0.20.10", + "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.77", ] [[package]] @@ -8825,11 +9093,11 @@ dependencies = [ "criterion", "derivative", "eth2_interop_keypairs", - "ethereum-types 0.14.1", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", 
"hex", "int_to_bytes", "itertools 0.10.5", @@ -8876,7 +9144,6 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" dependencies = [ - "arbitrary", "byteorder", "crunchy", "hex", @@ -8927,15 +9194,15 @@ dependencies = [ [[package]] name = "unicode-properties" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4259d9d4425d9f0661581b804cb85fe66a4c631cadd8f490d1c13a35d5d9291" +checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" [[package]] name = "unicode-xid" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" [[package]] name = "universal-hash" @@ -8968,10 +9235,6 @@ name = "unsigned-varint" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" -dependencies = [ - "asynchronous-codec 0.6.2", - "bytes", -] [[package]] name = "unsigned-varint" @@ -8979,6 +9242,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" dependencies = [ + "asynchronous-codec", "bytes", "tokio-util", ] @@ -9146,9 +9410,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "void" @@ -9201,7 +9465,7 @@ dependencies = [ "mime_guess", 
"percent-encoding", "pin-project", - "rustls-pemfile 2.1.2", + "rustls-pemfile 2.1.3", "scoped-tls", "serde", "serde_json", @@ -9246,34 +9510,35 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if", "js-sys", @@ -9283,9 +9548,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9293,22 +9558,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-streams" @@ -9374,9 +9639,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", @@ -9402,6 +9667,7 @@ dependencies = [ "eth2_keystore", "eth2_network_config", "futures", + "logging", "parking_lot 0.12.3", "reqwest", "serde", @@ -9464,11 +9730,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -9544,6 +9810,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -9733,9 +10008,9 @@ 
dependencies = [ [[package]] name = "winnow" -version = "0.6.15" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "557404e450152cd6795bb558bca69e43c585055f4606e3bcae5894fc6dac9ba0" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" dependencies = [ "memchr", ] @@ -9761,7 +10036,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "send_wrapper", "thiserror", "wasm-bindgen", @@ -9815,9 +10090,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" +checksum = "539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601" [[package]] name = "xmltree" @@ -9885,6 +10160,7 @@ version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] @@ -9896,7 +10172,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -9916,7 +10192,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -9960,9 +10236,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.12+zstd.1.5.6" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4e40c320c3cb459d9a9ff6de98cff88f4751ee9275d140e2be94a2b74e4c13" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index cf3fd0ab043..8c6aa308c1a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,6 +52,7 @@ members 
= [ "database_manager", "consensus/int_to_bytes", + "consensus/fixed_bytes", "consensus/fork_choice", "consensus/proto_array", "consensus/safe_arith", @@ -94,9 +95,9 @@ resolver = "2" edition = "2021" [workspace.dependencies] -alloy-primitives = "0.7.7" +alloy-primitives = "0.8" alloy-rlp = "0.3.4" -alloy-consensus = "0.2.0" +alloy-consensus = "0.3.0" anyhow = "1" arbitrary = { version = "1", features = ["derive"] } async-channel = "1.9.0" @@ -107,21 +108,21 @@ bytes = "1" clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } # Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable # feature ourselves when desired. -c-kzg = { version = "1", default-features = false } +c-kzg = { version = "1", default-features = false } compare_fields_derive = { path = "common/compare_fields_derive" } criterion = "0.5" delay_map = "0.3" derivative = "2" dirs = "3" either = "1.9" +rust_eth_kzg = "0.5.1" discv5 = { version = "0.4.1", features = ["libp2p"] } env_logger = "0.9" error-chain = "0.12" -ethereum-types = "0.14" -ethereum_hashing = "0.6.0" -ethereum_serde_utils = "0.5.2" -ethereum_ssz = "0.5" -ethereum_ssz_derive = "0.5" +ethereum_hashing = "0.7.0" +ethereum_serde_utils = "0.7" +ethereum_ssz = "0.7" +ethereum_ssz_derive = "0.7" ethers-core = "1" ethers-providers = { version = "1", default-features = false } exit-future = "0.2" @@ -136,7 +137,7 @@ libsecp256k1 = "0.7" log = "0.4" lru = "0.12" maplit = "1" -milhouse = "0.1" +milhouse = "0.3" num_cpus = "1" parking_lot = "0.12" paste = "1" @@ -162,7 +163,7 @@ slog-term = "2" sloggers = { version = "2", features = ["json"] } smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" -ssz_types = "0.6" +ssz_types = "0.8" strum = { version = "0.24", features = ["derive"] } superstruct = "0.8" syn = "1" @@ -176,8 +177,8 @@ tracing-appender = "0.2" tracing-core = "0.1" tracing-log = "0.2" tracing-subscriber = { version = "0.3", features = ["env-filter"] } 
-tree_hash = "0.6" -tree_hash_derive = "0.6" +tree_hash = "0.8" +tree_hash_derive = "0.8" url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } warp = { version = "0.3.7", default-features = false, features = ["tls"] } @@ -204,6 +205,7 @@ eth2_keystore = { path = "crypto/eth2_keystore" } eth2_network_config = { path = "common/eth2_network_config" } eth2_wallet = { path = "crypto/eth2_wallet" } execution_layer = { path = "beacon_node/execution_layer" } +fixed_bytes = { path = "consensus/fixed_bytes" } filesystem = { path = "common/filesystem" } fork_choice = { path = "consensus/fork_choice" } genesis = { path = "beacon_node/genesis" } @@ -244,3 +246,6 @@ inherits = "release" lto = "fat" codegen-units = 1 incremental = false + +[patch.crates-io] +quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "681f413312404ab6e51f0b46f39b0075c6f4ebfd" } diff --git a/Makefile b/Makefile index d18a6738803..d94c2df2613 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,6 @@ AARCH64_TAG = "aarch64-unknown-linux-gnu" BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly -CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 # List of features to use when cross-compiling. Can be overridden via the environment. CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc @@ -61,17 +60,9 @@ install-lcli: # - The current user is in the `docker` group. # # The resulting binaries will be created in the `target/` directory. -# -# The *-portable options compile the blst library *without* the use of some -# optimized CPU functions that may not be available on some systems. This -# results in a more portable binary with ~20% slower BLS verification. 
build-x86_64: - cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked -build-x86_64-portable: cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked -build-aarch64-portable: cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-lcli-x86_64: @@ -96,12 +87,8 @@ build-release-tarballs: [ -d $(BIN_DIR) ] || mkdir -p $(BIN_DIR) $(MAKE) build-x86_64 $(call tarball_release_binary,$(BUILD_PATH_X86_64),$(X86_64_TAG),"") - $(MAKE) build-x86_64-portable - $(call tarball_release_binary,$(BUILD_PATH_X86_64),$(X86_64_TAG),"-portable") $(MAKE) build-aarch64 $(call tarball_release_binary,$(BUILD_PATH_AARCH64),$(AARCH64_TAG),"") - $(MAKE) build-aarch64-portable - $(call tarball_release_binary,$(BUILD_PATH_AARCH64),$(AARCH64_TAG),"-portable") # Runs the full workspace tests in **release**, without downloading any additional # test vectors. @@ -232,13 +219,6 @@ lint: lint-fix: EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint -nightly-lint: - cp .github/custom/clippy.toml . - cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests --release -- \ - -A clippy::all \ - -D clippy::disallowed_from_async - rm clippy.toml - # Runs the makefile in the `ef_tests` repo. 
# # May download and extract an archive of test vectors from the ethereum diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index cfe4d8e94ad..ec5af1e2ece 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -1,8 +1,8 @@ use crate::common::read_wallet_name_from_cli; -use crate::wallet::create::STDIN_INPUTS_FLAG; use crate::{SECRETS_DIR_FLAG, WALLETS_DIR_FLAG}; use account_utils::{ random_password, read_password_from_user, strip_off_newlines, validator_definitions, PlainText, + STDIN_INPUTS_FLAG, }; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; @@ -114,16 +114,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new(STDIN_INPUTS_FLAG) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .hide(cfg!(windows)) - .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty.") - .display_order(0) - .action(ArgAction::SetTrue) - ) } pub fn cli_run( diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 277d2ae8eca..3fb0e50d225 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -1,4 +1,4 @@ -use crate::wallet::create::STDIN_INPUTS_FLAG; +use account_utils::STDIN_INPUTS_FLAG; use bls::{Keypair, PublicKey}; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; @@ -74,15 +74,6 @@ pub fn cli_app() -> Command { .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) ) - .arg( - Arg::new(STDIN_INPUTS_FLAG) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .hide(cfg!(windows)) - .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty.") - .display_order(0) - ) } pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result<(), String> { diff --git a/account_manager/src/validator/import.rs 
b/account_manager/src/validator/import.rs index a7c72679f74..19ab5ad60ac 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -1,4 +1,4 @@ -use crate::wallet::create::{PASSWORD_FLAG, STDIN_INPUTS_FLAG}; +use crate::wallet::create::PASSWORD_FLAG; use account_utils::validator_definitions::SigningDefinition; use account_utils::{ eth2_keystore::Keystore, @@ -7,7 +7,7 @@ use account_utils::{ recursively_find_voting_keystores, PasswordStorage, ValidatorDefinition, ValidatorDefinitions, CONFIG_FILENAME, }, - ZeroizeString, + ZeroizeString, STDIN_INPUTS_FLAG, }; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; @@ -59,15 +59,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0), ) - .arg( - Arg::new(STDIN_INPUTS_FLAG) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .hide(cfg!(windows)) - .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty.") - .display_order(0), - ) .arg( Arg::new(REUSE_PASSWORD_FLAG) .long(REUSE_PASSWORD_FLAG) @@ -178,7 +169,13 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let password_opt = loop { if let Some(password) = previous_password.clone() { eprintln!("Reuse previous password."); - break Some(password); + if check_password_on_keystore(&keystore, &password)? { + break Some(password); + } else { + eprintln!("Reused password incorrect. Retry!"); + previous_password = None; + continue; + } } eprintln!(); eprintln!("{}", PASSWORD_PROMPT); @@ -201,20 +198,12 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin } }; - match keystore.decrypt_keypair(password.as_ref()) { - Ok(_) => { - eprintln!("Password is correct."); - eprintln!(); - sleep(Duration::from_secs(1)); // Provides nicer UX. 
- if reuse_password { - previous_password = Some(password.clone()); - } - break Some(password); - } - Err(eth2_keystore::Error::InvalidPassword) => { - eprintln!("Invalid password"); + // Check if the password unlocks the keystore + if check_password_on_keystore(&keystore, &password)? { + if reuse_password { + previous_password = Some(password.clone()); } - Err(e) => return Err(format!("Error whilst decrypting keypair: {:?}", e)), + break Some(password); } }; @@ -317,3 +306,27 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin Ok(()) } + +/// Checks if the given password unlocks the keystore. +/// +/// Returns `Ok(true)` if password unlocks the keystore successfully. +/// Returns `Ok(false` if password is incorrect. +/// Otherwise, returns the keystore error. +fn check_password_on_keystore( + keystore: &Keystore, + password: &ZeroizeString, +) -> Result { + match keystore.decrypt_keypair(password.as_ref()) { + Ok(_) => { + eprintln!("Password is correct."); + eprintln!(); + sleep(Duration::from_secs(1)); // Provides nicer UX. 
+ Ok(true) + } + Err(eth2_keystore::Error::InvalidPassword) => { + eprintln!("Invalid password"); + Ok(false) + } + Err(e) => Err(format!("Error whilst decrypting keypair: {:?}", e)), + } +} diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index b36b10ab008..ddf754edac9 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -1,9 +1,8 @@ use super::create::STORE_WITHDRAW_FLAG; use crate::validator::create::COUNT_FLAG; -use crate::wallet::create::STDIN_INPUTS_FLAG; use crate::SECRETS_DIR_FLAG; use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; -use account_utils::{random_password, read_mnemonic_from_cli}; +use account_utils::{random_password, read_mnemonic_from_cli, STDIN_INPUTS_FLAG}; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; use directory::ensure_dir_exists; @@ -76,15 +75,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new(STDIN_INPUTS_FLAG) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .hide(cfg!(windows)) - .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty.") - .display_order(0) - ) } pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index 12aa5d3801a..b22007050fd 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -2,6 +2,7 @@ use crate::common::read_wallet_name_from_cli; use crate::WALLETS_DIR_FLAG; use account_utils::{ is_password_sufficiently_complex, random_password, read_password_from_user, strip_off_newlines, + STDIN_INPUTS_FLAG, }; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2_wallet::{ @@ -20,7 +21,6 @@ pub const NAME_FLAG: &str = "name"; pub const PASSWORD_FLAG: &str = "password-file"; pub const TYPE_FLAG: 
&str = "type"; pub const MNEMONIC_FLAG: &str = "mnemonic-output-path"; -pub const STDIN_INPUTS_FLAG: &str = "stdin-inputs"; pub const MNEMONIC_LENGTH_FLAG: &str = "mnemonic-length"; pub const MNEMONIC_TYPES: &[MnemonicType] = &[ MnemonicType::Words12, @@ -83,14 +83,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new(STDIN_INPUTS_FLAG) - .action(ArgAction::SetTrue) - .hide(cfg!(windows)) - .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty.") - .display_order(0) - ) .arg( Arg::new(MNEMONIC_LENGTH_FLAG) .long(MNEMONIC_LENGTH_FLAG) diff --git a/account_manager/src/wallet/recover.rs b/account_manager/src/wallet/recover.rs index b9641f11521..766d5dbe0cb 100644 --- a/account_manager/src/wallet/recover.rs +++ b/account_manager/src/wallet/recover.rs @@ -1,6 +1,6 @@ -use crate::wallet::create::{create_wallet_from_mnemonic, STDIN_INPUTS_FLAG}; +use crate::wallet::create::create_wallet_from_mnemonic; use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG}; -use account_utils::read_mnemonic_from_cli; +use account_utils::{read_mnemonic_from_cli, STDIN_INPUTS_FLAG}; use clap::{Arg, ArgAction, ArgMatches, Command}; use std::path::PathBuf; @@ -56,14 +56,6 @@ pub fn cli_app() -> Command { .default_value(HD_TYPE) .display_order(0), ) - .arg( - Arg::new(STDIN_INPUTS_FLAG) - .action(ArgAction::SetTrue) - .hide(cfg!(windows)) - .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty.") - .display_order(0), - ) } pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> { diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index a5fd29c971f..bb946e3c5a2 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "5.2.1" +version = "5.3.0" authors = [ "Paul Hauner ", "Age Manning ", "Age Manning ( + num_of_blobs: usize, + spec: &ChainSpec, +) -> 
(SignedBeaconBlock, BlobsList) { + let mut block = BeaconBlock::Deneb(BeaconBlockDeneb::empty(spec)); + let mut body = block.body_mut(); + let blob_kzg_commitments = body.blob_kzg_commitments_mut().unwrap(); + *blob_kzg_commitments = + KzgCommitments::::new(vec![KzgCommitment::empty_for_testing(); num_of_blobs]).unwrap(); + + let signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + + let blobs = (0..num_of_blobs) + .map(|_| Blob::::default()) + .collect::>() + .into(); + + (signed_block, blobs) +} + +fn all_benches(c: &mut Criterion) { + type E = MainnetEthSpec; + let spec = Arc::new(E::default_spec()); + + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + let kzg = Arc::new(Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg")); + + for blob_count in [1, 2, 3, 6] { + let kzg = kzg.clone(); + let (signed_block, blob_sidecars) = create_test_block_and_blobs::(blob_count, &spec); + + let column_sidecars = + blobs_to_data_column_sidecars(&blob_sidecars, &signed_block, &kzg.clone(), &spec) + .unwrap(); + + let spec = spec.clone(); + + c.bench_function(&format!("reconstruct_{}", blob_count), |b| { + b.iter(|| { + black_box(reconstruct_data_columns( + &kzg, + &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], + spec.as_ref(), + )) + }) + }); + } +} + +criterion_group!(benches, all_benches); +criterion_main!(benches); diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index b5012e8e4e4..7f356bd6211 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -19,8 +19,8 @@ use types::{ beacon_state::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, }, - BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, 
RelativeEpoch, - Slot, + BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, + Hash256, RelativeEpoch, Slot, }; type JustifiedCheckpoint = Checkpoint; diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index d63a3ee7ead..0ce33f16891 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -1,5 +1,5 @@ use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BlockProcessStatus}; -use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; +use execution_layer::{ExecutionLayer, ExecutionPayloadBody}; use slog::{crit, debug, error, Logger}; use std::collections::HashMap; use std::sync::Arc; @@ -57,7 +57,7 @@ struct BodiesByRange { struct BlockParts { blinded_block: Box>, header: Box>, - body: Option>>, + body: Option>>, } impl BlockParts { @@ -715,7 +715,9 @@ mod tests { use std::sync::LazyLock; use std::time::Duration; use tokio::sync::mpsc; - use types::{ChainSpec, Epoch, EthSpec, Hash256, Keypair, MinimalEthSpec, Slot}; + use types::{ + ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MinimalEthSpec, Slot, + }; const VALIDATOR_COUNT: usize = 48; diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 8cd991cc103..fa9a0c2e697 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -22,6 +22,7 @@ pub use crate::canonical_head::CanonicalHead; use crate::chain_config::ChainConfig; use crate::data_availability_checker::{ Availability, AvailabilityCheckError, AvailableBlock, DataAvailabilityChecker, + DataColumnsToPublish, }; use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use crate::early_attester_cache::EarlyAttesterCache; @@ -123,6 +124,7 @@ use task_executor::{ShutdownReason, TaskExecutor}; use 
tokio_stream::Stream; use tree_hash::TreeHash; use types::blob_sidecar::FixedBlobSidecarList; +use types::data_column_sidecar::{ColumnIndex, DataColumnIdentifier}; use types::payload::BlockProductionVersion; use types::*; @@ -132,10 +134,10 @@ pub type ForkChoiceError = fork_choice::Error; type HashBlockTuple = (Hash256, RpcBlock); // These keys are all zero because they get stored in different columns, see `DBColumn` type. -pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); -pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); -pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::zero(); -pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::zero(); +pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::ZERO; +pub const OP_POOL_DB_KEY: Hash256 = Hash256::ZERO; +pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::ZERO; +pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::ZERO; /// Defines how old a block can be before it's no longer a candidate for the early attester cache. const EARLY_ATTESTER_CACHE_HISTORIC_SLOTS: u64 = 4; @@ -204,14 +206,16 @@ impl TryInto for AvailabilityProcessingStatus { } /// The result of a chain segment processing. -pub enum ChainSegmentResult { +pub enum ChainSegmentResult { /// Processing this chain segment finished successfully. - Successful { imported_blocks: usize }, + Successful { + imported_blocks: Vec<(Hash256, Slot)>, + }, /// There was an error processing this chain segment. Before the error, some blocks could /// have been imported. 
Failed { - imported_blocks: usize, - error: BlockError, + imported_blocks: Vec<(Hash256, Slot)>, + error: BlockError, }, } @@ -524,7 +528,7 @@ impl BeaconBlockResponseWrapper { } pub fn consensus_block_value_wei(&self) -> Uint256 { - Uint256::from(self.consensus_block_value_gwei()) * 1_000_000_000 + Uint256::from(self.consensus_block_value_gwei()) * Uint256::from(1_000_000_000) } pub fn is_blinded(&self) -> bool { @@ -1155,6 +1159,25 @@ impl BeaconChain { .map_or_else(|| self.get_blobs(block_root), Ok) } + pub fn get_data_column_checking_all_caches( + &self, + block_root: Hash256, + index: ColumnIndex, + ) -> Result>>, Error> { + if let Some(column) = self + .data_availability_checker + .get_data_column(&DataColumnIdentifier { block_root, index })? + { + return Ok(Some(column)); + } + + if let Some(columns) = self.early_attester_cache.get_data_columns(block_root) { + return Ok(columns.iter().find(|c| c.index == index).cloned()); + } + + self.get_data_column(&block_root, &index) + } + /// Returns the block at the given root, if any. /// /// ## Errors @@ -1230,6 +1253,18 @@ impl BeaconChain { } } + /// Returns the data columns at the given root, if any. + /// + /// ## Errors + /// May return a database error. + pub fn get_data_column( + &self, + block_root: &Hash256, + column_index: &ColumnIndex, + ) -> Result>>, Error> { + Ok(self.store.get_data_column(block_root, column_index)?) + } + pub fn get_blinded_block( &self, block_root: &Hash256, @@ -1351,14 +1386,27 @@ impl BeaconChain { ) -> Result<(), Error> { self.light_client_server_cache.recompute_and_cache_updates( self.store.clone(), - &parent_root, slot, + &parent_root, &sync_aggregate, &self.log, &self.spec, ) } + pub fn get_light_client_updates( + &self, + sync_committee_period: u64, + count: u64, + ) -> Result>, Error> { + self.light_client_server_cache.get_light_client_updates( + &self.store, + sync_committee_period, + count, + &self.spec, + ) + } + /// Returns the current heads of the `BeaconChain`. 
For the canonical head, see `Self::head`. /// /// Returns `(block_root, block_slot)`. @@ -2111,7 +2159,7 @@ impl BeaconChain { self: &Arc, blob_sidecar: Arc>, subnet_id: u64, - ) -> Result, GossipBlobError> { + ) -> Result, GossipBlobError> { metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS); let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES); GossipVerifiedBlob::new(blob_sidecar, subnet_id, self).map(|v| { @@ -2148,8 +2196,6 @@ impl BeaconChain { &self, verified: &impl VerifiedAttestation, ) -> Result<(), Error> { - let _timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); - self.canonical_head .fork_choice_write_lock() .on_attestation( @@ -2652,9 +2698,9 @@ impl BeaconChain { pub fn filter_chain_segment( self: &Arc, chain_segment: Vec>, - ) -> Result>, ChainSegmentResult> { + ) -> Result>, ChainSegmentResult> { // This function will never import any blocks. - let imported_blocks = 0; + let imported_blocks = vec![]; let mut filtered_chain_segment = Vec::with_capacity(chain_segment.len()); // Produce a list of the parent root and slot of the child of each block. @@ -2759,8 +2805,8 @@ impl BeaconChain { self: &Arc, chain_segment: Vec>, notify_execution_layer: NotifyExecutionLayer, - ) -> ChainSegmentResult { - let mut imported_blocks = 0; + ) -> ChainSegmentResult { + let mut imported_blocks = vec![]; // Filter uninteresting blocks from the chain segment in a blocking task. let chain = self.clone(); @@ -2820,6 +2866,7 @@ impl BeaconChain { // Import the blocks into the chain. for signature_verified_block in signature_verified_blocks { + let block_slot = signature_verified_block.slot(); match self .process_block( signature_verified_block.block_root(), @@ -2832,9 +2879,9 @@ impl BeaconChain { { Ok(status) => { match status { - AvailabilityProcessingStatus::Imported(_) => { + AvailabilityProcessingStatus::Imported(block_root) => { // The block was imported successfully. 
- imported_blocks += 1; + imported_blocks.push((block_root, block_slot)); } AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { warn!(self.log, "Blobs missing in response to range request"; @@ -2867,6 +2914,17 @@ impl BeaconChain { ChainSegmentResult::Successful { imported_blocks } } + /// Updates fork-choice node into a permanent `available` state so it can become a viable head. + /// Only completed sampling results are received. Blocks are unavailable by default and should + /// be pruned on finalization, on a timeout or by a max count. + pub async fn process_sampling_completed(self: &Arc, block_root: Hash256) { + // TODO(das): update fork-choice + // NOTE: It is possible that sampling complets before block is imported into fork choice, + // in that case we may need to update availability cache. + // TODO(das): These log levels are too high, reduce once DAS matures + info!(self.log, "Sampling completed"; "block_root" => %block_root); + } + /// Returns `Ok(GossipVerifiedBlock)` if the supplied `block` should be forwarded onto the /// gossip network. The block is not imported into the chain, it is just partially verified. /// @@ -2880,7 +2938,7 @@ impl BeaconChain { pub async fn verify_block_for_gossip( self: &Arc, block: Arc>, - ) -> Result, BlockError> { + ) -> Result, BlockError> { let chain = self.clone(); self.task_executor .clone() @@ -2928,7 +2986,7 @@ impl BeaconChain { pub async fn process_gossip_blob( self: &Arc, blob: GossipVerifiedBlob, - ) -> Result> { + ) -> Result { let block_root = blob.block_root(); // If this block has already been imported to forkchoice it must have been available, so @@ -2941,6 +2999,11 @@ impl BeaconChain { return Err(BlockError::BlockIsAlreadyKnown(blob.block_root())); } + // No need to process and import blobs beyond the PeerDAS epoch. 
+ if self.spec.is_peer_das_enabled_for_epoch(blob.epoch()) { + return Err(BlockError::BlobNotRequired(blob.slot())); + } + if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_blob_sidecar_subscribers() { event_handler.register(EventKind::BlobSidecar(SseBlobSidecar::from_blob_sidecar( @@ -2958,7 +3021,13 @@ impl BeaconChain { pub async fn process_gossip_data_columns( self: &Arc, data_columns: Vec>, - ) -> Result> { + ) -> Result< + ( + AvailabilityProcessingStatus, + DataColumnsToPublish, + ), + BlockError, + > { let Ok((slot, block_root)) = data_columns .iter() .map(|c| (c.slot(), c.block_root())) @@ -2993,7 +3062,7 @@ impl BeaconChain { slot: Slot, block_root: Hash256, blobs: FixedBlobSidecarList, - ) -> Result> { + ) -> Result { // If this block has already been imported to forkchoice it must have been available, so // we don't need to process its blobs again. if self @@ -3020,13 +3089,52 @@ impl BeaconChain { self.remove_notified(&block_root, r) } + /// Cache the columns in the processing cache, process it, then evict it from the cache if it was + /// imported or errors. + pub async fn process_rpc_custody_columns( + self: &Arc, + custody_columns: DataColumnSidecarList, + ) -> Result< + ( + AvailabilityProcessingStatus, + DataColumnsToPublish, + ), + BlockError, + > { + let Ok((slot, block_root)) = custody_columns + .iter() + .map(|c| (c.slot(), c.block_root())) + .unique() + .exactly_one() + else { + return Err(BlockError::InternalError( + "Columns should be from the same block".to_string(), + )); + }; + + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its columns again. 
+ if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::BlockIsAlreadyKnown(block_root)); + } + + let r = self + .check_rpc_custody_columns_availability_and_import(slot, block_root, custody_columns) + .await; + self.remove_notified_custody_columns(&block_root, r) + } + /// Remove any block components from the *processing cache* if we no longer require them. If the /// block was imported full or erred, we no longer require them. fn remove_notified( &self, block_root: &Hash256, - r: Result>, - ) -> Result> { + r: Result, + ) -> Result { let has_missing_components = matches!(r, Ok(AvailabilityProcessingStatus::MissingComponents(_, _))); if !has_missing_components { @@ -3037,13 +3145,15 @@ impl BeaconChain { /// Remove any block components from the *processing cache* if we no longer require them. If the /// block was imported full or erred, we no longer require them. - fn remove_notified_custody_columns( + fn remove_notified_custody_columns

( &self, block_root: &Hash256, - r: Result>, - ) -> Result> { - let has_missing_components = - matches!(r, Ok(AvailabilityProcessingStatus::MissingComponents(_, _))); + r: Result<(AvailabilityProcessingStatus, P), BlockError>, + ) -> Result<(AvailabilityProcessingStatus, P), BlockError> { + let has_missing_components = matches!( + r, + Ok((AvailabilityProcessingStatus::MissingComponents(_, _), _)) + ); if !has_missing_components { self.reqresp_pre_import_cache.write().remove(block_root); } @@ -3058,7 +3168,7 @@ impl BeaconChain { unverified_block: B, block_source: BlockImportSource, notify_execution_layer: NotifyExecutionLayer, - ) -> Result> { + ) -> Result { self.reqresp_pre_import_cache .write() .insert(block_root, unverified_block.block_cloned()); @@ -3094,8 +3204,8 @@ impl BeaconChain { unverified_block: B, notify_execution_layer: NotifyExecutionLayer, block_source: BlockImportSource, - publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, - ) -> Result> { + publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, + ) -> Result { // Start the Prometheus timer. 
let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -3216,7 +3326,7 @@ impl BeaconChain { pub async fn into_executed_block( self: Arc, execution_pending_block: ExecutionPendingBlock, - ) -> Result, BlockError> { + ) -> Result, BlockError> { let ExecutionPendingBlock { block, import_data, @@ -3271,7 +3381,7 @@ impl BeaconChain { async fn check_block_availability_and_import( self: &Arc, block: AvailabilityPendingExecutedBlock, - ) -> Result> { + ) -> Result { let slot = block.block.slot(); let availability = self .data_availability_checker @@ -3284,7 +3394,7 @@ impl BeaconChain { async fn check_gossip_blob_availability_and_import( self: &Arc, blob: GossipVerifiedBlob, - ) -> Result> { + ) -> Result { let slot = blob.slot(); if let Some(slasher) = self.slasher.as_ref() { slasher.accept_block_header(blob.signed_block_header()); @@ -3301,20 +3411,26 @@ impl BeaconChain { slot: Slot, block_root: Hash256, data_columns: Vec>, - ) -> Result> { + ) -> Result< + ( + AvailabilityProcessingStatus, + DataColumnsToPublish, + ), + BlockError, + > { if let Some(slasher) = self.slasher.as_ref() { for data_colum in &data_columns { slasher.accept_block_header(data_colum.signed_block_header()); } } - let availability = self.data_availability_checker.put_gossip_data_columns( - slot, - block_root, - data_columns, - )?; + let (availability, data_columns_to_publish) = self + .data_availability_checker + .put_gossip_data_columns(slot, block_root, data_columns)?; - self.process_availability(slot, availability).await + self.process_availability(slot, availability) + .await + .map(|result| (result, data_columns_to_publish)) } /// Checks if the provided blobs can make any cached blocks available, and imports immediately @@ -3324,17 +3440,17 @@ impl BeaconChain { slot: Slot, block_root: Hash256, blobs: FixedBlobSidecarList, - ) -> Result> { + ) -> Result { // Need to scope this to ensure the lock is dropped before calling `process_availability` // Even an explicit drop 
is not enough to convince the borrow checker. { let mut slashable_cache = self.observed_slashable.write(); for header in blobs - .into_iter() + .iter() .filter_map(|b| b.as_ref().map(|b| b.signed_block_header.clone())) .unique() { - if verify_header_signature::>(self, &header).is_ok() { + if verify_header_signature::(self, &header).is_ok() { slashable_cache .observe_slashable( header.message.slot, @@ -3356,6 +3472,56 @@ impl BeaconChain { self.process_availability(slot, availability).await } + /// Checks if the provided columns can make any cached blocks available, and imports immediately + /// if so, otherwise caches the columns in the data availability checker. + async fn check_rpc_custody_columns_availability_and_import( + self: &Arc, + slot: Slot, + block_root: Hash256, + custody_columns: DataColumnSidecarList, + ) -> Result< + ( + AvailabilityProcessingStatus, + DataColumnsToPublish, + ), + BlockError, + > { + // Need to scope this to ensure the lock is dropped before calling `process_availability` + // Even an explicit drop is not enough to convince the borrow checker. + { + let mut slashable_cache = self.observed_slashable.write(); + // Assumes all items in custody_columns are for the same block_root + if let Some(column) = custody_columns.first() { + let header = &column.signed_block_header; + if verify_header_signature::(self, header).is_ok() { + slashable_cache + .observe_slashable( + header.message.slot, + header.message.proposer_index, + block_root, + ) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + if let Some(slasher) = self.slasher.as_ref() { + slasher.accept_block_header(header.clone()); + } + } + } + } + + // This slot value is purely informative for the consumers of + // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. 
+ let (availability, data_columns_to_publish) = + self.data_availability_checker.put_rpc_custody_columns( + block_root, + slot.epoch(T::EthSpec::slots_per_epoch()), + custody_columns, + )?; + + self.process_availability(slot, availability) + .await + .map(|result| (result, data_columns_to_publish)) + } + /// Imports a fully available block. Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` /// /// An error is returned if the block was unable to be imported. It may be partially imported @@ -3364,7 +3530,7 @@ impl BeaconChain { self: &Arc, slot: Slot, availability: Availability, - ) -> Result> { + ) -> Result { match availability { Availability::Available(block) => { // Block is fully available, import into fork choice @@ -3379,7 +3545,7 @@ impl BeaconChain { pub async fn import_available_block( self: &Arc, block: Box>, - ) -> Result> { + ) -> Result { let AvailableExecutedBlock { block, import_data, @@ -3404,6 +3570,8 @@ impl BeaconChain { ); } + // TODO(das) record custody column available timestamp + // import let chain = self.clone(); let block_root = self @@ -3456,7 +3624,7 @@ impl BeaconChain { parent_block: SignedBlindedBeaconBlock, parent_eth1_finalization_data: Eth1FinalizationData, mut consensus_context: ConsensusContext, - ) -> Result> { + ) -> Result { // ----------------------------- BLOCK NOT YET ATTESTABLE ---------------------------------- // Everything in this initial section is on the hot path between processing the block and // being able to attest to it. DO NOT add any extra processing in this initial section @@ -3514,8 +3682,6 @@ impl BeaconChain { // Register the new block with the fork choice service. 
{ - let _fork_choice_block_timer = - metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES); let block_delay = self .slot_clock .seconds_from_current_slot_start() @@ -3648,16 +3814,15 @@ impl BeaconChain { } } - if let Some(_data_columns) = data_columns { - // TODO(das): depends on https://github.com/sigp/lighthouse/pull/6073 - // if !data_columns.is_empty() { - // debug!( - // self.log, "Writing data_columns to store"; - // "block_root" => %block_root, - // "count" => data_columns.len(), - // ); - // ops.push(StoreOp::PutDataColumns(block_root, data_columns)); - // } + if let Some(data_columns) = data_columns { + if !data_columns.is_empty() { + debug!( + self.log, "Writing data_columns to store"; + "block_root" => %block_root, + "count" => data_columns.len(), + ); + ops.push(StoreOp::PutDataColumns(block_root, data_columns)); + } } let txn_lock = self.store.hot_db.begin_rw_transaction(); @@ -3769,7 +3934,7 @@ impl BeaconChain { block: BeaconBlockRef, block_root: Hash256, state: &BeaconState, - ) -> Result<(), BlockError> { + ) -> Result<(), BlockError> { // Only perform the weak subjectivity check if it was configured. 
let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint else { return Ok(()); @@ -4104,7 +4269,7 @@ impl BeaconChain { &self, block_root: Hash256, state: &mut BeaconState, - ) -> Result<(), BlockError> { + ) -> Result<(), BlockError> { for relative_epoch in [RelativeEpoch::Current, RelativeEpoch::Next] { let shuffling_id = AttestationShufflingId::new(block_root, state, relative_epoch)?; @@ -5260,7 +5425,7 @@ impl BeaconChain { }, }), None, - Uint256::zero(), + Uint256::ZERO, ), BeaconState::Altair(_) => ( BeaconBlock::Altair(BeaconBlockAltair { @@ -5283,7 +5448,7 @@ impl BeaconChain { }, }), None, - Uint256::zero(), + Uint256::ZERO, ), BeaconState::Bellatrix(_) => { let block_proposal_contents = @@ -5416,8 +5581,6 @@ impl BeaconChain { bls_to_execution_changes: bls_to_execution_changes.into(), blob_kzg_commitments: kzg_commitments .ok_or(BlockProductionError::InvalidPayloadFork)?, - // TODO(electra): finish consolidations when they're more spec'd out - consolidations: Vec::new().into(), }, }), maybe_blobs_and_proofs, @@ -6764,6 +6927,33 @@ impl BeaconChain { self.data_availability_checker.data_availability_boundary() } + /// Returns true if epoch is within the data availability boundary + pub fn da_check_required_for_epoch(&self, epoch: Epoch) -> bool { + self.data_availability_checker + .da_check_required_for_epoch(epoch) + } + + /// Returns true if we should fetch blobs for this block + pub fn should_fetch_blobs(&self, block_epoch: Epoch) -> bool { + self.da_check_required_for_epoch(block_epoch) + && !self.spec.is_peer_das_enabled_for_epoch(block_epoch) + } + + /// Returns true if we should fetch custody columns for this block + pub fn should_fetch_custody_columns(&self, block_epoch: Epoch) -> bool { + self.da_check_required_for_epoch(block_epoch) + && self.spec.is_peer_das_enabled_for_epoch(block_epoch) + } + + /// Returns true if we should issue a sampling request for this block + /// TODO(das): check if the block is still within the da_window + pub 
fn should_sample_slot(&self, slot: Slot) -> bool { + self.config.enable_sampling + && self + .spec + .is_peer_das_enabled_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch())) + } + pub fn logger(&self) -> &Logger { &self.log } @@ -6852,8 +7042,8 @@ impl From for Error { } } -impl ChainSegmentResult { - pub fn into_block_error(self) -> Result<(), BlockError> { +impl ChainSegmentResult { + pub fn into_block_error(self) -> Result<(), BlockError> { match self { ChainSegmentResult::Failed { error, .. } => Err(error), ChainSegmentResult::Successful { .. } => Ok(()), diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index f746b68996f..a6aedda19d0 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -17,7 +17,7 @@ use store::{Error as StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, - Hash256, Slot, + FixedBytesExtended, Hash256, Slot, }; #[derive(Debug)] diff --git a/beacon_node/beacon_chain/src/bellatrix_readiness.rs b/beacon_node/beacon_chain/src/bellatrix_readiness.rs index 60b1abaf098..c2e387c422f 100644 --- a/beacon_node/beacon_chain/src/bellatrix_readiness.rs +++ b/beacon_node/beacon_chain/src/bellatrix_readiness.rs @@ -64,7 +64,7 @@ impl MergeConfig { /// Instantiate `self` from the values in a `ChainSpec`. 
pub fn from_chainspec(spec: &ChainSpec) -> Self { let mut params = MergeConfig::default(); - if spec.terminal_total_difficulty != Uint256::max_value() { + if spec.terminal_total_difficulty != Uint256::MAX { params.terminal_total_difficulty = Some(spec.terminal_total_difficulty); } if spec.terminal_block_hash != ExecutionBlockHash::zero() { diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index bba58675933..e4646d62882 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -22,7 +22,7 @@ use types::{ /// An error occurred while validating a gossip blob. #[derive(Debug)] -pub enum GossipBlobError { +pub enum GossipBlobError { /// The blob sidecar is from a slot that is later than the current slot (with respect to the /// gossip clock disparity). /// @@ -95,7 +95,7 @@ pub enum GossipBlobError { /// ## Peer scoring /// /// We cannot process the blob without validating its parent, the peer isn't necessarily faulty. 
- BlobParentUnknown(Arc>), + BlobParentUnknown { parent_root: Hash256 }, /// Invalid kzg commitment inclusion proof /// ## Peer scoring @@ -145,28 +145,19 @@ pub enum GossipBlobError { NotFinalizedDescendant { block_parent_root: Hash256 }, } -impl std::fmt::Display for GossipBlobError { +impl std::fmt::Display for GossipBlobError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - GossipBlobError::BlobParentUnknown(blob_sidecar) => { - write!( - f, - "BlobParentUnknown(parent_root:{})", - blob_sidecar.block_parent_root() - ) - } - other => write!(f, "{:?}", other), - } + write!(f, "{:?}", self) } } -impl From for GossipBlobError { +impl From for GossipBlobError { fn from(e: BeaconChainError) -> Self { GossipBlobError::BeaconChainError(e) } } -impl From for GossipBlobError { +impl From for GossipBlobError { fn from(e: BeaconStateError) -> Self { GossipBlobError::BeaconChainError(BeaconChainError::BeaconStateError(e)) } @@ -190,12 +181,12 @@ impl GossipVerifiedBlob { blob: Arc>, subnet_id: u64, chain: &BeaconChain, - ) -> Result> { + ) -> Result { let header = blob.signed_block_header.clone(); // We only process slashing info if the gossip verification failed // since we do not process the blob any further in that case. 
validate_blob_sidecar_for_gossip(blob, subnet_id, chain).map_err(|e| { - process_block_slash_info::<_, GossipBlobError>( + process_block_slash_info::<_, GossipBlobError>( chain, BlockSlashInfo::from_early_error_blob(header, e), ) @@ -339,16 +330,16 @@ impl KzgVerifiedBlobList { kzg: &Kzg, seen_timestamp: Duration, ) -> Result { - let blobs = blob_list.into_iter().collect::>(); - verify_kzg_for_blob_list(blobs.iter(), kzg)?; + let blobs = blob_list + .into_iter() + .map(|blob| KzgVerifiedBlob { + blob, + seen_timestamp, + }) + .collect::>(); + verify_kzg_for_blob_list(blobs.iter().map(|b| &b.blob), kzg)?; Ok(Self { - verified_blobs: blobs - .into_iter() - .map(|blob| KzgVerifiedBlob { - blob, - seen_timestamp, - }) - .collect(), + verified_blobs: blobs, }) } } @@ -384,7 +375,7 @@ pub fn validate_blob_sidecar_for_gossip( blob_sidecar: Arc>, subnet: u64, chain: &BeaconChain, -) -> Result, GossipBlobError> { +) -> Result, GossipBlobError> { let blob_slot = blob_sidecar.slot(); let blob_index = blob_sidecar.index; let block_parent_root = blob_sidecar.block_parent_root(); @@ -409,8 +400,8 @@ pub fn validate_blob_sidecar_for_gossip( // Verify that the blob_sidecar was received on the correct subnet. if blob_index != subnet { return Err(GossipBlobError::InvalidSubnet { - expected: blob_index, - received: subnet, + expected: subnet, + received: blob_index, }); } @@ -466,7 +457,9 @@ pub fn validate_blob_sidecar_for_gossip( // We have already verified that the blob is past finalization, so we can // just check fork choice for the block's parent. let Some(parent_block) = fork_choice.get_block(&block_parent_root) else { - return Err(GossipBlobError::BlobParentUnknown(blob_sidecar)); + return Err(GossipBlobError::BlobParentUnknown { + parent_root: block_parent_root, + }); }; // Do not process a blob that does not descend from the finalized root. 
@@ -516,7 +509,7 @@ pub fn validate_blob_sidecar_for_gossip( )) })?; - let state = cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( + let state = cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( &mut parent_state, Some(parent_state_root), blob_slot, @@ -570,8 +563,9 @@ pub fn validate_blob_sidecar_for_gossip( .kzg .as_ref() .ok_or(GossipBlobError::KzgNotInitialized)?; - let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar.clone(), kzg, seen_timestamp) + let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar, kzg, seen_timestamp) .map_err(GossipBlobError::KzgError)?; + let blob_sidecar = &kzg_verified_blob.blob; chain .observed_slashable @@ -597,7 +591,7 @@ pub fn validate_blob_sidecar_for_gossip( if chain .observed_blob_sidecars .write() - .observe_sidecar(&blob_sidecar) + .observe_sidecar(blob_sidecar) .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? { return Err(GossipBlobError::RepeatBlob { diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs index 3b75046f3a4..af122ccdc06 100644 --- a/beacon_node/beacon_chain/src/block_times_cache.rs +++ b/beacon_node/beacon_chain/src/block_times_cache.rs @@ -294,6 +294,7 @@ impl BlockTimesCache { #[cfg(test)] mod test { use super::*; + use types::FixedBytesExtended; #[test] fn observed_time_uses_minimum() { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 832eaccc803..8bd93a3753c 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -49,17 +49,20 @@ #![allow(clippy::result_large_err)] use crate::beacon_snapshot::PreProcessingSnapshot; -use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; +use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob, GossipVerifiedBlobList}; use crate::block_verification_types::{ AsBlock, BlockContentsError, 
BlockImportData, GossipVerifiedBlockContents, RpcBlock, }; use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock}; -use crate::data_column_verification::GossipDataColumnError; +use crate::data_column_verification::{ + GossipDataColumnError, GossipVerifiedDataColumn, GossipVerifiedDataColumnList, +}; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; +use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_block_producers::SeenBlock; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; @@ -94,10 +97,12 @@ use std::io::Write; use std::sync::Arc; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; +use types::data_column_sidecar::DataColumnSidecarError; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ExecutionBlockHash, - Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, - SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSubnetId, Epoch, + EthSpec, ExecutionBlockHash, FullPayload, Hash256, InconsistentFork, KzgProofs, PublicKey, + PublicKeyBytes, RelativeEpoch, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, + Slot, }; use types::{BlobSidecar, ExecPayload}; @@ -139,14 +144,14 @@ const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files"); /// - The block is malformed/invalid (indicated by all results other than `BeaconChainError`. /// - We encountered an error whilst trying to verify the block (a `BeaconChainError`). #[derive(Debug)] -pub enum BlockError { +pub enum BlockError { /// The parent block was unknown. 
/// /// ## Peer scoring /// /// It's unclear if this block is valid, but it cannot be processed without already knowing /// its parent. - ParentUnknown(RpcBlock), + ParentUnknown { parent_root: Hash256 }, /// The block slot is greater than the present slot. /// /// ## Peer scoring @@ -306,6 +311,14 @@ pub enum BlockError { /// TODO: We may need to penalize the peer that gave us a potentially invalid rpc blob. /// https://github.com/sigp/lighthouse/issues/4546 AvailabilityCheck(AvailabilityCheckError), + /// A Blob with a slot after PeerDAS is received and is not required to be imported. + /// This can happen because we stay subscribed to the blob subnet after 2 epochs, as we could + /// still receive valid blobs from a Deneb epoch after PeerDAS is activated. + /// + /// ## Peer scoring + /// + /// This indicates the peer is sending an unexpected gossip blob and should be penalised. + BlobNotRequired(Slot), /// An internal error has occurred when processing the block or sidecars. /// /// ## Peer scoring @@ -315,7 +328,7 @@ pub enum BlockError { InternalError(String), } -impl From for BlockError { +impl From for BlockError { fn from(e: AvailabilityCheckError) -> Self { Self::AvailabilityCheck(e) } @@ -423,30 +436,25 @@ impl From for ExecutionPayloadError { } } -impl From for BlockError { +impl From for BlockError { fn from(e: ExecutionPayloadError) -> Self { BlockError::ExecutionPayloadError(e) } } -impl From for BlockError { +impl From for BlockError { fn from(e: InconsistentFork) -> Self { BlockError::InconsistentFork(e) } } -impl std::fmt::Display for BlockError { +impl std::fmt::Display for BlockError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - BlockError::ParentUnknown(block) => { - write!(f, "ParentUnknown(parent_root:{})", block.parent_root()) - } - other => write!(f, "{:?}", other), - } + write!(f, "{:?}", self) } } -impl From for BlockError { +impl From for BlockError { fn from(e: BlockSignatureVerifierError) -> 
Self { match e { // Make a special distinction for `IncorrectBlockProposer` since it indicates an @@ -463,31 +471,31 @@ impl From for BlockError { } } -impl From for BlockError { +impl From for BlockError { fn from(e: BeaconChainError) -> Self { BlockError::BeaconChainError(e) } } -impl From for BlockError { +impl From for BlockError { fn from(e: BeaconStateError) -> Self { BlockError::BeaconChainError(BeaconChainError::BeaconStateError(e)) } } -impl From for BlockError { +impl From for BlockError { fn from(e: SlotProcessingError) -> Self { BlockError::BeaconChainError(BeaconChainError::SlotProcessingError(e)) } } -impl From for BlockError { +impl From for BlockError { fn from(e: DBError) -> Self { BlockError::BeaconChainError(BeaconChainError::DBError(e)) } } -impl From for BlockError { +impl From for BlockError { fn from(e: ArithError) -> Self { BlockError::BeaconChainError(BeaconChainError::ArithError(e)) } @@ -511,8 +519,8 @@ pub enum BlockSlashInfo { SignatureValid(SignedBeaconBlockHeader, TErr), } -impl BlockSlashInfo> { - pub fn from_early_error_block(header: SignedBeaconBlockHeader, e: BlockError) -> Self { +impl BlockSlashInfo { + pub fn from_early_error_block(header: SignedBeaconBlockHeader, e: BlockError) -> Self { match e { BlockError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), // `InvalidSignature` could indicate any signature in the block, so we want @@ -522,8 +530,8 @@ impl BlockSlashInfo> { } } -impl BlockSlashInfo> { - pub fn from_early_error_blob(header: SignedBeaconBlockHeader, e: GossipBlobError) -> Self { +impl BlockSlashInfo { + pub fn from_early_error_blob(header: SignedBeaconBlockHeader, e: GossipBlobError) -> Self { match e { GossipBlobError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), // `InvalidSignature` could indicate any signature in the block, so we want @@ -591,7 +599,7 @@ pub(crate) fn process_block_slash_info( mut chain_segment: Vec<(Hash256, RpcBlock)>, chain: &BeaconChain, -) -> Result>, 
BlockError> { +) -> Result>, BlockError> { if chain_segment.is_empty() { return Ok(vec![]); } @@ -606,7 +614,7 @@ pub fn signature_verify_chain_segment( .map(|(_, block)| block.slot()) .unwrap_or_else(|| slot); - let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, highest_slot, @@ -676,8 +684,7 @@ pub struct SignatureVerifiedBlock { } /// Used to await the result of executing payload with a remote EE. -type PayloadVerificationHandle = - JoinHandle>>>; +type PayloadVerificationHandle = JoinHandle>>; /// A wrapper around a `SignedBeaconBlock` that indicates that this block is fully verified and /// ready to import into the `BeaconChain`. The validation includes: @@ -693,14 +700,14 @@ type PayloadVerificationHandle = pub struct ExecutionPendingBlock { pub block: MaybeAvailableBlock, pub import_data: BlockImportData, - pub payload_verification_handle: PayloadVerificationHandle, + pub payload_verification_handle: PayloadVerificationHandle, } pub trait IntoGossipVerifiedBlockContents: Sized { fn into_gossip_verified_block( self, chain: &BeaconChain, - ) -> Result, BlockContentsError>; + ) -> Result, BlockContentsError>; fn inner_block(&self) -> &SignedBeaconBlock; } @@ -708,7 +715,7 @@ impl IntoGossipVerifiedBlockContents for GossipVerifiedB fn into_gossip_verified_block( self, _chain: &BeaconChain, - ) -> Result, BlockContentsError> { + ) -> Result, BlockContentsError> { Ok(self) } fn inner_block(&self) -> &SignedBeaconBlock { @@ -720,29 +727,26 @@ impl IntoGossipVerifiedBlockContents for PublishBlockReq fn into_gossip_verified_block( self, chain: &BeaconChain, - ) -> Result, BlockContentsError> { + ) -> Result, BlockContentsError> { let (block, blobs) = self.deconstruct(); + let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); + + let (gossip_verified_blobs, gossip_verified_data_columns) = if 
peer_das_enabled { + let gossip_verified_data_columns = + build_gossip_verified_data_columns(chain, &block, blobs.map(|(_, blobs)| blobs))?; + (None, gossip_verified_data_columns) + } else { + let gossip_verified_blobs = build_gossip_verified_blobs(chain, &block, blobs)?; + (gossip_verified_blobs, None) + }; - let gossip_verified_blobs = blobs - .map(|(kzg_proofs, blobs)| { - let mut gossip_verified_blobs = vec![]; - for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { - let _timer = - metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION); - let blob = BlobSidecar::new(i, blob, &block, *kzg_proof) - .map_err(BlockContentsError::SidecarError)?; - drop(_timer); - let gossip_verified_blob = - GossipVerifiedBlob::new(Arc::new(blob), i as u64, chain)?; - gossip_verified_blobs.push(gossip_verified_blob); - } - let gossip_verified_blobs = VariableList::from(gossip_verified_blobs); - Ok::<_, BlockContentsError>(gossip_verified_blobs) - }) - .transpose()?; let gossip_verified_block = GossipVerifiedBlock::new(block, chain)?; - Ok((gossip_verified_block, gossip_verified_blobs)) + Ok(( + gossip_verified_block, + gossip_verified_blobs, + gossip_verified_data_columns, + )) } fn inner_block(&self) -> &SignedBeaconBlock { @@ -750,6 +754,70 @@ impl IntoGossipVerifiedBlockContents for PublishBlockReq } } +#[allow(clippy::type_complexity)] +fn build_gossip_verified_blobs( + chain: &BeaconChain, + block: &Arc>>, + blobs: Option<(KzgProofs, BlobsList)>, +) -> Result>, BlockContentsError> { + blobs + .map(|(kzg_proofs, blobs)| { + let mut gossip_verified_blobs = vec![]; + for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { + let _timer = + metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION); + let blob = BlobSidecar::new(i, blob, block, *kzg_proof) + .map_err(BlockContentsError::BlobSidecarError)?; + drop(_timer); + let gossip_verified_blob = + GossipVerifiedBlob::new(Arc::new(blob), i as u64, chain)?; + 
gossip_verified_blobs.push(gossip_verified_blob); + } + let gossip_verified_blobs = VariableList::from(gossip_verified_blobs); + Ok::<_, BlockContentsError>(gossip_verified_blobs) + }) + .transpose() +} + +fn build_gossip_verified_data_columns( + chain: &BeaconChain, + block: &SignedBeaconBlock>, + blobs: Option>, +) -> Result>, BlockContentsError> { + blobs + // Only attempt to build data columns if blobs is non empty to avoid skewing the metrics. + .filter(|b| !b.is_empty()) + .map(|blobs| { + // NOTE: we expect KZG to be initialized if the blobs are present + let kzg = chain + .kzg + .as_ref() + .ok_or(BlockContentsError::DataColumnError( + GossipDataColumnError::KzgNotInitialized, + ))?; + + let timer = metrics::start_timer(&metrics::DATA_COLUMN_SIDECAR_COMPUTATION); + let sidecars = blobs_to_data_column_sidecars(&blobs, block, kzg, &chain.spec)?; + drop(timer); + let mut gossip_verified_data_columns = vec![]; + for sidecar in sidecars { + let subnet = DataColumnSubnetId::from_column_index::( + sidecar.index as usize, + &chain.spec, + ); + let column = GossipVerifiedDataColumn::new(sidecar, subnet.into(), chain)?; + gossip_verified_data_columns.push(column); + } + let gossip_verified_data_columns = RuntimeVariableList::new( + gossip_verified_data_columns, + chain.spec.number_of_columns, + ) + .map_err(DataColumnSidecarError::SszError)?; + Ok::<_, BlockContentsError>(gossip_verified_data_columns) + }) + .transpose() +} + /// Implemented on types that can be converted into a `ExecutionPendingBlock`. /// /// Used to allow functions to accept blocks at various stages of verification. @@ -759,7 +827,7 @@ pub trait IntoExecutionPendingBlock: Sized { block_root: Hash256, chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, - ) -> Result, BlockError> { + ) -> Result, BlockError> { self.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) .map(|execution_pending| { // Supply valid block to slasher. 
@@ -768,9 +836,7 @@ pub trait IntoExecutionPendingBlock: Sized { } execution_pending }) - .map_err(|slash_info| { - process_block_slash_info::<_, BlockError>(chain, slash_info) - }) + .map_err(|slash_info| process_block_slash_info::<_, BlockError>(chain, slash_info)) } /// Convert the block to fully-verified form while producing data to aid checking slashability. @@ -779,7 +845,7 @@ pub trait IntoExecutionPendingBlock: Sized { block_root: Hash256, chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, - ) -> Result, BlockSlashInfo>>; + ) -> Result, BlockSlashInfo>; fn block(&self) -> &SignedBeaconBlock; fn block_cloned(&self) -> Arc>; @@ -793,7 +859,7 @@ impl GossipVerifiedBlock { pub fn new( block: Arc>, chain: &BeaconChain, - ) -> Result> { + ) -> Result { // If the block is valid for gossip we don't supply it to the slasher here because // we assume it will be transformed into a fully verified block. We *do* need to supply // it to the slasher if an error occurs, because that's the end of this block's journey, @@ -803,7 +869,7 @@ impl GossipVerifiedBlock { // but it's way quicker to calculate root of the header since the hash of the tree rooted // at `BeaconBlockBody` is already computed in the header. Self::new_without_slasher_checks(block, &header, chain).map_err(|e| { - process_block_slash_info::<_, BlockError>( + process_block_slash_info::<_, BlockError>( chain, BlockSlashInfo::from_early_error_block(header, e), ) @@ -815,7 +881,7 @@ impl GossipVerifiedBlock { block: Arc>, block_header: &SignedBeaconBlockHeader, chain: &BeaconChain, - ) -> Result> { + ) -> Result { // Ensure the block is the correct structure for the fork at `block.slot()`. 
block .fork_name(&chain.spec) @@ -864,7 +930,7 @@ impl GossipVerifiedBlock { let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); let (parent_block, block) = - verify_parent_block_is_known::(block_root, &fork_choice_read_lock, block)?; + verify_parent_block_is_known::(&fork_choice_read_lock, block)?; drop(fork_choice_read_lock); // Track the number of skip slots between the block and its parent. @@ -924,7 +990,7 @@ impl GossipVerifiedBlock { ); // The state produced is only valid for determining proposer/attester shuffling indices. - let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, block.slot(), @@ -1033,7 +1099,7 @@ impl IntoExecutionPendingBlock for GossipVerifiedBlock>, notify_execution_layer: NotifyExecutionLayer, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo> { let execution_pending = SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; execution_pending.into_execution_pending_block_slashable( @@ -1061,7 +1127,7 @@ impl SignatureVerifiedBlock { block: MaybeAvailableBlock, block_root: Hash256, chain: &BeaconChain, - ) -> Result> { + ) -> Result { // Ensure the block is the correct structure for the fork at `block.slot()`. 
block .as_block() @@ -1073,7 +1139,7 @@ impl SignatureVerifiedBlock { let (mut parent, block) = load_parent(block, chain)?; - let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, block.slot(), @@ -1106,7 +1172,7 @@ impl SignatureVerifiedBlock { block: MaybeAvailableBlock, block_root: Hash256, chain: &BeaconChain, - ) -> Result>> { + ) -> Result> { let header = block.signed_block_header(); Self::new(block, block_root, chain) .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) @@ -1117,14 +1183,14 @@ impl SignatureVerifiedBlock { pub fn from_gossip_verified_block( from: GossipVerifiedBlock, chain: &BeaconChain, - ) -> Result> { + ) -> Result { let (mut parent, block) = if let Some(parent) = from.parent { (parent, from.block) } else { load_parent(from.block, chain)? }; - let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, block.slot(), @@ -1160,7 +1226,7 @@ impl SignatureVerifiedBlock { pub fn from_gossip_verified_block_check_slashable( from: GossipVerifiedBlock, chain: &BeaconChain, - ) -> Result>> { + ) -> Result> { let header = from.block.signed_block_header(); Self::from_gossip_verified_block(from, chain) .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) @@ -1169,6 +1235,10 @@ impl SignatureVerifiedBlock { pub fn block_root(&self) -> Hash256 { self.block_root } + + pub fn slot(&self) -> Slot { + self.block.slot() + } } impl IntoExecutionPendingBlock for SignatureVerifiedBlock { @@ -1178,7 +1248,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc block_root: Hash256, chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo> { let header = self.block.signed_block_header(); let (parent, 
block) = if let Some(parent) = self.parent { (parent, self.block) @@ -1215,7 +1285,7 @@ impl IntoExecutionPendingBlock for Arc>, notify_execution_layer: NotifyExecutionLayer, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo> { // Perform an early check to prevent wasting time on irrelevant blocks. let block_root = check_block_relevancy(&self, block_root, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; @@ -1249,7 +1319,7 @@ impl IntoExecutionPendingBlock for RpcBlock block_root: Hash256, chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo> { // Perform an early check to prevent wasting time on irrelevant blocks. let block_root = check_block_relevancy(self.as_block(), block_root, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; @@ -1290,7 +1360,7 @@ impl ExecutionPendingBlock { mut consensus_context: ConsensusContext, chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, - ) -> Result> { + ) -> Result { chain .observed_slashable .write() @@ -1326,7 +1396,9 @@ impl ExecutionPendingBlock { // because it will revert finalization. Note that the finalized block is stored in fork // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). - return Err(BlockError::ParentUnknown(block.into_rpc_block())); + return Err(BlockError::ParentUnknown { + parent_root: block.parent_root(), + }); } /* @@ -1666,9 +1738,6 @@ impl ExecutionPendingBlock { // Register each attestation in the block with fork choice. 
for (i, attestation) in block.message().body().attestations().enumerate() { - let _fork_choice_attestation_timer = - metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); - let indexed_attestation = consensus_context .get_indexed_attestation(&state, attestation) .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?; @@ -1706,7 +1775,7 @@ impl ExecutionPendingBlock { fn check_block_against_anchor_slot( block: BeaconBlockRef<'_, T::EthSpec>, chain: &BeaconChain, -) -> Result<(), BlockError> { +) -> Result<(), BlockError> { if let Some(anchor_slot) = chain.store.get_anchor_slot() { if block.slot() <= anchor_slot { return Err(BlockError::WeakSubjectivityConflict); @@ -1723,7 +1792,7 @@ fn check_block_against_finalized_slot( block: BeaconBlockRef<'_, T::EthSpec>, block_root: Hash256, chain: &BeaconChain, -) -> Result<(), BlockError> { +) -> Result<(), BlockError> { // The finalized checkpoint is being read from fork choice, rather than the cached head. // // Fork choice has the most up-to-date view of finalization and there's no point importing a @@ -1758,7 +1827,7 @@ pub fn check_block_is_finalized_checkpoint_or_descendant< chain: &BeaconChain, fork_choice: &BeaconForkChoice, block: B, -) -> Result> { +) -> Result { if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) { Ok(block) } else { @@ -1779,7 +1848,9 @@ pub fn check_block_is_finalized_checkpoint_or_descendant< block_parent_root: block.parent_root(), }) } else { - Err(BlockError::ParentUnknown(block.into_rpc_block())) + Err(BlockError::ParentUnknown { + parent_root: block.parent_root(), + }) } } } @@ -1795,7 +1866,7 @@ pub fn check_block_relevancy( signed_block: &SignedBeaconBlock, block_root: Hash256, chain: &BeaconChain, -) -> Result> { +) -> Result { let block = signed_block.message(); // Do not process blocks from the future. @@ -1863,17 +1934,15 @@ pub fn get_block_header_root(block_header: &SignedBeaconBlockHeader) -> Hash256 /// fork choice. 
#[allow(clippy::type_complexity)] fn verify_parent_block_is_known( - block_root: Hash256, fork_choice_read_lock: &RwLockReadGuard>, block: Arc>, -) -> Result<(ProtoBlock, Arc>), BlockError> { +) -> Result<(ProtoBlock, Arc>), BlockError> { if let Some(proto_block) = fork_choice_read_lock.get_block(&block.parent_root()) { Ok((proto_block, block)) } else { - Err(BlockError::ParentUnknown(RpcBlock::new_without_blobs( - Some(block_root), - block, - ))) + Err(BlockError::ParentUnknown { + parent_root: block.parent_root(), + }) } } @@ -1885,7 +1954,7 @@ fn verify_parent_block_is_known( fn load_parent>( block: B, chain: &BeaconChain, -) -> Result<(PreProcessingSnapshot, B), BlockError> { +) -> Result<(PreProcessingSnapshot, B), BlockError> { // Reject any block if its parent is not known to fork choice. // // A block that is not in fork choice is either: @@ -1901,7 +1970,9 @@ fn load_parent>( .fork_choice_read_lock() .contains_block(&block.parent_root()) { - return Err(BlockError::ParentUnknown(block.into_rpc_block())); + return Err(BlockError::ParentUnknown { + parent_root: block.parent_root(), + }); } let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ); @@ -1997,7 +2068,7 @@ pub trait BlockBlobError: From + From + Debu fn proposer_signature_invalid() -> Self; } -impl BlockBlobError for BlockError { +impl BlockBlobError for BlockError { fn not_later_than_parent_error(block_slot: Slot, parent_slot: Slot) -> Self { BlockError::BlockIsNotLaterThanParent { block_slot, @@ -2014,7 +2085,7 @@ impl BlockBlobError for BlockError { } } -impl BlockBlobError for GossipBlobError { +impl BlockBlobError for GossipBlobError { fn not_later_than_parent_error(blob_slot: Slot, parent_slot: Slot) -> Self { GossipBlobError::BlobIsNotLaterThanParent { blob_slot, diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 426c41bfeab..707dfa56d84 100644 --- 
a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -2,6 +2,9 @@ use crate::blob_verification::{GossipBlobError, GossipVerifiedBlobList}; use crate::block_verification::BlockError; use crate::data_availability_checker::AvailabilityCheckError; pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; +use crate::data_column_verification::{ + CustodyDataColumn, CustodyDataColumnList, GossipDataColumnError, GossipVerifiedDataColumnList, +}; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::{get_block_root, GossipVerifiedBlock, PayloadVerificationOutcome}; use derivative::Derivative; @@ -9,10 +12,11 @@ use ssz_types::VariableList; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; use std::sync::Arc; -use types::blob_sidecar::{BlobIdentifier, BlobSidecarError, FixedBlobSidecarList}; +use types::blob_sidecar::{self, BlobIdentifier, FixedBlobSidecarList}; +use types::data_column_sidecar::{self}; use types::{ - BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, Epoch, EthSpec, Hash256, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, ChainSpec, Epoch, EthSpec, + Hash256, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; /// A block that has been received over RPC. 
It has 2 internal variants: @@ -50,6 +54,7 @@ impl RpcBlock { match &self.block { RpcBlockInner::Block(block) => block, RpcBlockInner::BlockAndBlobs(block, _) => block, + RpcBlockInner::BlockAndCustodyColumns(block, _) => block, } } @@ -57,6 +62,7 @@ impl RpcBlock { match &self.block { RpcBlockInner::Block(block) => block.clone(), RpcBlockInner::BlockAndBlobs(block, _) => block.clone(), + RpcBlockInner::BlockAndCustodyColumns(block, _) => block.clone(), } } @@ -64,6 +70,15 @@ impl RpcBlock { match &self.block { RpcBlockInner::Block(_) => None, RpcBlockInner::BlockAndBlobs(_, blobs) => Some(blobs), + RpcBlockInner::BlockAndCustodyColumns(_, _) => None, + } + } + + pub fn custody_columns(&self) -> Option<&CustodyDataColumnList> { + match &self.block { + RpcBlockInner::Block(_) => None, + RpcBlockInner::BlockAndBlobs(_, _) => None, + RpcBlockInner::BlockAndCustodyColumns(_, data_columns) => Some(data_columns), } } } @@ -79,6 +94,9 @@ enum RpcBlockInner { /// This variant is used with parent lookups and by-range responses. It should have all blobs /// ordered, all block roots matching, and the correct number of blobs for this block. BlockAndBlobs(Arc>, BlobSidecarList), + /// This variant is used with parent lookups and by-range responses. It should have all + /// requested data columns, all block roots matching for this block. + BlockAndCustodyColumns(Arc>, CustodyDataColumnList), } impl RpcBlock { @@ -136,6 +154,33 @@ impl RpcBlock { }) } + pub fn new_with_custody_columns( + block_root: Option, + block: Arc>, + custody_columns: Vec>, + spec: &ChainSpec, + ) -> Result { + let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); + + if block.num_expected_blobs() > 0 && custody_columns.is_empty() { + // The number of required custody columns is out of scope here. + return Err(AvailabilityCheckError::MissingCustodyColumns); + } + // Treat empty data column lists as if they are missing. 
+ let inner = if !custody_columns.is_empty() { + RpcBlockInner::BlockAndCustodyColumns( + block, + RuntimeVariableList::new(custody_columns, spec.number_of_columns)?, + ) + } else { + RpcBlockInner::Block(block) + }; + Ok(Self { + block_root, + block: inner, + }) + } + pub fn new_from_fixed( block_root: Hash256, block: Arc>, @@ -153,25 +198,36 @@ impl RpcBlock { Self::new(Some(block_root), block, blobs) } + #[allow(clippy::type_complexity)] pub fn deconstruct( self, ) -> ( Hash256, Arc>, Option>, + Option>, ) { let block_root = self.block_root(); match self.block { - RpcBlockInner::Block(block) => (block_root, block, None), - RpcBlockInner::BlockAndBlobs(block, blobs) => (block_root, block, Some(blobs)), + RpcBlockInner::Block(block) => (block_root, block, None, None), + RpcBlockInner::BlockAndBlobs(block, blobs) => (block_root, block, Some(blobs), None), + RpcBlockInner::BlockAndCustodyColumns(block, data_columns) => { + (block_root, block, None, Some(data_columns)) + } } } pub fn n_blobs(&self) -> usize { match &self.block { - RpcBlockInner::Block(_) => 0, + RpcBlockInner::Block(_) | RpcBlockInner::BlockAndCustodyColumns(_, _) => 0, RpcBlockInner::BlockAndBlobs(_, blobs) => blobs.len(), } } + pub fn n_data_columns(&self) -> usize { + match &self.block { + RpcBlockInner::Block(_) | RpcBlockInner::BlockAndBlobs(_, _) => 0, + RpcBlockInner::BlockAndCustodyColumns(_, data_columns) => data_columns.len(), + } + } } /// A block that has gone through all pre-deneb block processing checks including block processing @@ -334,29 +390,46 @@ impl BlockImportData { } } -pub type GossipVerifiedBlockContents = - (GossipVerifiedBlock, Option>); +pub type GossipVerifiedBlockContents = ( + GossipVerifiedBlock, + Option>, + Option>, +); #[derive(Debug)] -pub enum BlockContentsError { - BlockError(BlockError), - BlobError(GossipBlobError), - SidecarError(BlobSidecarError), +pub enum BlockContentsError { + BlockError(BlockError), + BlobError(GossipBlobError), + 
BlobSidecarError(blob_sidecar::BlobSidecarError), + DataColumnError(GossipDataColumnError), + DataColumnSidecarError(data_column_sidecar::DataColumnSidecarError), } -impl From> for BlockContentsError { - fn from(value: BlockError) -> Self { +impl From for BlockContentsError { + fn from(value: BlockError) -> Self { Self::BlockError(value) } } -impl From> for BlockContentsError { - fn from(value: GossipBlobError) -> Self { +impl From for BlockContentsError { + fn from(value: GossipBlobError) -> Self { Self::BlobError(value) } } -impl std::fmt::Display for BlockContentsError { +impl From for BlockContentsError { + fn from(value: GossipDataColumnError) -> Self { + Self::DataColumnError(value) + } +} + +impl From for BlockContentsError { + fn from(value: data_column_sidecar::DataColumnSidecarError) -> Self { + Self::DataColumnSidecarError(value) + } +} + +impl std::fmt::Display for BlockContentsError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { BlockContentsError::BlockError(err) => { @@ -365,8 +438,14 @@ impl std::fmt::Display for BlockContentsError { BlockContentsError::BlobError(err) => { write!(f, "BlobError({})", err) } - BlockContentsError::SidecarError(err) => { - write!(f, "SidecarError({:?})", err) + BlockContentsError::BlobSidecarError(err) => { + write!(f, "BlobSidecarError({:?})", err) + } + BlockContentsError::DataColumnError(err) => { + write!(f, "DataColumnError({:?})", err) + } + BlockContentsError::DataColumnSidecarError(err) => { + write!(f, "DataColumnSidecarError({:?})", err) } } } @@ -383,7 +462,6 @@ pub trait AsBlock { fn as_block(&self) -> &SignedBeaconBlock; fn block_cloned(&self) -> Arc>; fn canonical_root(&self) -> Hash256; - fn into_rpc_block(self) -> RpcBlock; } impl AsBlock for Arc> { @@ -422,10 +500,6 @@ impl AsBlock for Arc> { fn canonical_root(&self) -> Hash256 { SignedBeaconBlock::canonical_root(self) } - - fn into_rpc_block(self) -> RpcBlock { - RpcBlock::new_without_blobs(None, self) - } } impl 
AsBlock for MaybeAvailableBlock { @@ -468,15 +542,6 @@ impl AsBlock for MaybeAvailableBlock { fn canonical_root(&self) -> Hash256 { self.as_block().canonical_root() } - - fn into_rpc_block(self) -> RpcBlock { - match self { - MaybeAvailableBlock::Available(available_block) => available_block.into_rpc_block(), - MaybeAvailableBlock::AvailabilityPending { block_root, block } => { - RpcBlock::new_without_blobs(Some(block_root), block) - } - } - } } impl AsBlock for AvailableBlock { @@ -515,21 +580,6 @@ impl AsBlock for AvailableBlock { fn canonical_root(&self) -> Hash256 { self.block().canonical_root() } - - fn into_rpc_block(self) -> RpcBlock { - // TODO(das): rpc data columns to be merged from `das` branch - let (block_root, block, blobs_opt, _data_columns_opt) = self.deconstruct(); - // Circumvent the constructor here, because an Available block will have already had - // consistency checks performed. - let inner = match blobs_opt { - None => RpcBlockInner::Block(block), - Some(blobs) => RpcBlockInner::BlockAndBlobs(block, blobs), - }; - RpcBlock { - block_root, - block: inner, - } - } } impl AsBlock for RpcBlock { @@ -555,19 +605,17 @@ impl AsBlock for RpcBlock { match &self.block { RpcBlockInner::Block(block) => block, RpcBlockInner::BlockAndBlobs(block, _) => block, + RpcBlockInner::BlockAndCustodyColumns(block, _) => block, } } fn block_cloned(&self) -> Arc> { match &self.block { RpcBlockInner::Block(block) => block.clone(), RpcBlockInner::BlockAndBlobs(block, _) => block.clone(), + RpcBlockInner::BlockAndCustodyColumns(block, _) => block.clone(), } } fn canonical_root(&self) -> Hash256 { self.as_block().canonical_root() } - - fn into_rpc_block(self) -> RpcBlock { - self - } } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index c86e35980ba..d38530b9049 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -39,8 +39,8 @@ use std::time::Duration; use store::{Error as 
StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use types::{ - BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, - Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, + FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -104,6 +104,7 @@ pub struct BeaconChainBuilder { kzg: Option>, task_executor: Option, validator_monitor_config: Option, + import_all_data_columns: bool, } impl @@ -145,6 +146,7 @@ where kzg: None, task_executor: None, validator_monitor_config: None, + import_all_data_columns: false, } } @@ -407,6 +409,11 @@ where .init_blob_info(genesis.beacon_block.slot()) .map_err(|e| format!("Failed to initialize genesis blob info: {:?}", e))?, ); + self.pending_io_batch.push( + store + .init_data_column_info(genesis.beacon_block.slot()) + .map_err(|e| format!("Failed to initialize genesis data column info: {:?}", e))?, + ); let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis) .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; @@ -571,6 +578,11 @@ where .init_blob_info(weak_subj_block.slot()) .map_err(|e| format!("Failed to initialize blob info: {:?}", e))?, ); + self.pending_io_batch.push( + store + .init_data_column_info(weak_subj_block.slot()) + .map_err(|e| format!("Failed to initialize data column info: {:?}", e))?, + ); // Store pruning checkpoint to prevent attempting to prune before the anchor state. self.pending_io_batch @@ -615,6 +627,12 @@ where self } + /// Sets whether to require and import all data columns when importing block. + pub fn import_all_data_columns(mut self, import_all_data_columns: bool) -> Self { + self.import_all_data_columns = import_all_data_columns; + self + } + /// Sets the `BeaconChain` event handler backend. 
/// /// For example, provide `ServerSentEventHandler` as a `handler`. @@ -965,8 +983,14 @@ where validator_monitor: RwLock::new(validator_monitor), genesis_backfill_slot, data_availability_checker: Arc::new( - DataAvailabilityChecker::new(slot_clock, self.kzg.clone(), store, &log, self.spec) - .map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?, + DataAvailabilityChecker::new( + slot_clock, + self.kzg.clone(), + store, + self.import_all_data_columns, + self.spec, + ) + .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), kzg: self.kzg.clone(), }; @@ -1268,7 +1292,7 @@ mod test { } for v in state.validators() { - let creds = v.withdrawal_credentials.as_bytes(); + let creds = v.withdrawal_credentials.as_slice(); assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, "first byte of withdrawal creds should be bls prefix" diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index a5d85d56032..4f92f5ec8f9 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -48,7 +48,7 @@ use fork_choice::{ }; use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use slog::{crit, debug, error, warn, Logger}; +use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use state_processing::AllCaches; use std::sync::Arc; @@ -236,12 +236,12 @@ impl CachedHead { pub struct CanonicalHead { /// Provides an in-memory representation of the non-finalized block tree and is used to run the /// fork choice algorithm and determine the canonical head. - pub fork_choice: CanonicalHeadRwLock>, + fork_choice: CanonicalHeadRwLock>, /// Provides values cached from a previous execution of `self.fork_choice.get_head`. 
/// /// Although `self.fork_choice` might be slightly more advanced that this value, it is safe to /// consider that these values represent the "canonical head" of the beacon chain. - pub cached_head: CanonicalHeadRwLock>, + cached_head: CanonicalHeadRwLock>, /// A lock used to prevent concurrent runs of `BeaconChain::recompute_head`. /// /// This lock **should not be made public**, it should only be used inside this module. @@ -383,11 +383,13 @@ impl CanonicalHead { /// Access a read-lock for fork choice. pub fn fork_choice_read_lock(&self) -> RwLockReadGuard> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_READ_LOCK_AQUIRE_TIMES); self.fork_choice.read() } /// Access a write-lock for fork choice. pub fn fork_choice_write_lock(&self) -> RwLockWriteGuard> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_WRITE_LOCK_AQUIRE_TIMES); self.fork_choice.write() } } @@ -1212,7 +1214,7 @@ fn detect_reorg( &metrics::FORK_CHOICE_REORG_DISTANCE, reorg_distance.as_u64() as i64, ); - warn!( + info!( log, "Beacon chain re-org"; "previous_head" => ?old_block_root, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index c908efa07c3..20edfbf31a4 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -84,6 +84,10 @@ pub struct ChainConfig { pub epochs_per_migration: u64, /// When set to true Light client server computes and caches state proofs for serving updates pub enable_light_client_server: bool, + /// The number of data columns to withhold / exclude from publishing when proposing a block. + pub malicious_withhold_count: usize, + /// Enable peer sampling on blocks. 
+ pub enable_sampling: bool, } impl Default for ChainConfig { @@ -115,6 +119,8 @@ impl Default for ChainConfig { always_prepare_payload: false, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, enable_light_client_server: false, + malicious_withhold_count: 0, + enable_sampling: false, } } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index ce5995a5581..470cee713fa 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -5,7 +5,7 @@ use crate::block_verification_types::{ use crate::data_availability_checker::overflow_lru_cache::DataAvailabilityCheckerInner; use crate::{BeaconChain, BeaconChainTypes, BeaconStore}; use kzg::Kzg; -use slog::{debug, error, Logger}; +use slog::{debug, error}; use slot_clock::SlotClock; use std::fmt; use std::fmt::Debug; @@ -15,18 +15,23 @@ use std::time::Duration; use task_executor::TaskExecutor; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ - BlobSidecarList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, - Slot, + BlobSidecarList, ChainSpec, DataColumnIdentifier, DataColumnSidecar, DataColumnSidecarList, + Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, Slot, }; mod error; mod overflow_lru_cache; mod state_lru_cache; -use crate::data_column_verification::{GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn}; +use crate::data_column_verification::{ + verify_kzg_for_data_column_list, CustodyDataColumn, GossipVerifiedDataColumn, + KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, +}; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; use types::non_zero_usize::new_non_zero_usize; +pub use self::overflow_lru_cache::DataColumnsToPublish; + /// The LRU Cache stores `PendingComponents` which can store up to /// 
`MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So /// the maximum size of a `PendingComponents` is ~ 0.787536 MB. Setting this @@ -65,8 +70,7 @@ pub struct DataAvailabilityChecker { availability_cache: Arc>, slot_clock: T::SlotClock, kzg: Option>, - log: Logger, - spec: ChainSpec, + spec: Arc, } /// This type is returned after adding a block / blob to the `DataAvailabilityChecker`. @@ -95,29 +99,39 @@ impl DataAvailabilityChecker { slot_clock: T::SlotClock, kzg: Option>, store: BeaconStore, - log: &Logger, + import_all_data_columns: bool, spec: ChainSpec, ) -> Result { - // TODO(das): support supernode or custom custody requirement - let custody_subnet_count = spec.custody_requirement as usize; + let spec = Arc::new(spec); + let custody_subnet_count = if import_all_data_columns { + spec.data_column_sidecar_subnet_count as usize + } else { + spec.custody_requirement as usize + }; + let custody_column_count = custody_subnet_count.saturating_mul(spec.data_columns_per_subnet()); - let overflow_cache = DataAvailabilityCheckerInner::new( + let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY, store, custody_column_count, spec.clone(), )?; Ok(Self { - availability_cache: Arc::new(overflow_cache), + availability_cache: Arc::new(inner), slot_clock, - log: log.clone(), kzg, spec, }) } + pub fn get_custody_columns_count(&self) -> usize { + self.availability_cache + .custody_subnet_count() + .saturating_mul(self.spec.data_columns_per_subnet()) + } + /// Checks if the block root is currenlty in the availability cache awaiting import because /// of missing components. pub fn get_execution_valid_block( @@ -143,6 +157,15 @@ impl DataAvailabilityChecker { }) } + /// Return the set of imported custody column indexes for `block_root`. Returns None if there is + /// no block component for `block_root`. 
+ pub fn imported_custody_column_indexes(&self, block_root: &Hash256) -> Option> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| components.get_cached_data_columns_indices()) + }) + } + /// Get a blob from the availability cache. pub fn get_blob( &self, @@ -151,6 +174,14 @@ impl DataAvailabilityChecker { self.availability_cache.peek_blob(blob_id) } + /// Get a data column from the availability cache. + pub fn get_data_column( + &self, + data_column_id: &DataColumnIdentifier, + ) -> Result>>, AvailabilityCheckError> { + self.availability_cache.peek_data_column(data_column_id) + } + /// Put a list of blobs received via RPC into the availability cache. This performs KZG /// verification on the blobs in the list. pub fn put_rpc_blobs( @@ -176,6 +207,39 @@ impl DataAvailabilityChecker { .put_kzg_verified_blobs(block_root, epoch, verified_blobs) } + /// Put a list of custody columns received via RPC into the availability cache. This performs KZG + /// verification on the blobs in the list. + #[allow(clippy::type_complexity)] + pub fn put_rpc_custody_columns( + &self, + block_root: Hash256, + epoch: Epoch, + custody_columns: DataColumnSidecarList, + ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> + { + let Some(kzg) = self.kzg.as_ref() else { + return Err(AvailabilityCheckError::KzgNotInitialized); + }; + + // TODO(das): report which column is invalid for proper peer scoring + // TODO(das): batch KZG verification here + let verified_custody_columns = custody_columns + .into_iter() + .map(|column| { + Ok(KzgVerifiedCustodyDataColumn::from_asserted_custody( + KzgVerifiedDataColumn::new(column, kzg).map_err(AvailabilityCheckError::Kzg)?, + )) + }) + .collect::, AvailabilityCheckError>>()?; + + self.availability_cache.put_kzg_verified_data_columns( + kzg, + block_root, + epoch, + verified_custody_columns, + ) + } + /// Check if we've cached other blobs for this block. 
If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. /// Otherwise cache the blob sidecar. @@ -192,20 +256,35 @@ impl DataAvailabilityChecker { ) } + /// Check if we've cached other data columns for this block. If it satisfies the custody requirement and we also + /// have a block cached, return the `Availability` variant triggering block import. + /// Otherwise cache the data column sidecar. + /// + /// This should only accept gossip verified data columns, so we should not have to worry about dupes. + #[allow(clippy::type_complexity)] pub fn put_gossip_data_columns( &self, slot: Slot, block_root: Hash256, gossip_data_columns: Vec>, - ) -> Result, AvailabilityCheckError> { + ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> + { + let Some(kzg) = self.kzg.as_ref() else { + return Err(AvailabilityCheckError::KzgNotInitialized); + }; let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); + let custody_columns = gossip_data_columns .into_iter() .map(|c| KzgVerifiedCustodyDataColumn::from_asserted_custody(c.into_inner())) .collect::>(); - self.availability_cache - .put_kzg_verified_data_columns(block_root, epoch, custody_columns) + self.availability_cache.put_kzg_verified_data_columns( + kzg, + block_root, + epoch, + custody_columns, + ) } /// Check if we have all the blobs for a block. 
Returns `Availability` which has information @@ -232,42 +311,66 @@ impl DataAvailabilityChecker { &self, block: RpcBlock, ) -> Result, AvailabilityCheckError> { - let (block_root, block, blobs) = block.deconstruct(); - match blobs { - None => { - if self.blobs_required_for_block(&block) { - Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block }) - } else { - Ok(MaybeAvailableBlock::Available(AvailableBlock { - block_root, - block, - blobs: None, - data_columns: None, - blobs_available_timestamp: None, - })) - } - } - Some(blob_list) => { - let verified_blobs = if self.blobs_required_for_block(&block) { - let kzg = self - .kzg - .as_ref() - .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - verify_kzg_for_blob_list(blob_list.iter(), kzg) - .map_err(AvailabilityCheckError::Kzg)?; - Some(blob_list) - } else { - None - }; + let (block_root, block, blobs, data_columns) = block.deconstruct(); + if self.blobs_required_for_block(&block) { + return if let Some(blob_list) = blobs.as_ref() { + let kzg = self + .kzg + .as_ref() + .ok_or(AvailabilityCheckError::KzgNotInitialized)?; + verify_kzg_for_blob_list(blob_list.iter(), kzg) + .map_err(AvailabilityCheckError::Kzg)?; Ok(MaybeAvailableBlock::Available(AvailableBlock { block_root, block, - blobs: verified_blobs, + blobs, + blobs_available_timestamp: None, data_columns: None, + spec: self.spec.clone(), + })) + } else { + Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block }) + }; + } + if self.data_columns_required_for_block(&block) { + return if let Some(data_column_list) = data_columns.as_ref() { + let kzg = self + .kzg + .as_ref() + .ok_or(AvailabilityCheckError::KzgNotInitialized)?; + verify_kzg_for_data_column_list( + data_column_list + .iter() + .map(|custody_column| custody_column.as_data_column()), + kzg, + ) + .map_err(AvailabilityCheckError::Kzg)?; + Ok(MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: None, blobs_available_timestamp: None, + data_columns: 
Some( + data_column_list + .into_iter() + .map(|d| d.clone_arc()) + .collect(), + ), + spec: self.spec.clone(), })) - } + } else { + Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block }) + }; } + + Ok(MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: None, + blobs_available_timestamp: None, + data_columns: None, + spec: self.spec.clone(), + })) } /// Checks if a vector of blocks are available. Returns a vector of `MaybeAvailableBlock` @@ -299,64 +402,108 @@ impl DataAvailabilityChecker { verify_kzg_for_blob_list(all_blobs.iter(), kzg)?; } + let all_data_columns = blocks + .iter() + .filter(|block| self.data_columns_required_for_block(block.as_block())) + // this clone is cheap as it's cloning an Arc + .filter_map(|block| block.custody_columns().cloned()) + .flatten() + .map(CustodyDataColumn::into_inner) + .collect::>(); + let all_data_columns = + RuntimeVariableList::from_vec(all_data_columns, self.spec.number_of_columns); + + // verify kzg for all data columns at once + if !all_data_columns.is_empty() { + let kzg = self + .kzg + .as_ref() + .ok_or(AvailabilityCheckError::KzgNotInitialized)?; + verify_kzg_for_data_column_list(all_data_columns.iter(), kzg)?; + } + for block in blocks { - let (block_root, block, blobs) = block.deconstruct(); - match blobs { - None => { - if self.blobs_required_for_block(&block) { - results.push(MaybeAvailableBlock::AvailabilityPending { block_root, block }) - } else { - results.push(MaybeAvailableBlock::Available(AvailableBlock { - block_root, - block, - blobs: None, - data_columns: None, - blobs_available_timestamp: None, - })) - } - } - Some(blob_list) => { - let verified_blobs = if self.blobs_required_for_block(&block) { - Some(blob_list) - } else { - None - }; - // already verified kzg for all blobs - results.push(MaybeAvailableBlock::Available(AvailableBlock { + let (block_root, block, blobs, data_columns) = block.deconstruct(); + + let maybe_available_block = if 
self.blobs_required_for_block(&block) { + if blobs.is_some() { + MaybeAvailableBlock::Available(AvailableBlock { block_root, block, - blobs: verified_blobs, + blobs, + blobs_available_timestamp: None, data_columns: None, + spec: self.spec.clone(), + }) + } else { + MaybeAvailableBlock::AvailabilityPending { block_root, block } + } + } else if self.data_columns_required_for_block(&block) { + if data_columns.is_some() { + MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: None, + data_columns: data_columns.map(|data_columns| { + data_columns.into_iter().map(|d| d.into_inner()).collect() + }), blobs_available_timestamp: None, - })) + spec: self.spec.clone(), + }) + } else { + MaybeAvailableBlock::AvailabilityPending { block_root, block } } - } + } else { + MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: None, + data_columns: None, + blobs_available_timestamp: None, + spec: self.spec.clone(), + }) + }; + + results.push(maybe_available_block); } Ok(results) } /// Determines the blob requirements for a block. If the block is pre-deneb, no blobs are required. - /// If the block's epoch is from prior to the data availability boundary, no blobs are required. + /// If the epoch is from prior to the data availability boundary, no blobs are required. + pub fn blobs_required_for_epoch(&self, epoch: Epoch) -> bool { + self.da_check_required_for_epoch(epoch) && !self.spec.is_peer_das_enabled_for_epoch(epoch) + } + + /// Determines the data column requirements for an epoch. + /// - If the epoch is pre-peerdas, no data columns are required. + /// - If the epoch is from prior to the data availability boundary, no data columns are required. 
+ pub fn data_columns_required_for_epoch(&self, epoch: Epoch) -> bool { + self.da_check_required_for_epoch(epoch) && self.spec.is_peer_das_enabled_for_epoch(epoch) + } + + /// See `Self::blobs_required_for_epoch` fn blobs_required_for_block(&self, block: &SignedBeaconBlock) -> bool { - block.num_expected_blobs() > 0 && self.da_check_required_for_epoch(block.epoch()) + block.num_expected_blobs() > 0 && self.blobs_required_for_epoch(block.epoch()) + } + + /// See `Self::data_columns_required_for_epoch` + fn data_columns_required_for_block(&self, block: &SignedBeaconBlock) -> bool { + block.num_expected_blobs() > 0 && self.data_columns_required_for_epoch(block.epoch()) } /// The epoch at which we require a data availability check in block processing. /// `None` if the `Deneb` fork is disabled. pub fn data_availability_boundary(&self) -> Option { - self.spec.deneb_fork_epoch.and_then(|fork_epoch| { - self.slot_clock - .now() - .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) - .map(|current_epoch| { - std::cmp::max( - fork_epoch, - current_epoch - .saturating_sub(self.spec.min_epochs_for_blob_sidecars_requests), - ) - }) - }) + let fork_epoch = self.spec.deneb_fork_epoch?; + let current_slot = self.slot_clock.now()?; + Some(std::cmp::max( + fork_epoch, + current_slot + .epoch(T::EthSpec::slots_per_epoch()) + .saturating_sub(self.spec.min_epochs_for_blob_sidecars_requests), + )) } /// Returns true if the given epoch lies within the da boundary and false otherwise. 
@@ -365,18 +512,6 @@ impl DataAvailabilityChecker { .map_or(false, |da_epoch| block_epoch >= da_epoch) } - pub fn da_check_required_for_current_epoch(&self) -> bool { - let Some(current_slot) = self.slot_clock.now_or_genesis() else { - error!( - self.log, - "Failed to read slot clock when checking for missing blob ids" - ); - return false; - }; - - self.da_check_required_for_epoch(current_slot.epoch(T::EthSpec::slots_per_epoch())) - } - /// Returns `true` if the current epoch is greater than or equal to the `Deneb` epoch. pub fn is_deneb(&self) -> bool { self.slot_clock.now().map_or(false, |slot| { @@ -495,6 +630,7 @@ pub struct AvailableBlock { data_columns: Option>, /// Timestamp at which this block first became available (UNIX timestamp, time since 1970). blobs_available_timestamp: Option, + pub spec: Arc, } impl AvailableBlock { @@ -503,6 +639,7 @@ impl AvailableBlock { block: Arc>, blobs: Option>, data_columns: Option>, + spec: Arc, ) -> Self { Self { block_root, @@ -510,6 +647,7 @@ impl AvailableBlock { blobs, data_columns, blobs_available_timestamp: None, + spec, } } @@ -528,6 +666,10 @@ impl AvailableBlock { self.blobs_available_timestamp } + pub fn data_columns(&self) -> Option<&DataColumnSidecarList> { + self.data_columns.as_ref() + } + #[allow(clippy::type_complexity)] pub fn deconstruct( self, @@ -543,6 +685,7 @@ impl AvailableBlock { blobs, data_columns, blobs_available_timestamp: _, + .. 
} = self; (block_root, block, blobs, data_columns) } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index bb92b0b6322..79793d6dc29 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -14,7 +14,9 @@ pub enum Error { Unexpected, SszTypes(ssz_types::Error), MissingBlobs, + MissingCustodyColumns, BlobIndexInvalid(u64), + DataColumnIndexInvalid(u64), StoreError(store::Error), DecodeError(ssz::DecodeError), ParentStateMissing(Hash256), @@ -37,6 +39,7 @@ impl Error { Error::KzgNotInitialized | Error::SszTypes(_) | Error::MissingBlobs + | Error::MissingCustodyColumns | Error::StoreError(_) | Error::DecodeError(_) | Error::Unexpected @@ -47,6 +50,7 @@ impl Error { | Error::SlotClockError => ErrorCategory::Internal, Error::Kzg(_) | Error::BlobIndexInvalid(_) + | Error::DataColumnIndexInvalid(_) | Error::KzgCommitmentMismatch { .. 
} | Error::KzgVerificationFailed => ErrorCategory::Malicious, } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 6c9964bdf86..36c5a9359dd 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -6,26 +6,36 @@ use crate::block_verification_types::{ }; use crate::data_availability_checker::{Availability, AvailabilityCheckError}; use crate::data_column_verification::KzgVerifiedCustodyDataColumn; +use crate::metrics; use crate::BeaconChainTypes; +use kzg::Kzg; use lru::LruCache; use parking_lot::RwLock; -use ssz_derive::{Decode, Encode}; use ssz_types::{FixedVector, VariableList}; +use std::collections::HashSet; use std::num::NonZeroUsize; use std::sync::Arc; use types::blob_sidecar::BlobIdentifier; -use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; +use types::{ + BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, + DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, +}; + +pub type DataColumnsToPublish = Option>; /// This represents the components of a partially available block /// /// The blobs are all gossip and kzg verified. /// The block has completed all verifications except the availability check. -#[derive(Encode, Decode, Clone)] +/// TODO(das): this struct can potentially be reafactored as blobs and data columns are mutually +/// exclusive and this could simplify `is_importable`. 
+#[derive(Clone)] pub struct PendingComponents { pub block_root: Hash256, pub verified_blobs: FixedVector>, E::MaxBlobsPerBlock>, pub verified_data_columns: Vec>, pub executed_block: Option>, + pub reconstruction_started: bool, } pub enum BlockImportRequirement { @@ -50,10 +60,11 @@ impl PendingComponents { pub fn get_cached_data_column( &self, data_column_index: u64, - ) -> Option<&KzgVerifiedCustodyDataColumn> { + ) -> Option>> { self.verified_data_columns .iter() .find(|d| d.index() == data_column_index) + .map(|d| d.clone_arc()) } /// Returns a mutable reference to the cached block. @@ -109,6 +120,14 @@ impl PendingComponents { self.verified_data_columns.len() } + /// Returns the indices of cached custody columns + pub fn get_cached_data_columns_indices(&self) -> Vec { + self.verified_data_columns + .iter() + .map(|d| d.index()) + .collect() + } + /// Inserts a block into the cache. pub fn insert_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { *self.get_cached_block_mut() = Some(block) @@ -160,12 +179,14 @@ impl PendingComponents { fn merge_data_columns>>( &mut self, kzg_verified_data_columns: I, - ) { + ) -> Result<(), AvailabilityCheckError> { for data_column in kzg_verified_data_columns { + // TODO(das): Add equivalent checks for data columns if necessary if !self.data_column_exists(data_column.index()) { self.verified_data_columns.push(data_column); } } + Ok(()) } /// Inserts a new block and revalidates the existing blobs against it. @@ -208,6 +229,7 @@ impl PendingComponents { verified_blobs: FixedVector::default(), verified_data_columns: vec![], executed_block: None, + reconstruction_started: false, } } @@ -220,6 +242,7 @@ impl PendingComponents { pub fn make_available( self, block_import_requirement: BlockImportRequirement, + spec: &Arc, recover: R, ) -> Result, AvailabilityCheckError> where @@ -232,6 +255,7 @@ impl PendingComponents { verified_blobs, verified_data_columns, executed_block, + .. 
} = self; let blobs_available_timestamp = verified_blobs @@ -249,7 +273,6 @@ impl PendingComponents { let num_blobs_expected = diet_executed_block.num_blobs_expected(); let Some(verified_blobs) = verified_blobs .into_iter() - .cloned() .map(|b| b.map(|b| b.to_blob())) .take(num_blobs_expected) .collect::>>() @@ -281,12 +304,17 @@ impl PendingComponents { blobs, data_columns, blobs_available_timestamp, + spec: spec.clone(), }; Ok(Availability::Available(Box::new( AvailableExecutedBlock::new(available_block, import_data, payload_verification_outcome), ))) } + pub fn reconstruction_started(&mut self) { + self.reconstruction_started = true; + } + /// Returns the epoch of the block if it is cached, otherwise returns the epoch of the first blob. pub fn epoch(&self) -> Option { self.executed_block @@ -303,6 +331,15 @@ impl PendingComponents { }); } } + + if let Some(kzg_verified_data_column) = self.verified_data_columns.first() { + let epoch = kzg_verified_data_column + .as_data_column() + .slot() + .epoch(E::slots_per_epoch()); + return Some(epoch); + } + None }) } @@ -318,7 +355,7 @@ pub struct DataAvailabilityCheckerInner { state_cache: StateLRUCache, /// The number of data columns the node is custodying. 
custody_column_count: usize, - spec: ChainSpec, + spec: Arc, } impl DataAvailabilityCheckerInner { @@ -326,7 +363,7 @@ impl DataAvailabilityCheckerInner { capacity: NonZeroUsize, beacon_store: BeaconStore, custody_column_count: usize, - spec: ChainSpec, + spec: Arc, ) -> Result { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), @@ -336,6 +373,10 @@ impl DataAvailabilityCheckerInner { }) } + pub fn custody_subnet_count(&self) -> usize { + self.custody_column_count + } + /// Returns true if the block root is known, without altering the LRU ordering pub fn get_execution_valid_block( &self, @@ -369,6 +410,22 @@ impl DataAvailabilityCheckerInner { } } + /// Fetch a data column from the cache without affecting the LRU ordering + pub fn peek_data_column( + &self, + data_column_id: &DataColumnIdentifier, + ) -> Result>>, AvailabilityCheckError> { + if let Some(pending_components) = self.critical.read().peek(&data_column_id.block_root) { + Ok(pending_components + .verified_data_columns + .iter() + .find(|data_column| data_column.as_data_column().index == data_column_id.index) + .map(|data_column| data_column.clone_arc())) + } else { + Ok(None) + } + } + pub fn peek_pending_components>) -> R>( &self, block_root: &Hash256, @@ -391,6 +448,28 @@ impl DataAvailabilityCheckerInner { } } + /// Potentially trigger reconstruction if: + /// - Our custody requirement is all columns + /// - We >= 50% of columns, but not all columns + fn should_reconstruct( + &self, + block_import_requirement: &BlockImportRequirement, + pending_components: &PendingComponents, + ) -> bool { + let BlockImportRequirement::CustodyColumns(num_expected_columns) = block_import_requirement + else { + return false; + }; + + let num_of_columns = self.spec.number_of_columns; + let has_missing_columns = pending_components.verified_data_columns.len() < num_of_columns; + + has_missing_columns + && !pending_components.reconstruction_started + && *num_expected_columns == num_of_columns + && 
pending_components.verified_data_columns.len() >= num_of_columns / 2 + } + pub fn put_kzg_verified_blobs>>( &self, block_root: Hash256, @@ -421,7 +500,7 @@ impl DataAvailabilityCheckerInner { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components.make_available(block_import_requirement, |diet_block| { + pending_components.make_available(block_import_requirement, &self.spec, |diet_block| { self.state_cache.recover_pending_executed_block(diet_block) }) } else { @@ -430,16 +509,17 @@ impl DataAvailabilityCheckerInner { } } - // TODO(das): rpc code paths to be implemented. - #[allow(dead_code)] + #[allow(clippy::type_complexity)] pub fn put_kzg_verified_data_columns< I: IntoIterator>, >( &self, + kzg: &Kzg, block_root: Hash256, epoch: Epoch, kzg_verified_data_columns: I, - ) -> Result, AvailabilityCheckError> { + ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> + { let mut write_lock = self.critical.write(); // Grab existing entry or create a new entry. @@ -449,19 +529,68 @@ impl DataAvailabilityCheckerInner { .unwrap_or_else(|| PendingComponents::empty(block_root)); // Merge in the data columns. 
- pending_components.merge_data_columns(kzg_verified_data_columns); + pending_components.merge_data_columns(kzg_verified_data_columns)?; let block_import_requirement = self.block_import_requirement(epoch)?; + + // Potentially trigger reconstruction if: + // - Our custody requirement is all columns + // - We >= 50% of columns + let data_columns_to_publish = + if self.should_reconstruct(&block_import_requirement, &pending_components) { + pending_components.reconstruction_started(); + + let timer = metrics::start_timer(&metrics::DATA_AVAILABILITY_RECONSTRUCTION_TIME); + + let existing_column_indices = pending_components + .verified_data_columns + .iter() + .map(|d| d.index()) + .collect::>(); + + // Will only return an error if: + // - < 50% of columns + // - There are duplicates + let all_data_columns = KzgVerifiedCustodyDataColumn::reconstruct_columns( + kzg, + pending_components.verified_data_columns.as_slice(), + &self.spec, + )?; + + let data_columns_to_publish = all_data_columns + .iter() + .filter(|d| !existing_column_indices.contains(&d.index())) + .map(|d| d.clone_arc()) + .collect::>(); + + pending_components.verified_data_columns = all_data_columns; + + metrics::stop_timer(timer); + metrics::inc_counter_by( + &metrics::DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS, + data_columns_to_publish.len() as u64, + ); + + Some(data_columns_to_publish) + } else { + None + }; + if pending_components.is_available(&block_import_requirement) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components.make_available(block_import_requirement, |diet_block| { - self.state_cache.recover_pending_executed_block(diet_block) - }) + pending_components + .make_available(block_import_requirement, &self.spec, |diet_block| { + self.state_cache.recover_pending_executed_block(diet_block) + }) + .map(|availability| (availability, data_columns_to_publish)) } else { write_lock.put(block_root, pending_components); - 
Ok(Availability::MissingComponents(block_root)) + Ok(( + Availability::MissingComponents(block_root), + data_columns_to_publish, + )) } } @@ -495,7 +624,7 @@ impl DataAvailabilityCheckerInner { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components.make_available(block_import_requirement, |diet_block| { + pending_components.make_available(block_import_requirement, &self.spec, |diet_block| { self.state_cache.recover_pending_executed_block(diet_block) }) } else { @@ -754,7 +883,7 @@ mod test { let log = test_logger(); let chain_db_path = tempdir().expect("should get temp dir"); let harness = get_deneb_chain(log.clone(), &chain_db_path).await; - let spec = harness.spec.clone(); + let spec = Arc::new(harness.spec.clone()); let test_store = harness.chain.store.clone(); let capacity_non_zero = new_non_zero_usize(capacity); let cache = Arc::new( @@ -1025,7 +1154,9 @@ mod pending_components_tests { use rand::SeedableRng; use state_processing::ConsensusContext; use types::test_utils::TestRandom; - use types::{BeaconState, ForkName, MainnetEthSpec, SignedBeaconBlock, Slot}; + use types::{ + BeaconState, FixedBytesExtended, ForkName, MainnetEthSpec, SignedBeaconBlock, Slot, + }; type E = MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index cf6eb669d5e..03e3289118d 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -70,11 +70,11 @@ impl DietAvailabilityPendingExecutedBlock { pub struct StateLRUCache { states: RwLock>>, store: BeaconStore, - spec: ChainSpec, + spec: Arc, } impl StateLRUCache { - pub fn new(store: BeaconStore, spec: ChainSpec) -> Self { + pub fn new(store: BeaconStore, spec: Arc) -> Self { Self { states: 
RwLock::new(LruCache::new(STATE_LRU_CAPACITY_NON_ZERO)), store, diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index fa31d6f2e8e..f4a5feaee2a 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -2,7 +2,8 @@ use crate::block_verification::{ cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, BlockSlashInfo, }; -use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::kzg_utils::{reconstruct_data_columns, validate_data_columns}; +use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; use derivative::Derivative; use fork_choice::ProtoBlock; use kzg::{Error as KzgError, Kzg}; @@ -11,6 +12,7 @@ use slasher::test_utils::E; use slog::debug; use slot_clock::SlotClock; use ssz_derive::{Decode, Encode}; +use std::iter; use std::sync::Arc; use types::data_column_sidecar::{ColumnIndex, DataColumnIdentifier}; use types::{ @@ -175,10 +177,19 @@ impl GossipVerifiedDataColumn { pub fn id(&self) -> DataColumnIdentifier { DataColumnIdentifier { block_root: self.block_root, - index: self.data_column.data_column_index(), + index: self.data_column.index(), } } + pub fn as_data_column(&self) -> &DataColumnSidecar { + self.data_column.as_data_column() + } + + /// This is cheap as we're calling clone on an Arc + pub fn clone_data_column(&self) -> Arc> { + self.data_column.clone_data_column() + } + pub fn block_root(&self) -> Hash256 { self.block_root } @@ -187,6 +198,10 @@ impl GossipVerifiedDataColumn { self.data_column.data.slot() } + pub fn index(&self) -> ColumnIndex { + self.data_column.data.index + } + pub fn signed_block_header(&self) -> SignedBeaconBlockHeader { self.data_column.data.signed_block_header.clone() } @@ -219,7 +234,39 @@ impl KzgVerifiedDataColumn { self.data.clone() } - pub fn data_column_index(&self) -> u64 { + pub fn 
index(&self) -> ColumnIndex { + self.data.index + } +} + +pub type CustodyDataColumnList = RuntimeVariableList>; + +/// Data column that we must custody +#[derive(Debug, Derivative, Clone, Encode, Decode)] +#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[ssz(struct_behaviour = "transparent")] +pub struct CustodyDataColumn { + data: Arc>, +} + +impl CustodyDataColumn { + /// Mark a column as custody column. Caller must ensure that our current custody requirements + /// include this column + pub fn from_asserted_custody(data: Arc>) -> Self { + Self { data } + } + + pub fn into_inner(self) -> Arc> { + self.data + } + pub fn as_data_column(&self) -> &Arc> { + &self.data + } + /// This is cheap as we're calling clone on an Arc + pub fn clone_arc(&self) -> Arc> { + self.data.clone() + } + pub fn index(&self) -> u64 { self.data.index } } @@ -241,13 +288,54 @@ impl KzgVerifiedCustodyDataColumn { } } - pub fn index(&self) -> ColumnIndex { - self.data.index + /// Verify a column already marked as custody column + pub fn new(data_column: CustodyDataColumn, kzg: &Kzg) -> Result { + verify_kzg_for_data_column(data_column.clone_arc(), kzg)?; + Ok(Self { + data: data_column.data, + }) + } + + pub fn reconstruct_columns( + kzg: &Kzg, + partial_set_of_columns: &[Self], + spec: &ChainSpec, + ) -> Result, KzgError> { + // Will only return an error if: + // - < 50% of columns + // - There are duplicates + let all_data_columns = reconstruct_data_columns( + kzg, + &partial_set_of_columns + .iter() + .map(|d| d.clone_arc()) + .collect::>(), + spec, + )?; + + Ok(all_data_columns + .into_iter() + .map(|d| { + KzgVerifiedCustodyDataColumn::from_asserted_custody(KzgVerifiedDataColumn { + data: d, + }) + }) + .collect::>()) } pub fn into_inner(self) -> Arc> { self.data } + + pub fn as_data_column(&self) -> &DataColumnSidecar { + &self.data + } + pub fn clone_arc(&self) -> Arc> { + self.data.clone() + } + pub fn index(&self) -> ColumnIndex { + self.data.index + } } /// Complete kzg 
verification for a `DataColumnSidecar`. @@ -255,9 +343,10 @@ impl KzgVerifiedCustodyDataColumn { /// Returns an error if the kzg verification check fails. pub fn verify_kzg_for_data_column( data_column: Arc>, - _kzg: &Kzg, + kzg: &Kzg, ) -> Result, KzgError> { - // TODO(das): KZG verification to be implemented + let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES); + validate_data_columns(kzg, iter::once(&data_column))?; Ok(KzgVerifiedDataColumn { data: data_column }) } @@ -267,13 +356,14 @@ pub fn verify_kzg_for_data_column( /// Note: This function should be preferred over calling `verify_kzg_for_data_column` /// in a loop since this function kzg verifies a list of data columns more efficiently. pub fn verify_kzg_for_data_column_list<'a, E: EthSpec, I>( - _data_column_iter: I, - _kzg: &'a Kzg, + data_column_iter: I, + kzg: &'a Kzg, ) -> Result<(), KzgError> where I: Iterator>> + Clone, { - // TODO(das): implement KZG verification + let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES); + validate_data_columns(kzg, data_column_iter)?; Ok(()) } @@ -292,6 +382,7 @@ pub fn validate_data_column_sidecar_for_gossip( let parent_block = verify_parent_block_and_finalized_descendant(data_column.clone(), chain)?; verify_slot_higher_than_parent(&parent_block, column_slot)?; verify_proposer_and_signature(&data_column, &parent_block, chain)?; + let kzg = chain .kzg .clone() @@ -339,9 +430,11 @@ fn verify_is_first_sidecar( fn verify_column_inclusion_proof( data_column: &DataColumnSidecar, ) -> Result<(), GossipDataColumnError> { + let _timer = metrics::start_timer(&metrics::DATA_COLUMN_SIDECAR_INCLUSION_PROOF_VERIFICATION); if !data_column.verify_inclusion_proof() { return Err(GossipDataColumnError::InvalidInclusionProof); } + Ok(()) } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 1e3d67f9d7a..4db3f0ebb41 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ 
b/beacon_node/beacon_chain/src/errors.rs @@ -77,8 +77,6 @@ pub enum BeaconChainError { AttesterSlashingValidationError(AttesterSlashingValidationError), BlsExecutionChangeValidationError(BlsExecutionChangeValidationError), MissingFinalizedStateRoot(Slot), - /// Returned when an internal check fails, indicating corrupt data. - InvariantViolated(String), SszTypesError(SszTypesError), NoProposerForSlot(Slot), CanonicalHeadLockTimeout, @@ -216,10 +214,12 @@ pub enum BeaconChainError { InconsistentFork(InconsistentFork), ProposerHeadForkChoiceError(fork_choice::Error), UnableToPublish, + UnableToBuildColumnSidecar(String), AvailabilityCheckError(AvailabilityCheckError), LightClientError(LightClientError), UnsupportedFork, MilhouseError(MilhouseError), + EmptyRpcCustodyColumns, AttestationError(AttestationError), AttestationCommitteeIndexNotSet, } diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index b4005f22fd1..9e1bcbe6fa8 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -685,8 +685,7 @@ fn is_candidate_block(block: &Eth1Block, period_start: u64, spec: &ChainSpec) -> #[cfg(test)] mod test { use super::*; - use environment::null_logger; - use types::{DepositData, MinimalEthSpec, Signature}; + use types::{DepositData, FixedBytesExtended, MinimalEthSpec, Signature}; type E = MinimalEthSpec; @@ -743,6 +742,7 @@ mod test { mod eth1_chain_json_backend { use super::*; use eth1::DepositLog; + use logging::test_logger; use types::{test_utils::generate_deterministic_keypair, MainnetEthSpec}; fn get_eth1_chain() -> Eth1Chain, E> { @@ -750,7 +750,7 @@ mod test { ..Eth1Config::default() }; - let log = null_logger().unwrap(); + let log = test_logger(); Eth1Chain::new( CachingEth1Backend::new(eth1_config, log, MainnetEthSpec::default_spec()).unwrap(), ) diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 
a6e0d247dc2..b9b98bfbc00 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -62,7 +62,7 @@ impl PayloadNotifier { block: Arc>, state: &BeaconState, notify_execution_layer: NotifyExecutionLayer, - ) -> Result> { + ) -> Result { let payload_verification_status = if is_execution_enabled(state, block.message().body()) { // Perform the initial stages of payload verification. // @@ -110,9 +110,7 @@ impl PayloadNotifier { }) } - pub async fn notify_new_payload( - self, - ) -> Result> { + pub async fn notify_new_payload(self) -> Result { if let Some(precomputed_status) = self.payload_verification_status { Ok(precomputed_status) } else { @@ -133,7 +131,7 @@ impl PayloadNotifier { async fn notify_new_payload<'a, T: BeaconChainTypes>( chain: &Arc>, block: BeaconBlockRef<'a, T::EthSpec>, -) -> Result> { +) -> Result { let execution_layer = chain .execution_layer .as_ref() @@ -237,7 +235,7 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( chain: &Arc>, block: BeaconBlockRef<'a, T::EthSpec>, allow_optimistic_import: AllowOptimisticImport, -) -> Result<(), BlockError> { +) -> Result<(), BlockError> { let spec = &chain.spec; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); let execution_payload = block.execution_payload()?; @@ -335,7 +333,7 @@ pub fn validate_execution_payload_for_gossip( parent_block: &ProtoBlock, block: BeaconBlockRef<'_, T::EthSpec>, chain: &BeaconChain, -) -> Result<(), BlockError> { +) -> Result<(), BlockError> { // Only apply this validation if this is a Bellatrix beacon block. if let Ok(execution_payload) = block.body().execution_payload() { // This logic should match `is_execution_enabled`. 
We use only the execution block hash of @@ -505,7 +503,7 @@ where return Ok(BlockProposalContentsType::Full( BlockProposalContents::Payload { payload: FullPayload::default_at_fork(fork)?, - block_value: Uint256::zero(), + block_value: Uint256::ZERO, }, )); } @@ -523,7 +521,7 @@ where return Ok(BlockProposalContentsType::Full( BlockProposalContents::Payload { payload: FullPayload::default_at_fork(fork)?, - block_value: Uint256::zero(), + block_value: Uint256::ZERO, }, )); } diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs index 71e2473cdcf..9c06ef33a18 100644 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -105,7 +105,7 @@ impl SszHeadTracker { mod test { use super::*; use ssz::{Decode, Encode}; - use types::{BeaconBlock, EthSpec, MainnetEthSpec}; + use types::{BeaconBlock, EthSpec, FixedBytesExtended, MainnetEthSpec}; type E = MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index aa2fac2afc8..1372211b175 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -9,8 +9,9 @@ use state_processing::{ use std::borrow::Cow; use std::iter; use std::time::Duration; +use store::metadata::DataColumnInfo; use store::{chunked_vector::BlockRoots, AnchorInfo, BlobInfo, ChunkWriter, KeyValueStore}; -use types::{Hash256, Slot}; +use types::{FixedBytesExtended, Hash256, Slot}; /// Use a longer timeout on the pubkey cache. /// @@ -66,6 +67,7 @@ impl BeaconChain { .get_anchor_info() .ok_or(HistoricalBlockError::NoAnchorInfo)?; let blob_info = self.store.get_blob_info(); + let data_column_info = self.store.get_data_column_info(); // Take all blocks with slots less than the oldest block slot. 
let num_relevant = blocks.partition_point(|available_block| { @@ -90,18 +92,27 @@ impl BeaconChain { return Ok(0); } - let n_blobs_lists_to_import = blocks_to_import + // Blobs are stored per block, and data columns are each stored individually + let n_blob_ops_per_block = if self.spec.is_peer_das_scheduled() { + self.data_availability_checker.get_custody_columns_count() + } else { + 1 + }; + + let blob_batch_size = blocks_to_import .iter() .filter(|available_block| available_block.blobs().is_some()) - .count(); + .count() + .saturating_mul(n_blob_ops_per_block); let mut expected_block_root = anchor_info.oldest_block_parent; let mut prev_block_slot = anchor_info.oldest_block_slot; let mut chunk_writer = ChunkWriter::::new(&self.store.cold_db, prev_block_slot.as_usize())?; let mut new_oldest_blob_slot = blob_info.oldest_blob_slot; + let mut new_oldest_data_column_slot = data_column_info.oldest_data_column_slot; - let mut blob_batch = Vec::with_capacity(n_blobs_lists_to_import); + let mut blob_batch = Vec::with_capacity(blob_batch_size); let mut cold_batch = Vec::with_capacity(blocks_to_import.len()); let mut hot_batch = Vec::with_capacity(blocks_to_import.len()); let mut signed_blocks = Vec::with_capacity(blocks_to_import.len()); @@ -129,11 +140,10 @@ impl BeaconChain { .blobs_as_kv_store_ops(&block_root, blobs, &mut blob_batch); } // Store the data columns too - if let Some(_data_columns) = maybe_data_columns { - // TODO(das): depends on https://github.com/sigp/lighthouse/pull/6073 - // new_oldest_data_column_slot = Some(block.slot()); - // self.store - // .data_columns_as_kv_store_ops(&block_root, data_columns, &mut blob_batch); + if let Some(data_columns) = maybe_data_columns { + new_oldest_data_column_slot = Some(block.slot()); + self.store + .data_columns_as_kv_store_ops(&block_root, data_columns, &mut blob_batch); } // Store block roots, including at all skip slots in the freezer DB. 
@@ -212,7 +222,7 @@ impl BeaconChain { self.store.hot_db.do_atomically(hot_batch)?; self.store.cold_db.do_atomically(cold_batch)?; - let mut anchor_and_blob_batch = Vec::with_capacity(2); + let mut anchor_and_blob_batch = Vec::with_capacity(3); // Update the blob info. if new_oldest_blob_slot != blob_info.oldest_blob_slot { @@ -228,6 +238,19 @@ impl BeaconChain { } } + // Update the data column info. + if new_oldest_data_column_slot != data_column_info.oldest_data_column_slot { + if let Some(oldest_data_column_slot) = new_oldest_data_column_slot { + let new_data_column_info = DataColumnInfo { + oldest_data_column_slot: Some(oldest_data_column_slot), + }; + anchor_and_blob_batch.push( + self.store + .compare_and_set_data_column_info(data_column_info, new_data_column_info)?, + ); + } + } + // Update the anchor. let new_anchor = AnchorInfo { oldest_block_slot: prev_block_slot, diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index b554133875a..55c1ee9e980 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -1,5 +1,15 @@ -use kzg::{Blob as KzgBlob, Error as KzgError, Kzg}; -use types::{Blob, EthSpec, Hash256, KzgCommitment, KzgProof}; +use kzg::{ + Blob as KzgBlob, Bytes48, CellRef as KzgCellRef, CellsAndKzgProofs, Error as KzgError, Kzg, +}; +use rayon::prelude::*; +use ssz_types::FixedVector; +use std::sync::Arc; +use types::beacon_block_body::KzgCommitments; +use types::data_column_sidecar::{Cell, DataColumn, DataColumnSidecarError}; +use types::{ + Blob, BlobsList, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, + Hash256, KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, +}; /// Converts a blob ssz List object to an array to be used with the kzg /// crypto library. 
@@ -7,6 +17,15 @@ fn ssz_blob_to_crypto_blob(blob: &Blob) -> Result(cell: &Cell) -> Result { + let cell_bytes: &[u8] = cell.as_ref(); + Ok(cell_bytes + .try_into() + .expect("expected cell to have size {BYTES_PER_CELL}. This should be guaranteed by the `FixedVector type")) +} + /// Validate a single blob-commitment-proof triplet from a `BlobSidecar`. pub fn validate_blob( kzg: &Kzg, @@ -19,6 +38,50 @@ pub fn validate_blob( kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) } +/// Validate a batch of `DataColumnSidecar`. +pub fn validate_data_columns<'a, E: EthSpec, I>( + kzg: &Kzg, + data_column_iter: I, +) -> Result<(), KzgError> +where + I: Iterator>> + Clone, +{ + let cells = data_column_iter + .clone() + .flat_map(|data_column| data_column.column.iter().map(ssz_cell_to_crypto_cell::)) + .collect::, KzgError>>()?; + + let proofs = data_column_iter + .clone() + .flat_map(|data_column| { + data_column + .kzg_proofs + .iter() + .map(|&proof| Bytes48::from(proof)) + }) + .collect::>(); + + let column_indices = data_column_iter + .clone() + .flat_map(|data_column| { + let col_index = data_column.index; + data_column.column.iter().map(move |_| col_index) + }) + .collect::>(); + + let commitments = data_column_iter + .clone() + .flat_map(|data_column| { + data_column + .kzg_commitments + .iter() + .map(|&commitment| Bytes48::from(commitment)) + }) + .collect::>(); + + kzg.verify_cell_proof_batch(&cells, &proofs, column_indices, &commitments) +} + /// Validate a batch of blob-commitment-proof triplets from multiple `BlobSidecars`. pub fn validate_blobs( kzg: &Kzg, @@ -76,3 +139,264 @@ pub fn verify_kzg_proof( ) -> Result { kzg.verify_kzg_proof(kzg_commitment, &z.0.into(), &y.0.into(), kzg_proof) } + +/// Build data column sidecars from a signed beacon block and its blobs. 
+pub fn blobs_to_data_column_sidecars( + blobs: &BlobsList, + block: &SignedBeaconBlock, + kzg: &Kzg, + spec: &ChainSpec, +) -> Result, DataColumnSidecarError> { + if blobs.is_empty() { + return Ok(vec![]); + } + let kzg_commitments = block + .message() + .body() + .blob_kzg_commitments() + .map_err(|_err| DataColumnSidecarError::PreDeneb)?; + let kzg_commitments_inclusion_proof = block.message().body().kzg_commitments_merkle_proof()?; + let signed_block_header = block.signed_block_header(); + + // NOTE: assumes blob sidecars are ordered by index + let blob_cells_and_proofs_vec = blobs + .into_par_iter() + .map(|blob| { + let blob = blob + .as_ref() + .try_into() + .expect("blob should have a guaranteed size due to FixedVector"); + kzg.compute_cells_and_proofs(blob) + }) + .collect::, KzgError>>()?; + + build_data_column_sidecars( + kzg_commitments.clone(), + kzg_commitments_inclusion_proof, + signed_block_header, + blob_cells_and_proofs_vec, + spec, + ) + .map_err(DataColumnSidecarError::BuildSidecarFailed) +} + +fn build_data_column_sidecars( + kzg_commitments: KzgCommitments, + kzg_commitments_inclusion_proof: FixedVector, + signed_block_header: SignedBeaconBlockHeader, + blob_cells_and_proofs_vec: Vec, + spec: &ChainSpec, +) -> Result, String> { + let number_of_columns = spec.number_of_columns; + let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + let mut column_kzg_proofs = + vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + + for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { + // we iterate over each column, and we construct the column from "top to bottom", + // pushing on the cell and the corresponding proof at each column index. we do this for + // each blob (i.e. the outer loop). 
+ for col in 0..number_of_columns { + let cell = blob_cells + .get(col) + .ok_or(format!("Missing blob cell at index {col}"))?; + let cell: Vec = cell.to_vec(); + let cell = Cell::::from(cell); + + let proof = blob_cell_proofs + .get(col) + .ok_or(format!("Missing blob cell KZG proof at index {col}"))?; + + let column = columns + .get_mut(col) + .ok_or(format!("Missing data column at index {col}"))?; + let column_proofs = column_kzg_proofs + .get_mut(col) + .ok_or(format!("Missing data column proofs at index {col}"))?; + + column.push(cell); + column_proofs.push(*proof); + } + } + + let sidecars: Vec>> = columns + .into_iter() + .zip(column_kzg_proofs) + .enumerate() + .map(|(index, (col, proofs))| { + Arc::new(DataColumnSidecar { + index: index as u64, + column: DataColumn::::from(col), + kzg_commitments: kzg_commitments.clone(), + kzg_proofs: KzgProofs::::from(proofs), + signed_block_header: signed_block_header.clone(), + kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), + }) + }) + .collect(); + + Ok(sidecars) +} + +/// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%). 
+pub fn reconstruct_data_columns( + kzg: &Kzg, + data_columns: &[Arc>], + spec: &ChainSpec, +) -> Result, KzgError> { + let first_data_column = data_columns + .first() + .ok_or(KzgError::InconsistentArrayLength( + "data_columns should have at least one element".to_string(), + ))?; + let num_of_blobs = first_data_column.kzg_commitments.len(); + + let blob_cells_and_proofs_vec = + (0..num_of_blobs) + .into_par_iter() + .map(|row_index| { + let mut cells: Vec = vec![]; + let mut cell_ids: Vec = vec![]; + for data_column in data_columns { + let cell = data_column.column.get(row_index).ok_or( + KzgError::InconsistentArrayLength(format!( + "Missing data column at index {row_index}" + )), + )?; + + cells.push(ssz_cell_to_crypto_cell::(cell)?); + cell_ids.push(data_column.index); + } + kzg.recover_cells_and_compute_kzg_proofs(&cell_ids, &cells) + }) + .collect::, KzgError>>()?; + + // Clone sidecar elements from existing data column, no need to re-compute + build_data_column_sidecars( + first_data_column.kzg_commitments.clone(), + first_data_column.kzg_commitments_inclusion_proof.clone(), + first_data_column.signed_block_header.clone(), + blob_cells_and_proofs_vec, + spec, + ) + .map_err(KzgError::ReconstructFailed) +} + +#[cfg(test)] +mod test { + use crate::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; + use bls::Signature; + use eth2_network_config::TRUSTED_SETUP_BYTES; + use kzg::{Kzg, KzgCommitment, TrustedSetup}; + use types::{ + beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, + ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, + }; + + type E = MainnetEthSpec; + + // Loading and initializing PeerDAS KZG is expensive and slow, so we group the tests together + // only load it once. 
+ #[test] + fn test_build_data_columns_sidecars() { + let spec = E::default_spec(); + let kzg = get_kzg(); + test_build_data_columns_empty(&kzg, &spec); + test_build_data_columns(&kzg, &spec); + test_reconstruct_data_columns(&kzg, &spec); + } + + #[track_caller] + fn test_build_data_columns_empty(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 0; + let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_sidecars, &signed_block, kzg, spec).unwrap(); + assert!(column_sidecars.is_empty()); + } + + #[track_caller] + fn test_build_data_columns(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 6; + let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + + let column_sidecars = + blobs_to_data_column_sidecars(&blob_sidecars, &signed_block, kzg, spec).unwrap(); + + let block_kzg_commitments = signed_block + .message() + .body() + .blob_kzg_commitments() + .unwrap() + .clone(); + let block_kzg_commitments_inclusion_proof = signed_block + .message() + .body() + .kzg_commitments_merkle_proof() + .unwrap(); + + assert_eq!(column_sidecars.len(), spec.number_of_columns); + for (idx, col_sidecar) in column_sidecars.iter().enumerate() { + assert_eq!(col_sidecar.index, idx as u64); + + assert_eq!(col_sidecar.kzg_commitments.len(), num_of_blobs); + assert_eq!(col_sidecar.column.len(), num_of_blobs); + assert_eq!(col_sidecar.kzg_proofs.len(), num_of_blobs); + + assert_eq!(col_sidecar.kzg_commitments, block_kzg_commitments); + assert_eq!( + col_sidecar.kzg_commitments_inclusion_proof, + block_kzg_commitments_inclusion_proof + ); + assert!(col_sidecar.verify_inclusion_proof()); + } + } + + #[track_caller] + fn test_reconstruct_data_columns(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 6; + let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_sidecars, 
&signed_block, kzg, spec).unwrap(); + + // Now reconstruct + let reconstructed_columns = reconstruct_data_columns( + kzg, + &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], + spec, + ) + .unwrap(); + + for i in 0..spec.number_of_columns { + assert_eq!(reconstructed_columns.get(i), column_sidecars.get(i), "{i}"); + } + } + + fn get_kzg() -> Kzg { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + Kzg::new_from_trusted_setup_das_enabled(trusted_setup).expect("should create kzg") + } + + fn create_test_block_and_blobs( + num_of_blobs: usize, + spec: &ChainSpec, + ) -> (SignedBeaconBlock, BlobsList) { + let mut block = BeaconBlock::Deneb(BeaconBlockDeneb::empty(spec)); + let mut body = block.body_mut(); + let blob_kzg_commitments = body.blob_kzg_commitments_mut().unwrap(); + *blob_kzg_commitments = + KzgCommitments::::new(vec![KzgCommitment::empty_for_testing(); num_of_blobs]) + .unwrap(); + + let signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + + let blobs = (0..num_of_blobs) + .map(|_| Blob::::default()) + .collect::>() + .into(); + + (signed_block, blobs) + } +} diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index 87513885f77..efc746675dc 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,14 +1,23 @@ use crate::errors::BeaconChainError; use crate::{metrics, BeaconChainTypes, BeaconStore}; use parking_lot::{Mutex, RwLock}; +use safe_arith::SafeArith; use slog::{debug, Logger}; +use ssz::Decode; +use ssz::Encode; use ssz_types::FixedVector; use std::num::NonZeroUsize; -use types::light_client_update::{FinalizedRootProofLen, FINALIZED_ROOT_INDEX}; +use std::sync::Arc; +use store::DBColumn; +use store::KeyValueStore; 
+use types::light_client_update::{ + FinalizedRootProofLen, NextSyncCommitteeProofLen, FINALIZED_ROOT_INDEX, + NEXT_SYNC_COMMITTEE_INDEX, +}; use types::non_zero_usize::new_non_zero_usize; use types::{ BeaconBlockRef, BeaconState, ChainSpec, EthSpec, ForkName, Hash256, LightClientFinalityUpdate, - LightClientOptimisticUpdate, Slot, SyncAggregate, + LightClientOptimisticUpdate, LightClientUpdate, Slot, SyncAggregate, SyncCommittee, }; /// A prev block cache miss requires to re-generate the state of the post-parent block. Items in the @@ -30,8 +39,10 @@ pub struct LightClientServerCache { latest_finality_update: RwLock>>, /// Tracks a single global latest optimistic update out of all imported blocks. latest_optimistic_update: RwLock>>, + /// Caches the most recent light client update + latest_light_client_update: RwLock>>, /// Caches state proofs by block root - prev_block_cache: Mutex>, + prev_block_cache: Mutex>>, } impl LightClientServerCache { @@ -39,13 +50,14 @@ impl LightClientServerCache { Self { latest_finality_update: None.into(), latest_optimistic_update: None.into(), + latest_light_client_update: None.into(), prev_block_cache: lru::LruCache::new(PREV_BLOCK_CACHE_SIZE).into(), } } /// Compute and cache state proofs for latter production of light-client messages. Does not /// trigger block replay. - pub fn cache_state_data( + pub(crate) fn cache_state_data( &self, spec: &ChainSpec, block: BeaconBlockRef, @@ -67,13 +79,13 @@ impl LightClientServerCache { Ok(()) } - /// Given a block with a SyncAggregte computes better or more recent light client updates. The + /// Given a block with a SyncAggregate computes better or more recent light client updates. 
The /// results are cached either on disk or memory to be served via p2p and rest API pub fn recompute_and_cache_updates( &self, store: BeaconStore, - block_parent_root: &Hash256, block_slot: Slot, + block_parent_root: &Hash256, sync_aggregate: &SyncAggregate, log: &Logger, chain_spec: &ChainSpec, @@ -100,11 +112,17 @@ impl LightClientServerCache { let attested_slot = attested_block.slot(); + let maybe_finalized_block = store.get_blinded_block(&cached_parts.finalized_block_root)?; + + let sync_period = block_slot + .epoch(T::EthSpec::slots_per_epoch()) + .sync_committee_period(chain_spec)?; + // Spec: Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest // attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice let is_latest_optimistic = match &self.latest_optimistic_update.read().clone() { Some(latest_optimistic_update) => { - is_latest_optimistic_update(latest_optimistic_update, attested_slot, signature_slot) + latest_optimistic_update.is_latest(attested_slot, signature_slot) } None => true, }; @@ -122,18 +140,17 @@ impl LightClientServerCache { // attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice let is_latest_finality = match &self.latest_finality_update.read().clone() { Some(latest_finality_update) => { - is_latest_finality_update(latest_finality_update, attested_slot, signature_slot) + latest_finality_update.is_latest(attested_slot, signature_slot) } None => true, }; + if is_latest_finality & !cached_parts.finalized_block_root.is_zero() { // Immediately after checkpoint sync the finalized block may not be available yet. - if let Some(finalized_block) = - store.get_blinded_block(&cached_parts.finalized_block_root)? 
- { + if let Some(finalized_block) = maybe_finalized_block.as_ref() { *self.latest_finality_update.write() = Some(LightClientFinalityUpdate::new( &attested_block, - &finalized_block, + finalized_block, cached_parts.finality_branch.clone(), sync_aggregate.clone(), signature_slot, @@ -148,9 +165,142 @@ impl LightClientServerCache { } } + let new_light_client_update = LightClientUpdate::new( + sync_aggregate, + block_slot, + cached_parts.next_sync_committee, + cached_parts.next_sync_committee_branch, + cached_parts.finality_branch, + &attested_block, + maybe_finalized_block.as_ref(), + chain_spec, + )?; + + // Spec: Full nodes SHOULD provide the best derivable LightClientUpdate (according to is_better_update) + // for each sync committee period + let prev_light_client_update = match &self.latest_light_client_update.read().clone() { + Some(prev_light_client_update) => Some(prev_light_client_update.clone()), + None => self.get_light_client_update(&store, sync_period, chain_spec)?, + }; + + let should_persist_light_client_update = + if let Some(prev_light_client_update) = prev_light_client_update { + let prev_sync_period = prev_light_client_update + .signature_slot() + .epoch(T::EthSpec::slots_per_epoch()) + .sync_committee_period(chain_spec)?; + + if sync_period != prev_sync_period { + true + } else { + prev_light_client_update + .is_better_light_client_update(&new_light_client_update, chain_spec)? 
+ } + } else { + true + }; + + if should_persist_light_client_update { + self.store_light_client_update(&store, sync_period, &new_light_client_update)?; + } + Ok(()) } + fn store_light_client_update( + &self, + store: &BeaconStore, + sync_committee_period: u64, + light_client_update: &LightClientUpdate, + ) -> Result<(), BeaconChainError> { + let column = DBColumn::LightClientUpdate; + + store.hot_db.put_bytes( + column.into(), + &sync_committee_period.to_le_bytes(), + &light_client_update.as_ssz_bytes(), + )?; + + *self.latest_light_client_update.write() = Some(light_client_update.clone()); + + Ok(()) + } + + // Used to fetch the most recently persisted "best" light client update. + // Should not be used outside the light client server, as it also caches the fetched + // light client update. + fn get_light_client_update( + &self, + store: &BeaconStore, + sync_committee_period: u64, + chain_spec: &ChainSpec, + ) -> Result>, BeaconChainError> { + if let Some(latest_light_client_update) = self.latest_light_client_update.read().clone() { + let latest_lc_update_sync_committee_period = latest_light_client_update + .signature_slot() + .epoch(T::EthSpec::slots_per_epoch()) + .sync_committee_period(chain_spec)?; + if latest_lc_update_sync_committee_period == sync_committee_period { + return Ok(Some(latest_light_client_update)); + } + } + + let column = DBColumn::LightClientUpdate; + let res = store + .hot_db + .get_bytes(column.into(), &sync_committee_period.to_le_bytes())?; + + if let Some(light_client_update_bytes) = res { + let epoch = sync_committee_period + .safe_mul(chain_spec.epochs_per_sync_committee_period.into())?; + + let fork_name = chain_spec.fork_name_at_epoch(epoch.into()); + + let light_client_update = + LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, &fork_name) + .map_err(store::errors::Error::SszDecodeError)?; + + *self.latest_light_client_update.write() = Some(light_client_update.clone()); + return Ok(Some(light_client_update)); + } + + 
Ok(None) + } + + pub fn get_light_client_updates( + &self, + store: &BeaconStore, + start_period: u64, + count: u64, + chain_spec: &ChainSpec, + ) -> Result>, BeaconChainError> { + let column = DBColumn::LightClientUpdate; + let mut light_client_updates = vec![]; + for res in store + .hot_db + .iter_column_from::>(column, &start_period.to_le_bytes()) + { + let (sync_committee_bytes, light_client_update_bytes) = res?; + let sync_committee_period = u64::from_ssz_bytes(&sync_committee_bytes) + .map_err(store::errors::Error::SszDecodeError)?; + let epoch = sync_committee_period + .safe_mul(chain_spec.epochs_per_sync_committee_period.into())?; + + let fork_name = chain_spec.fork_name_at_epoch(epoch.into()); + + let light_client_update = + LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, &fork_name) + .map_err(store::errors::Error::SszDecodeError)?; + + light_client_updates.push(light_client_update); + + if sync_committee_period >= start_period + count { + break; + } + } + Ok(light_client_updates) + } + /// Retrieves prev block cached data from cache. If not present re-computes by retrieving the /// parent state, and inserts an entry to the cache. /// @@ -161,7 +311,7 @@ impl LightClientServerCache { block_root: &Hash256, block_state_root: &Hash256, block_slot: Slot, - ) -> Result { + ) -> Result, BeaconChainError> { // Attempt to get the value from the cache first. 
if let Some(cached_parts) = self.prev_block_cache.lock().get(block_root) { return Ok(cached_parts.clone()); @@ -199,52 +349,25 @@ impl Default for LightClientServerCache { } type FinalityBranch = FixedVector; +type NextSyncCommitteeBranch = FixedVector; #[derive(Clone)] -struct LightClientCachedData { +struct LightClientCachedData { finality_branch: FinalityBranch, + next_sync_committee_branch: NextSyncCommitteeBranch, + next_sync_committee: Arc>, finalized_block_root: Hash256, } -impl LightClientCachedData { - fn from_state(state: &mut BeaconState) -> Result { +impl LightClientCachedData { + fn from_state(state: &mut BeaconState) -> Result { Ok(Self { finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), + next_sync_committee: state.next_sync_committee()?.clone(), + next_sync_committee_branch: state + .compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)? + .into(), finalized_block_root: state.finalized_checkpoint().root, }) } } - -// Implements spec prioritization rules: -// > Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) -// -// ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_finality_update -fn is_latest_finality_update( - prev: &LightClientFinalityUpdate, - attested_slot: Slot, - signature_slot: Slot, -) -> bool { - let prev_slot = prev.get_attested_header_slot(); - if attested_slot > prev_slot { - true - } else { - attested_slot == prev_slot && signature_slot > *prev.signature_slot() - } -} - -// Implements spec prioritization rules: -// > Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) -// -// ref: 
https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_optimistic_update -fn is_latest_optimistic_update( - prev: &LightClientOptimisticUpdate, - attested_slot: Slot, - signature_slot: Slot, -) -> bool { - let prev_slot = prev.get_slot(); - if attested_slot > prev_slot { - true - } else { - attested_slot == prev_slot && signature_slot > *prev.signature_slot() - } -} diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index b8969b31f1e..3da2bea36c8 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,6 +1,7 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use bls::FixedBytesExtended; pub use lighthouse_metrics::*; use slot_clock::SlotClock; use std::sync::LazyLock; @@ -569,19 +570,20 @@ pub static FORK_CHOICE_AFTER_FINALIZATION_TIMES: LazyLock> = exponential_buckets(1e-3, 2.0, 10), ) }); -pub static FORK_CHOICE_PROCESS_BLOCK_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_fork_choice_process_block_seconds", - "Time taken to add a block and all attestations to fork choice", +pub static FORK_CHOICE_READ_LOCK_AQUIRE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_fork_choice_read_lock_aquire_seconds", + "Time taken to aquire the fork-choice read lock", + exponential_buckets(1e-4, 4.0, 7), + ) +}); +pub static FORK_CHOICE_WRITE_LOCK_AQUIRE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_fork_choice_write_lock_aquire_seconds", + "Time taken to aquire the fork-choice write lock", + exponential_buckets(1e-3, 4.0, 7), ) }); -pub static FORK_CHOICE_PROCESS_ATTESTATION_TIMES: LazyLock> = - LazyLock::new(|| { - try_create_histogram( - 
"beacon_fork_choice_process_attestation_seconds", - "Time taken to add an attestation to fork choice", - ) - }); pub static FORK_CHOICE_SET_HEAD_LAG_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram( "beacon_fork_choice_set_head_lag_times", @@ -1645,6 +1647,20 @@ pub static BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: LazyLock> "Time taken to compute blob sidecar inclusion proof", ) }); +pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( + "data_column_sidecar_computation_seconds", + "Time taken to compute data column sidecar, including cells, proofs and inclusion proof", + Ok(vec![0.04, 0.05, 0.1, 0.2, 0.3, 0.5, 0.7, 1.0]), + ) +}); +pub static DATA_COLUMN_SIDECAR_INCLUSION_PROOF_VERIFICATION: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "data_column_sidecar_inclusion_proof_verification_seconds", + "Time taken to verify data_column sidecar inclusion proof", + ) + }); pub static DATA_COLUMN_SIDECAR_PROCESSING_REQUESTS: LazyLock> = LazyLock::new(|| { try_create_int_counter( @@ -1666,6 +1682,13 @@ pub static DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_blobs_column_sidecar_processing_successes_total", + "Number of data column sidecars verified for gossip", + ) + }); /* * Light server message verification @@ -1785,6 +1808,26 @@ pub static KZG_VERIFICATION_BATCH_TIMES: LazyLock> = LazyLock: "Runtime of batched kzg verification", ) }); +pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "kzg_verification_data_column_single_seconds", + "Runtime of single data column kzg verification", + Ok(vec![ + 0.0005, 0.001, 0.0015, 0.002, 0.003, 0.004, 0.005, 0.007, 0.01, 0.02, 0.05, + ]), + ) + }); +pub static KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + 
"kzg_verification_data_column_batch_seconds", + "Runtime of batched data column kzg verification", + Ok(vec![ + 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.015, 0.02, 0.03, 0.05, 0.07, + ]), + ) + }); pub static BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: LazyLock> = LazyLock::new( || { @@ -1828,6 +1871,20 @@ pub static DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "data_availability_reconstruction_time_seconds", + "Time taken to reconstruct columns", + ) + }); +pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "data_availability_reconstructed_columns_total", + "Total count of reconstructed columns", + ) + }); /* * light_client server metrics @@ -1928,6 +1985,11 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { .validator_monitor .read() .scrape_metrics(&beacon_chain.slot_clock, &beacon_chain.spec); + + beacon_chain + .canonical_head + .fork_choice_read_lock() + .scrape_for_metrics(); } /// Scrape the given `state` assuming it's the head state, updating the `DEFAULT_REGISTRY`. diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 08b2a51720d..fcb8fb1c897 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -14,8 +14,8 @@ use store::iter::RootsIterator; use store::{Error, ItemStore, StoreItem, StoreOp}; pub use store::{HotColdDB, MemoryStore}; use types::{ - BeaconState, BeaconStateError, BeaconStateHash, Checkpoint, Epoch, EthSpec, Hash256, - SignedBeaconBlockHash, Slot, + BeaconState, BeaconStateError, BeaconStateHash, Checkpoint, Epoch, EthSpec, FixedBytesExtended, + Hash256, SignedBeaconBlockHash, Slot, }; /// Compact at least this frequently, finalization permitting (7 days). 
diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index 211aecfe63d..7e23edbae8d 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -48,7 +48,7 @@ impl TreeHash for AttestationKey { // Combine the hash of the data with the hash of the index let mut hasher = MerkleHasher::with_leaves(2); hasher - .write(self.data_root.as_bytes()) + .write(self.data_root.as_slice()) .expect("should write data hash"); hasher .write(&index.to_le_bytes()) @@ -582,7 +582,8 @@ mod tests { use tree_hash::TreeHash; use types::{ test_utils::{generate_deterministic_keypair, test_random_instance}, - Attestation, AttestationBase, AttestationElectra, Fork, Hash256, SyncCommitteeMessage, + Attestation, AttestationBase, AttestationElectra, FixedBytesExtended, Fork, Hash256, + SyncCommitteeMessage, }; type E = types::MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index 00476bfe7af..038edfe27f0 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -473,7 +473,7 @@ where #[cfg(not(debug_assertions))] mod tests { use super::*; - use types::{test_utils::test_random_instance, AttestationBase, Hash256}; + use types::{test_utils::test_random_instance, AttestationBase, FixedBytesExtended, Hash256}; type E = types::MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index a1c6adc3e07..efb95f57a96 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -619,6 +619,7 @@ impl SlotSubcommitteeIndex { #[cfg(test)] mod tests { use super::*; + use types::FixedBytesExtended; type E = types::MainnetEthSpec; diff --git 
a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs index b934c553e6c..31034a7d59b 100644 --- a/beacon_node/beacon_chain/src/otb_verification_service.rs +++ b/beacon_node/beacon_chain/src/otb_verification_service.rs @@ -64,7 +64,7 @@ impl OptimisticTransitionBlock { store .as_ref() .hot_db - .key_delete(OTBColumn.into(), self.root.as_bytes()) + .key_delete(OTBColumn.into(), self.root.as_slice()) } fn is_canonical( diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs index 4042d328207..fcc8b9884ac 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs @@ -62,7 +62,7 @@ pub fn downgrade_from_v21( message: format!("{e:?}"), })?; - let db_key = get_key_for_col(DBColumn::PubkeyCache.into(), key.as_bytes()); + let db_key = get_key_for_col(DBColumn::PubkeyCache.into(), key.as_slice()); ops.push(KeyValueStoreOp::PutKeyValue( db_key, pubkey_bytes.as_ssz_bytes(), diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 04d58882639..a662cc49c9d 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -294,7 +294,7 @@ impl BlockShufflingIds { #[cfg(not(debug_assertions))] #[cfg(test)] mod test { - use task_executor::test_utils::null_logger; + use task_executor::test_utils::test_logger; use types::*; use crate::test_utils::EphemeralHarnessType; @@ -315,7 +315,7 @@ mod test { previous: Some(shuffling_id(current_epoch - 1)), block_root: Hash256::from_low_u64_le(0), }; - let logger = null_logger().unwrap(); + let logger = test_logger(); ShufflingCache::new(TEST_CACHE_SIZE, head_shuffling_ids, logger) } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 
87a3eeb359e..e08e35b08f6 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,4 +1,5 @@ use crate::block_verification_types::{AsBlock, RpcBlock}; +use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::BeaconBlockResponseWrapper; @@ -82,6 +83,14 @@ pub static KZG: LazyLock> = LazyLock::new(|| { Arc::new(kzg) }); +pub static KZG_PEERDAS: LazyLock> = LazyLock::new(|| { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + let kzg = Kzg::new_from_trusted_setup_das_enabled(trusted_setup).expect("should create kzg"); + Arc::new(kzg) +}); + pub type BaseHarnessType = Witness, E, THotStore, TColdStore>; @@ -1977,7 +1986,7 @@ where slot: Slot, block_root: Hash256, block_contents: SignedBlockContentsTuple, - ) -> Result> { + ) -> Result { self.set_current_slot(slot); let (block, blob_items) = block_contents; @@ -2004,7 +2013,7 @@ where pub async fn process_block_result( &self, block_contents: SignedBlockContentsTuple, - ) -> Result> { + ) -> Result { let (block, blob_items) = block_contents; let sidecars = blob_items @@ -2089,7 +2098,7 @@ where SignedBlockContentsTuple, BeaconState, ), - BlockError, + BlockError, > { self.set_current_slot(slot); let (block_contents, new_state) = self.make_block(state, slot).await; @@ -2135,7 +2144,7 @@ where state: BeaconState, state_root: Hash256, validators: &[usize], - ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { + ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { self.add_attested_block_at_slot_with_sync( slot, state, @@ -2153,7 +2162,7 @@ where state_root: Hash256, validators: &[usize], sync_committee_strategy: SyncCommitteeStrategy, - ) -> Result<(SignedBeaconBlockHash, BeaconState), 
BlockError> { + ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?; self.attest_block(&state, state_root, block_hash, &block.0, validators); @@ -2618,6 +2627,7 @@ pub fn generate_rand_block_and_blobs( let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng)); + let mut blob_sidecars = vec![]; let bundle = match block { @@ -2690,3 +2700,20 @@ pub fn generate_rand_block_and_blobs( } (block, blob_sidecars) } + +#[allow(clippy::type_complexity)] +pub fn generate_rand_block_and_data_columns( + fork_name: ForkName, + num_blobs: NumBlobs, + rng: &mut impl Rng, + spec: &ChainSpec, +) -> ( + SignedBeaconBlock>, + Vec>>, +) { + let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng); + let blob: BlobsList = blobs.into_iter().map(|b| b.blob).collect::>().into(); + let data_columns = blobs_to_data_column_sidecars(&blob, &block, &KZG_PEERDAS, spec).unwrap(); + + (block, data_columns) +} diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 576fbf0fd1f..917c20bfa5a 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -7,7 +7,7 @@ use ssz_derive::{Decode, Encode}; use std::collections::HashMap; use std::marker::PhantomData; use store::{DBColumn, Error as StoreError, StoreItem, StoreOp}; -use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; +use types::{BeaconState, FixedBytesExtended, Hash256, PublicKey, PublicKeyBytes}; /// Provides a mapping of `validator_index -> validator_publickey`. 
/// diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index a52437e003a..335884d57a9 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -24,8 +24,8 @@ use types::{ signed_aggregate_and_proof::SignedAggregateAndProofRefMut, test_utils::generate_deterministic_keypair, Address, AggregateSignature, Attestation, AttestationRef, AttestationRefMut, BeaconStateError, BitList, ChainSpec, Epoch, EthSpec, - ForkName, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof, SignedAggregateAndProof, - Slot, SubnetId, Unsigned, + FixedBytesExtended, ForkName, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof, + SignedAggregateAndProof, Slot, SubnetId, Unsigned, }; pub type E = MainnetEthSpec; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 046a3468afc..faa4d74a182 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1098,8 +1098,8 @@ async fn block_gossip_verification() { assert!( matches!( unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), - BlockError::ParentUnknown(block) - if block.parent_root() == parent_root + BlockError::ParentUnknown {parent_root: p} + if p == parent_root ), "should not import a block for an unknown parent" ); @@ -1472,7 +1472,7 @@ async fn add_base_block_to_altair_chain() { ) .await, ChainSegmentResult::Failed { - imported_blocks: 0, + imported_blocks: _, error: BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Altair, object_fork: ForkName::Base, @@ -1608,7 +1608,7 @@ async fn add_altair_block_to_base_chain() { ) .await, ChainSegmentResult::Failed { - imported_blocks: 0, + imported_blocks: _, error: 
BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Base, object_fork: ForkName::Altair, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 4dc7d20e227..b455c3bace4 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -212,7 +212,7 @@ impl InvalidPayloadRig { .unwrap(); } - async fn import_block_parametric) -> bool>( + async fn import_block_parametric bool>( &mut self, new_payload_response: Payload, forkchoice_response: Payload, @@ -1280,7 +1280,7 @@ struct OptimisticTransitionSetup { impl OptimisticTransitionSetup { async fn new(num_blocks: usize, ttd: u64) -> Self { let mut spec = E::default_spec(); - spec.terminal_total_difficulty = ttd.into(); + spec.terminal_total_difficulty = Uint256::from(ttd); let mut rig = InvalidPayloadRig::new_with_spec(spec).enable_attestations(); rig.move_to_terminal_block(); @@ -1323,7 +1323,7 @@ async fn build_optimistic_chain( // Build a brand-new testing harness. We will apply the blocks from the previous harness to // this one. 
let mut spec = E::default_spec(); - spec.terminal_total_difficulty = rig_ttd.into(); + spec.terminal_total_difficulty = Uint256::from(rig_ttd); let rig = InvalidPayloadRig::new_with_spec(spec); let spec = &rig.harness.chain.spec; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 01d7798b92c..95bf7f1ce84 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5,6 +5,7 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; +use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::test_utils::{ mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, KZG, @@ -103,6 +104,256 @@ fn get_harness_generic( harness } +#[tokio::test] +async fn light_client_updates_test() { + let spec = test_spec::(); + let Some(_) = spec.altair_fork_epoch else { + // No-op prior to Altair. 
+ return; + }; + + let num_final_blocks = E::slots_per_epoch() * 2; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); + let db_path = tempdir().unwrap(); + let log = test_logger(); + + let seconds_per_slot = spec.seconds_per_slot; + let store = get_store_generic( + &db_path, + StoreConfig { + slots_per_restore_point: 2 * E::slots_per_epoch(), + ..Default::default() + }, + test_spec::(), + ); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + let num_initial_slots = E::slots_per_epoch() * 10; + let slots: Vec = (1..num_initial_slots).map(Slot::new).collect(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + harness + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, + ) + .await; + + let wss_block_root = harness + .chain + .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); + let wss_block = harness + .chain + .store + .get_full_block(&wss_block_root) + .unwrap() + .unwrap(); + let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); + let wss_state = store + .get_state(&wss_state_root, Some(checkpoint_slot)) + .unwrap() + .unwrap(); + + let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + + let mock = + mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); + + harness.advance_slot(); + harness + .extend_chain( + num_final_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Initialise a new beacon chain from the finalized checkpoint. + // The slot clock must be set to a time ahead of the checkpoint state. 
+ let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(seconds_per_slot), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); + + let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); + + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + .store(store.clone()) + .custom_spec(test_spec::()) + .task_executor(harness.chain.task_executor.clone()) + .logger(log.clone()) + .weak_subjectivity_state( + wss_state, + wss_block.clone(), + wss_blobs_opt.clone(), + genesis_state, + ) + .unwrap() + .store_migrator_config(MigratorConfig::default().blocking()) + .dummy_eth1_backend() + .expect("should build dummy backend") + .slot_clock(slot_clock) + .shutdown_sender(shutdown_tx) + .chain_config(ChainConfig::default()) + .event_handler(Some(ServerSentEventHandler::new_with_capacity( + log.clone(), + 1, + ))) + .execution_layer(Some(mock.el)) + .kzg(kzg) + .build() + .expect("should build"); + + let beacon_chain = Arc::new(beacon_chain); + + let current_state = harness.get_current_state(); + + if ForkName::Electra == current_state.fork_name_unchecked() { + // TODO(electra) fix beacon state `compute_merkle_proof` + return; + } + + let block_root = *current_state + .get_block_root(current_state.slot() - Slot::new(1)) + .unwrap(); + + let contributions = harness.make_sync_contributions( + ¤t_state, + block_root, + current_state.slot() - Slot::new(1), + RelativeSyncCommittee::Current, + ); + + // generate sync aggregates + for (_, contribution_and_proof) in contributions { + let contribution = contribution_and_proof + .expect("contribution exists for committee") + .message + .contribution; + beacon_chain + .op_pool + .insert_sync_contribution(contribution.clone()) + .unwrap(); + beacon_chain + .op_pool + .insert_sync_contribution(contribution) + .unwrap(); + } + + // check that we can fetch the newly generated sync aggregate + let sync_aggregate = beacon_chain + 
.op_pool + .get_sync_aggregate(¤t_state) + .unwrap() + .unwrap(); + + // cache light client data + beacon_chain + .light_client_server_cache + .recompute_and_cache_updates( + store.clone(), + current_state.slot() - Slot::new(1), + &block_root, + &sync_aggregate, + &log, + &spec, + ) + .unwrap(); + + // calculate the sync period from the previous slot + let sync_period = (current_state.slot() - Slot::new(1)) + .epoch(E::slots_per_epoch()) + .sync_committee_period(&spec) + .unwrap(); + + // fetch a range of light client updates. right now there should only be one light client update + // in the db. + let lc_updates = beacon_chain + .get_light_client_updates(sync_period, 100) + .unwrap(); + + assert_eq!(lc_updates.len(), 1); + + // Advance to the next sync committee period + for _i in 0..(E::slots_per_epoch() * u64::from(spec.epochs_per_sync_committee_period)) { + harness.advance_slot(); + } + + harness + .extend_chain( + num_final_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let current_state = harness.get_current_state(); + + let block_root = *current_state + .get_block_root(current_state.slot() - Slot::new(1)) + .unwrap(); + + let contributions = harness.make_sync_contributions( + ¤t_state, + block_root, + current_state.slot() - Slot::new(1), + RelativeSyncCommittee::Current, + ); + + // generate new sync aggregates from this new state + for (_, contribution_and_proof) in contributions { + let contribution = contribution_and_proof + .expect("contribution exists for committee") + .message + .contribution; + beacon_chain + .op_pool + .insert_sync_contribution(contribution.clone()) + .unwrap(); + beacon_chain + .op_pool + .insert_sync_contribution(contribution) + .unwrap(); + } + + let sync_aggregate = beacon_chain + .op_pool + .get_sync_aggregate(¤t_state) + .unwrap() + .unwrap(); + + // cache new light client data + beacon_chain + .light_client_server_cache + .recompute_and_cache_updates( + store.clone(), 
+ current_state.slot() - Slot::new(1), + &block_root, + &sync_aggregate, + &log, + &spec, + ) + .unwrap(); + + // we should now have two light client updates in the db + let lc_updates = beacon_chain + .get_light_client_updates(sync_period, 100) + .unwrap(); + + assert_eq!(lc_updates.len(), 2); +} + /// Tests that `store.heal_freezer_block_roots_at_split` inserts block roots between last restore point /// slot and the split slot. #[tokio::test] @@ -842,7 +1093,7 @@ async fn delete_blocks_and_states() { assert_eq!( harness.head_block_root(), - honest_head.into(), + Hash256::from(honest_head), "the honest chain should be the canonical chain", ); @@ -1186,18 +1437,16 @@ fn check_shuffling_compatible( head_state.current_epoch(), |committee_cache, _| { let state_cache = head_state.committee_cache(RelativeEpoch::Current).unwrap(); + // We used to check for false negatives here, but had to remove that check + // because `shuffling_is_compatible` does not guarantee their absence. + // + // See: https://github.com/sigp/lighthouse/issues/6269 if current_epoch_shuffling_is_compatible { assert_eq!( committee_cache, state_cache.as_ref(), "block at slot {slot}" ); - } else { - assert_ne!( - committee_cache, - state_cache.as_ref(), - "block at slot {slot}" - ); } Ok(()) }, @@ -1228,8 +1477,6 @@ fn check_shuffling_compatible( let state_cache = head_state.committee_cache(RelativeEpoch::Previous).unwrap(); if previous_epoch_shuffling_is_compatible { assert_eq!(committee_cache, state_cache.as_ref()); - } else { - assert_ne!(committee_cache, state_cache.as_ref()); } Ok(()) }, @@ -2545,7 +2792,13 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { let (block_root, block, blobs, data_columns) = available_blocks[0].clone().deconstruct(); let mut corrupt_block = (*block).clone(); *corrupt_block.signature_mut() = Signature::empty(); - AvailableBlock::__new_for_testing(block_root, Arc::new(corrupt_block), blobs, data_columns) + AvailableBlock::__new_for_testing( + 
block_root, + Arc::new(corrupt_block), + blobs, + data_columns, + Arc::new(spec), + ) }; // Importing the invalid batch should error. diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 5cbb26ffbf5..f8da2e8da1c 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -13,8 +13,8 @@ use store::{SignedContributionAndProof, SyncCommitteeMessage}; use tree_hash::TreeHash; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::{ - AggregateSignature, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, SecretKey, Slot, - SyncContributionData, SyncSelectionProof, SyncSubnetId, Unsigned, + AggregateSignature, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, + SecretKey, Slot, SyncContributionData, SyncSelectionProof, SyncSubnetId, Unsigned, }; pub type E = MainnetEthSpec; diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index c2c03baff04..b4a54d26676 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -4,7 +4,7 @@ use beacon_chain::test_utils::{ use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS}; use logging::test_logger; use std::sync::LazyLock; -use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, ForkName, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; // Should ideally be divisible by 3. 
pub const VALIDATOR_COUNT: usize = 48; @@ -128,7 +128,7 @@ async fn produces_missed_blocks() { let initial_blocks = slots_per_epoch * nb_epoch_to_simulate.as_u64() - 1; // The validator index of the validator that is 'supposed' to miss a block - let mut validator_index_to_monitor = 1; + let validator_index_to_monitor = 1; // 1st scenario // // @@ -201,34 +201,22 @@ async fn produces_missed_blocks() { // Missed block happens when slot and prev_slot are not in the same epoch // making sure that the cache reloads when the epoch changes // in that scenario the slot that missed a block is the first slot of the epoch - validator_index_to_monitor = 7; // We are adding other validators to monitor as these ones will miss a block depending on // the fork name specified when running the test as the proposer cache differs depending on // the fork name (cf. seed) // // If you are adding a new fork and seeing errors, print // `validator_indexes[slot_in_epoch.as_usize()]` and add it below. - let validator_index_to_monitor_altair = 2; - // Same as above but for the merge upgrade - let validator_index_to_monitor_bellatrix = 4; - // Same as above but for the capella upgrade - let validator_index_to_monitor_capella = 11; - // Same as above but for the deneb upgrade - let validator_index_to_monitor_deneb = 3; - // Same as above but for the electra upgrade - let validator_index_to_monitor_electra = 6; - - let harness2 = get_harness( - validator_count, - vec![ - validator_index_to_monitor, - validator_index_to_monitor_altair, - validator_index_to_monitor_bellatrix, - validator_index_to_monitor_capella, - validator_index_to_monitor_deneb, - validator_index_to_monitor_electra, - ], - ); + let validator_index_to_monitor = match harness1.spec.fork_name_at_slot::(Slot::new(0)) { + ForkName::Base => 7, + ForkName::Altair => 2, + ForkName::Bellatrix => 4, + ForkName::Capella => 11, + ForkName::Deneb => 3, + ForkName::Electra => 1, + }; + + let harness2 = get_harness(validator_count, 
vec![validator_index_to_monitor]); let advance_slot_by = 9; harness2 .extend_chain( @@ -300,6 +288,12 @@ async fn produces_missed_blocks() { duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap(); validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap(); let not_monitored_validator_index = validator_indexes[slot_in_epoch.as_usize()]; + // This could do with a refactor: https://github.com/sigp/lighthouse/issues/6293 + assert_ne!( + not_monitored_validator_index, + validator_index_to_monitor, + "this test has a fragile dependency on hardcoded indices. you need to tweak some settings or rewrite this" + ); assert_eq!( _state2.set_block_root(prev_slot, duplicate_block_root), diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index f491dc7ffb0..f506f0bb94d 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -64,11 +64,11 @@ use types::{ Attestation, BeaconState, ChainSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, SubnetId, }; use types::{EthSpec, Slot}; -use work_reprocessing_queue::IgnoredRpcBlock; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, }; +use work_reprocessing_queue::{IgnoredRpcBlock, QueuedSamplingRequest}; mod metrics; pub mod work_reprocessing_queue; @@ -106,8 +106,12 @@ pub struct BeaconProcessorQueueLengths { finality_update_queue: usize, optimistic_update_queue: usize, unknown_light_client_update_queue: usize, + unknown_block_sampling_request_queue: usize, rpc_block_queue: usize, rpc_blob_queue: usize, + rpc_custody_column_queue: usize, + rpc_verify_data_column_queue: usize, + sampling_result_queue: usize, chain_segment_queue: usize, backfill_chain_segment: usize, gossip_block_queue: usize, @@ -119,6 +123,8 @@ pub struct BeaconProcessorQueueLengths { bbroots_queue: usize, blbroots_queue: usize, blbrange_queue: 
usize, + dcbroots_queue: usize, + dcbrange_queue: usize, gossip_bls_to_execution_change_queue: usize, lc_bootstrap_queue: usize, lc_optimistic_update_queue: usize, @@ -158,9 +164,14 @@ impl BeaconProcessorQueueLengths { gossip_attester_slashing_queue: 4096, finality_update_queue: 1024, optimistic_update_queue: 1024, + unknown_block_sampling_request_queue: 16384, unknown_light_client_update_queue: 128, rpc_block_queue: 1024, rpc_blob_queue: 1024, + // TODO(das): Placeholder values + rpc_custody_column_queue: 1000, + rpc_verify_data_column_queue: 1000, + sampling_result_queue: 1000, chain_segment_queue: 64, backfill_chain_segment: 64, gossip_block_queue: 1024, @@ -172,6 +183,9 @@ impl BeaconProcessorQueueLengths { bbroots_queue: 1024, blbroots_queue: 1024, blbrange_queue: 1024, + // TODO(das): pick proper values + dcbroots_queue: 1024, + dcbrange_queue: 1024, gossip_bls_to_execution_change_queue: 16384, lc_bootstrap_queue: 1024, lc_optimistic_update_queue: 512, @@ -223,6 +237,9 @@ pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic pub const RPC_BLOCK: &str = "rpc_block"; pub const IGNORED_RPC_BLOCK: &str = "ignored_rpc_block"; pub const RPC_BLOBS: &str = "rpc_blob"; +pub const RPC_CUSTODY_COLUMN: &str = "rpc_custody_column"; +pub const RPC_VERIFY_DATA_COLUMNS: &str = "rpc_verify_data_columns"; +pub const SAMPLING_RESULT: &str = "sampling_result"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; @@ -230,12 +247,15 @@ pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; pub const BLOBS_BY_ROOTS_REQUEST: &str = "blobs_by_roots_request"; +pub const DATA_COLUMNS_BY_ROOTS_REQUEST: &str = "data_columns_by_roots_request"; +pub const DATA_COLUMNS_BY_RANGE_REQUEST: &str = 
"data_columns_by_range_request"; pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const LIGHT_CLIENT_FINALITY_UPDATE_REQUEST: &str = "light_client_finality_update_request"; pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE_REQUEST: &str = "light_client_optimistic_update_request"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; pub const UNKNOWN_LIGHT_CLIENT_UPDATE: &str = "unknown_light_client_update"; +pub const UNKNOWN_BLOCK_SAMPLING_REQUEST: &str = "unknown_block_sampling_request"; pub const GOSSIP_BLS_TO_EXECUTION_CHANGE: &str = "gossip_bls_to_execution_change"; pub const API_REQUEST_P0: &str = "api_request_p0"; pub const API_REQUEST_P1: &str = "api_request_p1"; @@ -491,6 +511,10 @@ impl From for WorkEvent { process_fn, }, }, + ReadyWork::SamplingRequest(QueuedSamplingRequest { process_fn, .. }) => Self { + drop_during_sync: true, + work: Work::UnknownBlockSamplingRequest { process_fn }, + }, ReadyWork::BackfillSync(QueuedBackfillBatch(process_fn)) => Self { drop_during_sync: false, work: Work::ChainSegmentBackfill(process_fn), @@ -574,6 +598,9 @@ pub enum Work { parent_root: Hash256, process_fn: BlockingFn, }, + UnknownBlockSamplingRequest { + process_fn: BlockingFn, + }, GossipAggregateBatch { aggregates: Vec>, process_batch: Box>) + Send + Sync>, @@ -599,6 +626,9 @@ pub enum Work { RpcBlobs { process_fn: AsyncFn, }, + RpcCustodyColumn(AsyncFn), + RpcVerifyDataColumn(AsyncFn), + SamplingResult(AsyncFn), IgnoredRpcBlock { process_fn: BlockingFn, }, @@ -609,6 +639,8 @@ pub enum Work { BlocksByRootsRequest(AsyncFn), BlobsByRangeRequest(BlockingFn), BlobsByRootsRequest(BlockingFn), + DataColumnsByRootsRequest(BlockingFn), + DataColumnsByRangeRequest(BlockingFn), GossipBlsToExecutionChange(BlockingFn), LightClientBootstrapRequest(BlockingFn), LightClientOptimisticUpdateRequest(BlockingFn), @@ -644,6 +676,9 @@ impl Work { 
Work::GossipLightClientOptimisticUpdate(_) => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, Work::RpcBlobs { .. } => RPC_BLOBS, + Work::RpcCustodyColumn { .. } => RPC_CUSTODY_COLUMN, + Work::RpcVerifyDataColumn(_) => RPC_VERIFY_DATA_COLUMNS, + Work::SamplingResult(_) => SAMPLING_RESULT, Work::IgnoredRpcBlock { .. } => IGNORED_RPC_BLOCK, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::ChainSegmentBackfill(_) => CHAIN_SEGMENT_BACKFILL, @@ -652,13 +687,16 @@ impl Work { Work::BlocksByRootsRequest(_) => BLOCKS_BY_ROOTS_REQUEST, Work::BlobsByRangeRequest(_) => BLOBS_BY_RANGE_REQUEST, Work::BlobsByRootsRequest(_) => BLOBS_BY_ROOTS_REQUEST, + Work::DataColumnsByRootsRequest(_) => DATA_COLUMNS_BY_ROOTS_REQUEST, + Work::DataColumnsByRangeRequest(_) => DATA_COLUMNS_BY_RANGE_REQUEST, Work::LightClientBootstrapRequest(_) => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::LightClientOptimisticUpdateRequest(_) => LIGHT_CLIENT_OPTIMISTIC_UPDATE_REQUEST, Work::LightClientFinalityUpdateRequest(_) => LIGHT_CLIENT_FINALITY_UPDATE_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, - Work::GossipBlsToExecutionChange(_) => GOSSIP_BLS_TO_EXECUTION_CHANGE, Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE, + Work::UnknownBlockSamplingRequest { .. } => UNKNOWN_BLOCK_SAMPLING_REQUEST, + Work::GossipBlsToExecutionChange(_) => GOSSIP_BLS_TO_EXECUTION_CHANGE, Work::ApiRequestP0 { .. } => API_REQUEST_P0, Work::ApiRequestP1 { .. 
} => API_REQUEST_P1, } @@ -800,10 +838,16 @@ impl BeaconProcessor { let mut optimistic_update_queue = FifoQueue::new(queue_lengths.optimistic_update_queue); let mut unknown_light_client_update_queue = FifoQueue::new(queue_lengths.unknown_light_client_update_queue); + let mut unknown_block_sampling_request_queue = + FifoQueue::new(queue_lengths.unknown_block_sampling_request_queue); // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(queue_lengths.rpc_block_queue); let mut rpc_blob_queue = FifoQueue::new(queue_lengths.rpc_blob_queue); + let mut rpc_custody_column_queue = FifoQueue::new(queue_lengths.rpc_custody_column_queue); + let mut rpc_verify_data_column_queue = + FifoQueue::new(queue_lengths.rpc_verify_data_column_queue); + let mut sampling_result_queue = FifoQueue::new(queue_lengths.sampling_result_queue); let mut chain_segment_queue = FifoQueue::new(queue_lengths.chain_segment_queue); let mut backfill_chain_segment = FifoQueue::new(queue_lengths.backfill_chain_segment); let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); @@ -816,6 +860,8 @@ impl BeaconProcessor { let mut bbroots_queue = FifoQueue::new(queue_lengths.bbroots_queue); let mut blbroots_queue = FifoQueue::new(queue_lengths.blbroots_queue); let mut blbrange_queue = FifoQueue::new(queue_lengths.blbrange_queue); + let mut dcbroots_queue = FifoQueue::new(queue_lengths.dcbroots_queue); + let mut dcbrange_queue = FifoQueue::new(queue_lengths.dcbrange_queue); let mut gossip_bls_to_execution_change_queue = FifoQueue::new(queue_lengths.gossip_bls_to_execution_change_queue); @@ -957,6 +1003,15 @@ impl BeaconProcessor { self.spawn_worker(item, idle_tx); } else if let Some(item) = rpc_blob_queue.pop() { self.spawn_worker(item, idle_tx); + } else if let Some(item) = rpc_custody_column_queue.pop() { + self.spawn_worker(item, idle_tx); + // TODO(das): decide proper prioritization for sampling columns + } else if let Some(item) = 
rpc_custody_column_queue.pop() { + self.spawn_worker(item, idle_tx); + } else if let Some(item) = rpc_verify_data_column_queue.pop() { + self.spawn_worker(item, idle_tx); + } else if let Some(item) = sampling_result_queue.pop() { + self.spawn_worker(item, idle_tx); // Check delayed blocks before gossip blocks, the gossip blocks might rely // on the delayed ones. } else if let Some(item) = delayed_block_queue.pop() { @@ -1118,6 +1173,13 @@ impl BeaconProcessor { self.spawn_worker(item, idle_tx); } else if let Some(item) = blbroots_queue.pop() { self.spawn_worker(item, idle_tx); + } else if let Some(item) = dcbroots_queue.pop() { + self.spawn_worker(item, idle_tx); + } else if let Some(item) = dcbrange_queue.pop() { + self.spawn_worker(item, idle_tx); + // Prioritize sampling requests after block syncing requests + } else if let Some(item) = unknown_block_sampling_request_queue.pop() { + self.spawn_worker(item, idle_tx); // Check slashings after all other consensus messages so we prioritize // following head. // @@ -1245,6 +1307,15 @@ impl BeaconProcessor { rpc_block_queue.push(work, work_id, &self.log) } Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log), + Work::RpcCustodyColumn { .. } => { + rpc_custody_column_queue.push(work, work_id, &self.log) + } + Work::RpcVerifyDataColumn(_) => { + rpc_verify_data_column_queue.push(work, work_id, &self.log) + } + Work::SamplingResult(_) => { + sampling_result_queue.push(work, work_id, &self.log) + } Work::ChainSegment { .. } => { chain_segment_queue.push(work, work_id, &self.log) } @@ -1282,9 +1353,18 @@ impl BeaconProcessor { Work::BlobsByRootsRequest { .. } => { blbroots_queue.push(work, work_id, &self.log) } + Work::DataColumnsByRootsRequest { .. } => { + dcbroots_queue.push(work, work_id, &self.log) + } + Work::DataColumnsByRangeRequest { .. } => { + dcbrange_queue.push(work, work_id, &self.log) + } Work::UnknownLightClientOptimisticUpdate { .. 
} => { unknown_light_client_update_queue.push(work, work_id, &self.log) } + Work::UnknownBlockSamplingRequest { .. } => { + unknown_block_sampling_request_queue.push(work, work_id, &self.log) + } Work::ApiRequestP0 { .. } => { api_request_p0_queue.push(work, work_id, &self.log) } @@ -1335,6 +1415,18 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL, rpc_blob_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_RPC_CUSTODY_COLUMN_QUEUE_TOTAL, + rpc_custody_column_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_RPC_VERIFY_DATA_COLUMN_QUEUE_TOTAL, + rpc_verify_data_column_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_SAMPLING_RESULT_QUEUE_TOTAL, + sampling_result_queue.len() as i64, + ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL, chain_segment_queue.len() as i64, @@ -1463,27 +1555,32 @@ impl BeaconProcessor { Work::ChainSegment(process_fn) => task_spawner.spawn_async(async move { process_fn.await; }), - Work::UnknownBlockAttestation { process_fn } => task_spawner.spawn_blocking(process_fn), - Work::UnknownBlockAggregate { process_fn } => task_spawner.spawn_blocking(process_fn), - Work::UnknownLightClientOptimisticUpdate { - parent_root: _, - process_fn, - } => task_spawner.spawn_blocking(process_fn), + Work::UnknownBlockAttestation { process_fn } + | Work::UnknownBlockAggregate { process_fn } + | Work::UnknownLightClientOptimisticUpdate { process_fn, .. 
} + | Work::UnknownBlockSamplingRequest { process_fn } => { + task_spawner.spawn_blocking(process_fn) + } Work::DelayedImportBlock { beacon_block_slot: _, beacon_block_root: _, process_fn, } => task_spawner.spawn_async(process_fn), - Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } => { - task_spawner.spawn_async(process_fn) - } + Work::RpcBlock { process_fn } + | Work::RpcBlobs { process_fn } + | Work::RpcCustodyColumn(process_fn) + | Work::RpcVerifyDataColumn(process_fn) + | Work::SamplingResult(process_fn) => task_spawner.spawn_async(process_fn), Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), Work::GossipBlock(work) | Work::GossipBlobSidecar(work) | Work::GossipDataColumnSidecar(work) => task_spawner.spawn_async(async move { work.await; }), - Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) => { + Work::BlobsByRangeRequest(process_fn) + | Work::BlobsByRootsRequest(process_fn) + | Work::DataColumnsByRootsRequest(process_fn) + | Work::DataColumnsByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => { diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index 56105f1e101..8bc03cee6c7 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -133,6 +133,30 @@ pub static BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: LazyLock> = "Count of blobs from the rpc waiting to be verified.", ) }); +// Rpc custody data columns. 
+pub static BEACON_PROCESSOR_RPC_CUSTODY_COLUMN_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_rpc_custody_column_queue_total", + "Count of custody columns from the rpc waiting to be imported.", + ) + }); +// Rpc verify data columns +pub static BEACON_PROCESSOR_RPC_VERIFY_DATA_COLUMN_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_rpc_verify_data_column_queue_total", + "Count of data columns from the rpc waiting to be verified.", + ) + }); +// Sampling result +pub static BEACON_PROCESSOR_SAMPLING_RESULT_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_sampling_result_queue_total", + "Count of sampling results waiting to be processed.", + ) + }); // Chain segments. pub static BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: LazyLock> = LazyLock::new(|| { @@ -221,6 +245,15 @@ pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: LazyLock, +> = LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_reprocessing_queue_matched_sampling_requests", + "Number of queued sampling requests where a matching block has been imported.", + ) +}); /* * Light client update reprocessing queue metrics. @@ -238,7 +271,7 @@ pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: LazyL > = LazyLock::new(|| { try_create_int_counter( "beacon_processor_reprocessing_queue_matched_optimistic_updates", - "Number of queued light client optimistic updates where as matching block has been imported." + "Number of queued light client optimistic updates where a matching block has been imported." 
) }); diff --git a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs index 137010557da..a43310ac834 100644 --- a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs @@ -50,6 +50,9 @@ pub const QUEUED_LIGHT_CLIENT_UPDATE_DELAY: Duration = Duration::from_secs(12); /// For how long to queue rpc blocks before sending them back for reprocessing. pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(4); +/// For how long to queue sampling requests for reprocessing. +pub const QUEUED_SAMPLING_REQUESTS_DELAY: Duration = Duration::from_secs(12); + /// Set an arbitrary upper-bound on the number of queued blocks to avoid DoS attacks. The fact that /// we signature-verify blocks before putting them in the queue *should* protect against this, but /// it's nice to have extra protection. @@ -61,6 +64,10 @@ const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; /// How many light client updates we keep before new ones get dropped. const MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES: usize = 128; +/// How many sampling requests we queue before new ones get dropped. +/// TODO(das): choose a sensible value +const MAXIMUM_QUEUED_SAMPLING_REQUESTS: usize = 16_384; + // Process backfill batch 50%, 60%, 80% through each slot. // // Note: use caution to set these fractions in a way that won't cause panic-y @@ -97,6 +104,8 @@ pub enum ReprocessQueueMessage { UnknownBlockAggregate(QueuedAggregate), /// A light client optimistic update that references a parent root that has not been seen as a parent. UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), + /// A sampling request that references an unknown block. + UnknownBlockSamplingRequest(QueuedSamplingRequest), /// A new backfill batch that needs to be scheduled for processing. 
BackfillSync(QueuedBackfillBatch), } @@ -109,6 +118,7 @@ pub enum ReadyWork { Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), LightClientUpdate(QueuedLightClientUpdate), + SamplingRequest(QueuedSamplingRequest), BackfillSync(QueuedBackfillBatch), } @@ -133,6 +143,12 @@ pub struct QueuedLightClientUpdate { pub process_fn: BlockingFn, } +/// A sampling request for which the corresponding block is not known while processing. +pub struct QueuedSamplingRequest { + pub beacon_block_root: Hash256, + pub process_fn: BlockingFn, +} + /// A block that arrived early and has been queued for later import. pub struct QueuedGossipBlock { pub beacon_block_slot: Slot, @@ -215,6 +231,8 @@ struct ReprocessQueue { attestations_delay_queue: DelayQueue, /// Queue to manage scheduled light client updates. lc_updates_delay_queue: DelayQueue, + /// Queue to manage scheduled sampling requests + sampling_requests_delay_queue: DelayQueue, /* Queued items */ /// Queued blocks. @@ -229,6 +247,10 @@ struct ReprocessQueue { queued_lc_updates: FnvHashMap, /// Light Client Updates per parent_root. awaiting_lc_updates_per_parent_root: HashMap>, + /// Queued sampling requests. + queued_sampling_requests: FnvHashMap, + /// Sampling requests per block root. 
+ awaiting_sampling_requests_per_block_root: HashMap>, /// Queued backfill batches queued_backfill_batches: Vec, @@ -236,15 +258,18 @@ struct ReprocessQueue { /// Next attestation id, used for both aggregated and unaggregated attestations next_attestation: usize, next_lc_update: usize, + next_sampling_request_update: usize, early_block_debounce: TimeLatch, rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, lc_update_delay_debounce: TimeLatch, + sampling_request_delay_debounce: TimeLatch, next_backfill_batch_event: Option>>, slot_clock: Arc, } pub type QueuedLightClientUpdateId = usize; +pub type QueuedSamplingRequestId = usize; #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum QueuedAttestationId { @@ -388,19 +413,24 @@ impl ReprocessQueue { rpc_block_delay_queue: DelayQueue::new(), attestations_delay_queue: DelayQueue::new(), lc_updates_delay_queue: DelayQueue::new(), + sampling_requests_delay_queue: <_>::default(), queued_gossip_block_roots: HashSet::new(), queued_lc_updates: FnvHashMap::default(), queued_aggregates: FnvHashMap::default(), queued_unaggregates: FnvHashMap::default(), + queued_sampling_requests: <_>::default(), awaiting_attestations_per_root: HashMap::new(), awaiting_lc_updates_per_parent_root: HashMap::new(), + awaiting_sampling_requests_per_block_root: <_>::default(), queued_backfill_batches: Vec::new(), next_attestation: 0, next_lc_update: 0, + next_sampling_request_update: 0, early_block_debounce: TimeLatch::default(), rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), lc_update_delay_debounce: TimeLatch::default(), + sampling_request_delay_debounce: <_>::default(), next_backfill_batch_event: None, slot_clock, } @@ -624,6 +654,35 @@ impl ReprocessQueue { self.next_lc_update += 1; } + InboundEvent::Msg(UnknownBlockSamplingRequest(queued_sampling_request)) => { + if self.sampling_requests_delay_queue.len() >= MAXIMUM_QUEUED_SAMPLING_REQUESTS { + if 
self.sampling_request_delay_debounce.elapsed() { + error!( + log, + "Sampling requests delay queue is full"; + "queue_size" => MAXIMUM_QUEUED_SAMPLING_REQUESTS, + ); + } + // Drop the inbound message. + return; + } + + let id: QueuedSamplingRequestId = self.next_sampling_request_update; + self.next_sampling_request_update += 1; + + // Register the delay. + let delay_key = self + .sampling_requests_delay_queue + .insert(id, QUEUED_SAMPLING_REQUESTS_DELAY); + + self.awaiting_sampling_requests_per_block_root + .entry(queued_sampling_request.beacon_block_root) + .or_default() + .push(id); + + self.queued_sampling_requests + .insert(id, (queued_sampling_request, delay_key)); + } InboundEvent::Msg(BlockImported { block_root, parent_root, @@ -685,6 +744,49 @@ impl ReprocessQueue { ); } } + // Unqueue the sampling requests we have for this root, if any. + if let Some(queued_ids) = self + .awaiting_sampling_requests_per_block_root + .remove(&block_root) + { + let mut sent_count = 0; + let mut failed_to_send_count = 0; + + for id in queued_ids { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_SAMPLING_REQUESTS, + ); + + if let Some((queued, delay_key)) = self.queued_sampling_requests.remove(&id) + { + // Remove the delay. + self.sampling_requests_delay_queue.remove(&delay_key); + + // Send the work. + let work = ReadyWork::SamplingRequest(queued); + + if self.ready_work_tx.try_send(work).is_err() { + failed_to_send_count += 1; + } else { + sent_count += 1; + } + } else { + // This should never happen. 
+ error!(log, "Unknown sampling request for block root"; "block_root" => ?block_root, "id" => ?id); + } + } + + if failed_to_send_count > 0 { + error!( + log, + "Ignored scheduled sampling requests for block"; + "hint" => "system may be overloaded", + "block_root" => ?block_root, + "failed_count" => failed_to_send_count, + "sent_count" => sent_count, + ); + } + } } InboundEvent::Msg(NewLightClientOptimisticUpdate { parent_root }) => { // Unqueue the light client optimistic updates we have for this root, if any. diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 393ce35f000..d299eebec8e 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -10,7 +10,6 @@ use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_servic use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; -use beacon_chain::LightClientProducerEvent; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::{CachingEth1Backend, Eth1Chain}, @@ -19,6 +18,7 @@ use beacon_chain::{ store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, }; +use beacon_chain::{Kzg, LightClientProducerEvent}; use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; use beacon_processor::{BeaconProcessorConfig, BeaconProcessorQueueLengths}; use environment::RuntimeContext; @@ -207,6 +207,7 @@ where .beacon_graffiti(beacon_graffiti) .event_handler(event_handler) .execution_layer(execution_layer) + .import_all_data_columns(config.network.subscribe_all_data_column_subnets) .validator_monitor_config(config.validator_monitor.clone()); let builder = if let Some(slasher) = self.slasher.clone() { @@ -504,7 +505,7 @@ where deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot( 
config.eth1, context.log().clone(), - spec, + spec.clone(), &snapshot, ) { Ok(service) => { @@ -623,12 +624,15 @@ where }; let beacon_chain_builder = if let Some(trusted_setup) = config.trusted_setup { - let kzg = trusted_setup - .try_into() - .map(Arc::new) - .map(Some) - .map_err(|e| format!("Failed to load trusted setup: {:?}", e))?; - beacon_chain_builder.kzg(kzg) + let kzg_err_msg = |e| format!("Failed to load trusted setup: {:?}", e); + + let kzg = if spec.is_peer_das_scheduled() { + Kzg::new_from_trusted_setup_das_enabled(trusted_setup).map_err(kzg_err_msg)? + } else { + Kzg::new_from_trusted_setup(trusted_setup).map_err(kzg_err_msg)? + }; + + beacon_chain_builder.kzg(Some(Arc::new(kzg))) } else { beacon_chain_builder }; diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 6733c5bfd31..4910cfd2e1b 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -21,6 +21,7 @@ ethereum_ssz_derive = { workspace = true } tree_hash = { workspace = true } parking_lot = { workspace = true } slog = { workspace = true } +logging = { workspace = true } superstruct = { workspace = true } tokio = { workspace = true } state_processing = { workspace = true } diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs index 0ccdb4fc0eb..9c840aea210 100644 --- a/beacon_node/eth1/src/block_cache.rs +++ b/beacon_node/eth1/src/block_cache.rs @@ -195,6 +195,8 @@ impl BlockCache { #[cfg(test)] mod tests { + use types::FixedBytesExtended; + use super::*; fn get_block(i: u64, interval_secs: u64) -> Eth1Block { diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index b443f739e81..a2d4a1cf06d 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -471,7 +471,7 @@ impl DepositCache { pub mod tests { use super::*; use execution_layer::http::deposit_log::Log; - use types::{EthSpec, MainnetEthSpec}; + use types::{EthSpec, FixedBytesExtended, 
MainnetEthSpec}; /// The data from a deposit event, using the v0.8.3 version of the deposit contract. pub const EXAMPLE_LOG: &[u8] = &[ diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 9cc1da13826..e5d60fac49c 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -1129,7 +1129,7 @@ impl Service { Ok(BlockCacheUpdateOutcome { blocks_imported, - head_block_number: self.inner.block_cache.read().highest_block_number(), + head_block_number: block_cache.highest_block_number(), }) } } diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 0479ea7c585..3ad9b34381a 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -4,27 +4,24 @@ use eth1::{Config, Eth1Endpoint, Service}; use eth1::{DepositCache, DEFAULT_CHAIN_ID}; use eth1_test_rig::{AnvilEth1Instance, Http, Middleware, Provider}; use execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log}; +use logging::test_logger; use merkle_proof::verify_merkle_proof; use sensitive_url::SensitiveUrl; -use slog::Logger; -use sloggers::{null::NullLoggerBuilder, Build}; use std::ops::Range; use std::time::Duration; use tree_hash::TreeHash; -use types::{DepositData, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, Signature}; +use types::{ + DepositData, EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, + Signature, +}; const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 32; -pub fn null_logger() -> Logger { - let log_builder = NullLoggerBuilder; - log_builder.build().expect("should build logger") -} - pub fn new_env() -> Environment { EnvironmentBuilder::minimal() .multi_threaded_tokio_runtime() .expect("should start tokio runtime") - .null_logger() + .test_logger() .expect("should start null logger") .build() .expect("should build env") @@ -103,7 +100,7 @@ mod eth1_cache { #[tokio::test] async fn simple_scenario() { async { - let log = null_logger(); + let log = 
test_logger(); for follow_distance in 0..3 { let eth1 = new_anvil_instance() @@ -185,7 +182,7 @@ mod eth1_cache { #[tokio::test] async fn big_skip() { async { - let log = null_logger(); + let log = test_logger(); let eth1 = new_anvil_instance() .await @@ -240,7 +237,7 @@ mod eth1_cache { #[tokio::test] async fn pruning() { async { - let log = null_logger(); + let log = test_logger(); let eth1 = new_anvil_instance() .await @@ -292,7 +289,7 @@ mod eth1_cache { #[tokio::test] async fn double_update() { async { - let log = null_logger(); + let log = test_logger(); let n = 16; @@ -345,7 +342,7 @@ mod deposit_tree { #[tokio::test] async fn updating() { async { - let log = null_logger(); + let log = test_logger(); let n = 4; @@ -426,7 +423,7 @@ mod deposit_tree { #[tokio::test] async fn double_update() { async { - let log = null_logger(); + let log = test_logger(); let n = 8; @@ -688,7 +685,7 @@ mod fast { #[tokio::test] async fn deposit_cache_query() { async { - let log = null_logger(); + let log = test_logger(); let eth1 = new_anvil_instance() .await @@ -771,7 +768,7 @@ mod persist { #[tokio::test] async fn test_persist_caches() { async { - let log = null_logger(); + let log = test_logger(); let eth1 = new_anvil_instance() .await diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 0009cd002ea..93d8086149d 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -10,14 +10,14 @@ alloy-primitives = { workspace = true } types = { workspace = true } tokio = { workspace = true } slog = { workspace = true } +logging = { workspace = true } sensitive_url = { workspace = true } reqwest = { workspace = true } ethereum_serde_utils = { workspace = true } serde_json = { workspace = true } serde = { workspace = true } warp = { workspace = true } -jsonwebtoken = "8" -environment = { workspace = true } +jsonwebtoken = "9" bytes = { workspace = true } task_executor = { workspace = true } hex = { 
workspace = true } @@ -51,3 +51,4 @@ eth2_network_config = { workspace = true } alloy-rlp = { workspace = true } alloy-consensus = { workspace = true } lighthouse_version = { workspace = true } +fixed_bytes = { workspace = true } diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index 10edb7b2fd4..cdc172cff47 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -77,7 +77,7 @@ mod test { use super::*; use hex::FromHex; use std::str::FromStr; - use types::{Address, Hash256, Hash64}; + use types::{Address, Hash256, Hash64, Uint256}; fn test_rlp_encoding( header: &ExecutionBlockHeader, @@ -105,15 +105,15 @@ mod test { transactions_root: Hash256::from_str("50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf").unwrap(), receipts_root: Hash256::from_str("29b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9").unwrap(), logs_bloom: <[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), - difficulty: 0x020000.into(), - number: 0x01_u64.into(), - gas_limit: 0x016345785d8a0000_u64.into(), - gas_used: 0x015534_u64.into(), + difficulty: Uint256::from(0x020000), + number: Uint256::from(0x01_u64), + gas_limit: Uint256::from(0x016345785d8a0000_u64), + gas_used: Uint256::from(0x015534_u64), timestamp: 0x079e, extra_data: vec![0x42], mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(), - nonce: 
Hash64::zero(), - base_fee_per_gas: 0x036b_u64.into(), + nonce: Hash64::ZERO, + base_fee_per_gas: Uint256::from(0x036b_u64), withdrawals_root: None, blob_gas_used: None, excess_blob_gas: None, @@ -136,15 +136,15 @@ mod test { transactions_root: Hash256::from_str("50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf").unwrap(), receipts_root: Hash256::from_str("29b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9").unwrap(), logs_bloom: <[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), - difficulty: 0x00.into(), - number: 0x01_u64.into(), - gas_limit: 0x016345785d8a0000_u64.into(), - gas_used: 0x015534_u64.into(), + difficulty: Uint256::from(0x00), + number: Uint256::from(0x01_u64), + gas_limit: Uint256::from(0x016345785d8a0000_u64), + gas_used: Uint256::from(0x015534_u64), timestamp: 0x079e, extra_data: vec![0x42], mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000020000").unwrap(), - nonce: Hash64::zero(), - base_fee_per_gas: 0x036b_u64.into(), + nonce: Hash64::ZERO, + base_fee_per_gas: Uint256::from(0x036b_u64), withdrawals_root: None, blob_gas_used: None, excess_blob_gas: None, @@ -168,15 +168,15 @@ mod test { transactions_root: Hash256::from_str("0223f0cb35f184d2ac409e89dc0768ad738f777bd1c85d3302ca50f307180c94").unwrap(), receipts_root: Hash256::from_str("371c76821b1cc21232574604eac5349d51647eb530e2a45d4f6fe2c501351aa5").unwrap(), logs_bloom: <[u8; 
256]>::from_hex("1a2c559955848d2662a0634cb40c7a6192a1524f11061203689bcbcdec901b054084d4f4d688009d24c10918e0089b48e72fe2d7abafb903889d10c3827c6901096612d259801b1b7ba1663a4201f5f88f416a9997c55bcc2c54785280143b057a008764c606182e324216822a2d5913e797a05c16cc1468d001acf3783b18e00e0203033e43106178db554029e83ca46402dc49d929d7882a04a0e7215041bdabf7430bd10ef4bb658a40f064c63c4816660241c2480862f26742fdf9ca41637731350301c344e439428182a03e384484e6d65d0c8a10117c6739ca201b60974519a1ae6b0c3966c0f650b449d10eae065dab2c83ab4edbab5efdea50bbc801").unwrap().into(), - difficulty: 0.into(), - number: 16182891.into(), - gas_limit: 0x1c9c380.into(), - gas_used: 0xe9b752.into(), + difficulty: Uint256::ZERO, + number: Uint256::from(16182891), + gas_limit: Uint256::from(0x1c9c380), + gas_used: Uint256::from(0xe9b752), timestamp: 0x6399bf63, extra_data: hex::decode("496c6c756d696e61746520446d6f63726174697a6520447374726962757465").unwrap(), mix_hash: Hash256::from_str("bf5289894b2ceab3549f92f063febbac896b280ddb18129a57cff13113c11b13").unwrap(), - nonce: Hash64::zero(), - base_fee_per_gas: 0x34187b238_u64.into(), + nonce: Hash64::ZERO, + base_fee_per_gas: Uint256::from(0x34187b238_u64), withdrawals_root: None, blob_gas_used: None, excess_blob_gas: None, @@ -198,15 +198,15 @@ mod test { transactions_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), receipts_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), logs_bloom:<[u8; 
256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), - difficulty: 0.into(), - number: 97.into(), - gas_limit: 27482534.into(), - gas_used: 0.into(), + difficulty: Uint256::ZERO, + number: Uint256::from(97), + gas_limit: Uint256::from(27482534), + gas_used: Uint256::ZERO, timestamp: 1692132829u64, extra_data: hex::decode("d883010d00846765746888676f312e32302e37856c696e7578").unwrap(), mix_hash: Hash256::from_str("0b493c22d2ad4ca76c77ae6ad916af429b42b1dc98fdcb8e5ddbd049bbc5d623").unwrap(), - nonce: Hash64::zero(), - base_fee_per_gas: 2374u64.into(), + nonce: Hash64::ZERO, + base_fee_per_gas: Uint256::from(2374u64), withdrawals_root: Some(Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap()), blob_gas_used: Some(0x0u64), excess_blob_gas: Some(0x0u64), @@ -228,15 +228,15 @@ mod test { transactions_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), receipts_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), logs_bloom:<[u8; 
256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), - difficulty: 0.into(), - number: 97.into(), - gas_limit: 27482534.into(), - gas_used: 0.into(), + difficulty: Uint256::ZERO, + number: Uint256::from(97), + gas_limit: Uint256::from(27482534), + gas_used: Uint256::ZERO, timestamp: 1692132829u64, extra_data: hex::decode("d883010d00846765746888676f312e32302e37856c696e7578").unwrap(), mix_hash: Hash256::from_str("0b493c22d2ad4ca76c77ae6ad916af429b42b1dc98fdcb8e5ddbd049bbc5d623").unwrap(), - nonce: Hash64::zero(), - base_fee_per_gas: 2374u64.into(), + nonce: Hash64::ZERO, + base_fee_per_gas: Uint256::from(2374u64), withdrawals_root: Some(Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap()), blob_gas_used: Some(0x0u64), excess_blob_gas: Some(0x0u64), diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 6a56a5d076f..8cfe6e9efde 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -2,7 +2,8 @@ use crate::engines::ForkchoiceState; use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, 
ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, }; @@ -20,11 +21,11 @@ use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use strum::IntoStaticStr; use superstruct::superstruct; -use types::execution_payload::{DepositRequests, WithdrawalRequests}; +use types::execution_payload::{ConsolidationRequests, DepositRequests, WithdrawalRequests}; pub use types::{ - Address, BeaconBlockRef, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, - ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList, - Withdrawal, Withdrawals, + Address, BeaconBlockRef, ConsolidationRequest, EthSpec, ExecutionBlockHash, ExecutionPayload, + ExecutionPayloadHeader, ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, + Uint256, VariableList, Withdrawal, Withdrawals, }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, @@ -42,7 +43,7 @@ pub use new_payload_request::{ NewPayloadRequestDeneb, NewPayloadRequestElectra, }; -use self::json_structures::{JsonDepositRequest, JsonWithdrawalRequest}; +use self::json_structures::{JsonConsolidationRequest, JsonDepositRequest, JsonWithdrawalRequest}; pub const LATEST_TAG: &str = "latest"; @@ -74,6 +75,7 @@ pub enum Error { UnsupportedForkVariant(String), InvalidClientVersion(String), RlpDecoderError(rlp::DecoderError), + TooManyConsolidationRequests(usize), } impl From for Error { @@ -152,6 +154,7 @@ pub struct ExecutionBlock { pub block_hash: ExecutionBlockHash, #[serde(rename = "number", with = "serde_utils::u64_hex_be")] pub block_number: u64, + pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, #[serde(with = "serde_utils::u64_hex_be")] @@ -173,6 +176,7 @@ pub struct ExecutionBlock { pub struct ExecutionBlockWithTransactions { pub parent_hash: ExecutionBlockHash, #[serde(alias = "miner")] + 
#[serde(with = "serde_utils::address_hex")] pub fee_recipient: Address, pub state_root: Hash256, pub receipts_root: Hash256, @@ -206,6 +210,8 @@ pub struct ExecutionBlockWithTransactions { pub deposit_requests: Vec, #[superstruct(only(Electra))] pub withdrawal_requests: Vec, + #[superstruct(only(Electra))] + pub consolidation_requests: Vec, } impl TryFrom> for ExecutionBlockWithTransactions { @@ -323,6 +329,11 @@ impl TryFrom> for ExecutionBlockWithTransactions .into_iter() .map(|withdrawal| withdrawal.into()) .collect(), + consolidation_requests: block + .consolidation_requests + .into_iter() + .map(Into::into) + .collect(), }) } }; @@ -541,27 +552,106 @@ impl GetPayloadResponse { } } +#[superstruct( + variants(V1, V2), + variant_attributes(derive(Clone, Debug),), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] #[derive(Clone, Debug)] -pub struct ExecutionPayloadBodyV1 { +pub struct ExecutionPayloadBody { pub transactions: Transactions, pub withdrawals: Option>, + #[superstruct(only(V2))] pub deposit_requests: Option>, + #[superstruct(only(V2))] pub withdrawal_requests: Option>, + #[superstruct(only(V2))] + pub consolidation_requests: Option>, } -impl ExecutionPayloadBodyV1 { +impl ExecutionPayloadBody { + #[allow(clippy::type_complexity)] + pub fn deconstruct( + self, + ) -> ( + Transactions, + Option>, + Option>, + Option>, + Option>, + ) { + match self { + ExecutionPayloadBody::V1(body) => { + (body.transactions, body.withdrawals, None, None, None) + } + ExecutionPayloadBody::V2(body) => ( + body.transactions, + body.withdrawals, + body.deposit_requests, + body.withdrawal_requests, + body.consolidation_requests, + ), + } + } pub fn to_payload( self, header: ExecutionPayloadHeader, ) -> Result, String> { - match header { - ExecutionPayloadHeader::Bellatrix(header) => { - if self.withdrawals.is_some() { + let header_fork = header.fork_name_unchecked(); + match &self { + Self::V1(_) => { + if header_fork.electra_enabled() { return 
Err(format!( - "block {} is merge but payload body has withdrawals", - header.block_hash + "block {} is {} but response is ExecutionPayloadBodyV1. Does the EL support {}?", + header.block_hash(), + header_fork, + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, )); } + } + Self::V2(_) => {} + } + + let ( + transactions, + withdrawals, + deposit_requests, + withdrawal_requests, + consolidation_requests, + ) = self.deconstruct(); + if !header_fork.capella_enabled() && withdrawals.is_some() { + return Err(format!( + "block {} is {} but payload body has withdrawals", + header.block_hash(), + header_fork + )); + } + if !header_fork.electra_enabled() { + if deposit_requests.is_some() { + return Err(format!( + "block {} is {} but payload body has deposit_requests", + header.block_hash(), + header_fork + )); + } + if withdrawal_requests.is_some() { + return Err(format!( + "block {} is {} but payload body has withdrawal_requests", + header.block_hash(), + header_fork + )); + } + if consolidation_requests.is_some() { + return Err(format!( + "block {} is {} but payload body has consolidation_requests", + header.block_hash(), + header_fork + )); + } + } + + match header { + ExecutionPayloadHeader::Bellatrix(header) => { Ok(ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { parent_hash: header.parent_hash, fee_recipient: header.fee_recipient, @@ -576,100 +666,108 @@ impl ExecutionPayloadBodyV1 { extra_data: header.extra_data, base_fee_per_gas: header.base_fee_per_gas, block_hash: header.block_hash, - transactions: self.transactions, + transactions, })) } ExecutionPayloadHeader::Capella(header) => { - if let Some(withdrawals) = self.withdrawals { - Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - 
gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions: self.transactions, - withdrawals, - })) - } else { - Err(format!( - "block {} is capella but payload body doesn't have withdrawals", - header.block_hash - )) - } + let withdrawals = withdrawals.ok_or_else(|| { + format!( + "block {} is {} but payload body has withdrawals set to null", + header.block_hash, header_fork + ) + })?; + Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions, + withdrawals, + })) } ExecutionPayloadHeader::Deneb(header) => { - if let Some(withdrawals) = self.withdrawals { - Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions: self.transactions, - withdrawals, - blob_gas_used: header.blob_gas_used, - excess_blob_gas: header.excess_blob_gas, - })) - } else { - Err(format!( - "block {} is post-capella but payload body doesn't have withdrawals", - header.block_hash - )) - } + let withdrawals = withdrawals.ok_or_else(|| { + format!( + "block {} is 
{} but payload body has withdrawals set to null", + header.block_hash, header_fork + ) + })?; + Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + })) } ExecutionPayloadHeader::Electra(header) => { - let withdrawals_exist = self.withdrawals.is_some(); - let deposit_requests_exist = self.deposit_requests.is_some(); - let withdrawal_requests_exist = self.withdrawal_requests.is_some(); - if let (Some(withdrawals), Some(deposit_requests), Some(withdrawal_requests)) = ( - self.withdrawals, - self.deposit_requests, - self.withdrawal_requests, - ) { - Ok(ExecutionPayload::Electra(ExecutionPayloadElectra { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions: self.transactions, - withdrawals, - blob_gas_used: header.blob_gas_used, - excess_blob_gas: header.excess_blob_gas, - deposit_requests, - withdrawal_requests, - })) - } else { - Err(format!( - "block {} is post-electra but payload body doesn't have withdrawals/deposit_requests/withdrawal_requests \ - withdrawals: {}, deposit_requests: {}, 
withdrawal_requests: {}", - header.block_hash, withdrawals_exist, deposit_requests_exist, withdrawal_requests_exist - )) - } + let withdrawals = withdrawals.ok_or_else(|| { + format!( + "block {} is {} but payload body has withdrawals set to null", + header.block_hash, header_fork + ) + })?; + let deposit_requests = deposit_requests.ok_or_else(|| { + format!( + "block {} is {} but payload body has deposit_requests set to null", + header.block_hash, header_fork + ) + })?; + let withdrawal_requests = withdrawal_requests.ok_or_else(|| { + format!( + "block {} is {} but payload body has withdrawal_requests set to null", + header.block_hash, header_fork + ) + })?; + let consolidation_requests = consolidation_requests.ok_or_else(|| { + format!( + "block {} is {} but payload body has consolidation_requests set to null", + header.block_hash, header_fork + ) + })?; + Ok(ExecutionPayload::Electra(ExecutionPayloadElectra { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + deposit_requests, + withdrawal_requests, + consolidation_requests, + })) } } } @@ -686,6 +784,8 @@ pub struct EngineCapabilities { pub forkchoice_updated_v3: bool, pub get_payload_bodies_by_hash_v1: bool, pub get_payload_bodies_by_range_v1: bool, + pub get_payload_bodies_by_hash_v2: bool, + pub get_payload_bodies_by_range_v2: bool, pub get_payload_v1: bool, pub get_payload_v2: bool, pub get_payload_v3: bool, @@ -723,6 +823,12 @@ impl EngineCapabilities { if self.get_payload_bodies_by_range_v1 { 
response.push(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1); } + if self.get_payload_bodies_by_hash_v2 { + response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2); + } + if self.get_payload_bodies_by_range_v2 { + response.push(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2); + } if self.get_payload_v1 { response.push(ENGINE_GET_PAYLOAD_V1); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index ecaf9c6c23e..5bc1343a0eb 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -50,6 +50,8 @@ pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1"; pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1"; +pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2: &str = "engine_getPayloadBodiesByHashV2"; +pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2: &str = "engine_getPayloadBodiesByRangeV2"; pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; @@ -78,6 +80,8 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, ENGINE_GET_CLIENT_VERSION_V1, ]; @@ -873,7 +877,7 @@ impl HttpJsonRpc { // Set the V1 payload values from the EE to be zero. This simulates // the pre-block-value functionality of always choosing the builder // block. 
- block_value: Uint256::zero(), + block_value: Uint256::ZERO, })) } @@ -1035,7 +1039,7 @@ impl HttpJsonRpc { pub async fn get_payload_bodies_by_hash_v1( &self, block_hashes: Vec, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let params = json!([block_hashes]); let response: Vec>> = self @@ -1048,7 +1052,27 @@ impl HttpJsonRpc { Ok(response .into_iter() - .map(|opt_json| opt_json.map(From::from)) + .map(|opt_json| opt_json.map(|v1| JsonExecutionPayloadBody::V1(v1).into())) + .collect()) + } + + pub async fn get_payload_bodies_by_hash_v2( + &self, + block_hashes: Vec, + ) -> Result>>, Error> { + let params = json!([block_hashes]); + + let response: Vec>> = self + .rpc_request( + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, + params, + ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response + .into_iter() + .map(|opt_json| opt_json.map(|v2| JsonExecutionPayloadBody::V2(v2).into())) .collect()) } @@ -1056,7 +1080,7 @@ impl HttpJsonRpc { &self, start: u64, count: u64, - ) -> Result>>, Error> { + ) -> Result>>, Error> { #[derive(Serialize)] #[serde(transparent)] struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); @@ -1072,7 +1096,31 @@ impl HttpJsonRpc { Ok(response .into_iter() - .map(|opt_json| opt_json.map(From::from)) + .map(|opt_json| opt_json.map(|v1| JsonExecutionPayloadBody::V1(v1).into())) + .collect()) + } + + pub async fn get_payload_bodies_by_range_v2( + &self, + start: u64, + count: u64, + ) -> Result>>, Error> { + #[derive(Serialize)] + #[serde(transparent)] + struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); + + let params = json!([Quantity(start), Quantity(count)]); + let response: Vec>> = self + .rpc_request( + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, + params, + ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response + .into_iter() + .map(|opt_json| opt_json.map(|v2| JsonExecutionPayloadBody::V2(v2).into())) .collect()) } @@ -1099,6 
+1147,10 @@ impl HttpJsonRpc { .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1), get_payload_bodies_by_range_v1: capabilities .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), + get_payload_bodies_by_hash_v2: capabilities + .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2), + get_payload_bodies_by_range_v2: capabilities + .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2), get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), @@ -1274,6 +1326,39 @@ impl HttpJsonRpc { } } + pub async fn get_payload_bodies_by_hash( + &self, + block_hashes: Vec, + ) -> Result>>, Error> { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.get_payload_bodies_by_hash_v2 { + self.get_payload_bodies_by_hash_v2(block_hashes).await + } else if engine_capabilities.get_payload_bodies_by_hash_v1 { + self.get_payload_bodies_by_hash_v1(block_hashes).await + } else { + Err(Error::RequiredMethodUnsupported( + "engine_getPayloadBodiesByHash", + )) + } + } + + pub async fn get_payload_bodies_by_range( + &self, + start: u64, + count: u64, + ) -> Result>>, Error> { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.get_payload_bodies_by_range_v2 { + self.get_payload_bodies_by_range_v2(start, count).await + } else if engine_capabilities.get_payload_bodies_by_range_v1 { + self.get_payload_bodies_by_range_v1(start, count).await + } else { + Err(Error::RequiredMethodUnsupported( + "engine_getPayloadBodiesByRange", + )) + } + } + // automatically selects the latest version of // forkchoice_updated that the execution engine supports pub async fn forkchoice_updated( @@ -1329,7 +1414,7 @@ mod test { use std::future::Future; use std::str::FromStr; use std::sync::Arc; - use types::{MainnetEthSpec, Unsigned}; + use types::{FixedBytesExtended, MainnetEthSpec, Unsigned}; struct Tester { server: 
MockServer, diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index f654ba4a0ea..dbf889bbc8c 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -5,10 +5,7 @@ use strum::EnumString; use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobsList; -use types::{ - DepositRequest, ExecutionLayerWithdrawalRequest, FixedVector, PublicKeyBytes, Signature, - Unsigned, -}; +use types::{DepositRequest, FixedVector, PublicKeyBytes, Signature, Unsigned, WithdrawalRequest}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -76,6 +73,7 @@ pub struct JsonPayloadIdResponse { #[serde(bound = "E: EthSpec", rename_all = "camelCase", untagged)] pub struct JsonExecutionPayload { pub parent_hash: ExecutionBlockHash, + #[serde(with = "serde_utils::address_hex")] pub fee_recipient: Address, pub state_root: Hash256, pub receipts_root: Hash256, @@ -94,6 +92,7 @@ pub struct JsonExecutionPayload { pub extra_data: VariableList, #[serde(with = "serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, + pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, @@ -106,11 +105,13 @@ pub struct JsonExecutionPayload { #[serde(with = "serde_utils::u64_hex_be")] pub excess_blob_gas: u64, #[superstruct(only(V4))] - // TODO(electra): Field name should be changed post devnet-0. 
See https://github.com/ethereum/execution-apis/pull/544 pub deposit_requests: VariableList, #[superstruct(only(V4))] pub withdrawal_requests: VariableList, + #[superstruct(only(V4))] + pub consolidation_requests: + VariableList, } impl From> for JsonExecutionPayloadV1 { @@ -225,6 +226,12 @@ impl From> for JsonExecutionPayloadV4 .map(Into::into) .collect::>() .into(), + consolidation_requests: payload + .consolidation_requests + .into_iter() + .map(Into::into) + .collect::>() + .into(), } } } @@ -353,6 +360,12 @@ impl From> for ExecutionPayloadElectra .map(Into::into) .collect::>() .into(), + consolidation_requests: payload + .consolidation_requests + .into_iter() + .map(Into::into) + .collect::>() + .into(), } } } @@ -438,6 +451,7 @@ pub struct JsonWithdrawal { pub index: u64, #[serde(with = "serde_utils::u64_hex_be")] pub validator_index: u64, + #[serde(with = "serde_utils::address_hex")] pub address: Address, #[serde(with = "serde_utils::u64_hex_be")] pub amount: u64, @@ -477,7 +491,7 @@ impl<'a> From<&'a JsonWithdrawal> for EncodableJsonWithdrawal<'a> { Self { index: json_withdrawal.index, validator_index: json_withdrawal.validator_index, - address: json_withdrawal.address.as_bytes(), + address: json_withdrawal.address.as_slice(), amount: json_withdrawal.amount, } } @@ -498,6 +512,7 @@ pub struct JsonPayloadAttributes { #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, pub prev_randao: Hash256, + #[serde(with = "serde_utils::address_hex")] pub suggested_fee_recipient: Address, #[superstruct(only(V2, V3))] pub withdrawals: Vec, @@ -587,7 +602,9 @@ impl From> for BlobsBundle { #[serde(rename_all = "camelCase")] pub struct JsonForkchoiceStateV1 { pub head_block_hash: ExecutionBlockHash, + pub safe_block_hash: ExecutionBlockHash, + pub finalized_block_hash: ExecutionBlockHash, } @@ -737,45 +754,71 @@ impl From for JsonForkchoiceUpdatedV1Response { } } -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "E: EthSpec")] -#[serde(rename_all = 
"camelCase")] -pub struct JsonExecutionPayloadBodyV1 { +#[superstruct( + variants(V1, V2), + variant_attributes( + derive(Clone, Debug, Serialize, Deserialize), + serde(bound = "E: EthSpec", rename_all = "camelCase"), + ), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Clone, Debug, Serialize)] +#[serde(bound = "E: EthSpec", rename_all = "camelCase", untagged)] +pub struct JsonExecutionPayloadBody { #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, pub withdrawals: Option>, + #[superstruct(only(V2))] pub deposit_requests: Option>, + #[superstruct(only(V2))] pub withdrawal_requests: Option>, -} - -impl From> for ExecutionPayloadBodyV1 { - fn from(value: JsonExecutionPayloadBodyV1) -> Self { - Self { - transactions: value.transactions, - withdrawals: value.withdrawals.map(|json_withdrawals| { - Withdrawals::::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) + #[superstruct(only(V2))] + pub consolidation_requests: + Option>, +} + +impl From> for ExecutionPayloadBody { + fn from(value: JsonExecutionPayloadBody) -> Self { + match value { + JsonExecutionPayloadBody::V1(body_v1) => Self::V1(ExecutionPayloadBodyV1 { + transactions: body_v1.transactions, + withdrawals: body_v1.withdrawals.map(|json_withdrawals| { + Withdrawals::::from( + json_withdrawals + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), }), - deposit_requests: value.deposit_requests.map(|json_receipts| { - DepositRequests::::from( - json_receipts - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - withdrawal_requests: value.withdrawal_requests.map(|json_withdrawal_requests| { - WithdrawalRequests::::from( - json_withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>(), - ) + JsonExecutionPayloadBody::V2(body_v2) => Self::V2(ExecutionPayloadBodyV2 { + transactions: body_v2.transactions, + withdrawals: body_v2.withdrawals.map(|json_withdrawals| { + 
Withdrawals::::from( + json_withdrawals + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), + deposit_requests: body_v2.deposit_requests.map(|json_receipts| { + DepositRequests::::from( + json_receipts + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), + withdrawal_requests: body_v2.withdrawal_requests.map(|json_withdrawal_requests| { + WithdrawalRequests::::from( + json_withdrawal_requests + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), + consolidation_requests: body_v2.consolidation_requests, }), } } @@ -786,6 +829,7 @@ impl From> for ExecutionPayloadBodyV1< pub struct TransitionConfigurationV1 { #[serde(with = "serde_utils::u256_hex_be")] pub terminal_total_difficulty: Uint256, + pub terminal_block_hash: ExecutionBlockHash, #[serde(with = "serde_utils::u64_hex_be")] pub terminal_block_number: u64, @@ -896,27 +940,55 @@ impl From for DepositRequest { #[serde(rename_all = "camelCase")] pub struct JsonWithdrawalRequest { pub source_address: Address, - pub validator_public_key: PublicKeyBytes, + pub validator_pubkey: PublicKeyBytes, #[serde(with = "serde_utils::u64_hex_be")] pub amount: u64, } -impl From for JsonWithdrawalRequest { - fn from(withdrawal_request: ExecutionLayerWithdrawalRequest) -> Self { +impl From for JsonWithdrawalRequest { + fn from(withdrawal_request: WithdrawalRequest) -> Self { Self { source_address: withdrawal_request.source_address, - validator_public_key: withdrawal_request.validator_pubkey, + validator_pubkey: withdrawal_request.validator_pubkey, amount: withdrawal_request.amount, } } } -impl From for ExecutionLayerWithdrawalRequest { +impl From for WithdrawalRequest { fn from(json_withdrawal_request: JsonWithdrawalRequest) -> Self { Self { source_address: json_withdrawal_request.source_address, - validator_pubkey: json_withdrawal_request.validator_public_key, + validator_pubkey: json_withdrawal_request.validator_pubkey, amount: json_withdrawal_request.amount, } } } + +#[derive(Debug, Clone, Serialize, 
Deserialize, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct JsonConsolidationRequest { + pub source_address: Address, + pub source_pubkey: PublicKeyBytes, + pub target_pubkey: PublicKeyBytes, +} + +impl From for JsonConsolidationRequest { + fn from(consolidation_request: ConsolidationRequest) -> Self { + Self { + source_address: consolidation_request.source_address, + source_pubkey: consolidation_request.source_pubkey, + target_pubkey: consolidation_request.target_pubkey, + } + } +} + +impl From for ConsolidationRequest { + fn from(json_consolidation_request: JsonConsolidationRequest) -> Self { + Self { + source_address: json_consolidation_request.source_address, + source_pubkey: json_consolidation_request.source_pubkey, + target_pubkey: json_consolidation_request.target_pubkey, + } + } +} diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index eaa739d7a5d..6e3aca39594 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -18,6 +18,7 @@ pub use engines::{EngineState, ForkchoiceState}; use eth2::types::FullPayloadContents; use eth2::types::{builder_bid::SignedBuilderBid, BlobsBundle, ForkVersionedResponse}; use ethers_core::types::Transaction as EthersTransaction; +use fixed_bytes::UintExtended; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -1132,9 +1133,8 @@ impl ExecutionLayer { let relay_value = *relay.data.message.value(); let boosted_relay_value = match builder_boost_factor { - Some(builder_boost_factor) => { - (relay_value / 100).saturating_mul(builder_boost_factor.into()) - } + Some(builder_boost_factor) => (relay_value / Uint256::from(100)) + .saturating_mul(Uint256::from(builder_boost_factor)), None => relay_value, }; @@ -1771,10 +1771,10 @@ impl ExecutionLayer { pub async fn get_payload_bodies_by_hash( &self, hashes: Vec, - ) -> Result>>, Error> { + ) -> Result>>, Error> { self.engine() 
.request(|engine: &Engine| async move { - engine.api.get_payload_bodies_by_hash_v1(hashes).await + engine.api.get_payload_bodies_by_hash(hashes).await }) .await .map_err(Box::new) @@ -1785,14 +1785,11 @@ impl ExecutionLayer { &self, start: u64, count: u64, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE); self.engine() .request(|engine: &Engine| async move { - engine - .api - .get_payload_bodies_by_range_v1(start, count) - .await + engine.api.get_payload_bodies_by_range(start, count).await }) .await .map_err(Box::new) @@ -2010,6 +2007,15 @@ impl ExecutionLayer { .collect(), ) .map_err(ApiError::DeserializeWithdrawalRequests)?; + let n_consolidations = electra_block.consolidation_requests.len(); + let consolidation_requests = VariableList::new( + electra_block + .consolidation_requests + .into_iter() + .map(Into::into) + .collect::>(), + ) + .map_err(|_| ApiError::TooManyConsolidationRequests(n_consolidations))?; ExecutionPayload::Electra(ExecutionPayloadElectra { parent_hash: electra_block.parent_hash, fee_recipient: electra_block.fee_recipient, @@ -2030,6 +2036,7 @@ impl ExecutionLayer { excess_blob_gas: electra_block.excess_blob_gas, deposit_requests, withdrawal_requests, + consolidation_requests, }) } }; @@ -2189,15 +2196,11 @@ fn verify_builder_bid( let is_signature_valid = bid.data.verify_signature(spec); let header = &bid.data.message.header(); - // Avoid logging values that we can't represent with our Prometheus library. 
- let payload_value_gwei = bid.data.message.value() / 1_000_000_000; - if payload_value_gwei <= Uint256::from(i64::MAX) { - metrics::set_gauge_vec( - &metrics::EXECUTION_LAYER_PAYLOAD_BIDS, - &[metrics::BUILDER], - payload_value_gwei.low_u64() as i64, - ); - } + metrics::set_gauge_vec( + &metrics::EXECUTION_LAYER_PAYLOAD_BIDS, + &[metrics::BUILDER], + bid.data.message.value().to_i64(), + ); let expected_withdrawals_root = payload_attributes .withdrawals() diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 8619e24a238..041b31e2b08 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -23,7 +23,8 @@ use tree_hash_derive::TreeHash; use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, - ExecutionPayloadHeader, ForkName, Hash256, Transaction, Transactions, Uint256, + ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, Transaction, Transactions, + Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; @@ -107,7 +108,9 @@ impl Block { #[serde(rename_all = "camelCase")] pub struct PoWBlock { pub block_number: u64, + pub block_hash: ExecutionBlockHash, + pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, pub timestamp: u64, @@ -581,7 +584,7 @@ impl ExecutionBlockGenerator { gas_used: GAS_USED, timestamp: pa.timestamp, extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), + base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), }), @@ -598,7 +601,7 @@ impl ExecutionBlockGenerator { gas_used: GAS_USED, timestamp: pa.timestamp, extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: 
Uint256::one(), + base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), }), @@ -614,7 +617,7 @@ impl ExecutionBlockGenerator { gas_used: GAS_USED, timestamp: pa.timestamp, extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), + base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), withdrawals: pa.withdrawals.clone().into(), @@ -634,7 +637,7 @@ impl ExecutionBlockGenerator { gas_used: GAS_USED, timestamp: pa.timestamp, extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), + base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), withdrawals: pa.withdrawals.clone().into(), @@ -653,14 +656,16 @@ impl ExecutionBlockGenerator { gas_used: GAS_USED, timestamp: pa.timestamp, extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), + base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), withdrawals: pa.withdrawals.clone().into(), blob_gas_used: 0, excess_blob_gas: 0, + // TODO(electra): consider how to test these fields below deposit_requests: vec![].into(), withdrawal_requests: vec![].into(), + consolidation_requests: vec![].into(), }), _ => unreachable!(), }, @@ -878,7 +883,7 @@ mod test { const DIFFICULTY_INCREMENT: u64 = 1; let mut generator: ExecutionBlockGenerator = ExecutionBlockGenerator::new( - TERMINAL_DIFFICULTY.into(), + Uint256::from(TERMINAL_DIFFICULTY), TERMINAL_BLOCK, ExecutionBlockHash::zero(), None, @@ -907,7 +912,7 @@ mod test { assert_eq!( block.total_difficulty().unwrap(), - (i * DIFFICULTY_INCREMENT).into() + Uint256::from(i * DIFFICULTY_INCREMENT) ); assert_eq!(generator.block_by_hash(block.block_hash()).unwrap(), block); diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs 
b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 0dc7a7759c5..81c69caf829 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -330,14 +330,14 @@ pub async fn handle_rpc( JsonExecutionPayload::V1(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseV1 { execution_payload, - block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), }) .unwrap() } JsonExecutionPayload::V2(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseV2 { execution_payload, - block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), }) .unwrap() } @@ -350,7 +350,7 @@ pub async fn handle_rpc( JsonExecutionPayload::V3(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseV3 { execution_payload, - block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), blobs_bundle: maybe_blobs .ok_or(( "No blobs returned despite V3 Payload".to_string(), @@ -367,7 +367,7 @@ pub async fn handle_rpc( JsonExecutionPayload::V4(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseV4 { execution_payload, - block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), blobs_bundle: maybe_blobs .ok_or(( "No blobs returned despite V4 Payload".to_string(), @@ -589,6 +589,65 @@ pub async fn handle_rpc( .withdrawals() .ok() .map(|withdrawals| VariableList::from(withdrawals.clone())), + })); + } + None => response.push(None), + } + } + + Ok(serde_json::to_value(response).unwrap()) + } + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2 => { + #[derive(Deserialize)] + #[serde(transparent)] + struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] pub u64); + + let start = get_param::(params, 0) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? 
+ .0; + let count = get_param::(params, 1) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? + .0; + + let mut response = vec![]; + for block_num in start..(start + count) { + let maybe_block = ctx + .execution_block_generator + .read() + .execution_block_with_txs_by_number(block_num); + + match maybe_block { + Some(block) => { + let transactions = Transactions::::new( + block + .transactions() + .iter() + .map(|transaction| VariableList::new(transaction.rlp().to_vec())) + .collect::>() + .map_err(|e| { + ( + format!("failed to deserialize transaction: {:?}", e), + GENERIC_ERROR_CODE, + ) + })?, + ) + .map_err(|e| { + ( + format!("failed to deserialize transactions: {:?}", e), + GENERIC_ERROR_CODE, + ) + })?; + + // TODO(electra): add testing for: + // deposit_requests + // withdrawal_requests + // consolidation_requests + response.push(Some(JsonExecutionPayloadBodyV2:: { + transactions, + withdrawals: block + .withdrawals() + .ok() + .map(|withdrawals| VariableList::from(withdrawals.clone())), deposit_requests: block.deposit_requests().ok().map( |deposit_requests| VariableList::from(deposit_requests.clone()), ), @@ -597,6 +656,17 @@ pub async fn handle_rpc( VariableList::from(withdrawal_requests.clone()) }, ), + consolidation_requests: block.consolidation_requests().ok().map( + |consolidation_requests| { + VariableList::from( + consolidation_requests + .clone() + .into_iter() + .map(Into::into) + .collect::>(), + ) + }, + ), })); } None => response.push(None), diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index c9ae1e60cdc..46830256b09 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -20,9 +20,9 @@ use types::builder_bid::{ }; use types::{ Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload, - ExecutionPayloadHeaderRefMut, ForkName, ForkVersionedResponse, Hash256, 
PublicKeyBytes, - Signature, SignedBlindedBeaconBlock, SignedRoot, SignedValidatorRegistrationData, Slot, - Uint256, + ExecutionPayloadHeaderRefMut, FixedBytesExtended, ForkName, ForkVersionedResponse, Hash256, + PublicKeyBytes, Signature, SignedBlindedBeaconBlock, SignedRoot, + SignedValidatorRegistrationData, Slot, Uint256, }; use types::{ExecutionBlockHash, SecretKey}; use warp::{Filter, Rejection}; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index da9b2817f69..a9f1313e462 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -4,10 +4,10 @@ use crate::{ }, *, }; -use keccak_hash::H256; +use alloy_primitives::B256 as H256; use kzg::Kzg; use tempfile::NamedTempFile; -use types::MainnetEthSpec; +use types::{FixedBytesExtended, MainnetEthSpec}; pub struct MockExecutionLayer { pub server: MockServer, @@ -19,7 +19,7 @@ pub struct MockExecutionLayer { impl MockExecutionLayer { pub fn default_params(executor: TaskExecutor) -> Self { let mut spec = MainnetEthSpec::default_spec(); - spec.terminal_total_difficulty = DEFAULT_TERMINAL_DIFFICULTY.into(); + spec.terminal_total_difficulty = Uint256::from(DEFAULT_TERMINAL_DIFFICULTY); spec.terminal_block_hash = ExecutionBlockHash::zero(); spec.terminal_block_hash_activation_epoch = Epoch::new(0); Self::new( diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 6fd853975d1..fe847ec3eda 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -6,10 +6,10 @@ use crate::engine_api::{ }; use crate::json_structures::JsonClientVersionV1; use bytes::Bytes; -use environment::null_logger; use execution_block_generator::PoWBlock; use handle_rpc::handle_rpc; use kzg::Kzg; +use logging::test_logger; use 
parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -47,7 +47,9 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { forkchoice_updated_v2: true, forkchoice_updated_v3: true, get_payload_bodies_by_hash_v1: true, + get_payload_bodies_by_hash_v2: true, get_payload_bodies_by_range_v1: true, + get_payload_bodies_by_range_v2: true, get_payload_v1: true, get_payload_v2: true, get_payload_v3: true, @@ -86,7 +88,7 @@ impl Default for MockExecutionConfig { fn default() -> Self { Self { jwt_key: JwtKey::random(), - terminal_difficulty: DEFAULT_TERMINAL_DIFFICULTY.into(), + terminal_difficulty: Uint256::from(DEFAULT_TERMINAL_DIFFICULTY), terminal_block: DEFAULT_TERMINAL_BLOCK, terminal_block_hash: ExecutionBlockHash::zero(), server_config: Config::default(), @@ -109,7 +111,7 @@ impl MockServer { Self::new( &runtime::Handle::current(), JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), - DEFAULT_TERMINAL_DIFFICULTY.into(), + Uint256::from(DEFAULT_TERMINAL_DIFFICULTY), DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), None, // FIXME(capella): should this be the default? 
@@ -149,7 +151,7 @@ impl MockServer { let ctx: Arc> = Arc::new(Context { config: server_config, jwt_key, - log: null_logger().unwrap(), + log: test_logger(), last_echo_request: last_echo_request.clone(), execution_block_generator: RwLock::new(execution_block_generator), previous_request: <_>::default(), diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 9a4f85c064c..3347f6c6c2a 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -13,7 +13,7 @@ use std::sync::{ }; use std::time::Duration; use tokio::time::sleep; -use types::{BeaconState, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256}; +use types::{BeaconState, ChainSpec, Deposit, Eth1Data, EthSpec, FixedBytesExtended, Hash256}; /// The number of blocks that are pulled per request whilst waiting for genesis. const BLOCKS_PER_GENESIS_POLL: usize = 99; diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index 4c78b8efd8f..90c4ad6e665 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -180,12 +180,12 @@ mod test { for v in state.validators() { let creds = v.withdrawal_credentials; assert_eq!( - creds.as_bytes()[0], + creds.as_slice()[0], spec.bls_withdrawal_prefix_byte, "first byte of withdrawal creds should be bls prefix" ); assert_eq!( - &creds.as_bytes()[1..], + &creds.as_slice()[1..], &hash(&v.pubkey.as_ssz_bytes())[1..], "rest of withdrawal creds should be pubkey hash" ) @@ -242,7 +242,7 @@ mod test { for (index, v) in state.validators().iter().enumerate() { let withdrawal_credientials = v.withdrawal_credentials; - let creds = withdrawal_credientials.as_bytes(); + let creds = withdrawal_credientials.as_slice(); if index % 2 == 0 { assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index 1252e0100b6..b5c6d85afeb 100644 --- 
a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -6,13 +6,15 @@ use genesis::{Eth1Config, Eth1GenesisService}; use sensitive_url::SensitiveUrl; use state_processing::is_valid_genesis_state; use std::time::Duration; -use types::{test_utils::generate_deterministic_keypair, Hash256, MinimalEthSpec}; +use types::{ + test_utils::generate_deterministic_keypair, FixedBytesExtended, Hash256, MinimalEthSpec, +}; pub fn new_env() -> Environment { EnvironmentBuilder::minimal() .multi_threaded_tokio_runtime() .expect("should start tokio runtime") - .null_logger() + .test_logger() .expect("should start null logger") .build() .expect("should build env") diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 2c54c1375a0..f3779f0e4ac 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -42,12 +42,13 @@ sensitive_url = { workspace = true } store = { workspace = true } bytes = { workspace = true } beacon_processor = { workspace = true } +rand = { workspace = true } [dev-dependencies] -environment = { workspace = true } serde_json = { workspace = true } proto_array = { workspace = true } genesis = { workspace = true } +logging = { workspace = true } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 45fc651f05c..dba8eb1ef32 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -5,7 +5,10 @@ use eth2::types::BlockId as CoreBlockId; use std::fmt; use std::str::FromStr; use std::sync::Arc; -use types::{BlobSidecarList, EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; +use types::{ + BlobSidecarList, EthSpec, FixedBytesExtended, Hash256, SignedBeaconBlock, + SignedBlindedBeaconBlock, Slot, +}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. 
@@ -123,6 +126,15 @@ impl BlockId { } } + pub fn blinded_block_by_root( + root: &Hash256, + chain: &BeaconChain, + ) -> Result>, warp::Rejection> { + chain + .get_blinded_block(root) + .map_err(warp_utils::reject::beacon_chain_error) + } + /// Return the `SignedBeaconBlock` identified by `self`. pub fn blinded_block( &self, @@ -149,38 +161,32 @@ impl BlockId { } CoreBlockId::Slot(slot) => { let (root, execution_optimistic, finalized) = self.root(chain)?; - chain - .get_blinded_block(&root) - .map_err(warp_utils::reject::beacon_chain_error) - .and_then(|block_opt| match block_opt { - Some(block) => { - if block.slot() != *slot { - return Err(warp_utils::reject::custom_not_found(format!( - "slot {} was skipped", - slot - ))); - } - Ok((block, execution_optimistic, finalized)) + BlockId::blinded_block_by_root(&root, chain).and_then(|block_opt| match block_opt { + Some(block) => { + if block.slot() != *slot { + return Err(warp_utils::reject::custom_not_found(format!( + "slot {} was skipped", + slot + ))); } - None => Err(warp_utils::reject::custom_not_found(format!( - "beacon block with root {}", - root - ))), - }) + Ok((block, execution_optimistic, finalized)) + } + None => Err(warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + ))), + }) } _ => { let (root, execution_optimistic, finalized) = self.root(chain)?; - let block = chain - .get_blinded_block(&root) - .map_err(warp_utils::reject::beacon_chain_error) - .and_then(|root_opt| { - root_opt.ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "beacon block with root {}", - root - )) - }) - })?; + let block = BlockId::blinded_block_by_root(&root, chain).and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + }) + })?; Ok((block, execution_optimistic, finalized)) } } @@ -252,23 +258,47 @@ impl BlockId { } } - /// Return the `BlobSidecarList` identified by `self`. 
- pub fn blob_sidecar_list( - &self, - chain: &BeaconChain, - ) -> Result, warp::Rejection> { - let root = self.root(chain)?.0; - chain - .get_blobs(&root) - .map_err(warp_utils::reject::beacon_chain_error) - } - - pub fn blob_sidecar_list_filtered( + #[allow(clippy::type_complexity)] + pub fn get_blinded_block_and_blob_list_filtered( &self, indices: BlobIndicesQuery, chain: &BeaconChain, - ) -> Result, warp::Rejection> { - let blob_sidecar_list = self.blob_sidecar_list(chain)?; + ) -> Result< + ( + SignedBlindedBeaconBlock, + BlobSidecarList, + ExecutionOptimistic, + Finalized, + ), + warp::Rejection, + > { + let (root, execution_optimistic, finalized) = self.root(chain)?; + let block = BlockId::blinded_block_by_root(&root, chain)?.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("beacon block with root {}", root)) + })?; + + // Error if the block is pre-Deneb and lacks blobs. + let blob_kzg_commitments = block.message().body().blob_kzg_commitments().map_err(|_| { + warp_utils::reject::custom_bad_request( + "block is pre-Deneb and has no blobs".to_string(), + ) + })?; + + // Return the `BlobSidecarList` identified by `self`. + let blob_sidecar_list = if !blob_kzg_commitments.is_empty() { + chain + .store + .get_blobs(&root) + .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "no blobs stored for block {root}" + )) + })? 
+ } else { + BlobSidecarList::default() + }; + let blob_sidecar_list_filtered = match indices.indices { Some(vec) => { let list = blob_sidecar_list @@ -280,7 +310,12 @@ impl BlockId { } None => blob_sidecar_list, }; - Ok(blob_sidecar_list_filtered) + Ok(( + block, + blob_sidecar_list_filtered, + execution_optimistic, + finalized, + )) } } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index f98f4493964..22e9931043e 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -13,6 +13,7 @@ mod block_rewards; mod build_block_contents; mod builder_states; mod database; +mod light_client; mod metrics; mod produce_block; mod proposer_duties; @@ -30,6 +31,7 @@ mod validator_inclusion; mod validators; mod version; +use crate::light_client::get_light_client_updates; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; use crate::version::fork_versioned_response; use beacon_chain::{ @@ -44,8 +46,8 @@ use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, - PublishBlockRequest, ValidatorBalancesRequestBody, ValidatorId, ValidatorStatus, - ValidatorsRequestBody, + LightClientUpdatesQuery, PublishBlockRequest, ValidatorBalancesRequestBody, ValidatorId, + ValidatorStatus, ValidatorsRequestBody, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; @@ -712,7 +714,14 @@ pub fn serve( task_spawner: TaskSpawner, chain: Arc>, query_res: Result| { - task_spawner.blocking_json_task(Priority::P1, move || { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. 
+ let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { let query = query_res?; crate::validators::get_beacon_state_validators( state_id, @@ -735,7 +744,14 @@ pub fn serve( task_spawner: TaskSpawner, chain: Arc>, query: ValidatorsRequestBody| { - task_spawner.blocking_json_task(Priority::P1, move || { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { crate::validators::get_beacon_state_validators( state_id, chain, @@ -761,7 +777,14 @@ pub fn serve( task_spawner: TaskSpawner, chain: Arc>, validator_id: ValidatorId| { - task_spawner.blocking_json_task(Priority::P1, move || { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. 
+ let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { let (data, execution_optimistic, finalized) = state_id .map_state_and_execution_optimistic_and_finalized( &chain, @@ -1261,12 +1284,14 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) + .and(network_globals.clone()) .and(log_filter.clone()) .then( move |block_contents: PublishBlockRequest, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, + network_globals: Arc>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( @@ -1277,6 +1302,7 @@ pub fn serve( log, BroadcastValidation::default(), duplicate_block_status_code, + network_globals, ) .await }) @@ -1292,6 +1318,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) + .and(network_globals.clone()) .and(log_filter.clone()) .then( move |block_bytes: Bytes, @@ -1299,6 +1326,7 @@ pub fn serve( task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, + network_globals: Arc>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block_contents = PublishBlockRequest::::from_ssz_bytes( @@ -1316,6 +1344,7 @@ pub fn serve( log, BroadcastValidation::default(), duplicate_block_status_code, + network_globals, ) .await }) @@ -1331,6 +1360,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) + .and(network_globals.clone()) .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, @@ -1338,6 +1368,7 @@ pub fn serve( task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, + network_globals: Arc>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( @@ -1348,6 
+1379,7 @@ pub fn serve( log, validation_level.broadcast_validation, duplicate_block_status_code, + network_globals, ) .await }) @@ -1364,6 +1396,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) + .and(network_globals.clone()) .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, @@ -1372,6 +1405,7 @@ pub fn serve( task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, + network_globals: Arc>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block_contents = PublishBlockRequest::::from_ssz_bytes( @@ -1389,6 +1423,7 @@ pub fn serve( log, validation_level.broadcast_validation, duplicate_block_status_code, + network_globals, ) .await }) @@ -1408,12 +1443,14 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) + .and(network_globals.clone()) .and(log_filter.clone()) .then( move |block_contents: Arc>, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, + network_globals: Arc>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_blinded_block( @@ -1423,6 +1460,7 @@ pub fn serve( log, BroadcastValidation::default(), duplicate_block_status_code, + network_globals, ) .await }) @@ -1438,12 +1476,14 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) + .and(network_globals.clone()) .and(log_filter.clone()) .then( move |block_bytes: Bytes, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, + network_globals: Arc>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block = SignedBlindedBeaconBlock::::from_ssz_bytes( @@ -1461,6 +1501,7 @@ pub fn serve( log, BroadcastValidation::default(), duplicate_block_status_code, + network_globals, ) .await }) @@ -1476,6 +1517,7 @@ pub fn serve( 
.and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) + .and(network_globals.clone()) .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, @@ -1483,6 +1525,7 @@ pub fn serve( task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, + network_globals: Arc>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_blinded_block( @@ -1492,6 +1535,7 @@ pub fn serve( log, validation_level.broadcast_validation, duplicate_block_status_code, + network_globals, ) .await }) @@ -1507,6 +1551,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) + .and(network_globals.clone()) .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, @@ -1514,6 +1559,7 @@ pub fn serve( task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, + network_globals: Arc>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block = SignedBlindedBeaconBlock::::from_ssz_bytes( @@ -1531,6 +1577,7 @@ pub fn serve( log, validation_level.broadcast_validation, duplicate_block_status_code, + network_globals, ) .await }) @@ -1734,8 +1781,12 @@ pub fn serve( accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { let indices = indices_res?; - let blob_sidecar_list_filtered = - block_id.blob_sidecar_list_filtered(indices, &chain)?; + let (block, blob_sidecar_list_filtered, execution_optimistic, finalized) = + block_id.get_blinded_block_and_blob_list_filtered(indices, &chain)?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) @@ -1747,11 +1798,19 @@ pub fn serve( e )) }), - _ => Ok(warp::reply::json(&api_types::GenericResponse::from( - blob_sidecar_list_filtered, - )) - 
.into_response()), + _ => { + // Post as a V2 endpoint so we return the fork version. + let res = execution_optimistic_finalized_fork_versioned_response( + V2, + fork_name, + execution_optimistic, + finalized, + &blob_sidecar_list_filtered, + )?; + Ok(warp::reply::json(&res).into_response()) + } } + .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); @@ -2484,6 +2543,25 @@ pub fn serve( }, ); + // GET beacon/light_client/updates + let get_beacon_light_client_updates = beacon_light_client_path + .clone() + .and(task_spawner_filter.clone()) + .and(warp::path("updates")) + .and(warp::path::end()) + .and(warp::query::()) + .and(warp::header::optional::("accept")) + .then( + |chain: Arc>, + task_spawner: TaskSpawner, + query: LightClientUpdatesQuery, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + get_light_client_updates::(chain, query, accept_header) + }) + }, + ); + /* * beacon/rewards */ @@ -4640,6 +4718,10 @@ pub fn serve( enable(ctx.config.enable_light_client_server) .and(get_beacon_light_client_bootstrap), ) + .uor( + enable(ctx.config.enable_light_client_server) + .and(get_beacon_light_client_updates), + ) .uor(get_lighthouse_block_packing_efficiency) .uor(get_lighthouse_merge_readiness) .uor(get_events) diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs new file mode 100644 index 00000000000..a6543114b85 --- /dev/null +++ b/beacon_node/http_api/src/light_client.rs @@ -0,0 +1,143 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::{ + self as api_types, ChainSpec, ForkVersionedResponse, LightClientUpdate, + LightClientUpdateResponseChunk, LightClientUpdateSszResponse, LightClientUpdatesQuery, +}; +use ssz::Encode; +use std::sync::Arc; +use warp::{ + hyper::{Body, Response}, + reply::Reply, + Rejection, +}; + +use crate::version::{add_ssz_content_type_header, fork_versioned_response, V1}; + +const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 
128; + +pub fn get_light_client_updates( + chain: Arc>, + query: LightClientUpdatesQuery, + accept_header: Option, +) -> Result, Rejection> { + validate_light_client_updates_request(&chain, &query)?; + + let light_client_updates = chain + .get_light_client_updates(query.start_period, query.count) + .map_err(|_| { + warp_utils::reject::custom_not_found("No LightClientUpdates found".to_string()) + })?; + + match accept_header { + Some(api_types::Accept::Ssz) => { + let response_chunks = light_client_updates + .iter() + .map(|update| map_light_client_update_to_ssz_chunk::(&chain, update)) + .collect::>(); + + let ssz_response = LightClientUpdateSszResponse { + response_chunk_len: (light_client_updates.len() as u64).to_le_bytes().to_vec(), + response_chunk: response_chunks.as_ssz_bytes(), + } + .as_ssz_bytes(); + + Response::builder() + .status(200) + .body(ssz_response) + .map(|res: Response>| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }) + } + _ => { + let fork_versioned_response = light_client_updates + .iter() + .map(|update| map_light_client_update_to_json_response::(&chain, update.clone())) + .collect::>>, Rejection>>()?; + Ok(warp::reply::json(&fork_versioned_response).into_response()) + } + } +} + +pub fn validate_light_client_updates_request( + chain: &BeaconChain, + query: &LightClientUpdatesQuery, +) -> Result<(), Rejection> { + if query.count > MAX_REQUEST_LIGHT_CLIENT_UPDATES { + return Err(warp_utils::reject::custom_bad_request( + "Invalid count requested".to_string(), + )); + } + + let current_sync_period = chain + .epoch() + .map_err(|_| { + warp_utils::reject::custom_server_error("failed to get current epoch".to_string()) + })? 
+ .sync_committee_period(&chain.spec) + .map_err(|_| { + warp_utils::reject::custom_server_error( + "failed to get current sync committee period".to_string(), + ) + })?; + + if query.start_period > current_sync_period { + return Err(warp_utils::reject::custom_bad_request( + "Invalid sync committee period requested".to_string(), + )); + } + + let earliest_altair_sync_committee = chain + .spec + .altair_fork_epoch + .ok_or(warp_utils::reject::custom_server_error( + "failed to get altair fork epoch".to_string(), + ))? + .sync_committee_period(&chain.spec) + .map_err(|_| { + warp_utils::reject::custom_server_error( + "failed to get earliest altair sync committee".to_string(), + ) + })?; + + if query.start_period < earliest_altair_sync_committee { + return Err(warp_utils::reject::custom_bad_request( + "Invalid sync committee period requested".to_string(), + )); + } + + Ok(()) +} + +fn map_light_client_update_to_ssz_chunk( + chain: &BeaconChain, + light_client_update: &LightClientUpdate, +) -> LightClientUpdateResponseChunk { + let fork_name = chain + .spec + .fork_name_at_slot::(*light_client_update.signature_slot()); + + let fork_digest = ChainSpec::compute_fork_digest( + chain.spec.fork_version_for_name(fork_name), + chain.genesis_validators_root, + ); + + LightClientUpdateResponseChunk { + context: fork_digest, + payload: light_client_update.as_ssz_bytes(), + } +} + +fn map_light_client_update_to_json_response( + chain: &BeaconChain, + light_client_update: LightClientUpdate, +) -> Result>, Rejection> { + let fork_name = chain + .spec + .fork_name_at_slot::(*light_client_update.signature_slot()); + + fork_versioned_response(V1, fork_name, light_client_update) +} diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 10d000ef6f8..e0fc518d46c 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -9,8 +9,9 @@ use beacon_chain::{ use 
eth2::types::{into_full_block_and_blobs, BroadcastValidation, ErrorMessage}; use eth2::types::{FullPayloadContents, PublishBlockRequest}; use execution_layer::ProvenancedPayload; -use lighthouse_network::PubsubMessage; +use lighthouse_network::{NetworkGlobals, PubsubMessage}; use network::NetworkMessage; +use rand::seq::SliceRandom; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::marker::PhantomData; @@ -19,9 +20,9 @@ use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, BeaconBlockRef, BlobSidecarList, BlockImportSource, EthSpec, ExecPayload, - ExecutionBlockHash, ForkName, FullPayload, FullPayloadBellatrix, Hash256, SignedBeaconBlock, - SignedBlindedBeaconBlock, VariableList, + AbstractExecPayload, BeaconBlockRef, BlobSidecarList, BlockImportSource, DataColumnSidecarList, + DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, + FullPayloadBellatrix, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, VariableList, }; use warp::http::StatusCode; use warp::{reply::Response, Rejection, Reply}; @@ -45,6 +46,7 @@ impl> ProvenancedBloc } /// Handles a request from the HTTP API for full blocks. 
+#[allow(clippy::too_many_arguments)] pub async fn publish_block>( block_root: Option, provenanced_block: ProvenancedBlock, @@ -53,6 +55,7 @@ pub async fn publish_block>, ) -> Result { let seen_timestamp = timestamp_now(); @@ -68,10 +71,13 @@ pub async fn publish_block block.slot()); + let malicious_withhold_count = chain.config.malicious_withhold_count; + let chain_cloned = chain.clone(); /* actually publish a block */ let publish_block = move |block: Arc>, blobs_opt: Option>, + data_cols_opt: Option>, sender, log, seen_timestamp| { @@ -104,6 +110,7 @@ pub async fn publish_block { let mut pubsub_messages = vec![PubsubMessage::BeaconBlock(block)]; if let Some(blob_sidecars) = blobs_opt { + // Publish blob sidecars for (blob_index, blob) in blob_sidecars.into_iter().enumerate() { pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new(( blob_index as u64, @@ -111,6 +118,30 @@ pub async fn publish_block 0 { + let columns_to_keep = data_col_sidecars + .len() + .saturating_sub(malicious_withhold_count); + // Randomize columns before dropping the last malicious_withhold_count items + data_col_sidecars.shuffle(&mut rand::thread_rng()); + data_col_sidecars = data_col_sidecars + .into_iter() + .take(columns_to_keep) + .collect::>(); + } + + for data_col in data_col_sidecars { + let subnet = DataColumnSubnetId::from_column_index::( + data_col.index as usize, + &chain_cloned.spec, + ); + pubsub_messages.push(PubsubMessage::DataColumnSidecar(Box::new(( + subnet, data_col, + )))); + } + } crate::publish_pubsub_messages(&sender, pubsub_messages) .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; } @@ -126,7 +157,7 @@ pub async fn publish_block b, Err(BlockContentsError::BlockError(BlockError::BlockIsAlreadyKnown(_))) @@ -155,6 +186,10 @@ pub async fn publish_block>(); VariableList::from(blobs) }); + let data_cols_opt = gossip_verified_data_columns + .as_ref() + .map(|gossip_verified_data_columns| { + gossip_verified_data_columns + .into_iter() + 
.map(|col| col.clone_data_column()) + .collect::>() + }); let block_root = block_root.unwrap_or(gossip_verified_block.block_root); @@ -172,6 +215,7 @@ pub async fn publish_block publish_block( block_clone, blobs_opt, + data_cols_opt, sender_clone, log_clone, seen_timestamp, @@ -201,6 +246,7 @@ pub async fn publish_block &msg + ); + Err(warp_utils::reject::custom_bad_request(msg)) + }; + } + } + match Box::pin(chain.process_block( block_root, gossip_verified_block, @@ -313,6 +382,7 @@ pub async fn publish_blinded_block( log: Logger, validation_level: BroadcastValidation, duplicate_status_code: StatusCode, + network_globals: Arc>, ) -> Result { let block_root = blinded_block.canonical_root(); let full_block: ProvenancedBlock> = @@ -325,6 +395,7 @@ pub async fn publish_blinded_block( log, validation_level, duplicate_status_code, + network_globals, ) .await } @@ -473,7 +544,7 @@ fn check_slashable( block_root: Hash256, block_clone: &SignedBeaconBlock>, log_clone: &Logger, -) -> Result<(), BlockError> { +) -> Result<(), BlockError> { let slashable_cache = chain_clone.observed_slashable.read(); if let Some(blobs) = blobs_opt.as_ref() { blobs.iter().try_for_each(|blob| { diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 88112de10b6..dcd494a880f 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -151,6 +151,7 @@ pub async fn create_api_server( vec![], false, &log, + chain.spec.clone(), )); // Only a peer manager can add peers, so we create a dummy manager. 
diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 78f9c819888..59cdbb1c99e 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -7,7 +7,7 @@ use eth2::types::{BroadcastValidation, PublishBlockRequest}; use http_api::test_utils::InteractiveTester; use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock}; use std::sync::Arc; -use types::{Epoch, EthSpec, ForkName, Hash256, MainnetEthSpec, Slot}; +use types::{Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, Slot}; use warp::Rejection; use warp_utils::reject::CustomBadRequest; @@ -81,9 +81,7 @@ pub async fn gossip_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) - ); + assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); } /// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. 
@@ -268,10 +266,7 @@ pub async fn consensus_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) - ); + assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); } /// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. @@ -317,10 +312,7 @@ pub async fn consensus_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) - ); + assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective, but nonetheless equivocates, is accepted when using `broadcast_validation=consensus`. 
@@ -376,6 +368,7 @@ pub async fn consensus_partial_pass_only_consensus() { /* submit `block_b` which should induce equivocation */ let channel = tokio::sync::mpsc::unbounded_channel(); + let network_globals = tester.ctx.network_globals.clone().unwrap(); let publication_result = publish_block( None, @@ -385,6 +378,7 @@ pub async fn consensus_partial_pass_only_consensus() { test_logger, validation_level.unwrap(), StatusCode::ACCEPTED, + network_globals, ) .await; @@ -487,10 +481,7 @@ pub async fn equivocation_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) - ); + assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. 
@@ -564,9 +555,9 @@ pub async fn equivocation_consensus_early_equivocation() { let error_response: eth2::Error = response.err().unwrap(); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string()) + assert_server_message_error( + error_response, + "BAD_REQUEST: BlockError(Slashable)".to_string(), ); } @@ -614,10 +605,7 @@ pub async fn equivocation_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) - ); + assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective but @@ -677,6 +665,7 @@ pub async fn equivocation_consensus_late_equivocation() { assert!(gossip_block_contents_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); + let network_globals = tester.ctx.network_globals.clone().unwrap(); let publication_result = publish_block( None, @@ -686,6 +675,7 @@ pub async fn equivocation_consensus_late_equivocation() { test_logger, validation_level.unwrap(), StatusCode::ACCEPTED, + network_globals, ) .await; @@ -793,10 +783,7 @@ pub async fn blinded_gossip_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == 
"BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) - ); + assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); } /// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. @@ -974,10 +961,7 @@ pub async fn blinded_consensus_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) - ); + assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); } /// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. 
@@ -1023,10 +1007,7 @@ pub async fn blinded_consensus_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) - ); + assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. @@ -1118,10 +1099,7 @@ pub async fn blinded_equivocation_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) - ); + assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. 
@@ -1191,9 +1169,9 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { let error_response: eth2::Error = response.err().unwrap(); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string()) + assert_server_message_error( + error_response, + "BAD_REQUEST: BlockError(Slashable)".to_string(), ); } @@ -1242,9 +1220,7 @@ pub async fn blinded_equivocation_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) - ); + assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); } /// This test checks that a block that is valid from both a gossip and @@ -1335,6 +1311,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { assert!(gossip_block_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); + let network_globals = tester.ctx.network_globals.clone().unwrap(); let publication_result = publish_blinded_block( block_b, @@ -1343,6 +1320,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { test_logger, validation_level.unwrap(), StatusCode::ACCEPTED, + network_globals, ) .await; @@ -1395,3 +1373,10 @@ pub async fn blinded_equivocation_full_pass() { .chain .block_is_known_to_fork_choice(&block.canonical_root())); } + +fn assert_server_message_error(error_response: eth2::Error, expected_message: String) { + let 
eth2::Error::ServerMessage(err) = error_response else { + panic!("Not a eth2::Error::ServerMessage"); + }; + assert_eq!(err.message, expected_message); +} diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index db8a0ab2b54..b5b3edf892e 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -10,7 +10,7 @@ use http_api::test_utils::*; use std::collections::HashSet; use types::{ test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, - Address, ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot, + Address, ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, MinimalEthSpec, Slot, }; type E = MinimalEthSpec; diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 2f417cf7ba5..9ff411cf1c9 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -18,8 +18,8 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use types::{ - Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, MainnetEthSpec, - MinimalEthSpec, ProposerPreparationData, Slot, + Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, FixedBytesExtended, ForkName, + Hash256, MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, Uint256, }; type E = MainnetEthSpec; @@ -394,7 +394,7 @@ pub async fn proposer_boost_re_org_test( // Test using the latest fork so that we simulate conditions as similar to mainnet as possible. let mut spec = ForkName::latest().make_genesis_spec(E::default_spec()); - spec.terminal_total_difficulty = 1.into(); + spec.terminal_total_difficulty = Uint256::from(1); // Ensure there are enough validators to have `attesters_per_slot`. let attesters_per_slot = 10; @@ -639,18 +639,20 @@ pub async fn proposer_boost_re_org_test( if should_re_org { // Block C should build on A. 
- assert_eq!(block_c.parent_root(), block_a_root.into()); + assert_eq!(block_c.parent_root(), Hash256::from(block_a_root)); } else { // Block C should build on B. assert_eq!(block_c.parent_root(), block_b_root); } // Applying block C should cause it to become head regardless (re-org or continuation). - let block_root_c = harness - .process_block_result((block_c.clone(), block_c_blobs)) - .await - .unwrap() - .into(); + let block_root_c = Hash256::from( + harness + .process_block_result((block_c.clone(), block_c_blobs)) + .await + .unwrap(), + ); + assert_eq!(harness.head_block_root(), block_root_c); // Check the fork choice updates that were sent. @@ -814,7 +816,7 @@ pub async fn fork_choice_before_proposal() { // Due to proposer boost, the head should be C during slot C. assert_eq!( harness.chain.canonical_head.cached_head().head_block_root(), - block_root_c.into() + Hash256::from(block_root_c) ); // Ensure that building a block via the HTTP API re-runs fork choice and builds block D upon B. @@ -841,10 +843,10 @@ pub async fn fork_choice_before_proposal() { // Head is now B. assert_eq!( harness.chain.canonical_head.cached_head().head_block_root(), - block_root_b.into() + Hash256::from(block_root_b) ); // D's parent is B. - assert_eq!(block_d.parent_root(), block_root_b.into()); + assert_eq!(block_d.parent_root(), Hash256::from(block_root_b)); } // Test that attestations to unknown blocks are requeued and processed when their block arrives. 
diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index 801cd44074d..8f962995300 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -6,7 +6,7 @@ use beacon_chain::{ use eth2::StatusCode; use execution_layer::{PayloadStatusV1, PayloadStatusV1Status}; use http_api::test_utils::InteractiveTester; -use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot}; +use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot, Uint256}; type E = MinimalEthSpec; @@ -14,7 +14,7 @@ type E = MinimalEthSpec; async fn post_merge_tester(chain_depth: u64, validator_count: u64) -> InteractiveTester { // Test using latest fork so that we simulate conditions as similar to mainnet as possible. let mut spec = ForkName::latest().make_genesis_spec(E::default_spec()); - spec.terminal_total_difficulty = 1.into(); + spec.terminal_total_difficulty = Uint256::from(1); let tester = InteractiveTester::::new(Some(spec), validator_count as usize).await; let harness = &tester.harness; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index d51799b8661..6e6f72b6c08 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3,7 +3,6 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; -use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, @@ -24,6 +23,7 @@ use http_api::{ BlockId, StateId, }; use lighthouse_network::{types::SyncState, Enr, EnrExt, PeerId}; +use logging::test_logger; use network::NetworkReceivers; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; @@ -251,7 +251,7 @@ impl ApiTester { "precondition: justification" ); - let log = null_logger().unwrap(); + let log = test_logger(); let 
ApiServer { ctx, @@ -349,7 +349,7 @@ impl ApiTester { let chain = harness.chain.clone(); - let log = null_logger().unwrap(); + let log = test_logger(); let ApiServer { ctx, @@ -1667,6 +1667,93 @@ impl ApiTester { self } + /// Test fetching of blob sidecars that are not available in the database due to pruning. + /// + /// If `zero_blobs` is false, test a block with >0 blobs, which should be unavailable. + /// If `zero_blobs` is true, then test a block with 0 blobs, which should still be available. + pub async fn test_get_blob_sidecars_pruned(self, zero_blobs: bool) -> Self { + // Prune all blobs prior to the database's split epoch. + let store = &self.chain.store; + let split_epoch = store.get_split_slot().epoch(E::slots_per_epoch()); + let force_prune = true; + self.chain + .store + .try_prune_blobs(force_prune, split_epoch) + .unwrap(); + + let oldest_blob_slot = store.get_blob_info().oldest_blob_slot.unwrap(); + + assert_ne!( + oldest_blob_slot, 0, + "blob pruning should have pruned some blobs" + ); + + // Find a block with either 0 blobs or 1+ depending on the value of `zero_blobs`. 
+ let mut test_slot = None; + for slot in 0..oldest_blob_slot.as_u64() { + let block_id = BlockId(CoreBlockId::Slot(Slot::new(slot))); + let (block, _, _) = block_id.blinded_block(&self.chain).unwrap(); + let num_blobs = block.num_expected_blobs(); + + if (zero_blobs && num_blobs == 0) || (!zero_blobs && num_blobs > 0) { + test_slot = Some(Slot::new(slot)); + break; + } + } + let test_slot = test_slot.expect(&format!( + "should be able to find a block matching zero_blobs={zero_blobs}" + )); + + match self + .client + .get_blobs::(CoreBlockId::Slot(test_slot), None) + .await + { + Ok(result) => { + if zero_blobs { + assert_eq!( + &result.unwrap().data[..], + &[], + "empty blobs are always available" + ); + } else { + assert_eq!(result, None, "blobs should have been pruned"); + } + } + Err(e) => panic!("failed with non-404 status: {e:?}"), + } + + self + } + + pub async fn test_get_blob_sidecars_pre_deneb(self) -> Self { + let oldest_blob_slot = self.chain.store.get_blob_info().oldest_blob_slot.unwrap(); + assert_ne!( + oldest_blob_slot, 0, + "oldest_blob_slot should be non-zero and post-Deneb" + ); + let test_slot = oldest_blob_slot - 1; + assert!( + !self + .chain + .spec + .fork_name_at_slot::(test_slot) + .deneb_enabled(), + "Deneb should not be enabled at {test_slot}" + ); + + match self + .client + .get_blobs::(CoreBlockId::Slot(test_slot), None) + .await + { + Ok(result) => panic!("queries for pre-Deneb slots should fail. 
got: {result:?}"), + Err(e) => assert_eq!(e.status().unwrap(), 400), + } + + self + } + pub async fn test_beacon_blocks_attestations(self) -> Self { for block_id in self.interesting_block_ids() { let result = self @@ -1813,6 +1900,36 @@ impl ApiTester { self } + pub async fn test_get_beacon_light_client_updates(self) -> Self { + let current_epoch = self.chain.epoch().unwrap(); + let current_sync_committee_period = current_epoch + .sync_committee_period(&self.chain.spec) + .unwrap(); + + let result = match self + .client + .get_beacon_light_client_updates::(current_sync_committee_period as u64, 1) + .await + { + Ok(result) => result, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + let expected = self + .chain + .light_client_server_cache + .get_light_client_updates( + &self.chain.store, + current_sync_committee_period as u64, + 1, + &self.chain.spec, + ) + .unwrap(); + + assert_eq!(result.clone().unwrap().len(), expected.len()); + self + } + pub async fn test_get_beacon_light_client_bootstrap(self) -> Self { let block_id = BlockId(CoreBlockId::Finalized); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); @@ -3401,7 +3518,7 @@ impl ApiTester { .get_validator_aggregate_attestation_v2( attestation.data().slot, attestation.data().tree_hash_root(), - attestation.committee_index().unwrap(), + attestation.committee_index().expect("committee index"), ) .await .unwrap() @@ -6171,6 +6288,18 @@ async fn node_get() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_light_client_updates() { + let config = ApiTesterConfig { + spec: ForkName::Altair.make_genesis_spec(E::default_spec()), + ..<_>::default() + }; + ApiTester::new_from_config(config) + .await + .test_get_beacon_light_client_updates() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_light_client_bootstrap() { let config = ApiTesterConfig { @@ -6804,6 +6933,36 @@ async fn get_blob_sidecars() { .await; } 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blob_sidecars_pruned() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + + ApiTester::new_from_config(config) + .await + .test_get_blob_sidecars_pruned(false) + .await + .test_get_blob_sidecars_pruned(true) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blob_sidecars_pre_deneb() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(1)); + + ApiTester::new_from_config(config) + .await + .test_get_blob_sidecars_pre_deneb() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_liveness_epoch() { ApiTester::new() diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index f8c93ad8fc1..f835d13fb66 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -22,5 +22,5 @@ malloc_utils = { workspace = true } [dev-dependencies] tokio = { workspace = true } reqwest = { workspace = true } -environment = { workspace = true } types = { workspace = true } +logging = { workspace = true } diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index b88a790afd5..d903e233fba 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -1,6 +1,6 @@ use beacon_chain::test_utils::EphemeralHarnessType; -use environment::null_logger; use http_metrics::Config; +use logging::test_logger; use reqwest::header::HeaderValue; use reqwest::StatusCode; use 
std::net::{IpAddr, Ipv4Addr}; @@ -13,7 +13,7 @@ type Context = http_metrics::Context>; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn returns_200_ok() { async { - let log = null_logger().unwrap(); + let log = test_logger(); let context = Arc::new(Context { config: Config { diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index cb89d492d1d..c666b8b4552 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dependencies] +alloy-primitives = { workspace = true} discv5 = { workspace = true } gossipsub = { workspace = true } unsigned-varint = { version = "0.8", features = ["codec"] } @@ -46,10 +47,10 @@ itertools = { workspace = true } # Local dependencies void = "1.0.2" -libp2p-mplex = "0.41" +libp2p-mplex = "0.42" [dependencies.libp2p] -version = "0.53" +version = "0.54" default-features = false features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"] diff --git a/beacon_node/lighthouse_network/gossipsub/Cargo.toml b/beacon_node/lighthouse_network/gossipsub/Cargo.toml index ef8fb1b12b5..a01d60dae99 100644 --- a/beacon_node/lighthouse_network/gossipsub/Cargo.toml +++ b/beacon_node/lighthouse_network/gossipsub/Cargo.toml @@ -27,7 +27,7 @@ futures-timer = "3.0.2" getrandom = "0.2.12" hashlink.workspace = true hex_fmt = "0.3.0" -libp2p = { version = "0.53", default-features = false } +libp2p = { version = "0.54", default-features = false } quick-protobuf = "0.8" quick-protobuf-codec = "0.3" rand = "0.8" diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index 1627777f845..59e02b9403c 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ 
-35,7 +35,11 @@ use hashlink::LinkedHashMap; use prometheus_client::registry::Registry; use rand::{seq::SliceRandom, thread_rng}; -use libp2p::core::{multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, Endpoint, Multiaddr}; +use libp2p::core::{ + multiaddr::Protocol::{Ip4, Ip6}, + transport::PortUse, + Endpoint, Multiaddr, +}; use libp2p::identity::Keypair; use libp2p::identity::PeerId; use libp2p::swarm::{ @@ -3167,6 +3171,7 @@ where peer_id: PeerId, _: &Multiaddr, _: Endpoint, + _: PortUse, ) -> Result, ConnectionDenied> { // By default we assume a peer is only a floodsub peer. // diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs index a378198be33..19d09cd890f 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs @@ -220,6 +220,7 @@ where ConnectedPoint::Dialer { address, role_override: Endpoint::Dialer, + port_use: PortUse::Reuse, } } else { ConnectedPoint::Listener { @@ -284,6 +285,7 @@ where let fake_endpoint = ConnectedPoint::Dialer { address: Multiaddr::empty(), role_override: Endpoint::Dialer, + port_use: PortUse::Reuse, }; // this is not relevant // peer_connections.connections should never be empty. 
@@ -296,6 +298,7 @@ where connection_id, endpoint: &fake_endpoint, remaining_established: active_connections, + cause: None, })); } } @@ -635,6 +638,7 @@ fn test_join() { endpoint: &ConnectedPoint::Dialer { address, role_override: Endpoint::Dialer, + port_use: PortUse::Reuse, }, failed_addresses: &[], other_established: 0, @@ -4181,6 +4185,7 @@ fn test_scoring_p6() { endpoint: &ConnectedPoint::Dialer { address: addr.clone(), role_override: Endpoint::Dialer, + port_use: PortUse::Reuse, }, failed_addresses: &[], other_established: 0, @@ -4202,6 +4207,7 @@ fn test_scoring_p6() { endpoint: &ConnectedPoint::Dialer { address: addr2.clone(), role_override: Endpoint::Dialer, + port_use: PortUse::Reuse, }, failed_addresses: &[], other_established: 1, @@ -4232,6 +4238,7 @@ fn test_scoring_p6() { endpoint: &ConnectedPoint::Dialer { address: addr, role_override: Endpoint::Dialer, + port_use: PortUse::Reuse, }, failed_addresses: &[], other_established: 2, diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 91c5b62d0b2..7c95977140e 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -42,7 +42,7 @@ pub struct Config { pub network_dir: PathBuf, /// IP addresses to listen on. - listen_addresses: ListenAddress, + pub(crate) listen_addresses: ListenAddress, /// The address to broadcast to peers about which address we are listening on. None indicates /// that no discovery address has been set in the CLI args. @@ -100,6 +100,9 @@ pub struct Config { /// Attempt to construct external port mappings with UPnP. pub upnp_enabled: bool, + /// Subscribe to all data column subnets for the duration of the runtime. + pub subscribe_all_data_column_subnets: bool, + /// Subscribe to all subnets for the duration of the runtime. 
pub subscribe_all_subnets: bool, @@ -338,6 +341,7 @@ impl Default for Config { upnp_enabled: true, network_load: 4, private: false, + subscribe_all_data_column_subnets: false, subscribe_all_subnets: false, import_all_attestations: false, shutdown_after_sync: false, diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 51e50808e1d..7415fdaf590 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -14,7 +14,7 @@ use std::fs::File; use std::io::prelude::*; use std::path::Path; use std::str::FromStr; -use types::{EnrForkId, EthSpec}; +use types::{ChainSpec, EnrForkId, EthSpec}; use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY}; @@ -24,6 +24,8 @@ pub const ETH2_ENR_KEY: &str = "eth2"; pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; /// The ENR field specifying the sync committee subnet bitfield. pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets"; +/// The ENR field specifying the peerdas custody subnet count. +pub const PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY: &str = "csc"; /// Extension trait for ENR's within Eth2. pub trait Eth2Enr { @@ -35,6 +37,9 @@ pub trait Eth2Enr { &self, ) -> Result, &'static str>; + /// The peerdas custody subnet count associated with the ENR. + fn custody_subnet_count(&self, spec: &ChainSpec) -> u64; + fn eth2(&self) -> Result; } @@ -59,6 +64,16 @@ impl Eth2Enr for Enr { .map_err(|_| "Could not decode the ENR syncnets bitfield") } + /// if the custody value is non-existent in the ENR, then we assume the minimum custody value + /// defined in the spec. 
+ fn custody_subnet_count(&self, spec: &ChainSpec) -> u64 { + self.get_decodable::(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) + .and_then(|r| r.ok()) + // If value supplied in ENR is invalid, fallback to `custody_requirement` + .filter(|csc| csc <= &spec.data_column_sidecar_subnet_count) + .unwrap_or(spec.custody_requirement) + } + fn eth2(&self) -> Result { let eth2_bytes = self.get(ETH2_ENR_KEY).ok_or("ENR has no eth2 field")?; @@ -126,12 +141,13 @@ pub fn build_or_load_enr( config: &NetworkConfig, enr_fork_id: &EnrForkId, log: &slog::Logger, + spec: &ChainSpec, ) -> Result { // Build the local ENR. // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers, if the CLI doesn't expressly forbid it. let enr_key = CombinedKey::from_libp2p(local_key)?; - let mut local_enr = build_enr::(&enr_key, config, enr_fork_id)?; + let mut local_enr = build_enr::(&enr_key, config, enr_fork_id, spec)?; use_or_load_enr(&enr_key, &mut local_enr, config, log)?; Ok(local_enr) @@ -142,6 +158,7 @@ pub fn build_enr( enr_key: &CombinedKey, config: &NetworkConfig, enr_fork_id: &EnrForkId, + spec: &ChainSpec, ) -> Result { let mut builder = discv5::enr::Enr::builder(); let (maybe_ipv4_address, maybe_ipv6_address) = &config.enr_address; @@ -221,6 +238,16 @@ pub fn build_enr( builder.add_value(SYNC_COMMITTEE_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + // only set `csc` if PeerDAS fork epoch has been scheduled + if spec.is_peer_das_scheduled() { + let custody_subnet_count = if config.subscribe_all_data_column_subnets { + spec.data_column_sidecar_subnet_count + } else { + spec.custody_requirement + }; + builder.add_value(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY, &custody_subnet_count); + } + builder .build(enr_key) .map_err(|e| format!("Could not build Local ENR: {:?}", e)) @@ -244,10 +271,12 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { // take preference over disk udp port if one is not specified && (local_enr.udp4().is_none() || 
local_enr.udp4() == disk_enr.udp4()) && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) - // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match, - // otherwise we use a new ENR. This will likely only be true for non-validating nodes + // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and + // PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will + // likely only be true for non-validating nodes. && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY) && local_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) + && local_enr.get(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) == disk_enr.get(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) } /// Loads enr from the given directory @@ -280,3 +309,77 @@ pub fn save_enr_to_disk(dir: &Path, enr: &Enr, log: &slog::Logger) { } } } + +#[cfg(test)] +mod test { + use super::*; + use crate::config::Config as NetworkConfig; + use types::{Epoch, MainnetEthSpec}; + + type E = MainnetEthSpec; + + fn make_eip7594_spec() -> ChainSpec { + let mut spec = E::default_spec(); + spec.eip7594_fork_epoch = Some(Epoch::new(10)); + spec + } + + #[test] + fn custody_subnet_count_default() { + let config = NetworkConfig { + subscribe_all_data_column_subnets: false, + ..NetworkConfig::default() + }; + let spec = make_eip7594_spec(); + + let enr = build_enr_with_config(config, &spec).0; + + assert_eq!( + enr.custody_subnet_count::(&spec), + spec.custody_requirement, + ); + } + + #[test] + fn custody_subnet_count_all() { + let config = NetworkConfig { + subscribe_all_data_column_subnets: true, + ..NetworkConfig::default() + }; + let spec = make_eip7594_spec(); + let enr = build_enr_with_config(config, &spec).0; + + assert_eq!( + enr.custody_subnet_count::(&spec), + spec.data_column_sidecar_subnet_count, + ); + } + + #[test] + fn custody_subnet_count_fallback_default() { + 
let config = NetworkConfig::default(); + let spec = make_eip7594_spec(); + let (mut enr, enr_key) = build_enr_with_config(config, &spec); + let invalid_subnet_count = 999u64; + + enr.insert( + PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY, + &invalid_subnet_count, + &enr_key, + ) + .unwrap(); + + assert_eq!( + enr.custody_subnet_count::(&spec), + spec.custody_requirement, + ); + } + + fn build_enr_with_config(config: NetworkConfig, spec: &ChainSpec) -> (Enr, CombinedKey) { + let keypair = libp2p::identity::secp256k1::Keypair::generate(); + let enr_key = CombinedKey::from_secp256k1(&keypair); + let enr_fork_id = EnrForkId::default(); + let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec).unwrap(); + (enr, enr_key) + } +} diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 865d707495f..c92a8bd2b45 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -18,6 +18,7 @@ pub use libp2p::identity::{Keypair, PublicKey}; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; +use libp2p::core::transport::PortUse; use libp2p::multiaddr::Protocol; use libp2p::swarm::behaviour::{DialFailure, FromSwarm}; use libp2p::swarm::THandlerInEvent; @@ -983,6 +984,7 @@ impl NetworkBehaviour for Discovery { _peer: PeerId, _addr: &Multiaddr, _role_override: libp2p::core::Endpoint, + _port_use: PortUse, ) -> Result, libp2p::swarm::ConnectionDenied> { Ok(ConnectionHandler) } @@ -1220,7 +1222,7 @@ mod tests { let mut config = NetworkConfig::default(); config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); - let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default()).unwrap(); + let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default(), &spec).unwrap(); let log = 
build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( enr, @@ -1232,6 +1234,7 @@ mod tests { vec![], false, &log, + spec.clone(), ); let keypair = keypair.into(); Discovery::new(keypair, &config, Arc::new(globals), &log, &spec) diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index b53afe556db..e198b3ee17f 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -16,6 +16,7 @@ where E: EthSpec, { let log_clone = log.clone(); + let spec_clone = spec.clone(); move |enr: &Enr| { let attestation_bitfield: EnrAttestationBitfield = match enr.attestation_bitfield::() @@ -29,8 +30,7 @@ where let sync_committee_bitfield: Result, _> = enr.sync_committee_bitfield::(); - // TODO(das): compute from enr - let custody_subnet_count = spec.custody_requirement; + let custody_subnet_count = enr.custody_subnet_count::(&spec_clone); let predicate = subnets.iter().any(|subnet| match subnet { Subnet::Attestation(s) => attestation_bitfield @@ -41,7 +41,7 @@ where .map_or(false, |b| b.get(*s.deref() as usize).unwrap_or(false)), Subnet::DataColumn(s) => { let mut subnets = DataColumnSubnetId::compute_custody_subnets::( - enr.node_id().raw().into(), + enr.node_id().raw(), custody_subnet_count, &spec, ); diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 6423da56fe2..31ff8bdfc23 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,6 +1,7 @@ //! Implementation of Lighthouse's peer management system. 
use crate::discovery::enr_ext::EnrExt; +use crate::discovery::peer_id_to_node_id; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::service::TARGET_SUBNET_PEERS; use crate::{error, metrics, Gossipsub}; @@ -530,7 +531,10 @@ impl PeerManager { RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError, RPCResponseErrorCode::ResourceUnavailable => { // Don't ban on this because we want to retry with a block by root request. - if matches!(protocol, Protocol::BlobsByRoot) { + if matches!( + protocol, + Protocol::BlobsByRoot | Protocol::DataColumnsByRoot + ) { return; } @@ -569,6 +573,8 @@ impl PeerManager { Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, Protocol::BlobsByRoot => PeerAction::MidToleranceError, + Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, + Protocol::DataColumnsByRange => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -587,6 +593,8 @@ impl PeerManager { Protocol::BlocksByRoot => return, Protocol::BlobsByRange => return, Protocol::BlobsByRoot => return, + Protocol::DataColumnsByRoot => return, + Protocol::DataColumnsByRange => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, @@ -607,6 +615,8 @@ impl PeerManager { Protocol::BlocksByRoot => PeerAction::MidToleranceError, Protocol::BlobsByRange => PeerAction::MidToleranceError, Protocol::BlobsByRoot => PeerAction::MidToleranceError, + Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, + Protocol::DataColumnsByRange => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, @@ -707,7 +717,8 @@ impl PeerManager { debug!(self.log, "Obtained peer's 
metadata"; "peer_id" => %peer_id, "new_seq_no" => meta_data.seq_number()); } - peer_info.set_meta_data(meta_data); + let node_id_opt = peer_id_to_node_id(peer_id).ok(); + peer_info.set_meta_data(meta_data, node_id_opt, &self.network_globals.spec); } else { error!(self.log, "Received METADATA from an unknown peer"; "peer_id" => %peer_id); @@ -1379,7 +1390,8 @@ mod tests { ..Default::default() }; let log = build_log(slog::Level::Debug, false); - let globals = NetworkGlobals::new_test_globals(vec![], &log); + let spec = E::default_spec(); + let globals = NetworkGlobals::new_test_globals(vec![], &log, spec); PeerManager::new(config, Arc::new(globals), &log).unwrap() } @@ -1393,7 +1405,8 @@ mod tests { ..Default::default() }; let log = build_log(slog::Level::Debug, false); - let globals = NetworkGlobals::new_test_globals(trusted_peers, &log); + let spec = E::default_spec(); + let globals = NetworkGlobals::new_test_globals(trusted_peers, &log, spec); PeerManager::new(config, Arc::new(globals), &log).unwrap() } @@ -1667,7 +1680,11 @@ mod tests { .write() .peer_info_mut(&peer0) .unwrap() - .set_meta_data(MetaData::V2(metadata)); + .set_meta_data( + MetaData::V2(metadata), + None, + &peer_manager.network_globals.spec, + ); peer_manager .network_globals .peers @@ -1687,7 +1704,11 @@ mod tests { .write() .peer_info_mut(&peer2) .unwrap() - .set_meta_data(MetaData::V2(metadata)); + .set_meta_data( + MetaData::V2(metadata), + None, + &peer_manager.network_globals.spec, + ); peer_manager .network_globals .peers @@ -1707,7 +1728,11 @@ mod tests { .write() .peer_info_mut(&peer4) .unwrap() - .set_meta_data(MetaData::V2(metadata)); + .set_meta_data( + MetaData::V2(metadata), + None, + &peer_manager.network_globals.spec, + ); peer_manager .network_globals .peers @@ -1781,7 +1806,11 @@ mod tests { .write() .peer_info_mut(&peer) .unwrap() - .set_meta_data(MetaData::V2(metadata)); + .set_meta_data( + MetaData::V2(metadata), + None, + &peer_manager.network_globals.spec, + ); peer_manager 
.network_globals .peers @@ -1905,7 +1934,11 @@ mod tests { .write() .peer_info_mut(&peer) .unwrap() - .set_meta_data(MetaData::V2(metadata)); + .set_meta_data( + MetaData::V2(metadata), + None, + &peer_manager.network_globals.spec, + ); let long_lived_subnets = peer_manager .network_globals .peers @@ -2014,7 +2047,11 @@ mod tests { .write() .peer_info_mut(&peer) .unwrap() - .set_meta_data(MetaData::V2(metadata)); + .set_meta_data( + MetaData::V2(metadata), + None, + &peer_manager.network_globals.spec, + ); let long_lived_subnets = peer_manager .network_globals .peers @@ -2180,7 +2217,11 @@ mod tests { .write() .peer_info_mut(&peer) .unwrap() - .set_meta_data(MetaData::V2(metadata)); + .set_meta_data( + MetaData::V2(metadata), + None, + &peer_manager.network_globals.spec, + ); let long_lived_subnets = peer_manager .network_globals .peers @@ -2337,7 +2378,11 @@ mod tests { let mut peer_db = peer_manager.network_globals.peers.write(); let peer_info = peer_db.peer_info_mut(&condition.peer_id).unwrap(); - peer_info.set_meta_data(MetaData::V2(metadata)); + peer_info.set_meta_data( + MetaData::V2(metadata), + None, + &peer_manager.network_globals.spec, + ); peer_info.set_gossipsub_score(condition.gossipsub_score); peer_info.add_to_score(condition.score); diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 3858a2a5392..d9df8e7c4bb 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -4,6 +4,7 @@ use std::net::IpAddr; use std::task::{Context, Poll}; use futures::StreamExt; +use libp2p::core::transport::PortUse; use libp2p::core::ConnectedPoint; use libp2p::identity::PeerId; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; @@ -214,6 +215,7 @@ impl NetworkBehaviour for PeerManager { peer_id: PeerId, addr: 
&libp2p::Multiaddr, _role_override: libp2p::core::Endpoint, + _port_use: PortUse, ) -> Result, libp2p::swarm::ConnectionDenied> { trace!(self.log, "Outbound connection"; "peer_id" => %peer_id, "multiaddr" => %addr); match self.ban_status(&peer_id) { diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index c3e77ae225e..6e76fd4bb00 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,5 +1,8 @@ +use crate::discovery::enr::PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY; use crate::discovery::CombinedKey; -use crate::{metrics, multiaddr::Multiaddr, types::Subnet, Enr, Gossipsub, PeerId}; +use crate::{ + metrics, multiaddr::Multiaddr, types::Subnet, Enr, EnrExt, Eth2Enr, Gossipsub, PeerId, +}; use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; use rand::seq::SliceRandom; use score::{PeerAction, ReportSource, Score, ScoreState}; @@ -12,7 +15,7 @@ use std::{ fmt::Formatter, }; use sync_status::SyncStatus; -use types::EthSpec; +use types::{ChainSpec, DataColumnSubnetId, EthSpec}; pub mod client; pub mod peer_info; @@ -44,10 +47,16 @@ pub struct PeerDB { disable_peer_scoring: bool, /// PeerDB's logger log: slog::Logger, + spec: ChainSpec, } impl PeerDB { - pub fn new(trusted_peers: Vec, disable_peer_scoring: bool, log: &slog::Logger) -> Self { + pub fn new( + trusted_peers: Vec, + disable_peer_scoring: bool, + log: &slog::Logger, + spec: ChainSpec, + ) -> Self { // Initialize the peers hashmap with trusted peers let peers = trusted_peers .into_iter() @@ -59,6 +68,7 @@ impl PeerDB { banned_peers_count: BannedPeersCount::default(), disable_peer_scoring, peers, + spec, } } @@ -246,6 +256,22 @@ impl PeerDB { .map(|(peer_id, _)| peer_id) } + /// Returns an iterator of all good gossipsub peers that are supposed to be custodying + /// the given subnet id. 
+ pub fn good_custody_subnet_peer( + &self, + subnet: DataColumnSubnetId, + ) -> impl Iterator { + self.peers + .iter() + .filter(move |(_, info)| { + // The custody_subnets hashset can be populated via enr or metadata + let is_custody_subnet_peer = info.is_assigned_to_custody_subnet(&subnet); + info.is_connected() && info.is_good_gossipsub_peer() && is_custody_subnet_peer + }) + .map(|(peer_id, _)| peer_id) + } + /// Gives the ids of all known disconnected peers. pub fn disconnected_peers(&self) -> impl Iterator { self.peers @@ -673,17 +699,34 @@ impl PeerDB { } /// Updates the connection state. MUST ONLY BE USED IN TESTS. - pub fn __add_connected_peer_testing_only(&mut self, peer_id: &PeerId) -> Option { + pub fn __add_connected_peer_testing_only( + &mut self, + supernode: bool, + spec: &ChainSpec, + ) -> PeerId { let enr_key = CombinedKey::generate_secp256k1(); - let enr = Enr::builder().build(&enr_key).unwrap(); + let mut enr = Enr::builder().build(&enr_key).unwrap(); + let peer_id = enr.peer_id(); + + if supernode { + enr.insert( + PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY, + &spec.data_column_sidecar_subnet_count, + &enr_key, + ) + .expect("u64 can be encoded"); + } + self.update_connection_state( - peer_id, + &peer_id, NewConnectionState::Connected { enr: Some(enr), seen_address: Multiaddr::empty(), direction: ConnectionDirection::Outgoing, }, - ) + ); + + peer_id } /// The connection state of the peer has been changed. 
Modify the peer in the db to ensure all @@ -746,8 +789,16 @@ impl PeerDB { seen_address, }, ) => { - // Update the ENR if one exists + // Update the ENR if one exists, and compute the custody subnets if let Some(enr) = enr { + let custody_subnet_count = enr.custody_subnet_count::(&self.spec); + let custody_subnets = DataColumnSubnetId::compute_custody_subnets::( + enr.node_id().raw(), + custody_subnet_count, + &self.spec, + ) + .collect::>(); + info.set_custody_subnets(custody_subnets); info.set_enr(enr); } @@ -1298,7 +1349,8 @@ mod tests { fn get_db() -> PeerDB { let log = build_log(slog::Level::Debug, false); - PeerDB::new(vec![], false, &log) + let spec = M::default_spec(); + PeerDB::new(vec![], false, &log, spec) } #[test] @@ -1997,7 +2049,8 @@ mod tests { fn test_trusted_peers_score() { let trusted_peer = PeerId::random(); let log = build_log(slog::Level::Debug, false); - let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], false, &log); + let spec = M::default_spec(); + let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], false, &log, spec); pdb.connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None); @@ -2021,7 +2074,8 @@ mod tests { fn test_disable_peer_scoring() { let peer = PeerId::random(); let log = build_log(slog::Level::Debug, false); - let mut pdb: PeerDB = PeerDB::new(vec![], true, &log); + let spec = M::default_spec(); + let mut pdb: PeerDB = PeerDB::new(vec![], true, &log, spec); pdb.connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 0745cc26008..1ea3f8ed5fc 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -3,6 +3,7 @@ use super::score::{PeerAction, Score, ScoreState}; use super::sync_status::SyncStatus; use crate::discovery::Eth2Enr; use 
crate::{rpc::MetaData, types::Subnet}; +use discv5::enr::NodeId; use discv5::Enr; use libp2p::core::multiaddr::{Multiaddr, Protocol}; use serde::{ @@ -13,7 +14,7 @@ use std::collections::HashSet; use std::net::IpAddr; use std::time::Instant; use strum::AsRefStr; -use types::EthSpec; +use types::{ChainSpec, DataColumnSubnetId, EthSpec}; use PeerConnectionStatus::*; /// Information about a given connected peer. @@ -40,6 +41,11 @@ pub struct PeerInfo { meta_data: Option>, /// Subnets the peer is connected to. subnets: HashSet, + /// This is computed from either metadata or the ENR, and contains the subnets that the peer + /// is *assigned* to custody, rather than *connected* to (different to `self.subnets`). + /// Note: Another reason to keep this separate to `self.subnets` is an upcoming change to + /// decouple custody requirements from the actual subnets, i.e. changing this to `custody_groups`. + custody_subnets: HashSet, /// The time we would like to retain this peer. After this time, the peer is no longer /// necessary. #[serde(skip)] @@ -62,6 +68,7 @@ impl Default for PeerInfo { listening_addresses: Vec::new(), seen_multiaddrs: HashSet::new(), subnets: HashSet::new(), + custody_subnets: HashSet::new(), sync_status: SyncStatus::Unknown, meta_data: None, min_ttl: None, @@ -83,6 +90,7 @@ impl PeerInfo { } /// Returns if the peer is subscribed to a given `Subnet` from the metadata attnets/syncnets field. + /// Also returns true if the peer is assigned to custody a given data column `Subnet` computed from the metadata `custody_column_count` field or ENR `csc` field. 
pub fn on_subnet_metadata(&self, subnet: &Subnet) -> bool { if let Some(meta_data) = &self.meta_data { match subnet { @@ -94,15 +102,7 @@ impl PeerInfo { .syncnets() .map_or(false, |s| s.get(**id as usize).unwrap_or(false)) } - Subnet::DataColumn(_) => { - // TODO(das): Pending spec PR https://github.com/ethereum/consensus-specs/pull/3821 - // We should use MetaDataV3 for peer selection rather than - // looking at subscribed peers (current behavior). Until MetaDataV3 is - // implemented, this is the perhaps the only viable option on the current devnet - // as the peer count is low and it's important to identify supernodes to get a - // good distribution of peers across subnets. - return true; - } + Subnet::DataColumn(column) => return self.custody_subnets.contains(column), } } false @@ -210,6 +210,11 @@ impl PeerInfo { self.subnets.contains(subnet) } + /// Returns if the peer is assigned to a given `DataColumnSubnetId`. + pub fn is_assigned_to_custody_subnet(&self, subnet: &DataColumnSubnetId) -> bool { + self.custody_subnets.contains(subnet) + } + /// Returns true if the peer is connected to a long-lived subnet. pub fn has_long_lived_subnet(&self) -> bool { // Check the meta_data @@ -353,8 +358,32 @@ impl PeerInfo { /// Sets an explicit value for the meta data. 
// VISIBILITY: The peer manager is able to adjust the meta_data - pub(in crate::peer_manager) fn set_meta_data(&mut self, meta_data: MetaData) { - self.meta_data = Some(meta_data) + pub(in crate::peer_manager) fn set_meta_data( + &mut self, + meta_data: MetaData, + node_id_opt: Option, + spec: &ChainSpec, + ) { + // If we don't have a node id, we cannot compute the custody duties anyway + let Some(node_id) = node_id_opt else { + self.meta_data = Some(meta_data); + return; + }; + + // Already set by enr if custody_subnets is non empty + if self.custody_subnets.is_empty() { + if let Ok(custody_subnet_count) = meta_data.custody_subnet_count() { + let custody_subnets = DataColumnSubnetId::compute_custody_subnets::( + node_id.raw(), + std::cmp::min(*custody_subnet_count, spec.data_column_sidecar_subnet_count), + spec, + ) + .collect::>(); + self.set_custody_subnets(custody_subnets); + } + } + + self.meta_data = Some(meta_data); } /// Sets the connection status of the peer. @@ -362,6 +391,10 @@ impl PeerInfo { self.connection_status = connection_status } + pub(super) fn set_custody_subnets(&mut self, custody_subnets: HashSet) { + self.custody_subnets = custody_subnets + } + /// Sets the ENR of the peer if one is known. 
pub(super) fn set_enr(&mut self, enr: Enr) { self.enr = Some(enr) diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index 42a31d3480a..4b9e8d50975 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -176,7 +176,7 @@ mod tests { use crate::rpc::protocol::*; use std::sync::Arc; - use types::{Epoch, ForkContext, ForkName, Hash256, Slot}; + use types::{Epoch, FixedBytesExtended, ForkContext, ForkName, Hash256, Slot}; use unsigned_varint::codec::Uvi; type Spec = types::MainnetEthSpec; diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 482d1d96b4a..8f5143d7ed9 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -16,10 +16,11 @@ use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ - BlobSidecar, ChainSpec, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, ForkContext, ForkName, Hash256, + LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, + RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, + SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, + SignedBeaconBlockElectra, }; use unsigned_varint::codec::Uvi; @@ -70,6 +71,8 @@ impl Encoder> for SSZSnappyInboundCodec { RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), RPCResponse::BlobsByRoot(res) 
=> res.as_ssz_bytes(), + RPCResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), + RPCResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), RPCResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), @@ -79,9 +82,10 @@ impl Encoder> for SSZSnappyInboundCodec { { match self.protocol.versioned_protocol { SupportedProtocol::MetaDataV1 => res.metadata_v1().as_ssz_bytes(), - // We always send V2 metadata responses from the behaviour - // No change required. SupportedProtocol::MetaDataV2 => res.metadata_v2().as_ssz_bytes(), + SupportedProtocol::MetaDataV3 => { + res.metadata_v3(&self.fork_context.spec).as_ssz_bytes() + } _ => unreachable!( "We only send metadata responses on negotiating metadata requests" ), @@ -133,6 +137,9 @@ impl Decoder for SSZSnappyInboundCodec { if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV2 { return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2()))); } + if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV3 { + return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v3()))); + } let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? 
else { return Ok(None); }; @@ -224,6 +231,8 @@ impl Encoder> for SSZSnappyOutboundCodec { }, OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(), OutboundRequest::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), + OutboundRequest::DataColumnsByRange(req) => req.as_ssz_bytes(), + OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode }; @@ -417,6 +426,17 @@ fn context_bytes( RPCResponse::BlobsByRange(_) | RPCResponse::BlobsByRoot(_) => { return fork_context.to_context_bytes(ForkName::Deneb); } + RPCResponse::DataColumnsByRoot(d) | RPCResponse::DataColumnsByRange(d) => { + // TODO(das): Remove deneb fork after `peerdas-devnet-2`. + return if matches!( + fork_context.spec.fork_name_at_slot::(d.slot()), + ForkName::Deneb + ) { + fork_context.to_context_bytes(ForkName::Deneb) + } else { + fork_context.to_context_bytes(ForkName::Electra) + }; + } RPCResponse::LightClientBootstrap(lc_bootstrap) => { return lc_bootstrap .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); @@ -512,6 +532,17 @@ fn handle_rpc_request( )?, }))) } + SupportedProtocol::DataColumnsByRangeV1 => Ok(Some(InboundRequest::DataColumnsByRange( + DataColumnsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), + SupportedProtocol::DataColumnsByRootV1 => Ok(Some(InboundRequest::DataColumnsByRoot( + DataColumnsByRootRequest { + data_column_ids: RuntimeVariableList::from_ssz_bytes( + decoded_buffer, + spec.max_request_data_column_sidecars as usize, + )?, + }, + ))), SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -528,6 +559,15 @@ fn handle_rpc_request( } // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. 
+ SupportedProtocol::MetaDataV3 => { + if !decoded_buffer.is_empty() { + Err(RPCError::InternalError( + "Metadata requests shouldn't reach decoder", + )) + } else { + Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v3()))) + } + } SupportedProtocol::MetaDataV2 => { if !decoded_buffer.is_empty() { Err(RPCError::InternalError( @@ -604,6 +644,51 @@ fn handle_rpc_response( ), )), }, + SupportedProtocol::DataColumnsByRootV1 => match fork_name { + Some(fork_name) => { + // TODO(das): PeerDAS is currently supported for both deneb and electra. This check + // does not advertise the topic on deneb, simply allows it to decode it. Advertise + // logic is in `SupportedTopic::currently_supported`. + if fork_name.deneb_enabled() { + Ok(Some(RPCResponse::DataColumnsByRoot(Arc::new( + DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid fork name for data columns by root".to_string(), + )) + } + } + None => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, + SupportedProtocol::DataColumnsByRangeV1 => match fork_name { + Some(fork_name) => { + if fork_name.deneb_enabled() { + Ok(Some(RPCResponse::DataColumnsByRange(Arc::new( + DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid fork name for data columns by range".to_string(), + )) + } + } + None => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, SupportedProtocol::PingV1 => Ok(Some(RPCResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -646,7 +731,10 @@ fn handle_rpc_response( ), )), }, - // MetaData V2 responses have no context bytes, so behave similarly to V1 responses + // 
MetaData V2/V3 responses have no context bytes, so behave similarly to V1 responses + SupportedProtocol::MetaDataV3 => Ok(Some(RPCResponse::MetaData(MetaData::V3( + MetaDataV3::from_ssz_bytes(decoded_buffer)?, + )))), SupportedProtocol::MetaDataV2 => Ok(Some(RPCResponse::MetaData(MetaData::V2( MetaDataV2::from_ssz_bytes(decoded_buffer)?, )))), @@ -747,7 +835,8 @@ mod tests { use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use types::{ blob_sidecar::BlobIdentifier, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, - BeaconBlockBellatrix, EmptyBlock, Epoch, FullPayload, Signature, Slot, + BeaconBlockBellatrix, DataColumnIdentifier, EmptyBlock, Epoch, FixedBytesExtended, + FullPayload, Signature, Slot, }; type Spec = types::MainnetEthSpec; @@ -794,6 +883,10 @@ mod tests { Arc::new(BlobSidecar::empty()) } + fn empty_data_column_sidecar() -> Arc> { + Arc::new(DataColumnSidecar::empty()) + } + /// Bellatrix block with length < max_rpc_size. fn bellatrix_block_small( fork_context: &ForkContext, @@ -833,9 +926,9 @@ mod tests { fn status_message() -> StatusMessage { StatusMessage { fork_digest: [0; 4], - finalized_root: Hash256::from_low_u64_be(0), + finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), - head_root: Hash256::from_low_u64_be(0), + head_root: Hash256::zero(), head_slot: Slot::new(1), } } @@ -855,6 +948,27 @@ mod tests { } } + fn dcbrange_request() -> DataColumnsByRangeRequest { + DataColumnsByRangeRequest { + start_slot: 0, + count: 10, + columns: vec![1, 2, 3], + } + } + + fn dcbroot_request(spec: &ChainSpec) -> DataColumnsByRootRequest { + DataColumnsByRootRequest { + data_column_ids: RuntimeVariableList::new( + vec![DataColumnIdentifier { + block_root: Hash256::zero(), + index: 0, + }], + spec.max_request_data_column_sidecars as usize, + ) + .unwrap(), + } + } + fn bbroot_request_v1(spec: &ChainSpec) -> BlocksByRootRequest { BlocksByRootRequest::new_v1(vec![Hash256::zero()], spec) } @@ -892,6 +1006,15 @@ mod tests { }) } + 
fn metadata_v3() -> MetaData { + MetaData::V3(MetaDataV3 { + seq_number: 1, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + custody_subnet_count: 1, + }) + } + /// Encodes the given protocol response as bytes. fn encode_response( protocol: SupportedProtocol, @@ -1012,6 +1135,12 @@ mod tests { OutboundRequest::BlobsByRoot(bbroot) => { assert_eq!(decoded, InboundRequest::BlobsByRoot(bbroot)) } + OutboundRequest::DataColumnsByRoot(dcbroot) => { + assert_eq!(decoded, InboundRequest::DataColumnsByRoot(dcbroot)) + } + OutboundRequest::DataColumnsByRange(dcbrange) => { + assert_eq!(decoded, InboundRequest::DataColumnsByRange(dcbrange)) + } OutboundRequest::Ping(ping) => { assert_eq!(decoded, InboundRequest::Ping(ping)) } @@ -1119,6 +1248,17 @@ mod tests { Ok(Some(RPCResponse::MetaData(metadata()))), ); + // A MetaDataV3 still encodes as a MetaDataV2 since version is Version::V2 + assert_eq!( + encode_then_decode_response( + SupportedProtocol::MetaDataV2, + RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v3())), + ForkName::Base, + &chain_spec, + ), + Ok(Some(RPCResponse::MetaData(metadata_v2()))), + ); + assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRangeV1, @@ -1138,6 +1278,34 @@ mod tests { ), Ok(Some(RPCResponse::BlobsByRoot(empty_blob_sidecar()))), ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRangeV1, + RPCCodedResponse::Success(RPCResponse::DataColumnsByRange( + empty_data_column_sidecar() + )), + ForkName::Deneb, + &chain_spec + ), + Ok(Some(RPCResponse::DataColumnsByRange( + empty_data_column_sidecar() + ))), + ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRootV1, + RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot( + empty_data_column_sidecar() + )), + ForkName::Deneb, + &chain_spec + ), + Ok(Some(RPCResponse::DataColumnsByRoot( + empty_data_column_sidecar() + ))), + ); } // Test RPCResponse 
encoding/decoding for V1 messages @@ -1491,6 +1659,8 @@ mod tests { OutboundRequest::MetaData(MetadataRequest::new_v1()), OutboundRequest::BlobsByRange(blbrange_request()), OutboundRequest::BlobsByRoot(blbroot_request(&chain_spec)), + OutboundRequest::DataColumnsByRange(dcbrange_request()), + OutboundRequest::DataColumnsByRoot(dcbroot_request(&chain_spec)), OutboundRequest::MetaData(MetadataRequest::new_v2()), ]; @@ -1517,9 +1687,9 @@ mod tests { // Status message is 84 bytes uncompressed. `max_compressed_len` is 32 + 84 + 84/6 = 130. let status_message_bytes = StatusMessage { fork_digest: [0; 4], - finalized_root: Hash256::from_low_u64_be(0), + finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), - head_root: Hash256::from_low_u64_be(0), + head_root: Hash256::zero(), head_slot: Slot::new(1), } .as_ssz_bytes(); @@ -1640,9 +1810,9 @@ mod tests { // Status message is 84 bytes uncompressed. `max_compressed_len` is 32 + 84 + 84/6 = 130. let status_message_bytes = StatusMessage { fork_digest: [0; 4], - finalized_root: Hash256::from_low_u64_be(0), + finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), - head_root: Hash256::from_low_u64_be(0), + head_root: Hash256::zero(), head_slot: Slot::new(1), } .as_ssz_bytes(); diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index d17fa112a1b..fcb9c986048 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -91,6 +91,8 @@ pub struct RateLimiterConfig { pub(super) blocks_by_root_quota: Quota, pub(super) blobs_by_range_quota: Quota, pub(super) blobs_by_root_quota: Quota, + pub(super) data_columns_by_root_quota: Quota, + pub(super) data_columns_by_range_quota: Quota, pub(super) light_client_bootstrap_quota: Quota, pub(super) light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, @@ -110,6 +112,12 @@ impl RateLimiterConfig { // measured 
against the maximum request size. pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(6144, 10); pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(768, 10); + // 320 blocks worth of columns for regular node, or 40 blocks for supernode. + // Range sync load balances when requesting blocks, and each batch is 32 blocks. + pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10); + // 512 columns per request from spec. This should be plenty as peers are unlikely to send all + // sampling requests to a single peer. + pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); @@ -126,6 +134,8 @@ impl Default for RateLimiterConfig { blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA, blobs_by_root_quota: Self::DEFAULT_BLOBS_BY_ROOT_QUOTA, + data_columns_by_root_quota: Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA, + data_columns_by_range_quota: Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA, light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA, light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, @@ -155,6 +165,14 @@ impl Debug for RateLimiterConfig { .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota)) .field("blobs_by_range", fmt_q!(&self.blobs_by_range_quota)) .field("blobs_by_root", fmt_q!(&self.blobs_by_root_quota)) + .field( + "data_columns_by_range", + fmt_q!(&self.data_columns_by_range_quota), + ) + .field( + "data_columns_by_root", + fmt_q!(&self.data_columns_by_root_quota), + ) .finish() } } @@ -175,6 +193,8 @@ impl FromStr for RateLimiterConfig { let mut blocks_by_root_quota = None; let mut blobs_by_range_quota = None; let mut 
blobs_by_root_quota = None; + let mut data_columns_by_root_quota = None; + let mut data_columns_by_range_quota = None; let mut light_client_bootstrap_quota = None; let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; @@ -189,6 +209,12 @@ impl FromStr for RateLimiterConfig { Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota), Protocol::BlobsByRoot => blobs_by_root_quota = blobs_by_root_quota.or(quota), + Protocol::DataColumnsByRoot => { + data_columns_by_root_quota = data_columns_by_root_quota.or(quota) + } + Protocol::DataColumnsByRange => { + data_columns_by_range_quota = data_columns_by_range_quota.or(quota) + } Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), Protocol::LightClientBootstrap => { @@ -216,6 +242,10 @@ impl FromStr for RateLimiterConfig { blobs_by_range_quota: blobs_by_range_quota .unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA), blobs_by_root_quota: blobs_by_root_quota.unwrap_or(Self::DEFAULT_BLOBS_BY_ROOT_QUOTA), + data_columns_by_root_quota: data_columns_by_root_quota + .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA), + data_columns_by_range_quota: data_columns_by_range_quota + .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA), light_client_bootstrap_quota: light_client_bootstrap_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA), light_client_optimistic_update_quota: light_client_optimistic_update_quota diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 1b0486ff771..a96b9d1b166 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -6,6 +6,7 @@ use serde::Serialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::U256, VariableList}; +use 
std::collections::BTreeMap; use std::fmt::Display; use std::marker::PhantomData; use std::ops::Deref; @@ -14,9 +15,9 @@ use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; use types::{ - blob_sidecar::BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, - Slot, + blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, + Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate, + LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, }; /// Maximum length of error message. @@ -88,7 +89,7 @@ pub struct Ping { /// The METADATA request structure. #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes(derive(Clone, Debug, PartialEq, Serialize),) )] #[derive(Clone, Debug, PartialEq)] @@ -108,11 +109,17 @@ impl MetadataRequest { _phantom_data: PhantomData, }) } + + pub fn new_v3() -> Self { + Self::V3(MetadataRequestV3 { + _phantom_data: PhantomData, + }) + } } /// The METADATA response structure. #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes( derive(Encode, Decode, Clone, Debug, PartialEq, Serialize), serde(bound = "E: EthSpec", deny_unknown_fields), @@ -126,8 +133,10 @@ pub struct MetaData { /// The persistent attestation subnet bitfield. pub attnets: EnrAttestationBitfield, /// The persistent sync committee bitfield. 
- #[superstruct(only(V2))] + #[superstruct(only(V2, V3))] pub syncnets: EnrSyncCommitteeBitfield, + #[superstruct(only(V3))] + pub custody_subnet_count: u64, } impl MetaData { @@ -139,6 +148,10 @@ impl MetaData { seq_number: metadata.seq_number, attnets: metadata.attnets.clone(), }), + MetaData::V3(metadata) => MetaData::V1(MetaDataV1 { + seq_number: metadata.seq_number, + attnets: metadata.attnets.clone(), + }), } } @@ -151,6 +164,30 @@ impl MetaData { syncnets: Default::default(), }), md @ MetaData::V2(_) => md.clone(), + MetaData::V3(metadata) => MetaData::V2(MetaDataV2 { + seq_number: metadata.seq_number, + attnets: metadata.attnets.clone(), + syncnets: metadata.syncnets.clone(), + }), + } + } + + /// Returns a V3 MetaData response from self by filling unavailable fields with default. + pub fn metadata_v3(&self, spec: &ChainSpec) -> Self { + match self { + MetaData::V1(metadata) => MetaData::V3(MetaDataV3 { + seq_number: metadata.seq_number, + attnets: metadata.attnets.clone(), + syncnets: Default::default(), + custody_subnet_count: spec.custody_requirement, + }), + MetaData::V2(metadata) => MetaData::V3(MetaDataV3 { + seq_number: metadata.seq_number, + attnets: metadata.attnets.clone(), + syncnets: metadata.syncnets.clone(), + custody_subnet_count: spec.custody_requirement, + }), + md @ MetaData::V3(_) => md.clone(), } } @@ -158,6 +195,7 @@ impl MetaData { match self { MetaData::V1(md) => md.as_ssz_bytes(), MetaData::V2(md) => md.as_ssz_bytes(), + MetaData::V3(md) => md.as_ssz_bytes(), } } } @@ -293,6 +331,43 @@ impl BlobsByRangeRequest { } } +/// Request a number of beacon data columns from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct DataColumnsByRangeRequest { + /// The starting slot to request data columns. + pub start_slot: u64, + /// The number of slots from the start slot. + pub count: u64, + /// The list column indices being requested. 
+ pub columns: Vec, +} + +impl DataColumnsByRangeRequest { + pub fn max_requested(&self) -> u64 { + self.count.saturating_mul(self.columns.len() as u64) + } + + pub fn ssz_min_len() -> usize { + DataColumnsByRangeRequest { + start_slot: 0, + count: 0, + columns: vec![0], + } + .as_ssz_bytes() + .len() + } + + pub fn ssz_max_len(spec: &ChainSpec) -> usize { + DataColumnsByRangeRequest { + start_slot: 0, + count: 0, + columns: vec![0; spec.number_of_columns], + } + .as_ssz_bytes() + .len() + } +} + /// Request a number of beacon block roots from a peer. #[superstruct( variants(V1, V2), @@ -370,6 +445,38 @@ impl BlobsByRootRequest { } } +/// Request a number of data columns from a peer. +#[derive(Clone, Debug, PartialEq)] +pub struct DataColumnsByRootRequest { + /// The list of beacon block roots and column indices being requested. + pub data_column_ids: RuntimeVariableList, +} + +impl DataColumnsByRootRequest { + pub fn new(data_column_ids: Vec, spec: &ChainSpec) -> Self { + let data_column_ids = RuntimeVariableList::from_vec( + data_column_ids, + spec.max_request_data_column_sidecars as usize, + ); + Self { data_column_ids } + } + + pub fn new_single(block_root: Hash256, index: ColumnIndex, spec: &ChainSpec) -> Self { + Self::new(vec![DataColumnIdentifier { block_root, index }], spec) + } + + pub fn group_by_ordered_block_root(&self) -> Vec<(Hash256, Vec)> { + let mut column_indexes_by_block = BTreeMap::>::new(); + for request_id in self.data_column_ids.as_slice() { + column_indexes_by_block + .entry(request_id.block_root) + .or_default() + .push(request_id.index); + } + column_indexes_by_block.into_iter().collect() + } +} + /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -400,6 +507,12 @@ pub enum RPCResponse { /// A response to a get BLOBS_BY_ROOT request. BlobsByRoot(Arc>), + /// A response to a get DATA_COLUMN_SIDECARS_BY_ROOT request. 
+ DataColumnsByRoot(Arc>), + + /// A response to a get DATA_COLUMN_SIDECARS_BY_RANGE request. + DataColumnsByRange(Arc>), + /// A PONG response to a PING request. Pong(Ping), @@ -421,6 +534,12 @@ pub enum ResponseTermination { /// Blobs by root stream termination. BlobsByRoot, + + /// Data column sidecars by root stream termination. + DataColumnsByRoot, + + /// Data column sidecars by range stream termination. + DataColumnsByRange, } /// The structured response containing a result/code indicating success or failure @@ -511,6 +630,8 @@ impl RPCResponse { RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, + RPCResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, + RPCResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -556,6 +677,16 @@ impl std::fmt::Display for RPCResponse { RPCResponse::BlobsByRoot(sidecar) => { write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot()) } + RPCResponse::DataColumnsByRoot(sidecar) => { + write!(f, "DataColumnsByRoot: Data column slot: {}", sidecar.slot()) + } + RPCResponse::DataColumnsByRange(sidecar) => { + write!( + f, + "DataColumnsByRange: Data column slot: {}", + sidecar.slot() + ) + } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), RPCResponse::LightClientBootstrap(bootstrap) => { @@ -648,6 +779,16 @@ impl std::fmt::Display for BlobsByRangeRequest { } } +impl std::fmt::Display for DataColumnsByRootRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: DataColumnsByRoot: Number of Requested Data Column Ids: {}", + self.data_column_ids.len() + ) + } +} + impl slog::KV for StatusMessage { 
fn serialize( &self, diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 027af89edfa..cd591554a36 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -6,6 +6,7 @@ use futures::future::FutureExt; use handler::RPCHandler; +use libp2p::core::transport::PortUse; use libp2p::swarm::{ handler::ConnectionHandler, CloseConnection, ConnectionId, NetworkBehaviour, NotifyHandler, ToSwarm, @@ -21,7 +22,9 @@ use std::time::Duration; use types::{EthSpec, ForkContext}; pub(crate) use handler::{HandlerErr, HandlerEvent}; -pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse}; +pub(crate) use methods::{ + MetaData, MetaDataV1, MetaDataV2, MetaDataV3, Ping, RPCCodedResponse, RPCResponse, +}; pub(crate) use protocol::InboundRequest; pub use handler::SubstreamId; @@ -257,6 +260,7 @@ where peer_id: PeerId, _addr: &libp2p::Multiaddr, _role_override: libp2p::core::Endpoint, + _port_use: PortUse, ) -> Result, libp2p::swarm::ConnectionDenied> { let protocol = SubstreamProtocol::new( RPCProtocol { @@ -366,8 +370,10 @@ where protocol, Protocol::BlocksByRange | Protocol::BlobsByRange + | Protocol::DataColumnsByRange | Protocol::BlocksByRoot | Protocol::BlobsByRoot + | Protocol::DataColumnsByRoot ) { debug!(self.log, "Request too large to process"; "request" => %req, "protocol" => %protocol); } else { @@ -471,6 +477,8 @@ where ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, ResponseTermination::BlobsByRange => Protocol::BlobsByRange, ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, + ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, + ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, }, ), }; diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 8ea7b84bc95..c67c7865ea3 100644 --- 
a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -36,6 +36,8 @@ pub enum OutboundRequest { BlocksByRoot(BlocksByRootRequest), BlobsByRange(BlobsByRangeRequest), BlobsByRoot(BlobsByRootRequest), + DataColumnsByRoot(DataColumnsByRootRequest), + DataColumnsByRange(DataColumnsByRangeRequest), Ping(Ping), MetaData(MetadataRequest), } @@ -79,11 +81,20 @@ impl OutboundRequest { SupportedProtocol::BlobsByRootV1, Encoding::SSZSnappy, )], + OutboundRequest::DataColumnsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRootV1, + Encoding::SSZSnappy, + )], + OutboundRequest::DataColumnsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRangeV1, + Encoding::SSZSnappy, + )], OutboundRequest::Ping(_) => vec![ProtocolId::new( SupportedProtocol::PingV1, Encoding::SSZSnappy, )], OutboundRequest::MetaData(_) => vec![ + ProtocolId::new(SupportedProtocol::MetaDataV3, Encoding::SSZSnappy), ProtocolId::new(SupportedProtocol::MetaDataV2, Encoding::SSZSnappy), ProtocolId::new(SupportedProtocol::MetaDataV1, Encoding::SSZSnappy), ], @@ -100,6 +111,8 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, OutboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), OutboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, + OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, + OutboundRequest::DataColumnsByRange(req) => req.max_requested::(), OutboundRequest::Ping(_) => 1, OutboundRequest::MetaData(_) => 1, } @@ -113,6 +126,8 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(_) => false, OutboundRequest::BlobsByRange(_) => false, OutboundRequest::BlobsByRoot(_) => false, + OutboundRequest::DataColumnsByRoot(_) => false, + OutboundRequest::DataColumnsByRange(_) => false, OutboundRequest::Ping(_) => true, OutboundRequest::MetaData(_) => true, } @@ -133,10 +148,13 @@ impl OutboundRequest { }, 
OutboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, OutboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, + OutboundRequest::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, + OutboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, OutboundRequest::Ping(_) => SupportedProtocol::PingV1, OutboundRequest::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2, + MetadataRequest::V3(_) => SupportedProtocol::MetaDataV3, }, } } @@ -151,6 +169,8 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, + OutboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, + OutboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Ping(_) => unreachable!(), @@ -208,6 +228,10 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), + OutboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), + OutboundRequest::DataColumnsByRange(req) => { + write!(f, "Data columns by range: {:?}", req) + } OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 2cdd730a2b0..f4bdf6450b8 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -18,10 +18,10 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockElectra, - BlobSidecar, ChainSpec, EmptyBlock, EthSpec, ForkContext, ForkName, LightClientBootstrap, - LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, - LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, - SignedBeaconBlock, + BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, ForkContext, ForkName, + LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, + LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, + LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -268,6 +268,12 @@ pub enum Protocol { /// The `BlobsByRoot` protocol name. #[strum(serialize = "blob_sidecars_by_root")] BlobsByRoot, + /// The `DataColumnSidecarsByRoot` protocol name. + #[strum(serialize = "data_column_sidecars_by_root")] + DataColumnsByRoot, + /// The `DataColumnSidecarsByRange` protocol name. + #[strum(serialize = "data_column_sidecars_by_range")] + DataColumnsByRange, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. 
@@ -293,6 +299,8 @@ impl Protocol { Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot), Protocol::BlobsByRange => Some(ResponseTermination::BlobsByRange), Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot), + Protocol::DataColumnsByRoot => Some(ResponseTermination::DataColumnsByRoot), + Protocol::DataColumnsByRange => Some(ResponseTermination::DataColumnsByRange), Protocol::Ping => None, Protocol::MetaData => None, Protocol::LightClientBootstrap => None, @@ -319,9 +327,12 @@ pub enum SupportedProtocol { BlocksByRootV2, BlobsByRangeV1, BlobsByRootV1, + DataColumnsByRootV1, + DataColumnsByRangeV1, PingV1, MetaDataV1, MetaDataV2, + MetaDataV3, LightClientBootstrapV1, LightClientOptimisticUpdateV1, LightClientFinalityUpdateV1, @@ -338,9 +349,12 @@ impl SupportedProtocol { SupportedProtocol::BlocksByRootV2 => "2", SupportedProtocol::BlobsByRangeV1 => "1", SupportedProtocol::BlobsByRootV1 => "1", + SupportedProtocol::DataColumnsByRootV1 => "1", + SupportedProtocol::DataColumnsByRangeV1 => "1", SupportedProtocol::PingV1 => "1", SupportedProtocol::MetaDataV1 => "1", SupportedProtocol::MetaDataV2 => "2", + SupportedProtocol::MetaDataV3 => "3", SupportedProtocol::LightClientBootstrapV1 => "1", SupportedProtocol::LightClientOptimisticUpdateV1 => "1", SupportedProtocol::LightClientFinalityUpdateV1 => "1", @@ -357,9 +371,12 @@ impl SupportedProtocol { SupportedProtocol::BlocksByRootV2 => Protocol::BlocksByRoot, SupportedProtocol::BlobsByRangeV1 => Protocol::BlobsByRange, SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot, + SupportedProtocol::DataColumnsByRootV1 => Protocol::DataColumnsByRoot, + SupportedProtocol::DataColumnsByRangeV1 => Protocol::DataColumnsByRange, SupportedProtocol::PingV1 => Protocol::Ping, SupportedProtocol::MetaDataV1 => Protocol::MetaData, SupportedProtocol::MetaDataV2 => Protocol::MetaData, + SupportedProtocol::MetaDataV3 => Protocol::MetaData, SupportedProtocol::LightClientBootstrapV1 => 
Protocol::LightClientBootstrap, SupportedProtocol::LightClientOptimisticUpdateV1 => { Protocol::LightClientOptimisticUpdate @@ -378,15 +395,32 @@ impl SupportedProtocol { ProtocolId::new(Self::BlocksByRootV2, Encoding::SSZSnappy), ProtocolId::new(Self::BlocksByRootV1, Encoding::SSZSnappy), ProtocolId::new(Self::PingV1, Encoding::SSZSnappy), - ProtocolId::new(Self::MetaDataV2, Encoding::SSZSnappy), - ProtocolId::new(Self::MetaDataV1, Encoding::SSZSnappy), ]; + if fork_context.spec.is_peer_das_scheduled() { + supported.extend_from_slice(&[ + // V3 variants have higher preference for protocol negotation + ProtocolId::new(Self::MetaDataV3, Encoding::SSZSnappy), + ProtocolId::new(Self::MetaDataV2, Encoding::SSZSnappy), + ProtocolId::new(Self::MetaDataV1, Encoding::SSZSnappy), + ]); + } else { + supported.extend_from_slice(&[ + ProtocolId::new(Self::MetaDataV2, Encoding::SSZSnappy), + ProtocolId::new(Self::MetaDataV1, Encoding::SSZSnappy), + ]); + } if fork_context.fork_exists(ForkName::Deneb) { supported.extend_from_slice(&[ ProtocolId::new(SupportedProtocol::BlobsByRootV1, Encoding::SSZSnappy), ProtocolId::new(SupportedProtocol::BlobsByRangeV1, Encoding::SSZSnappy), ]); } + if fork_context.spec.is_peer_das_scheduled() { + supported.extend_from_slice(&[ + ProtocolId::new(SupportedProtocol::DataColumnsByRootV1, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::DataColumnsByRangeV1, Encoding::SSZSnappy), + ]); + } supported } } @@ -495,6 +529,11 @@ impl ProtocolId { ::ssz_fixed_len(), ), Protocol::BlobsByRoot => RpcLimits::new(0, spec.max_blobs_by_root_request), + Protocol::DataColumnsByRoot => RpcLimits::new(0, spec.max_data_columns_by_root_request), + Protocol::DataColumnsByRange => RpcLimits::new( + DataColumnsByRangeRequest::ssz_min_len(), + DataColumnsByRangeRequest::ssz_max_len(spec), + ), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -521,13 +560,15 @@ impl ProtocolId { Protocol::BlocksByRoot => 
rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlobsByRange => rpc_blob_limits::(), Protocol::BlobsByRoot => rpc_blob_limits::(), + Protocol::DataColumnsByRoot => rpc_data_column_limits::(), + Protocol::DataColumnsByRange => rpc_data_column_limits::(), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), ), Protocol::MetaData => RpcLimits::new( as Encode>::ssz_fixed_len(), - as Encode>::ssz_fixed_len(), + as Encode>::ssz_fixed_len(), ), Protocol::LightClientBootstrap => { rpc_light_client_bootstrap_limits_by_fork(fork_context.current_fork()) @@ -549,6 +590,8 @@ impl ProtocolId { | SupportedProtocol::BlocksByRootV2 | SupportedProtocol::BlobsByRangeV1 | SupportedProtocol::BlobsByRootV1 + | SupportedProtocol::DataColumnsByRootV1 + | SupportedProtocol::DataColumnsByRangeV1 | SupportedProtocol::LightClientBootstrapV1 | SupportedProtocol::LightClientOptimisticUpdateV1 | SupportedProtocol::LightClientFinalityUpdateV1 => true, @@ -558,6 +601,7 @@ impl ProtocolId { | SupportedProtocol::PingV1 | SupportedProtocol::MetaDataV1 | SupportedProtocol::MetaDataV2 + | SupportedProtocol::MetaDataV3 | SupportedProtocol::GoodbyeV1 => false, } } @@ -589,6 +633,13 @@ pub fn rpc_blob_limits() -> RpcLimits { ) } +pub fn rpc_data_column_limits() -> RpcLimits { + RpcLimits::new( + DataColumnSidecar::::empty().as_ssz_bytes().len(), + DataColumnSidecar::::max_size(), + ) +} + /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol @@ -635,6 +686,9 @@ where SupportedProtocol::MetaDataV2 => { Ok((InboundRequest::MetaData(MetadataRequest::new_v2()), socket)) } + SupportedProtocol::MetaDataV3 => { + Ok((InboundRequest::MetaData(MetadataRequest::new_v3()), socket)) + } SupportedProtocol::LightClientOptimisticUpdateV1 => { Ok((InboundRequest::LightClientOptimisticUpdate, socket)) } @@ -668,6 +722,8 @@ pub enum InboundRequest { BlocksByRoot(BlocksByRootRequest), BlobsByRange(BlobsByRangeRequest), 
BlobsByRoot(BlobsByRootRequest), + DataColumnsByRoot(DataColumnsByRootRequest), + DataColumnsByRange(DataColumnsByRangeRequest), LightClientBootstrap(LightClientBootstrapRequest), LightClientOptimisticUpdate, LightClientFinalityUpdate, @@ -688,6 +744,8 @@ impl InboundRequest { InboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, InboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), InboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, + InboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, + InboundRequest::DataColumnsByRange(req) => req.max_requested::(), InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, InboundRequest::LightClientBootstrap(_) => 1, @@ -711,10 +769,13 @@ impl InboundRequest { }, InboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, InboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, + InboundRequest::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, + InboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, InboundRequest::Ping(_) => SupportedProtocol::PingV1, InboundRequest::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2, + MetadataRequest::V3(_) => SupportedProtocol::MetaDataV3, }, InboundRequest::LightClientBootstrap(_) => SupportedProtocol::LightClientBootstrapV1, InboundRequest::LightClientOptimisticUpdate => { @@ -736,6 +797,8 @@ impl InboundRequest { InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, InboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, + InboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, + InboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, InboundRequest::Status(_) => unreachable!(), InboundRequest::Goodbye(_) => unreachable!(), 
InboundRequest::Ping(_) => unreachable!(), @@ -846,6 +909,10 @@ impl std::fmt::Display for InboundRequest { InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), InboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), + InboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), + InboundRequest::DataColumnsByRange(req) => { + write!(f, "Data columns by range: {:?}", req) + } InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), InboundRequest::MetaData(_) => write!(f, "MetaData request"), InboundRequest::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index b304eb546da..523b891a009 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -97,6 +97,10 @@ pub struct RPCRateLimiter { blbrange_rl: Limiter, /// BlobsByRoot rate limiter. blbroot_rl: Limiter, + /// DataColumnssByRoot rate limiter. + dcbroot_rl: Limiter, + /// DataColumnsByRange rate limiter. + dcbrange_rl: Limiter, /// LightClientBootstrap rate limiter. lc_bootstrap_rl: Limiter, /// LightClientOptimisticUpdate rate limiter. @@ -133,6 +137,10 @@ pub struct RPCRateLimiterBuilder { blbrange_quota: Option, /// Quota for the BlobsByRoot protocol. blbroot_quota: Option, + /// Quota for the DataColumnsByRoot protocol. + dcbroot_quota: Option, + /// Quota for the DataColumnsByRange protocol. + dcbrange_quota: Option, /// Quota for the LightClientBootstrap protocol. lcbootstrap_quota: Option, /// Quota for the LightClientOptimisticUpdate protocol. 
@@ -154,6 +162,8 @@ impl RPCRateLimiterBuilder { Protocol::BlocksByRoot => self.bbroots_quota = q, Protocol::BlobsByRange => self.blbrange_quota = q, Protocol::BlobsByRoot => self.blbroot_quota = q, + Protocol::DataColumnsByRoot => self.dcbroot_quota = q, + Protocol::DataColumnsByRange => self.dcbrange_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, @@ -186,11 +196,18 @@ impl RPCRateLimiterBuilder { let blbrange_quota = self .blbrange_quota .ok_or("BlobsByRange quota not specified")?; - let blbroots_quota = self .blbroot_quota .ok_or("BlobsByRoot quota not specified")?; + let dcbroot_quota = self + .dcbroot_quota + .ok_or("DataColumnsByRoot quota not specified")?; + + let dcbrange_quota = self + .dcbrange_quota + .ok_or("DataColumnsByRange quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -200,6 +217,8 @@ impl RPCRateLimiterBuilder { let bbrange_rl = Limiter::from_quota(bbrange_quota)?; let blbrange_rl = Limiter::from_quota(blbrange_quota)?; let blbroot_rl = Limiter::from_quota(blbroots_quota)?; + let dcbroot_rl = Limiter::from_quota(dcbroot_quota)?; + let dcbrange_rl = Limiter::from_quota(dcbrange_quota)?; let lc_bootstrap_rl = Limiter::from_quota(lc_bootstrap_quota)?; let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; @@ -218,6 +237,8 @@ impl RPCRateLimiterBuilder { bbrange_rl, blbrange_rl, blbroot_rl, + dcbroot_rl, + dcbrange_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -262,6 +283,8 @@ impl RPCRateLimiter { blocks_by_root_quota, blobs_by_range_quota, blobs_by_root_quota, + data_columns_by_root_quota, + data_columns_by_range_quota, 
light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, @@ -276,6 +299,8 @@ impl RPCRateLimiter { .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) .set_quota(Protocol::BlobsByRange, blobs_by_range_quota) .set_quota(Protocol::BlobsByRoot, blobs_by_root_quota) + .set_quota(Protocol::DataColumnsByRoot, data_columns_by_root_quota) + .set_quota(Protocol::DataColumnsByRange, data_columns_by_range_quota) .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) .set_quota( Protocol::LightClientOptimisticUpdate, @@ -312,6 +337,8 @@ impl RPCRateLimiter { Protocol::BlocksByRoot => &mut self.bbroots_rl, Protocol::BlobsByRange => &mut self.blbrange_rl, Protocol::BlobsByRoot => &mut self.blbroot_rl, + Protocol::DataColumnsByRoot => &mut self.dcbroot_rl, + Protocol::DataColumnsByRange => &mut self.dcbrange_rl, Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, @@ -329,6 +356,8 @@ impl RPCRateLimiter { self.bbroots_rl.prune(time_since_start); self.blbrange_rl.prune(time_since_start); self.blbroot_rl.prune(time_since_start); + self.dcbrange_rl.prune(time_since_start); + self.dcbroot_rl.prune(time_since_start); } } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 376ac34dee7..30400db3b66 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -2,11 +2,13 @@ use std::sync::Arc; use libp2p::swarm::ConnectionId; use types::{ - BlobSidecar, EthSpec, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, SignedBeaconBlock, + BlobSidecar, DataColumnSidecar, EthSpec, Hash256, LightClientBootstrap, + LightClientFinalityUpdate, LightClientOptimisticUpdate, SignedBeaconBlock, }; -use 
crate::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; +use crate::rpc::methods::{ + BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, +}; use crate::rpc::{ methods::{ BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, @@ -27,6 +29,11 @@ pub struct SingleLookupReqId { pub req_id: Id, } +/// Request ID for data_columns_by_root requests. Block lookups do not issue this request directly. +/// Wrapping this particular req_id ensures these requests are not mixed with custody req_ids. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct DataColumnsByRootRequestId(pub Id); + /// Id of rpc requests sent by sync to the network. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum SyncRequestId { @@ -34,10 +41,44 @@ SingleBlock { id: SingleLookupReqId }, /// Request searching for a set of blobs given a hash. SingleBlob { id: SingleLookupReqId }, + /// Request searching for a set of data columns given a hash and list of column indices. + DataColumnsByRoot(DataColumnsByRootRequestId, DataColumnsByRootRequester), /// Range request that is composed by both a block range request and a blob range request. RangeBlockAndBlobs { id: Id }, } +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub enum DataColumnsByRootRequester { + Sampling(SamplingId), + Custody(CustodyId), +} + +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct SamplingId { + pub id: SamplingRequester, + pub sampling_request_id: SamplingRequestId, +} + +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub enum SamplingRequester { + ImportedBlock(Hash256), +} + +/// Identifier of sampling requests. 
+#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct SamplingRequestId(pub usize); + +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct CustodyId { + pub requester: CustodyRequester, + pub req_id: Id, +} + +/// Downstream components that perform custody by root requests. +/// Currently, it's only single block lookups, so not using an enum +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct CustodyRequester(pub SingleLookupReqId); + /// Application level requests sent to the network. #[derive(Debug, Clone, Copy)] pub enum AppRequestId { @@ -75,6 +116,10 @@ pub enum Request { LightClientFinalityUpdate, /// A request blobs root request. BlobsByRoot(BlobsByRootRequest), + /// A data columns by root request. + DataColumnsByRoot(DataColumnsByRootRequest), + /// A data columns by range request. + DataColumnsByRange(DataColumnsByRangeRequest), } impl std::convert::From for OutboundRequest { @@ -104,6 +149,8 @@ impl std::convert::From for OutboundRequest { } Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r), + Request::DataColumnsByRoot(r) => OutboundRequest::DataColumnsByRoot(r), + Request::DataColumnsByRange(r) => OutboundRequest::DataColumnsByRange(r), Request::Status(s) => OutboundRequest::Status(s), } } @@ -123,10 +170,14 @@ pub enum Response { BlocksByRange(Option>>), /// A response to a get BLOBS_BY_RANGE request. A None response signals the end of the batch. BlobsByRange(Option>>), + /// A response to a get DATA_COLUMN_SIDECARS_BY_RANGE request. + DataColumnsByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Option>>), /// A response to a get BLOBS_BY_ROOT request. BlobsByRoot(Option>>), /// A response to a LightClientUpdate request. LightClientBootstrap(Arc>), /// A response to a LightClientOptimisticUpdate request. + /// A response to a get DATA_COLUMN_SIDECARS_BY_ROOT request. + DataColumnsByRoot(Option>>), 
@@ -154,6 +205,16 @@ impl std::convert::From> for RPCCodedResponse { Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)), None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange), }, + Response::DataColumnsByRoot(r) => match r { + Some(d) => RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot(d)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRoot), + }, + Response::DataColumnsByRange(r) => match r { + Some(d) => RPCCodedResponse::Success(RPCResponse::DataColumnsByRange(d)), + None => { + RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRange) + } + }, Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), Response::LightClientBootstrap(b) => { RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) @@ -183,3 +244,9 @@ impl slog::Value for RequestId { } } } + +impl std::fmt::Display for DataColumnsByRootRequestId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index c2a2a03fe87..a95912ff060 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -162,15 +162,27 @@ impl Network { &config, &ctx.enr_fork_id, &log, + ctx.chain_spec, )?; // Construct the metadata - let meta_data = utils::load_or_build_metadata(&config.network_dir, &log); + let custody_subnet_count = if ctx.chain_spec.is_peer_das_scheduled() { + if config.subscribe_all_data_column_subnets { + Some(ctx.chain_spec.data_column_sidecar_subnet_count) + } else { + Some(ctx.chain_spec.custody_requirement) + } + } else { + None + }; + let meta_data = + utils::load_or_build_metadata(&config.network_dir, custody_subnet_count, &log); let globals = NetworkGlobals::new( enr, meta_data, trusted_peers, config.disable_peer_scoring, &log, + ctx.chain_spec.clone(), 
); Arc::new(globals) }; @@ -241,6 +253,7 @@ impl Network { let max_topics = ctx.chain_spec.attestation_subnet_count as usize + SYNC_COMMITTEE_SUBNET_COUNT as usize + ctx.chain_spec.blob_sidecar_subnet_count as usize + + ctx.chain_spec.data_column_sidecar_subnet_count as usize + BASE_CORE_TOPICS.len() + ALTAIR_CORE_TOPICS.len() + CAPELLA_CORE_TOPICS.len() @@ -254,10 +267,11 @@ impl Network { ctx.chain_spec.attestation_subnet_count, SYNC_COMMITTEE_SUBNET_COUNT, ctx.chain_spec.blob_sidecar_subnet_count, + ctx.chain_spec.data_column_sidecar_subnet_count, ), // during a fork we subscribe to both the old and new topics max_subscribed_topics: max_topics * 4, - // 162 in theory = (64 attestation + 4 sync committee + 7 core topics + 6 blob topics) * 2 + // 418 in theory = (64 attestation + 4 sync committee + 7 core topics + 6 blob topics + 128 column topics) * 2 max_subscriptions_per_request: max_topics * 2, }; @@ -1126,8 +1140,14 @@ impl Network { /// Sends a METADATA request to a peer. fn send_meta_data_request(&mut self, peer_id: PeerId) { - // We always prefer sending V2 requests - let event = OutboundRequest::MetaData(MetadataRequest::new_v2()); + let event = if self.fork_context.spec.is_peer_das_scheduled() { + // Nodes with higher custody will probably start advertising it + // before peerdas is activated + OutboundRequest::MetaData(MetadataRequest::new_v3()) + } else { + // We always prefer sending V2 requests otherwise + OutboundRequest::MetaData(MetadataRequest::new_v2()) + }; self.eth2_rpc_mut() .send_request(peer_id, RequestId::Internal, event); } @@ -1135,15 +1155,12 @@ impl Network { /// Sends a METADATA response to a peer. 
fn send_meta_data_response( &mut self, - req: MetadataRequest, + _req: MetadataRequest, id: PeerRequestId, peer_id: PeerId, ) { let metadata = self.network_globals.local_metadata.read().clone(); - let metadata = match req { - MetadataRequest::V1(_) => metadata.metadata_v1(), - MetadataRequest::V2(_) => metadata, - }; + // The encoder is responsible for sending the negotiated version of the metadata let event = RPCCodedResponse::Success(RPCResponse::MetaData(metadata)); self.eth2_rpc_mut().send_response(peer_id, id, event); } @@ -1203,6 +1220,12 @@ impl Network { Request::BlobsByRoot { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]) } + Request::DataColumnsByRoot { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_root"]) + } + Request::DataColumnsByRange { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_range"]) + } } NetworkEvent::RequestReceived { peer_id, @@ -1522,6 +1545,22 @@ impl Network { self.build_request(peer_request_id, peer_id, Request::BlobsByRoot(req)); Some(event) } + InboundRequest::DataColumnsByRoot(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::DataColumnsByRoot(req), + ); + Some(event) + } + InboundRequest::DataColumnsByRange(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::DataColumnsByRange(req), + ); + Some(event) + } InboundRequest::LightClientBootstrap(req) => { let event = self.build_request( peer_request_id, @@ -1579,6 +1618,12 @@ impl Network { RPCResponse::BlobsByRoot(resp) => { self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp))) } + RPCResponse::DataColumnsByRoot(resp) => { + self.build_response(id, peer_id, Response::DataColumnsByRoot(Some(resp))) + } + RPCResponse::DataColumnsByRange(resp) => { + self.build_response(id, peer_id, Response::DataColumnsByRange(Some(resp))) + } // Should never be reached 
RPCResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) @@ -1601,6 +1646,8 @@ impl Network { ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), ResponseTermination::BlobsByRange => Response::BlobsByRange(None), ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), + ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), + ResponseTermination::DataColumnsByRange => Response::DataColumnsByRange(None), }; self.build_response(id, peer_id, response) } @@ -1614,7 +1661,11 @@ impl Network { /// Handle an identify event. fn inject_identify_event(&mut self, event: identify::Event) -> Option> { match event { - identify::Event::Received { peer_id, mut info } => { + identify::Event::Received { + peer_id, + mut info, + connection_id: _, + } => { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { debug!( self.log, diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 80187efc103..8b6a84ae0cb 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -1,4 +1,5 @@ use crate::multiaddr::Protocol; +use crate::rpc::methods::MetaDataV3; use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; use crate::types::{ error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind, @@ -12,14 +13,15 @@ use libp2p::{core, noise, yamux, PeerId, Transport}; use prometheus_client::registry::Registry; use slog::{debug, warn}; use ssz::Decode; -use ssz::Encode; use std::collections::HashSet; use std::fs::File; use std::io::prelude::*; use std::path::Path; use std::sync::Arc; use std::time::Duration; -use types::{ChainSpec, EnrForkId, EthSpec, ForkContext, SubnetId, SyncSubnetId}; +use types::{ + ChainSpec, DataColumnSubnetId, EnrForkId, EthSpec, ForkContext, SubnetId, SyncSubnetId, +}; pub const NETWORK_KEY_FILENAME: &str = 
"key"; /// The maximum simultaneous libp2p connections per peer. @@ -167,6 +169,7 @@ pub fn strip_peer_id(addr: &mut Multiaddr) { /// Load metadata from persisted file. Return default metadata if loading fails. pub fn load_or_build_metadata( network_dir: &std::path::Path, + custody_subnet_count: Option, log: &slog::Logger, ) -> MetaData { // We load a V2 metadata version by default (regardless of current fork) @@ -217,7 +220,16 @@ pub fn load_or_build_metadata( }; // Wrap the MetaData - let meta_data = MetaData::V2(meta_data); + let meta_data = if let Some(custody_count) = custody_subnet_count { + MetaData::V3(MetaDataV3 { + attnets: meta_data.attnets, + seq_number: meta_data.seq_number, + syncnets: meta_data.syncnets, + custody_subnet_count: custody_count, + }) + } else { + MetaData::V2(meta_data) + }; debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number()); save_metadata_to_disk(network_dir, meta_data.clone(), log); @@ -231,6 +243,7 @@ pub(crate) fn create_whitelist_filter( attestation_subnet_count: u64, sync_committee_subnet_count: u64, blob_sidecar_subnet_count: u64, + data_column_sidecar_subnet_count: u64, ) -> gossipsub::WhitelistSubscriptionFilter { let mut possible_hashes = HashSet::new(); for fork_digest in possible_fork_digests { @@ -259,6 +272,9 @@ pub(crate) fn create_whitelist_filter( for id in 0..blob_sidecar_subnet_count { add(BlobSidecar(id)); } + for id in 0..data_column_sidecar_subnet_count { + add(DataColumnSidecar(DataColumnSubnetId::new(id))); + } } gossipsub::WhitelistSubscriptionFilter(possible_hashes) } @@ -270,10 +286,11 @@ pub(crate) fn save_metadata_to_disk( log: &slog::Logger, ) { let _ = std::fs::create_dir_all(dir); - let metadata_bytes = match metadata { - MetaData::V1(md) => md.as_ssz_bytes(), - MetaData::V2(md) => md.as_ssz_bytes(), - }; + // We always store the metadata v2 to disk because + // custody_subnet_count parameter doesn't need to be persisted across runs. 
+ // custody_subnet_count is what the user sets it for the current run. + // This is to prevent ugly branching logic when reading the metadata from disk. + let metadata_bytes = metadata.metadata_v2().as_ssz_bytes(); match File::create(dir.join(METADATA_FILENAME)).and_then(|mut f| f.write_all(&metadata_bytes)) { Ok(_) => { debug!(log, "Metadata written to disk"); diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index f9ed2c9f740..c76e0a18577 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -1,13 +1,13 @@ //! A collection of variables that are accessible outside of the network thread itself. use crate::peer_manager::peerdb::PeerDB; -use crate::rpc::{MetaData, MetaDataV2}; +use crate::rpc::{MetaData, MetaDataV3}; use crate::types::{BackFillState, SyncState}; -use crate::Client; use crate::EnrExt; +use crate::{Client, Eth2Enr}; use crate::{Enr, GossipTopic, Multiaddr, PeerId}; use parking_lot::RwLock; use std::collections::HashSet; -use types::EthSpec; +use types::{ChainSpec, ColumnIndex, DataColumnSubnetId, EthSpec}; pub struct NetworkGlobals { /// The current local ENR. @@ -26,6 +26,7 @@ pub struct NetworkGlobals { pub sync_state: RwLock, /// The current state of the backfill sync. 
pub backfill_state: RwLock, + pub spec: ChainSpec, } impl NetworkGlobals { @@ -35,16 +36,23 @@ trusted_peers: Vec, disable_peer_scoring: bool, log: &slog::Logger, + spec: ChainSpec, ) -> Self { NetworkGlobals { local_enr: RwLock::new(enr.clone()), peer_id: RwLock::new(enr.peer_id()), listen_multiaddrs: RwLock::new(Vec::new()), local_metadata: RwLock::new(local_metadata), - peers: RwLock::new(PeerDB::new(trusted_peers, disable_peer_scoring, log)), + peers: RwLock::new(PeerDB::new( + trusted_peers, + disable_peer_scoring, + log, + spec.clone(), + )), gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::NotRequired), + spec, } } @@ -110,22 +118,87 @@ std::mem::replace(&mut *self.sync_state.write(), new_state) } + /// Compute custody data columns the node is assigned to custody. + pub fn custody_columns(&self) -> Vec { + let enr = self.local_enr(); + let custody_subnet_count = enr.custody_subnet_count::(&self.spec); + DataColumnSubnetId::compute_custody_columns::( + enr.node_id().raw(), + custody_subnet_count, + &self.spec, + ) + .collect() + } + + /// Compute custody data column subnets the node is assigned to custody. + pub fn custody_subnets(&self) -> impl Iterator { + let enr = self.local_enr(); + let custody_subnet_count = enr.custody_subnet_count::(&self.spec); + DataColumnSubnetId::compute_custody_subnets::( + enr.node_id().raw(), + custody_subnet_count, + &self.spec, + ) + } + + /// Returns a connected peer that: + /// 1. is connected + /// 2. is assigned to custody the column based on its `custody_subnet_count` from ENR or metadata + /// 3. has a good score + pub fn custody_peers_for_column(&self, column_index: ColumnIndex) -> Vec { + self.peers + .read() + .good_custody_subnet_peer(DataColumnSubnetId::from_column_index::( + column_index as usize, + &self.spec, + )) + .cloned() + .collect::>() + } + /// TESTING ONLY. 
Build a dummy NetworkGlobals instance. - pub fn new_test_globals(trusted_peers: Vec, log: &slog::Logger) -> NetworkGlobals { + pub fn new_test_globals( + trusted_peers: Vec, + log: &slog::Logger, + spec: ChainSpec, + ) -> NetworkGlobals { use crate::CombinedKeyExt; let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair); let enr = discv5::enr::Enr::builder().build(&enr_key).unwrap(); NetworkGlobals::new( enr, - MetaData::V2(MetaDataV2 { + MetaData::V3(MetaDataV3 { seq_number: 0, attnets: Default::default(), syncnets: Default::default(), + custody_subnet_count: spec.data_column_sidecar_subnet_count, }), trusted_peers, false, log, + spec, ) } } + +#[cfg(test)] +mod test { + use super::*; + use types::{EthSpec, MainnetEthSpec as E}; + + #[test] + fn test_custody_count_default() { + let spec = E::default_spec(); + let log = logging::test_logger(); + let default_custody_requirement_column_count = spec.number_of_columns as u64 + / spec.data_column_sidecar_subnet_count + * spec.custody_requirement; + let globals = NetworkGlobals::::new_test_globals(vec![], &log, spec.clone()); + let columns = globals.custody_columns(); + assert_eq!( + columns.len(), + default_custody_requirement_column_count as usize + ); + } +} diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 25431226ca6..660d786169f 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -9,7 +9,8 @@ use std::sync::Arc; use std::sync::Weak; use tokio::runtime::Runtime; use types::{ - ChainSpec, EnrForkId, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Slot, + ChainSpec, EnrForkId, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, + MinimalEthSpec, Slot, }; type E = MinimalEthSpec; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs 
b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 12a1c593930..25d249960d2 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -15,8 +15,8 @@ use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BlobSidecar, ChainSpec, - EmptyBlock, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, - SignedBeaconBlock, Slot, + EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, MinimalEthSpec, + Signature, SignedBeaconBlock, Slot, }; type E = MinimalEthSpec; @@ -77,18 +77,18 @@ fn test_tcp_status_rpc() { // Dummy STATUS RPC message let rpc_request = Request::Status(StatusMessage { fork_digest: [0; 4], - finalized_root: Hash256::from_low_u64_be(0), + finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), - head_root: Hash256::from_low_u64_be(0), + head_root: Hash256::zero(), head_slot: Slot::new(1), }); // Dummy STATUS RPC message let rpc_response = Response::Status(StatusMessage { fork_digest: [0; 4], - finalized_root: Hash256::from_low_u64_be(0), + finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), - head_root: Hash256::from_low_u64_be(0), + head_root: Hash256::zero(), head_slot: Slot::new(1), }); @@ -756,12 +756,12 @@ fn test_tcp_blocks_by_root_chunked_rpc() { // BlocksByRoot Request let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( vec![ - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), ], &spec, )); @@ -894,16 +894,16 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { // BlocksByRoot Request let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( vec![ - 
Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), ], &spec, )); diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 511cbc3e3c7..192fdd644c3 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -14,6 +14,7 @@ eth2 = { workspace = true } gossipsub = { workspace = true } [dependencies] +alloy-primitives = { workspace = true } async-channel = { workspace = true } anyhow = { workspace = true } beacon_chain = { workspace = true } diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index bb1e5468705..9e42aa8e924 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -14,6 +14,9 @@ use std::sync::{Arc, LazyLock}; use strum::IntoEnumIterator; use types::EthSpec; +pub const SUCCESS: &str = "SUCCESS"; +pub const FAILURE: &str = "FAILURE"; + pub static BEACON_BLOCK_MESH_PEERS_PER_CLIENT: LazyLock> = LazyLock::new(|| { try_create_int_gauge_vec( @@ -340,6 +343,13 @@ pub static PEERS_PER_SYNC_TYPE: LazyLock> = LazyLock::new(|| &["sync_status"], ) }); +pub static PEERS_PER_COLUMN_SUBNET: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "peers_per_column_subnet", + "Number of connected peers per column subnet", + &["subnet_id"], + ) +}); pub static SYNCING_CHAINS_COUNT: LazyLock> = LazyLock::new(|| { try_create_int_gauge_vec( "sync_range_chains", @@ -481,6 +491,29 @@ pub static BEACON_BLOB_DELAY_GOSSIP: LazyLock> = LazyLock::new( ) }); +pub static 
BEACON_DATA_COLUMN_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_data_column_gossip_propagation_verification_delay_time", + "Duration between when the data column sidecar is received over gossip and when it is verified for propagation.", + // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5] + decimal_buckets(-3,-1) + ) +}); +pub static BEACON_DATA_COLUMN_GOSSIP_SLOT_START_DELAY_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_data_column_gossip_slot_start_delay_time", + "Duration between when the data column sidecar is received over gossip and the start of the slot it belongs to.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) + // NOTE: Previous values, which we may want to switch back to. + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + //decimal_buckets(-1,2) + ) + }); + pub static BEACON_BLOB_DELAY_GOSSIP_VERIFICATION: LazyLock> = LazyLock::new( || { try_create_int_gauge( @@ -520,22 +553,6 @@ pub static BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL: LazyLock> = }, ); -pub static BEACON_DATA_COLUMN_DELAY_GOSSIP: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "beacon_data_column_delay_gossip_last_delay", - "The first time we see this data column as a delay from the start of the slot", - ) -}); - -pub static BEACON_DATA_COLUMN_DELAY_GOSSIP_VERIFICATION: LazyLock> = LazyLock::new( - || { - try_create_int_gauge( - "beacon_data_column_delay_gossip_verification", - "Keeps track of the time delay from the start of the slot to the point we propagate the data column" - ) - }, -); - /* * Light client update reprocessing queue metrics. 
*/ @@ -548,6 +565,31 @@ pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: LazyLock ) }); +/* + * Sampling + */ +pub static SAMPLE_DOWNLOAD_RESULT: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_sampling_sample_download_result_total", + "Total count of individual sample download results", + &["result"], + ) +}); +pub static SAMPLE_VERIFY_RESULT: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_sampling_sample_verify_result_total", + "Total count of individual sample verify results", + &["result"], + ) +}); +pub static SAMPLING_REQUEST_RESULT: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_sampling_request_result_total", + "Total count of sample request results", + &["result"], + ) +}); + pub fn register_finality_update_error(error: &LightClientFinalityUpdateError) { inc_counter_vec(&GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE, &[error.as_ref()]); } @@ -564,6 +606,13 @@ pub fn register_sync_committee_error(error: &SyncCommitteeError) { inc_counter_vec(&GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE, &[error.as_ref()]); } +pub fn from_result(result: &std::result::Result) -> &str { + match result { + Ok(_) => SUCCESS, + Err(_) => FAILURE, + } +} + pub fn update_gossip_metrics( gossipsub: &Gossipsub, network_globals: &Arc>, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 4c5c34bfd83..62f1371c811 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,7 +4,6 @@ use crate::{ service::NetworkMessage, sync::SyncMessage, }; -use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::store::Error; @@ -19,7 +18,13 @@ 
use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, NotifyExecutionLayer, }; -use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; +use beacon_chain::{ + blob_verification::{GossipBlobError, GossipVerifiedBlob}, + data_availability_checker::DataColumnsToPublish, +}; +use lighthouse_network::{ + Client, MessageAcceptance, MessageId, PeerAction, PeerId, PubsubMessage, ReportSource, +}; use operation_pool::ReceivedPreCapella; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; @@ -166,6 +171,26 @@ impl NetworkBeaconProcessor { }) } + pub(crate) fn handle_data_columns_to_publish( + &self, + data_columns_to_publish: DataColumnsToPublish, + ) { + if let Some(data_columns_to_publish) = data_columns_to_publish { + self.send_network_message(NetworkMessage::Publish { + messages: data_columns_to_publish + .iter() + .map(|d| { + let subnet = DataColumnSubnetId::from_column_index::( + d.index as usize, + &self.chain.spec, + ); + PubsubMessage::DataColumnSidecar(Box::new((subnet, d.clone()))) + }) + .collect(), + }); + } + } + /// Send a message on `message_tx` that the `message_id` sent by `peer_id` should be propagated on /// the gossip network. /// @@ -615,9 +640,9 @@ impl NetworkBeaconProcessor { let index = column_sidecar.index; let delay = get_slot_delay_ms(seen_duration, slot, &self.chain.slot_clock); // Log metrics to track delay from other nodes on the network. 
- metrics::set_gauge( - &metrics::BEACON_DATA_COLUMN_DELAY_GOSSIP, - delay.as_millis() as i64, + metrics::observe_duration( + &metrics::BEACON_DATA_COLUMN_GOSSIP_SLOT_START_DELAY_TIME, + delay, ); match self .chain @@ -644,9 +669,9 @@ impl NetworkBeaconProcessor { .ok() .and_then(|now| now.checked_sub(seen_duration)) { - metrics::set_gauge( - &metrics::BEACON_DATA_COLUMN_DELAY_GOSSIP_VERIFICATION, - duration.as_millis() as i64, + metrics::observe_duration( + &metrics::BEACON_DATA_COLUMN_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME, + duration, ); } self.process_gossip_verified_data_column( @@ -755,7 +780,7 @@ impl NetworkBeaconProcessor { metrics::set_gauge(&metrics::BEACON_BLOB_DELAY_GOSSIP, delay.as_millis() as i64); match self .chain - .verify_blob_sidecar_for_gossip(blob_sidecar, blob_index) + .verify_blob_sidecar_for_gossip(blob_sidecar.clone(), blob_index) { Ok(gossip_verified_blob) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL); @@ -800,16 +825,19 @@ impl NetworkBeaconProcessor { } Err(err) => { match err { - GossipBlobError::BlobParentUnknown(blob) => { + GossipBlobError::BlobParentUnknown { parent_root } => { debug!( self.log, "Unknown parent hash for blob"; "action" => "requesting parent", - "block_root" => %blob.block_root(), - "parent_root" => %blob.block_parent_root(), + "block_root" => %root, + "parent_root" => %parent_root, "commitment" => %commitment, ); - self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob)); + self.send_sync_message(SyncMessage::UnknownParentBlob( + peer_id, + blob_sidecar, + )); } GossipBlobError::KzgNotInitialized | GossipBlobError::PubkeyCacheTimeout @@ -991,7 +1019,9 @@ impl NetworkBeaconProcessor { .process_gossip_data_columns(vec![verified_data_column]) .await { - Ok(availability) => { + Ok((availability, data_columns_to_publish)) => { + self.handle_data_columns_to_publish(data_columns_to_publish); + match availability { AvailabilityProcessingStatus::Imported(block_root) => { 
// Note: Reusing block imported metric here @@ -1197,7 +1227,7 @@ impl NetworkBeaconProcessor { ); return None; } - Err(BlockError::ParentUnknown(block)) => { + Err(BlockError::ParentUnknown { .. }) => { debug!( self.log, "Unknown parent for gossip block"; @@ -1304,6 +1334,16 @@ impl NetworkBeaconProcessor { ); return None; } + Err(e @ BlockError::BlobNotRequired(_)) => { + // TODO(das): penalty not implemented yet as other clients may still send us blobs + // during early stage of implementation. + debug!(self.log, "Received blobs for slot after PeerDAS epoch from peer"; + "error" => %e, + "peer_id" => %peer_id, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return None; + } }; metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL); @@ -1414,7 +1454,19 @@ impl NetworkBeaconProcessor { let block = verified_block.block.block_cloned(); let block_root = verified_block.block_root; - // TODO(block source) + // TODO(das) Might be too early to issue a request here. We haven't checked that the block + // actually includes blob transactions and thus has data. A peer could send a block is + // garbage commitments, and make us trigger sampling for a block that does not have data. + if block.num_expected_blobs() > 0 { + // Trigger sampling for block not yet execution valid. At this point column custodials are + // unlikely to have received their columns. Triggering sampling so early is only viable with + // either: + // - Sync delaying sampling until some latter window + // - Re-processing early sampling requests: https://github.com/sigp/lighthouse/pull/5569 + if self.chain.should_sample_slot(block.slot()) { + self.send_sync_message(SyncMessage::SampleBlock(block_root, block.slot())); + } + } let result = self .chain @@ -1467,7 +1519,7 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); } - Err(BlockError::ParentUnknown(_)) => { + Err(BlockError::ParentUnknown { .. }) => { // This should not occur. 
It should be checked by `should_forward_block`. // Do not send sync message UnknownParentBlock to prevent conflicts with the // BlockComponentProcessed message below. If this error ever happens, lookup sync @@ -3082,7 +3134,7 @@ impl NetworkBeaconProcessor { invalid_block_storage: &InvalidBlockStorage, block_root: Hash256, block: &SignedBeaconBlock, - error: &BlockError, + error: &BlockError, log: &Logger, ) { if let InvalidBlockStorage::Enabled(base_dir) = invalid_block_storage { diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index ffb01a99efb..7f551c544c7 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1,4 +1,5 @@ use crate::sync::manager::BlockProcessType; +use crate::sync::SamplingId; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{builder::Witness, eth1_chain::CachingEth1Backend, BeaconChain}; @@ -8,7 +9,9 @@ use beacon_processor::{ DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, }; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, +}; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, @@ -474,6 +477,67 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some custody columns. `process_rpc_custody_columns` reports + /// the result back to sync. 
+ pub fn send_rpc_custody_columns( + self: &Arc, + block_root: Hash256, + custody_columns: DataColumnSidecarList, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Result<(), Error> { + let s = self.clone(); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcCustodyColumn(Box::pin(async move { + s.process_rpc_custody_columns( + block_root, + custody_columns, + seen_timestamp, + process_type, + ) + .await; + })), + }) + } + + /// Create a new `Work` event for some sampling columns, and reports the verification result + /// back to sync. + pub fn send_rpc_validate_data_columns( + self: &Arc, + block_root: Hash256, + data_columns: Vec>>, + seen_timestamp: Duration, + id: SamplingId, + ) -> Result<(), Error> { + let s = self.clone(); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcVerifyDataColumn(Box::pin(async move { + let result = s + .clone() + .validate_rpc_data_columns(block_root, data_columns, seen_timestamp) + .await; + // Sync handles these results + s.send_sync_message(SyncMessage::SampleVerified { id, result }); + })), + }) + } + + /// Create a new `Work` event with a block sampling completed result + pub fn send_sampling_completed( + self: &Arc, + block_root: Hash256, + ) -> Result<(), Error> { + let nbp = self.clone(); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::SamplingResult(Box::pin(async move { + nbp.process_sampling_completed(block_root).await; + })), + }) + } + /// Create a new work event to import `blocks` as a beacon chain segment. pub fn send_chain_segment( self: &Arc, @@ -602,6 +666,40 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process `DataColumnsByRootRequest`s from the RPC network. 
+ pub fn send_data_columns_by_roots_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: DataColumnsByRootRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.handle_data_columns_by_root_request(peer_id, request_id, request); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::DataColumnsByRootsRequest(Box::new(process_fn)), + }) + } + + /// Create a new work event to process `DataColumnsByRange`s from the RPC network. + pub fn send_data_columns_by_range_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: DataColumnsByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.handle_data_columns_by_range_request(peer_id, request_id, request); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::DataColumnsByRangeRequest(Box::new(process_fn)), + }) + } + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. 
pub fn send_light_client_bootstrap_request( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 2a0c7ea089b..dde6f2e3130 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -4,7 +4,9 @@ use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use itertools::process_results; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, +}; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error, warn}; @@ -13,7 +15,7 @@ use std::collections::{hash_map::Entry, HashMap}; use std::sync::Arc; use tokio_stream::StreamExt; use types::blob_sidecar::BlobIdentifier; -use types::{Epoch, EthSpec, ForkName, Hash256, Slot}; +use types::{Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, Slot}; impl NetworkBeaconProcessor { /* Auxiliary functions */ @@ -314,6 +316,70 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle a `DataColumnsByRoot` request from the peer. + pub fn handle_data_columns_by_root_request( + self: Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: DataColumnsByRootRequest, + ) { + self.terminate_response_stream( + peer_id, + request_id, + self.handle_data_columns_by_root_request_inner(peer_id, request_id, request), + Response::DataColumnsByRoot, + ); + } + + /// Handle a `DataColumnsByRoot` request from the peer. 
+ pub fn handle_data_columns_by_root_request_inner( + &self, + peer_id: PeerId, + request_id: PeerRequestId, + request: DataColumnsByRootRequest, + ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + let mut send_data_column_count = 0; + + for data_column_id in request.data_column_ids.as_slice() { + match self.chain.get_data_column_checking_all_caches( + data_column_id.block_root, + data_column_id.index, + ) { + Ok(Some(data_column)) => { + send_data_column_count += 1; + self.send_response( + peer_id, + Response::DataColumnsByRoot(Some(data_column)), + request_id, + ); + } + Ok(None) => {} // no-op + Err(e) => { + // TODO(das): lower log level when feature is stabilized + error!(self.log, "Error getting data column"; + "block_root" => ?data_column_id.block_root, + "peer" => %peer_id, + "error" => ?e + ); + return Err(( + RPCResponseErrorCode::ServerError, + "Error getting data column", + )); + } + } + } + + debug!( + self.log, + "Received DataColumnsByRoot Request"; + "peer" => %peer_id, + "request" => ?request.group_by_ordered_block_root(), + "returned" => send_data_column_count + ); + + Ok(()) + } + /// Handle a `LightClientBootstrap` request from the peer. pub fn handle_light_client_bootstrap( self: &Arc, @@ -815,6 +881,200 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle a `DataColumnsByRange` request from the peer. + pub fn handle_data_columns_by_range_request( + &self, + peer_id: PeerId, + request_id: PeerRequestId, + req: DataColumnsByRangeRequest, + ) { + self.terminate_response_stream( + peer_id, + request_id, + self.handle_data_columns_by_range_request_inner(peer_id, request_id, req), + Response::DataColumnsByRange, + ); + } + + /// Handle a `DataColumnsByRange` request from the peer. 
+ pub fn handle_data_columns_by_range_request_inner( + &self, + peer_id: PeerId, + request_id: PeerRequestId, + req: DataColumnsByRangeRequest, + ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + debug!(self.log, "Received DataColumnsByRange Request"; + "peer_id" => %peer_id, + "count" => req.count, + "start_slot" => req.start_slot, + ); + + // Should not send more than max request data columns + if req.max_requested::() > self.chain.spec.max_request_data_column_sidecars { + return Err(( + RPCResponseErrorCode::InvalidRequest, + "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", + )); + } + + let request_start_slot = Slot::from(req.start_slot); + + let data_availability_boundary_slot = match self.chain.data_availability_boundary() { + Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), + None => { + debug!(self.log, "Deneb fork is disabled"); + return Err(( + RPCResponseErrorCode::InvalidRequest, + "Deneb fork is disabled", + )); + } + }; + + let oldest_data_column_slot = self + .chain + .store + .get_data_column_info() + .oldest_data_column_slot + .unwrap_or(data_availability_boundary_slot); + + if request_start_slot < oldest_data_column_slot { + debug!( + self.log, + "Range request start slot is older than data availability boundary."; + "requested_slot" => request_start_slot, + "oldest_data_column_slot" => oldest_data_column_slot, + "data_availability_boundary" => data_availability_boundary_slot + ); + + return if data_availability_boundary_slot < oldest_data_column_slot { + Err(( + RPCResponseErrorCode::ResourceUnavailable, + "blobs pruned within boundary", + )) + } else { + Err(( + RPCResponseErrorCode::InvalidRequest, + "Req outside availability period", + )) + }; + } + + let forwards_block_root_iter = + match self.chain.forwards_iter_block_roots(request_start_slot) { + Ok(iter) => iter, + Err(BeaconChainError::HistoricalBlockError( + HistoricalBlockError::BlockOutOfRange { + slot, + oldest_block_slot, + }, + )) => { + debug!(self.log, 
"Range request failed during backfill"; + "requested_slot" => slot, + "oldest_known_slot" => oldest_block_slot + ); + return Err((RPCResponseErrorCode::ResourceUnavailable, "Backfilling")); + } + Err(e) => { + error!(self.log, "Unable to obtain root iter"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ); + return Err((RPCResponseErrorCode::ServerError, "Database error")); + } + }; + + // Use `WhenSlotSkipped::Prev` to get the most recent block root prior to + // `request_start_slot` in order to check whether the `request_start_slot` is a skip. + let mut last_block_root = req.start_slot.checked_sub(1).and_then(|prev_slot| { + self.chain + .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev) + .ok() + .flatten() + }); + + // Pick out the required blocks, ignoring skip-slots. + let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { + iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) + // map skip slots to None + .map(|(root, _)| { + let result = if Some(root) == last_block_root { + None + } else { + Some(root) + }; + last_block_root = Some(root); + result + }) + .collect::>>() + }); + + let block_roots = match maybe_block_roots { + Ok(block_roots) => block_roots, + Err(e) => { + error!(self.log, "Error during iteration over blocks"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ); + return Err((RPCResponseErrorCode::ServerError, "Database error")); + } + }; + + // remove all skip slots + let block_roots = block_roots.into_iter().flatten(); + let mut data_columns_sent = 0; + + for root in block_roots { + for index in &req.columns { + match self.chain.get_data_column(&root, index) { + Ok(Some(data_column_sidecar)) => { + data_columns_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::DataColumnsByRange(Some( + data_column_sidecar.clone(), + )), + id: request_id, + }); + } + Ok(None) => {} // no-op + Err(e) => { + error!( + 
self.log, + "Error fetching data columns block root"; + "request" => ?req, + "peer" => %peer_id, + "block_root" => ?root, + "error" => ?e + ); + return Err(( + RPCResponseErrorCode::ServerError, + "No data columns and failed fetching corresponding block", + )); + } + } + } + } + + let current_slot = self + .chain + .slot() + .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); + + debug!( + self.log, + "DataColumnsByRange Response processed"; + "peer" => %peer_id, + "start_slot" => req.start_slot, + "current_slot" => current_slot, + "requested" => req.count, + "returned" => data_columns_sent + ); + + Ok(()) + } + /// Helper function to ensure single item protocol always end with either a single chunk or an /// error fn terminate_response_single_item Response>( diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 68bd6745144..c21054dab50 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -8,6 +8,7 @@ use crate::sync::{ use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::data_availability_checker::MaybeAvailableBlock; +use beacon_chain::data_column_verification::verify_kzg_for_data_column_list; use beacon_chain::{ validator_monitor::get_slot_delay_ms, AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, @@ -24,8 +25,7 @@ use store::KzgCommitment; use tokio::sync::mpsc; use types::beacon_block_body::format_kzg_commitments; use types::blob_sidecar::FixedBlobSidecarList; -use types::BlockImportSource; -use types::{Epoch, Hash256}; +use types::{BlockImportSource, DataColumnSidecar, DataColumnSidecarList, Epoch, Hash256}; /// Id associated to a batch processing request, either a sync 
batch or a parent lookup. #[derive(Clone, Debug, PartialEq)] @@ -139,6 +139,7 @@ impl NetworkBeaconProcessor { }; let slot = block.slot(); + let block_has_data = block.as_block().num_expected_blobs() > 0; let parent_root = block.message().parent_root(); let commitments_formatted = block.as_block().commitments_formatted(); @@ -186,6 +187,18 @@ impl NetworkBeaconProcessor { self.chain.recompute_head_at_current_slot().await; } + + // RPC block imported or execution validated. If the block was already imported by gossip we + // receive Err(BlockError::AlreadyKnown). + if result.is_ok() && + // Block has at least one blob, so it produced columns + block_has_data && + // Block slot is within the DA boundary (should always be the case) and PeerDAS is activated + self.chain.should_sample_slot(slot) + { + self.send_sync_message(SyncMessage::SampleBlock(block_root, slot)); + } + // Sync handles these results self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, @@ -307,6 +320,83 @@ impl NetworkBeaconProcessor { }); } + pub async fn process_rpc_custody_columns( + self: Arc>, + block_root: Hash256, + custody_columns: DataColumnSidecarList, + _seen_timestamp: Duration, + process_type: BlockProcessType, + ) { + let result = self + .chain + .process_rpc_custody_columns(custody_columns) + .await; + + match &result { + Ok((availability, data_columns_to_publish)) => { + self.handle_data_columns_to_publish(data_columns_to_publish.clone()); + + match availability { + AvailabilityProcessingStatus::Imported(hash) => { + debug!( + self.log, + "Block components retrieved"; + "result" => "imported block and custody columns", + "block_hash" => %hash, + ); + self.chain.recompute_head_at_current_slot().await; + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + self.log, + "Missing components over rpc"; + "block_hash" => %block_root, + ); + } + } + } + Err(BlockError::BlockIsAlreadyKnown(_)) => { + debug!( + self.log, + "Custody columns have 
already been imported"; + "block_hash" => %block_root, + ); + } + Err(e) => { + warn!( + self.log, + "Error when importing rpc custody columns"; + "error" => ?e, + "block_hash" => %block_root, + ); + } + } + + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type, + result: result.map(|(r, _)| r).into(), + }); + } + + /// Validate a list of data columns received from RPC requests + pub async fn validate_rpc_data_columns( + self: Arc>, + _block_root: Hash256, + data_columns: Vec>>, + _seen_timestamp: Duration, + ) -> Result<(), String> { + let kzg = self.chain.kzg.as_ref().ok_or("Kzg not initialized")?; + verify_kzg_for_data_column_list(data_columns.iter(), kzg).map_err(|err| format!("{err:?}")) + } + + /// Process a sampling completed event, inserting it into fork-choice + pub async fn process_sampling_completed( + self: Arc>, + block_root: Hash256, + ) { + self.chain.process_sampling_completed(block_root).await; + } + /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync /// thread if more blocks are needed to process it. 
pub async fn process_chain_segment( @@ -367,6 +457,10 @@ impl NetworkBeaconProcessor { .iter() .map(|wrapped| wrapped.n_blobs()) .sum::(); + let n_data_columns = downloaded_blocks + .iter() + .map(|wrapped| wrapped.n_data_columns()) + .sum::(); match self.process_backfill_blocks(downloaded_blocks) { (imported_blocks, Ok(_)) => { @@ -376,6 +470,7 @@ impl NetworkBeaconProcessor { "last_block_slot" => end_slot, "processed_blocks" => sent_blocks, "processed_blobs" => n_blobs, + "processed_data_columns" => n_data_columns, "service"=> "sync"); BatchProcessResult::Success { sent_blocks, @@ -419,10 +514,19 @@ impl NetworkBeaconProcessor { { ChainSegmentResult::Successful { imported_blocks } => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL); - if imported_blocks > 0 { + if !imported_blocks.is_empty() { self.chain.recompute_head_at_current_slot().await; + + for (block_root, block_slot) in &imported_blocks { + if self.chain.should_sample_slot(*block_slot) { + self.send_sync_message(SyncMessage::SampleBlock( + *block_root, + *block_slot, + )); + } + } } - (imported_blocks, Ok(())) + (imported_blocks.len(), Ok(())) } ChainSegmentResult::Failed { imported_blocks, @@ -430,10 +534,10 @@ impl NetworkBeaconProcessor { } => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL); let r = self.handle_failed_chain_segment(error); - if imported_blocks > 0 { + if !imported_blocks.is_empty() { self.chain.recompute_head_at_current_slot().await; } - (imported_blocks, r) + (imported_blocks.len(), r) } } } @@ -602,15 +706,12 @@ impl NetworkBeaconProcessor { } /// Helper function to handle a `BlockError` from `process_chain_segment` - fn handle_failed_chain_segment( - &self, - error: BlockError, - ) -> Result<(), ChainSegmentFailed> { + fn handle_failed_chain_segment(&self, error: BlockError) -> Result<(), ChainSegmentFailed> { match error { - BlockError::ParentUnknown(block) => { + BlockError::ParentUnknown { parent_root, .. 
} => { // blocks should be sequential and all parents should exist Err(ChainSegmentFailed { - message: format!("Block has an unknown parent: {}", block.parent_root()), + message: format!("Block has an unknown parent: {}", parent_root), // Peers are faulty if they send non-sequential blocks. peer_action: Some(PeerAction::LowToleranceError), }) diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index a9b9f64a79d..40c69a0baa5 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -93,7 +93,7 @@ impl TestRig { spec.shard_committee_period = 2; let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec) + .spec(spec.clone()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() @@ -204,7 +204,14 @@ impl TestRig { }); let enr_key = CombinedKey::generate_secp256k1(); let enr = enr::Enr::builder().build(&enr_key).unwrap(); - let network_globals = Arc::new(NetworkGlobals::new(enr, meta_data, vec![], false, &log)); + let network_globals = Arc::new(NetworkGlobals::new( + enr, + meta_data, + vec![], + false, + &log, + spec, + )); let executor = harness.runtime.task_executor.clone(); diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index 289bf14335e..e1085c4f0c1 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -4,7 +4,7 @@ use store::{DBColumn, Error as StoreError, HotColdDB, ItemStore, StoreItem}; use types::{EthSpec, Hash256}; /// 32-byte key for accessing the `DhtEnrs`. All zero because `DhtEnrs` has its own column. 
-pub const DHT_DB_KEY: Hash256 = Hash256::zero(); +pub const DHT_DB_KEY: Hash256 = Hash256::ZERO; pub fn load_dht, Cold: ItemStore>( store: Arc>, diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index c162d52d026..a5e27f582af 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -27,7 +27,7 @@ use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; -use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; /// Handles messages from the network and routes them to the appropriate service to be handled. pub struct Router { @@ -216,6 +216,14 @@ impl Router { self.network_beacon_processor .send_blobs_by_roots_request(peer_id, request_id, request), ), + Request::DataColumnsByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_data_columns_by_roots_request(peer_id, request_id, request), + ), + Request::DataColumnsByRange(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_data_columns_by_range_request(peer_id, request_id, request), + ), Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor .send_light_client_bootstrap_request(peer_id, request_id, request), @@ -258,6 +266,12 @@ impl Router { Response::BlobsByRoot(blob) => { self.on_blobs_by_root_response(peer_id, request_id, blob); } + Response::DataColumnsByRoot(data_column) => { + self.on_data_columns_by_root_response(peer_id, request_id, data_column); + } + Response::DataColumnsByRange(data_column) => { + self.on_data_columns_by_range_response(peer_id, request_id, data_column); + } // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) @@ -507,11 +521,11 @@ impl Router { ) { 
let request_id = match request_id { AppRequestId::Sync(sync_id) => match sync_id { - SyncRequestId::SingleBlock { .. } | SyncRequestId::SingleBlob { .. } => { - crit!(self.log, "Block lookups do not request BBRange requests"; "peer_id" => %peer_id); + id @ SyncRequestId::RangeBlockAndBlobs { .. } => id, + other => { + crit!(self.log, "BlocksByRange response on incorrect request"; "request" => ?other); return; } - id @ SyncRequestId::RangeBlockAndBlobs { .. } => id, }, AppRequestId::Router => { crit!(self.log, "All BBRange requests belong to sync"; "peer_id" => %peer_id); @@ -570,12 +584,8 @@ impl Router { let request_id = match request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlock { .. } => id, - SyncRequestId::RangeBlockAndBlobs { .. } => { - crit!(self.log, "Batch syncing do not request BBRoot requests"; "peer_id" => %peer_id); - return; - } - SyncRequestId::SingleBlob { .. } => { - crit!(self.log, "Blob response to block by roots request"; "peer_id" => %peer_id); + other => { + crit!(self.log, "BlocksByRoot response on incorrect request"; "request" => ?other); return; } }, @@ -608,12 +618,8 @@ impl Router { let request_id = match request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlob { .. } => id, - SyncRequestId::SingleBlock { .. } => { - crit!(self.log, "Block response to blobs by roots request"; "peer_id" => %peer_id); - return; - } - SyncRequestId::RangeBlockAndBlobs { .. } => { - crit!(self.log, "Batch syncing does not request BBRoot requests"; "peer_id" => %peer_id); + other => { + crit!(self.log, "BlobsByRoot response on incorrect request"; "request" => ?other); return; } }, @@ -636,6 +642,67 @@ impl Router { }); } + /// Handle a `DataColumnsByRoot` response from the peer. 
+ pub fn on_data_columns_by_root_response( + &mut self, + peer_id: PeerId, + request_id: AppRequestId, + data_column: Option>>, + ) { + let request_id = match request_id { + AppRequestId::Sync(sync_id) => match sync_id { + id @ SyncRequestId::DataColumnsByRoot { .. } => id, + other => { + crit!(self.log, "DataColumnsByRoot response on incorrect request"; "request" => ?other); + return; + } + }, + AppRequestId::Router => { + crit!(self.log, "All DataColumnsByRoot requests belong to sync"; "peer_id" => %peer_id); + return; + } + }; + + trace!( + self.log, + "Received DataColumnsByRoot Response"; + "peer" => %peer_id, + ); + self.send_to_sync(SyncMessage::RpcDataColumn { + request_id, + peer_id, + data_column, + seen_timestamp: timestamp_now(), + }); + } + + pub fn on_data_columns_by_range_response( + &mut self, + peer_id: PeerId, + request_id: AppRequestId, + data_column: Option>>, + ) { + trace!( + self.log, + "Received DataColumnsByRange Response"; + "peer" => %peer_id, + ); + + if let AppRequestId::Sync(id) = request_id { + self.send_to_sync(SyncMessage::RpcDataColumn { + peer_id, + request_id: id, + data_column, + seen_timestamp: timestamp_now(), + }); + } else { + crit!( + self.log, + "All data columns by range responses should belong to sync" + ); + } + } + fn handle_beacon_processor_send_result( &mut self, result: Result<(), crate::network_beacon_processor::Error>, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e522285a9e3..5b9a3125ea5 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -16,6 +16,7 @@ use futures::prelude::*; use futures::StreamExt; use lighthouse_network::service::Network; use lighthouse_network::types::GossipKind; +use lighthouse_network::Eth2Enr; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode}, @@ -35,8 +36,8 @@ use task_executor::ShutdownReason; use 
tokio::sync::mpsc; use tokio::time::Sleep; use types::{ - ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, - Unsigned, ValidatorSubscription, + ChainSpec, DataColumnSubnetId, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, + SyncSubnetId, Unsigned, ValidatorSubscription, }; mod tests; @@ -183,6 +184,8 @@ pub struct NetworkService { next_fork_subscriptions: Pin>>, /// A delay that expires when we need to unsubscribe from old fork topics. next_unsubscribe: Pin>>, + /// Subscribe to all the data column subnets. + subscribe_all_data_column_subnets: bool, /// Subscribe to all the subnets once synced. subscribe_all_subnets: bool, /// Shutdown beacon node after sync is complete. @@ -349,6 +352,7 @@ impl NetworkService { next_fork_update, next_fork_subscriptions, next_unsubscribe, + subscribe_all_data_column_subnets: config.subscribe_all_data_column_subnets, subscribe_all_subnets: config.subscribe_all_subnets, shutdown_after_sync: config.shutdown_after_sync, metrics_enabled: config.metrics_enabled, @@ -733,6 +737,15 @@ impl NetworkService { } } + // TODO(das): This is added here for the purpose of testing, *without* having to + // activate Electra. 
This should happen as part of the Electra upgrade and we should + // move the subscription logic once it's ready to rebase PeerDAS on Electra, or if + // we decide to activate via the soft fork route: + // https://github.com/sigp/lighthouse/pull/5899 + if self.fork_context.spec.is_peer_das_scheduled() { + self.subscribe_to_peer_das_topics(&mut subscribed_topics); + } + // If we are to subscribe to all subnets we do it here if self.subscribe_all_subnets { for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { @@ -779,6 +792,45 @@ impl NetworkService { } } + fn subscribe_to_peer_das_topics(&mut self, subscribed_topics: &mut Vec) { + if self.subscribe_all_data_column_subnets { + for column_subnet in 0..self.fork_context.spec.data_column_sidecar_subnet_count { + for fork_digest in self.required_gossip_fork_digests() { + let gossip_kind = + Subnet::DataColumn(DataColumnSubnetId::new(column_subnet)).into(); + let topic = + GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); + if self.libp2p.subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + } + } + } + } else { + for column_subnet in DataColumnSubnetId::compute_custody_subnets::( + self.network_globals.local_enr().node_id().raw(), + self.network_globals + .local_enr() + .custody_subnet_count::<::EthSpec>( + &self.fork_context.spec, + ), + &self.fork_context.spec, + ) { + for fork_digest in self.required_gossip_fork_digests() { + let gossip_kind = Subnet::DataColumn(column_subnet).into(); + let topic = + GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); + if self.libp2p.subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + } + } + } + } + } + /// Handle a message sent to the network service. 
async fn on_validator_subscription_msg(&mut self, msg: ValidatorSubscriptionMessage) { match msg { diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index b5731876968..fec5f3f83f7 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -176,7 +176,7 @@ mod tests { // Make sure the service is subscribed to the topics. let (old_topic1, old_topic2) = { let mut subnets = SubnetId::compute_subnets_for_epoch::( - network_globals.local_enr().node_id().raw().into(), + network_globals.local_enr().node_id().raw(), beacon_chain.epoch().unwrap(), &spec, ) diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index 865f8ee933f..1210926d34f 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,5 +1,5 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; -use types::{EthSpec, Hash256}; +use types::{EthSpec, FixedBytesExtended, Hash256}; use lighthouse_network::rpc::StatusMessage; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. 
diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index 830c43cbb18..432a2b7fb7c 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -315,7 +315,7 @@ impl AttestationService { })?; let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::( - self.node_id.raw().into(), + self.node_id.raw(), current_epoch, &self.beacon_chain.spec, ) diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index dfb05da19bd..946d25237bf 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -372,7 +372,9 @@ impl BackFillSync { // A batch could be retried without the peer failing the request (disconnecting/ // sending an error /timeout) if the peer is removed from the chain for other // reasons. Check that this block belongs to the expected peer - if !batch.is_expecting_block(peer_id, &request_id) { + // TODO(das): removed peer_id matching as the node may request a different peer for data + // columns. + if !batch.is_expecting_block(&request_id) { return Ok(()); } debug!(self.log, "Batch failed"; "batch_epoch" => batch_id, "error" => "rpc_error"); @@ -420,7 +422,9 @@ impl BackFillSync { // sending an error /timeout) if the peer is removed from the chain for other // reasons. Check that this block belongs to the expected peer, and that the // request_id matches - if !batch.is_expecting_block(peer_id, &request_id) { + // TODO(das): removed peer_id matching as the node may request a different peer for data + // columns. 
+ if !batch.is_expecting_block(&request_id) { return Ok(ProcessResult::Successful); } batch @@ -958,7 +962,7 @@ impl BackFillSync { ) -> Result<(), BackFillError> { if let Some(batch) = self.batches.get_mut(&batch_id) { let (request, is_blob_batch) = batch.to_blocks_by_range_request(); - match network.blocks_and_blobs_by_range_request( + match network.block_components_by_range_request( peer, is_blob_batch, request, diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index e94e9589c0a..c7c043f53f8 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -1,14 +1,17 @@ use crate::sync::block_lookups::single_block_lookup::{ LookupRequestError, SingleBlockLookup, SingleLookupRequestState, }; -use crate::sync::block_lookups::{BlobRequestState, BlockRequestState, PeerId}; +use crate::sync::block_lookups::{ + BlobRequestState, BlockRequestState, CustodyRequestState, PeerId, +}; +use crate::sync::manager::BlockProcessType; use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use lighthouse_network::service::api_types::Id; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; -use types::SignedBeaconBlock; +use types::{DataColumnSidecarList, SignedBeaconBlock}; use super::single_block_lookup::DownloadResult; use super::SingleLookupId; @@ -17,6 +20,7 @@ use super::SingleLookupId; pub enum ResponseType { Block, Blob, + CustodyColumn, } /// This trait unifies common single block lookup functionality across blocks and blobs. 
This @@ -38,7 +42,7 @@ pub trait RequestState { &self, id: Id, peer_id: PeerId, - downloaded_block_expected_blobs: Option, + downloaded_block: Option>>, cx: &mut SyncNetworkContext, ) -> Result; @@ -73,7 +77,7 @@ impl RequestState for BlockRequestState { &self, id: SingleLookupId, peer_id: PeerId, - _: Option, + _: Option>>, cx: &mut SyncNetworkContext, ) -> Result { cx.block_lookup_request(id, peer_id, self.requested_block_root) @@ -89,7 +93,7 @@ impl RequestState for BlockRequestState { value, block_root, seen_timestamp, - peer_id: _, + .. } = download_result; cx.send_block_for_processing( id, @@ -121,16 +125,11 @@ impl RequestState for BlobRequestState { &self, id: Id, peer_id: PeerId, - downloaded_block_expected_blobs: Option, + downloaded_block: Option>>, cx: &mut SyncNetworkContext, ) -> Result { - cx.blob_lookup_request( - id, - peer_id, - self.block_root, - downloaded_block_expected_blobs, - ) - .map_err(LookupRequestError::SendFailedNetwork) + cx.blob_lookup_request(id, peer_id, self.block_root, downloaded_block) + .map_err(LookupRequestError::SendFailedNetwork) } fn send_for_processing( @@ -142,7 +141,7 @@ impl RequestState for BlobRequestState { value, block_root, seen_timestamp, - peer_id: _, + .. 
} = download_result; cx.send_blobs_for_processing(id, block_root, value, seen_timestamp) .map_err(LookupRequestError::SendFailedProcessor) @@ -161,3 +160,53 @@ impl RequestState for BlobRequestState { &mut self.state } } + +impl RequestState for CustodyRequestState { + type VerifiedResponseType = DataColumnSidecarList; + + fn make_request( + &self, + id: Id, + // TODO(das): consider selecting peers that have custody but are in this set + _peer_id: PeerId, + downloaded_block: Option>>, + cx: &mut SyncNetworkContext, + ) -> Result { + cx.custody_lookup_request(id, self.block_root, downloaded_block) + .map_err(LookupRequestError::SendFailedNetwork) + } + + fn send_for_processing( + id: Id, + download_result: DownloadResult, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + let DownloadResult { + value, + block_root, + seen_timestamp, + .. + } = download_result; + cx.send_custody_columns_for_processing( + id, + block_root, + value, + seen_timestamp, + BlockProcessType::SingleCustodyColumn(id), + ) + .map_err(LookupRequestError::SendFailedProcessor) + } + + fn response_type() -> ResponseType { + ResponseType::CustodyColumn + } + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { + &mut request.custody_request_state + } + fn get_state(&self) -> &SingleLookupRequestState { + &self.state + } + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + &mut self.state + } +} diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 3b93b8072c3..e31adb783c9 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -24,7 +24,7 @@ use self::parent_chain::{compute_parent_chains, NodeChain}; pub use self::single_block_lookup::DownloadResult; use self::single_block_lookup::{LookupRequestError, LookupResult, SingleBlockLookup}; use super::manager::{BlockProcessType, BlockProcessingResult, SLOT_IMPORT_TOLERANCE}; 
-use super::network_context::{RpcResponseResult, SyncNetworkContext}; +use super::network_context::{PeerGroup, RpcResponseError, SyncNetworkContext}; use crate::metrics; use crate::sync::block_lookups::common::ResponseType; use crate::sync::block_lookups::parent_chain::find_oldest_fork_ancestor; @@ -36,13 +36,13 @@ use fnv::FnvHashMap; use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; -pub use single_block_lookup::{BlobRequestState, BlockRequestState}; +pub use single_block_lookup::{BlobRequestState, BlockRequestState, CustodyRequestState}; use slog::{debug, error, warn, Logger}; use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; use store::Hash256; -use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; pub mod common; pub mod parent_chain; @@ -76,6 +76,7 @@ const MAX_LOOKUPS: usize = 200; pub enum BlockComponent { Block(DownloadResult>>), Blob(DownloadResult>>), + DataColumn(DownloadResult>>), } impl BlockComponent { @@ -83,12 +84,14 @@ impl BlockComponent { match self { BlockComponent::Block(block) => block.value.parent_root(), BlockComponent::Blob(blob) => blob.value.block_parent_root(), + BlockComponent::DataColumn(column) => column.value.block_parent_root(), } } fn get_type(&self) -> &'static str { match self { BlockComponent::Block(_) => "block", BlockComponent::Blob(_) => "blob", + BlockComponent::DataColumn(_) => "data_column", } } } @@ -379,11 +382,10 @@ impl BlockLookups { pub fn on_download_response>( &mut self, id: SingleLookupReqId, - peer_id: PeerId, - response: RpcResponseResult, + response: Result<(R::VerifiedResponseType, PeerGroup, Duration), RpcResponseError>, cx: &mut SyncNetworkContext, ) { - let result = self.on_download_response_inner::(id, peer_id, response, cx); + let result = self.on_download_response_inner::(id, response, cx); 
self.on_lookup_result(id.lookup_id, result, "download_response", cx); } @@ -391,8 +393,7 @@ impl BlockLookups { pub fn on_download_response_inner>( &mut self, id: SingleLookupReqId, - peer_id: PeerId, - response: RpcResponseResult, + response: Result<(R::VerifiedResponseType, PeerGroup, Duration), RpcResponseError>, cx: &mut SyncNetworkContext, ) -> Result { // Note: no need to downscore peers here, already downscored on network context @@ -409,12 +410,12 @@ impl BlockLookups { let request_state = R::request_state_mut(lookup).get_state_mut(); match response { - Ok((response, seen_timestamp)) => { + Ok((response, peer_group, seen_timestamp)) => { debug!(self.log, "Received lookup download success"; "block_root" => ?block_root, "id" => ?id, - "peer_id" => %peer_id, + "peer_group" => ?peer_group, "response_type" => ?response_type, ); @@ -435,19 +436,20 @@ impl BlockLookups { value: response, block_root, seen_timestamp, - peer_id, + peer_group, }, )?; // continue_request will send for processing as the request state is AwaitingProcessing } Err(e) => { + // TODO(das): is it okay to not log the peer source of request failures? 
Then we + // should log individual requests failures in the SyncNetworkContext debug!(self.log, "Received lookup download failure"; "block_root" => ?block_root, "id" => ?id, - "peer_id" => %peer_id, "response_type" => ?response_type, - "error" => %e, + "error" => ?e, ); request_state.on_download_failure(id.req_id)?; @@ -471,7 +473,7 @@ impl BlockLookups { pub fn on_processing_result( &mut self, process_type: BlockProcessType, - result: BlockProcessingResult, + result: BlockProcessingResult, cx: &mut SyncNetworkContext, ) { let lookup_result = match process_type { @@ -481,17 +483,17 @@ impl BlockLookups { BlockProcessType::SingleBlob { id } => { self.on_processing_result_inner::>(id, result, cx) } + BlockProcessType::SingleCustodyColumn(id) => { + self.on_processing_result_inner::>(id, result, cx) + } }; - let id = match process_type { - BlockProcessType::SingleBlock { id } | BlockProcessType::SingleBlob { id } => id, - }; - self.on_lookup_result(id, lookup_result, "processing_result", cx); + self.on_lookup_result(process_type.id(), lookup_result, "processing_result", cx); } pub fn on_processing_result_inner>( &mut self, lookup_id: SingleLookupId, - result: BlockProcessingResult, + result: BlockProcessingResult, cx: &mut SyncNetworkContext, ) -> Result { let Some(lookup) = self.single_block_lookups.get_mut(&lookup_id) else { @@ -519,15 +521,14 @@ impl BlockLookups { Action::Continue } - BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - _, - _block_root, - )) => { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents { + .. + }) => { // `on_processing_success` is called here to ensure the request state is updated prior to checking // if both components have been processed. request_state.on_processing_success()?; - if lookup.both_components_processed() { + if lookup.all_components_processed() { // We don't request for other block components until being sure that the block has // data. 
If we request blobs / columns to a peer we are sure those must exist. // Therefore if all components are processed and we still receive `MissingComponents` @@ -555,16 +556,14 @@ impl BlockLookups { error!(self.log, "Beacon chain error processing lookup component"; "block_root" => %block_root, "error" => ?e); Action::Drop } - BlockError::ParentUnknown(block) => { + BlockError::ParentUnknown { parent_root, .. } => { // Reverts the status of this request to `AwaitingProcessing` holding the // downloaded data. A future call to `continue_requests` will re-submit it // once there are no pending parent requests. // Note: `BlockError::ParentUnknown` is only returned when processing // blocks, not blobs. request_state.revert_to_awaiting_processing()?; - Action::ParentUnknown { - parent_root: block.parent_root(), - } + Action::ParentUnknown { parent_root } } ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { // These errors indicate that the execution layer is offline @@ -591,16 +590,21 @@ impl BlockLookups { } other => { debug!(self.log, "Invalid lookup component"; "block_root" => ?block_root, "component" => ?R::response_type(), "error" => ?other); - - let peer_id = request_state.on_processing_failure()?; - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - match R::response_type() { - ResponseType::Block => "lookup_block_processing_failure", - ResponseType::Blob => "lookup_blobs_processing_failure", - }, - ); + let peer_group = request_state.on_processing_failure()?; + // TOOD(das): only downscore peer subgroup that provided the invalid proof + for peer in peer_group.all() { + cx.report_peer( + *peer, + PeerAction::MidToleranceError, + match R::response_type() { + ResponseType::Block => "lookup_block_processing_failure", + ResponseType::Blob => "lookup_blobs_processing_failure", + ResponseType::CustodyColumn => { + "lookup_custody_column_processing_failure" + } + }, + ); + } Action::Retry } diff --git 
a/beacon_node/network/src/sync/block_lookups/parent_chain.rs b/beacon_node/network/src/sync/block_lookups/parent_chain.rs index 7f4fe5119f6..009b5e2ff74 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_chain.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_chain.rs @@ -118,7 +118,7 @@ pub(crate) fn find_oldest_fork_ancestor( #[cfg(test)] mod tests { use super::{compute_parent_chains, find_oldest_fork_ancestor, Node}; - use types::Hash256; + use types::{FixedBytesExtended, Hash256}; fn h(n: u64) -> Hash256 { Hash256::from_low_u64_be(n) diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 0466636fb7d..4ae55d5aafe 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -2,7 +2,8 @@ use super::common::ResponseType; use super::{BlockComponent, PeerId, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS}; use crate::sync::block_lookups::common::RequestState; use crate::sync::network_context::{ - LookupRequestResult, ReqId, RpcRequestSendError, SendErrorProcessor, SyncNetworkContext, + LookupRequestResult, PeerGroup, ReqId, RpcRequestSendError, SendErrorProcessor, + SyncNetworkContext, }; use beacon_chain::BeaconChainTypes; use derivative::Derivative; @@ -15,7 +16,7 @@ use std::time::{Duration, Instant}; use store::Hash256; use strum::IntoStaticStr; use types::blob_sidecar::FixedBlobSidecarList; -use types::{EthSpec, SignedBeaconBlock}; +use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -63,6 +64,7 @@ pub struct SingleBlockLookup { pub id: Id, pub block_request_state: BlockRequestState, pub blob_request_state: BlobRequestState, + pub custody_request_state: CustodyRequestState, /// Peers that claim to have imported this set of block 
components #[derivative(Debug(format_with = "fmt_peer_set_as_len"))] peers: HashSet, @@ -82,6 +84,7 @@ impl SingleBlockLookup { id, block_request_state: BlockRequestState::new(requested_block_root), blob_request_state: BlobRequestState::new(requested_block_root), + custody_request_state: CustodyRequestState::new(requested_block_root), peers: HashSet::from_iter(peers.iter().copied()), block_root: requested_block_root, awaiting_parent, @@ -122,8 +125,8 @@ impl SingleBlockLookup { .block_request_state .state .insert_verified_response(block), - BlockComponent::Blob(_) => { - // For now ignore single blobs, as the blob request state assumes all blobs are + BlockComponent::Blob(_) | BlockComponent::DataColumn(_) => { + // For now ignore single blobs and columns, as the blob request state assumes all blobs are // attributed to the same peer = the peer serving the remaining blobs. Ignoring this // block component has a minor effect, causing the node to re-request this blob // once the parent chain is successfully resolved @@ -138,9 +141,10 @@ impl SingleBlockLookup { } /// Returns true if the block has already been downloaded. - pub fn both_components_processed(&self) -> bool { + pub fn all_components_processed(&self) -> bool { self.block_request_state.state.is_processed() && self.blob_request_state.state.is_processed() + && self.custody_request_state.state.is_processed() } /// Returns true if this request is expecting some event to make progress @@ -148,6 +152,7 @@ impl SingleBlockLookup { self.awaiting_parent.is_some() || self.block_request_state.state.is_awaiting_event() || self.blob_request_state.state.is_awaiting_event() + || self.custody_request_state.state.is_awaiting_event() } /// Makes progress on all requests of this lookup. 
Any error is not recoverable and must result @@ -159,13 +164,12 @@ impl SingleBlockLookup { // TODO: Check what's necessary to download, specially for blobs self.continue_request::>(cx)?; self.continue_request::>(cx)?; + self.continue_request::>(cx)?; // If all components of this lookup are already processed, there will be no future events // that can make progress so it must be dropped. Consider the lookup completed. // This case can happen if we receive the components from gossip during a retry. - if self.block_request_state.state.is_processed() - && self.blob_request_state.state.is_processed() - { + if self.all_components_processed() { Ok(LookupResult::Completed) } else { Ok(LookupResult::Pending) @@ -179,11 +183,11 @@ impl SingleBlockLookup { ) -> Result<(), LookupRequestError> { let id = self.id; let awaiting_parent = self.awaiting_parent.is_some(); - let downloaded_block_expected_blobs = self + let downloaded_block = self .block_request_state .state .peek_downloaded_data() - .map(|block| block.num_expected_blobs()); + .cloned(); let block_is_processed = self.block_request_state.state.is_processed(); let request = R::request_state_mut(self); @@ -210,18 +214,18 @@ impl SingleBlockLookup { }; let request = R::request_state_mut(self); - match request.make_request(id, peer_id, downloaded_block_expected_blobs, cx)? { + match request.make_request(id, peer_id, downloaded_block, cx)? { LookupRequestResult::RequestSent(req_id) => { // Lookup sync event safety: If make_request returns `RequestSent`, we are // guaranteed that `BlockLookups::on_download_response` will be called exactly // with this `req_id`. request.get_state_mut().on_download_start(req_id)? } - LookupRequestResult::NoRequestNeeded => { + LookupRequestResult::NoRequestNeeded(reason) => { // Lookup sync event safety: Advances this request to the terminal `Processed` // state. If all requests reach this state, the request is marked as completed // in `Self::continue_requests`. 
- request.get_state_mut().on_completed_request()? + request.get_state_mut().on_completed_request(reason)? } // Sync will receive a future event to make progress on the request, do nothing now LookupRequestResult::Pending(reason) => { @@ -307,6 +311,24 @@ impl BlobRequestState { } } +/// The state of the custody request component of a `SingleBlockLookup`. +#[derive(Derivative)] +#[derivative(Debug)] +pub struct CustodyRequestState { + #[derivative(Debug = "ignore")] + pub block_root: Hash256, + pub state: SingleLookupRequestState>, +} + +impl CustodyRequestState { + pub fn new(block_root: Hash256) -> Self { + Self { + block_root, + state: SingleLookupRequestState::new(), + } + } +} + /// The state of the block request component of a `SingleBlockLookup`. #[derive(Derivative)] #[derivative(Debug)] @@ -325,28 +347,27 @@ impl BlockRequestState { } } -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, Clone)] pub struct DownloadResult { pub value: T, pub block_root: Hash256, pub seen_timestamp: Duration, - pub peer_id: PeerId, + pub peer_group: PeerGroup, } -#[derive(PartialEq, Eq, IntoStaticStr)] +#[derive(IntoStaticStr)] pub enum State { - AwaitingDownload(&'static str), + AwaitingDownload(/* reason */ &'static str), Downloading(ReqId), AwaitingProcess(DownloadResult), /// Request is processing, sent by lookup sync Processing(DownloadResult), /// Request is processed - Processed, + Processed(/* reason */ &'static str), } /// Object representing the state of a single block or blob lookup request. -#[derive(PartialEq, Eq, Derivative)] -#[derivative(Debug)] +#[derive(Debug)] pub struct SingleLookupRequestState { /// State of this request. state: State, @@ -516,13 +537,13 @@ impl SingleLookupRequestState { } /// Registers a failure in processing a block. 
- pub fn on_processing_failure(&mut self) -> Result { + pub fn on_processing_failure(&mut self) -> Result { match &self.state { State::Processing(result) => { - let peer_id = result.peer_id; + let peers_source = result.peer_group.clone(); self.failed_processing = self.failed_processing.saturating_add(1); self.state = State::AwaitingDownload("not started"); - Ok(peer_id) + Ok(peers_source) } other => Err(LookupRequestError::BadState(format!( "Bad state on_processing_failure expected Processing got {other}" @@ -533,7 +554,7 @@ impl SingleLookupRequestState { pub fn on_processing_success(&mut self) -> Result<(), LookupRequestError> { match &self.state { State::Processing(_) => { - self.state = State::Processed; + self.state = State::Processed("processing success"); Ok(()) } other => Err(LookupRequestError::BadState(format!( @@ -543,10 +564,10 @@ impl SingleLookupRequestState { } /// Mark a request as complete without any download or processing - pub fn on_completed_request(&mut self) -> Result<(), LookupRequestError> { + pub fn on_completed_request(&mut self, reason: &'static str) -> Result<(), LookupRequestError> { match &self.state { State::AwaitingDownload { .. } => { - self.state = State::Processed; + self.state = State::Processed(reason); Ok(()) } other => Err(LookupRequestError::BadState(format!( @@ -577,11 +598,11 @@ impl std::fmt::Display for State { impl std::fmt::Debug for State { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::AwaitingDownload(status) => write!(f, "AwaitingDownload({:?})", status), + Self::AwaitingDownload(reason) => write!(f, "AwaitingDownload({})", reason), Self::Downloading(req_id) => write!(f, "Downloading({:?})", req_id), - Self::AwaitingProcess(d) => write!(f, "AwaitingProcess({:?})", d.peer_id), - Self::Processing(d) => write!(f, "Processing({:?})", d.peer_id), - Self::Processed { .. 
} => write!(f, "Processed"), + Self::AwaitingProcess(d) => write!(f, "AwaitingProcess({:?})", d.peer_group), + Self::Processing(d) => write!(f, "Processing({:?})", d.peer_group), + Self::Processed(reason) => write!(f, "Processed({})", reason), } } } diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index ef2822fe563..6d852b2572d 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,7 +1,7 @@ use crate::network_beacon_processor::NetworkBeaconProcessor; - use crate::sync::manager::{BlockProcessType, SyncManager}; -use crate::sync::SyncMessage; +use crate::sync::sampling::SamplingConfig; +use crate::sync::{SamplingId, SyncMessage}; use crate::NetworkMessage; use std::sync::Arc; @@ -9,31 +9,38 @@ use super::*; use crate::sync::block_lookups::common::ResponseType; use beacon_chain::blob_verification::GossipVerifiedBlob; -use beacon_chain::block_verification_types::{BlockImportData, RpcBlock}; +use beacon_chain::block_verification_types::BlockImportData; use beacon_chain::builder::Witness; use beacon_chain::data_availability_checker::Availability; use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::test_utils::{ - build_log, generate_rand_block_and_blobs, BeaconChainHarness, EphemeralHarnessType, NumBlobs, + build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, + BeaconChainHarness, EphemeralHarnessType, NumBlobs, }; +use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{ AvailabilityPendingExecutedBlock, PayloadVerificationOutcome, PayloadVerificationStatus, }; use beacon_processor::WorkEvent; use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; -use lighthouse_network::service::api_types::{AppRequestId, Id, SingleLookupReqId, SyncRequestId}; +use lighthouse_network::service::api_types::{ + AppRequestId, DataColumnsByRootRequester, Id, 
SamplingRequester, SingleLookupReqId, + SyncRequestId, +}; use lighthouse_network::types::SyncState; use lighthouse_network::{NetworkGlobals, Request}; use slog::info; use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; use store::MemoryStore; use tokio::sync::mpsc; +use types::data_column_sidecar::ColumnIndex; use types::test_utils::TestRandom; use types::{ test_utils::{SeedableRng, XorShiftRng}, BlobSidecar, ForkName, MinimalEthSpec as E, SignedBeaconBlock, Slot, }; use types::{BeaconState, BeaconStateBase}; +use types::{DataColumnSidecar, Epoch}; type T = Witness, E, MemoryStore, MemoryStore>; @@ -84,15 +91,32 @@ struct TestRig { const D: Duration = Duration::new(0, 0); const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; +const SAMPLING_REQUIRED_SUCCESSES: usize = 2; + +type DCByRootIds = Vec; +type DCByRootId = (SyncRequestId, Vec); + +struct TestRigConfig { + peer_das_enabled: bool, +} impl TestRig { - fn test_setup() -> Self { + fn test_setup_with_config(config: Option) -> Self { let enable_log = cfg!(feature = "test_logger"); let log = build_log(slog::Level::Trace, enable_log); + // Use `fork_from_env` logic to set correct fork epochs + let mut spec = test_spec::(); + + if let Some(config) = config { + if config.peer_das_enabled { + spec.eip7594_fork_epoch = Some(Epoch::new(0)); + } + } + // Initialise a new beacon chain let harness = BeaconChainHarness::>::builder(E) - .default_spec() + .spec(spec) .logger(log.clone()) .deterministic_keypairs(1) .fresh_ephemeral_store() @@ -106,7 +130,13 @@ impl TestRig { let chain = harness.chain.clone(); let (network_tx, network_rx) = mpsc::unbounded_channel(); - let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); + // TODO(das): make the generation of the ENR use the deterministic rng to have consistent + // column assignments + let globals = Arc::new(NetworkGlobals::new_test_globals( + Vec::new(), + &log, + chain.spec.clone(), + )); let (beacon_processor, 
beacon_processor_rx) = NetworkBeaconProcessor::null_for_testing( globals, chain.clone(), @@ -136,6 +166,9 @@ impl TestRig { network_tx, beacon_processor.into(), sync_recv, + SamplingConfig::Custom { + required_successes: vec![SAMPLING_REQUIRED_SUCCESSES], + }, log.clone(), ), harness, @@ -144,6 +177,10 @@ impl TestRig { } } + fn test_setup() -> Self { + Self::test_setup_with_config(None) + } + fn test_setup_after_deneb() -> Option { let r = Self::test_setup(); if r.after_deneb() { @@ -153,6 +190,17 @@ impl TestRig { } } + fn test_setup_after_peerdas() -> Option { + let r = Self::test_setup_with_config(Some(TestRigConfig { + peer_das_enabled: true, + })); + if r.after_deneb() { + Some(r) + } else { + None + } + } + fn log(&self, msg: &str) { info!(self.log, "TEST_RIG"; "msg" => msg); } @@ -163,11 +211,7 @@ impl TestRig { fn trigger_unknown_parent_block(&mut self, peer_id: PeerId, block: Arc>) { let block_root = block.canonical_root(); - self.send_sync_message(SyncMessage::UnknownParentBlock( - peer_id, - RpcBlock::new_without_blobs(Some(block_root), block), - block_root, - )) + self.send_sync_message(SyncMessage::UnknownParentBlock(peer_id, block, block_root)) } fn trigger_unknown_parent_blob(&mut self, peer_id: PeerId, blob: BlobSidecar) { @@ -180,6 +224,10 @@ impl TestRig { )); } + fn trigger_sample_block(&mut self, block_root: Hash256, block_slot: Slot) { + self.send_sync_message(SyncMessage::SampleBlock(block_root, block_slot)) + } + fn rand_block(&mut self) -> SignedBeaconBlock { self.rand_block_and_blobs(NumBlobs::None).0 } @@ -193,6 +241,18 @@ impl TestRig { generate_rand_block_and_blobs::(fork_name, num_blobs, rng) } + fn rand_block_and_data_columns( + &mut self, + ) -> (SignedBeaconBlock, Vec>>) { + let num_blobs = NumBlobs::Number(1); + generate_rand_block_and_data_columns::( + self.fork_name, + num_blobs, + &mut self.rng, + &self.harness.spec, + ) + } + pub fn rand_block_and_parent( &mut self, ) -> (SignedBeaconBlock, SignedBeaconBlock, Hash256, Hash256) 
{ @@ -233,6 +293,20 @@ impl TestRig { ); } + fn expect_no_active_sampling(&mut self) { + assert_eq!( + self.sync_manager.active_sampling_requests(), + Vec::::new(), + "expected no active sampling" + ); + } + + fn expect_clean_finished_sampling(&mut self) { + self.expect_empty_network(); + self.expect_sampling_result_work(); + self.expect_no_active_sampling(); + } + fn assert_parent_lookups_count(&self, count: usize) { assert_eq!( self.active_parent_lookups_count(), @@ -311,12 +385,26 @@ impl TestRig { } fn new_connected_peer(&mut self) -> PeerId { - let peer_id = PeerId::random(); self.network_globals .peers .write() - .__add_connected_peer_testing_only(&peer_id); - peer_id + .__add_connected_peer_testing_only(false, &self.harness.spec) + } + + fn new_connected_supernode_peer(&mut self) -> PeerId { + self.network_globals + .peers + .write() + .__add_connected_peer_testing_only(true, &self.harness.spec) + } + + fn new_connected_peers_for_peerdas(&mut self) { + // Enough sampling peers with few columns + for _ in 0..100 { + self.new_connected_peer(); + } + // One supernode peer to ensure all columns have at least one peer + self.new_connected_supernode_peer(); } fn parent_chain_processed_success( @@ -348,12 +436,12 @@ impl TestRig { *parent_chain.last().unwrap() } - fn parent_block_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { + fn parent_block_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { let id = self.find_single_lookup_for(self.find_oldest_parent_lookup(chain_hash)); self.single_block_component_processed(id, result); } - fn parent_blob_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { + fn parent_blob_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { let id = self.find_single_lookup_for(self.find_oldest_parent_lookup(chain_hash)); self.single_blob_component_processed(id, result); } @@ -365,7 +453,7 @@ impl TestRig { ); } - fn 
single_block_component_processed(&mut self, id: Id, result: BlockProcessingResult) { + fn single_block_component_processed(&mut self, id: Id, result: BlockProcessingResult) { self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type: BlockProcessType::SingleBlock { id }, result, @@ -380,7 +468,7 @@ impl TestRig { ) } - fn single_blob_component_processed(&mut self, id: Id, result: BlockProcessingResult) { + fn single_blob_component_processed(&mut self, id: Id, result: BlockProcessingResult) { self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type: BlockProcessType::SingleBlob { id }, result, @@ -542,6 +630,182 @@ impl TestRig { }) } + fn return_empty_sampling_requests(&mut self, ids: DCByRootIds) { + for id in ids { + self.log(&format!("return empty data column for {id:?}")); + self.return_empty_sampling_request(id) + } + } + + fn return_empty_sampling_request(&mut self, (request_id, _): DCByRootId) { + let peer_id = PeerId::random(); + // Send stream termination + self.send_sync_message(SyncMessage::RpcDataColumn { + request_id, + peer_id, + data_column: None, + seen_timestamp: timestamp_now(), + }); + } + + fn sampling_requests_failed( + &mut self, + sampling_ids: DCByRootIds, + peer_id: PeerId, + error: RPCError, + ) { + for (request_id, _) in sampling_ids { + self.send_sync_message(SyncMessage::RpcError { + peer_id, + request_id, + error: error.clone(), + }) + } + } + + fn complete_valid_block_request( + &mut self, + id: SingleLookupReqId, + block: Arc>, + missing_components: bool, + ) { + // Complete download + let peer_id = PeerId::random(); + let slot = block.slot(); + let block_root = block.canonical_root(); + self.single_lookup_block_response(id, peer_id, Some(block)); + self.single_lookup_block_response(id, peer_id, None); + // Expect processing and resolve with import + self.expect_block_process(ResponseType::Block); + self.single_block_component_processed( + id.lookup_id, + if missing_components { + 
BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + slot, block_root, + )) + } else { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)) + }, + ) + } + + fn complete_valid_sampling_column_requests( + &mut self, + ids: DCByRootIds, + data_columns: Vec>>, + ) { + for id in ids { + self.log(&format!("return valid data column for {id:?}")); + let indices = &id.1; + let columns_to_send = indices + .iter() + .map(|&i| data_columns[i as usize].clone()) + .collect::>(); + self.complete_valid_sampling_column_request(id, &columns_to_send); + } + } + + fn complete_valid_sampling_column_request( + &mut self, + id: DCByRootId, + data_columns: &[Arc>], + ) { + let first_dc = data_columns.first().unwrap(); + let block_root = first_dc.block_root(); + let sampling_request_id = match id.0 { + SyncRequestId::DataColumnsByRoot( + _, + _requester @ DataColumnsByRootRequester::Sampling(sampling_id), + ) => sampling_id.sampling_request_id, + _ => unreachable!(), + }; + self.complete_data_columns_by_root_request(id, data_columns); + + // Expect work event + // TODO(das): worth it to append sender id to the work event for stricter assertion? 
+ self.expect_rpc_sample_verify_work_event(); + + // Respond with valid result + self.send_sync_message(SyncMessage::SampleVerified { + id: SamplingId { + id: SamplingRequester::ImportedBlock(block_root), + sampling_request_id, + }, + result: Ok(()), + }) + } + + fn complete_valid_custody_request( + &mut self, + ids: DCByRootIds, + data_columns: Vec>>, + missing_components: bool, + ) { + let lookup_id = + if let SyncRequestId::DataColumnsByRoot(_, DataColumnsByRootRequester::Custody(id)) = + ids.first().unwrap().0 + { + id.requester.0.lookup_id + } else { + panic!("not a custody requester") + }; + + let first_column = data_columns.first().cloned().unwrap(); + + for id in ids { + self.log(&format!("return valid data column for {id:?}")); + let indices = &id.1; + let columns_to_send = indices + .iter() + .map(|&i| data_columns[i as usize].clone()) + .collect::>(); + self.complete_data_columns_by_root_request(id, &columns_to_send); + } + + // Expect work event + // TODO(das): worth it to append sender id to the work event for stricter assertion? 
+ self.expect_rpc_custody_column_work_event(); + + // Respond with valid result + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type: BlockProcessType::SingleCustodyColumn(lookup_id), + result: if missing_components { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + first_column.slot(), + first_column.block_root(), + )) + } else { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported( + first_column.block_root(), + )) + }, + }); + } + + fn complete_data_columns_by_root_request( + &mut self, + (request_id, _): DCByRootId, + data_columns: &[Arc>], + ) { + let peer_id = PeerId::random(); + for data_column in data_columns { + // Send chunks + self.send_sync_message(SyncMessage::RpcDataColumn { + request_id, + peer_id, + data_column: Some(data_column.clone()), + seen_timestamp: timestamp_now(), + }); + } + // Send stream termination + self.send_sync_message(SyncMessage::RpcDataColumn { + request_id, + peer_id, + data_column: None, + seen_timestamp: timestamp_now(), + }); + } + /// Return RPCErrors for all active requests of peer fn rpc_error_all_active_requests(&mut self, disconnected_peer_id: PeerId) { self.drain_network_rx(); @@ -710,6 +974,59 @@ impl TestRig { .unwrap_or_else(|e| panic!("Expected blob parent request for {for_block:?}: {e}")) } + /// Retrieves an unknown number of requests for data columns of `block_root`. Because peer ENRs + /// are random, and peer selection is random, the total number of batched requests is unknown. + fn expect_data_columns_by_root_requests( + &mut self, + block_root: Hash256, + count: usize, + ) -> DCByRootIds { + let mut requests: DCByRootIds = vec![]; + loop { + let req = self + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id: _, + request: Request::DataColumnsByRoot(request), + request_id: AppRequestId::Sync(id @ SyncRequestId::DataColumnsByRoot { .. 
}), + } if request + .data_column_ids + .to_vec() + .iter() + .any(|r| r.block_root == block_root) => + { + let indices = request + .data_column_ids + .to_vec() + .iter() + .map(|cid| cid.index) + .collect::>(); + Some((*id, indices)) + } + _ => None, + }) + .unwrap_or_else(|e| { + panic!("Expected more DataColumnsByRoot requests for {block_root:?}: {e}") + }); + requests.push(req); + + // Should never infinite loop because sync does not send requests for 0 columns + if requests.iter().map(|r| r.1.len()).sum::() >= count { + return requests; + } + } + } + + fn expect_only_data_columns_by_root_requests( + &mut self, + for_block: Hash256, + count: usize, + ) -> DCByRootIds { + let ids = self.expect_data_columns_by_root_requests(for_block, count); + self.expect_empty_network(); + ids + } + #[track_caller] fn expect_block_process(&mut self, response_type: ResponseType) { match response_type { @@ -723,9 +1040,47 @@ impl TestRig { (ev.work_type() == beacon_processor::RPC_BLOBS).then_some(()) }) .unwrap_or_else(|e| panic!("Expected blobs work event: {e}")), + ResponseType::CustodyColumn => self + .pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::RPC_CUSTODY_COLUMN).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expected column work event: {e}")), } } + fn expect_rpc_custody_column_work_event(&mut self) { + self.pop_received_processor_event(|ev| { + if ev.work_type() == beacon_processor::RPC_CUSTODY_COLUMN { + Some(()) + } else { + None + } + }) + .unwrap_or_else(|e| panic!("Expected RPC custody column work: {e}")) + } + + fn expect_rpc_sample_verify_work_event(&mut self) { + self.pop_received_processor_event(|ev| { + if ev.work_type() == beacon_processor::RPC_VERIFY_DATA_COLUMNS { + Some(()) + } else { + None + } + }) + .unwrap_or_else(|e| panic!("Expected sample verify work: {e}")) + } + + fn expect_sampling_result_work(&mut self) { + self.pop_received_processor_event(|ev| { + if ev.work_type() == beacon_processor::SAMPLING_RESULT { + Some(()) 
+ } else { + None + } + }) + .unwrap_or_else(|e| panic!("Expected sampling result work: {e}")) + } + fn expect_no_penalty_for(&mut self, peer_id: PeerId) { self.drain_network_rx(); let downscore_events = self @@ -761,7 +1116,11 @@ impl TestRig { fn expect_empty_network(&mut self) { self.drain_network_rx(); if !self.network_rx_queue.is_empty() { - panic!("expected no network events: {:#?}", self.network_rx_queue); + let n = self.network_rx_queue.len(); + panic!( + "expected no network events but got {n} events, displaying first 2: {:#?}", + self.network_rx_queue[..n.min(2)].iter().collect::>() + ); } } @@ -1077,7 +1436,9 @@ fn test_single_block_lookup_becomes_parent_request() { // parent request after processing. rig.single_block_component_processed( id.lookup_id, - BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), + BlockProcessingResult::Err(BlockError::ParentUnknown { + parent_root: block.parent_root(), + }), ); assert_eq!(rig.active_single_lookups_count(), 2); // 2 = current + parent rig.expect_block_parent_request(parent_root); @@ -1298,7 +1659,9 @@ fn test_parent_lookup_too_deep_grow_ancestor() { // the processing result rig.parent_block_processed( chain_hash, - BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), + BlockProcessingResult::Err(BlockError::ParentUnknown { + parent_root: block.parent_root(), + }), ) } @@ -1322,7 +1685,10 @@ fn test_parent_lookup_too_deep_grow_tip() { rig.expect_block_process(ResponseType::Block); rig.single_block_component_processed( id.lookup_id, - BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), + BlockError::ParentUnknown { + parent_root: block.parent_root(), + } + .into(), ); } @@ -1477,7 +1843,9 @@ fn test_same_chain_race_condition() { rig.log(&format!("Block {i} ParentUnknown")); rig.parent_block_processed( chain_hash, - BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), + BlockProcessingResult::Err(BlockError::ParentUnknown 
{ + parent_root: block.parent_root(), + }), ) } } @@ -1586,6 +1954,95 @@ fn blobs_in_da_checker_skip_download() { r.expect_no_active_lookups(); } +#[test] +fn sampling_happy_path() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + r.new_connected_peers_for_peerdas(); + let (block, data_columns) = r.rand_block_and_data_columns(); + let block_root = block.canonical_root(); + r.trigger_sample_block(block_root, block.slot()); + // Retrieve all outgoing sample requests for random column indexes + let sampling_ids = + r.expect_only_data_columns_by_root_requests(block_root, SAMPLING_REQUIRED_SUCCESSES); + // Resolve all of them one by one + r.complete_valid_sampling_column_requests(sampling_ids, data_columns); + r.expect_clean_finished_sampling(); +} + +#[test] +#[ignore] // Ignoring due to flakiness https://github.com/sigp/lighthouse/issues/6319 +fn sampling_with_retries() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + r.new_connected_peers_for_peerdas(); + let (block, data_columns) = r.rand_block_and_data_columns(); + let block_root = block.canonical_root(); + r.trigger_sample_block(block_root, block.slot()); + // Retrieve all outgoing sample requests for random column indexes, and return empty responses + let sampling_ids = + r.expect_only_data_columns_by_root_requests(block_root, SAMPLING_REQUIRED_SUCCESSES); + r.return_empty_sampling_requests(sampling_ids); + // Expect retries for all of them, and resolve them + let sampling_ids = + r.expect_only_data_columns_by_root_requests(block_root, SAMPLING_REQUIRED_SUCCESSES); + r.complete_valid_sampling_column_requests(sampling_ids, data_columns); + r.expect_clean_finished_sampling(); +} + +#[test] +fn sampling_avoid_retrying_same_peer() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + let peer_id_1 = r.new_connected_supernode_peer(); + let peer_id_2 = r.new_connected_supernode_peer(); + let block_root = Hash256::random(); + 
r.trigger_sample_block(block_root, Slot::new(0)); + // Retrieve all outgoing sample requests for random column indexes, and return empty responses + let sampling_ids = + r.expect_only_data_columns_by_root_requests(block_root, SAMPLING_REQUIRED_SUCCESSES); + r.sampling_requests_failed(sampling_ids, peer_id_1, RPCError::Disconnected); + // Should retry the other peer + let sampling_ids = + r.expect_only_data_columns_by_root_requests(block_root, SAMPLING_REQUIRED_SUCCESSES); + r.sampling_requests_failed(sampling_ids, peer_id_2, RPCError::Disconnected); + // Expect no more retries + r.expect_empty_network(); +} + +#[test] +fn custody_lookup_happy_path() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + let spec = E::default_spec(); + r.new_connected_peers_for_peerdas(); + let (block, data_columns) = r.rand_block_and_data_columns(); + let block_root = block.canonical_root(); + let peer_id = r.new_connected_peer(); + r.trigger_unknown_block_from_attestation(block_root, peer_id); + // Should not request blobs + let id = r.expect_block_lookup_request(block.canonical_root()); + r.complete_valid_block_request(id, block.into(), true); + let custody_column_count = spec.custody_requirement * spec.data_columns_per_subnet() as u64; + let custody_ids = + r.expect_only_data_columns_by_root_requests(block_root, custody_column_count as usize); + r.complete_valid_custody_request(custody_ids, data_columns, false); + r.expect_no_active_lookups(); +} + +// TODO(das): Test retries of DataColumnByRoot: +// - Expect request for column_index +// - Respond with bad data +// - Respond with stream terminator +// ^ The stream terminator should be ignored and not close the next retry + +// TODO(das): Test error early a sampling request and it getting drop + then receiving responses +// from pending requests. + mod deneb_only { use super::*; use beacon_chain::{ @@ -1678,7 +2135,7 @@ mod deneb_only { RequestTrigger::GossipUnknownParentBlock { .. 
} => { rig.send_sync_message(SyncMessage::UnknownParentBlock( peer_id, - RpcBlock::new_without_blobs(Some(block_root), block.clone()), + block.clone(), block_root, )); @@ -1960,7 +2417,9 @@ mod deneb_only { .unwrap(); self.rig.parent_block_processed( self.block_root, - BlockProcessingResult::Err(BlockError::ParentUnknown(block)), + BlockProcessingResult::Err(BlockError::ParentUnknown { + parent_root: block.parent_root(), + }), ); assert_eq!(self.rig.active_parent_lookups_count(), 1); self diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index f31f2921ea2..966ce55fabe 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,69 +1,105 @@ -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::{ + block_verification_types::RpcBlock, data_column_verification::CustodyDataColumn, get_block_root, +}; use lighthouse_network::PeerId; use ssz_types::VariableList; -use std::{collections::VecDeque, sync::Arc}; -use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; - -use super::range_sync::ByRangeRequestType; +use std::{ + collections::{HashMap, VecDeque}, + sync::Arc, +}; +use types::{ + BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, +}; #[derive(Debug)] -pub struct BlocksAndBlobsRequestInfo { +pub struct RangeBlockComponentsRequest { /// Blocks we have received awaiting for their corresponding sidecar. - accumulated_blocks: VecDeque>>, + blocks: VecDeque>>, /// Sidecars we have received awaiting for their corresponding block. - accumulated_sidecars: VecDeque>>, + blobs: VecDeque>>, + data_columns: VecDeque>>, /// Whether the individual RPC request for blocks is finished or not. is_blocks_stream_terminated: bool, /// Whether the individual RPC request for sidecars is finished or not. 
is_sidecars_stream_terminated: bool, + custody_columns_streams_terminated: usize, /// Used to determine if this accumulator should wait for a sidecars stream termination - request_type: ByRangeRequestType, - /// The peer the request was made to. - pub(crate) peer_id: PeerId, + expects_blobs: bool, + expects_custody_columns: Option>, + /// Used to determine if the number of data columns stream termination this accumulator should + /// wait for. This may be less than the number of `expects_custody_columns` due to request batching. + num_custody_column_requests: Option, + /// The peers the request was made to. + pub(crate) peer_ids: Vec, } -impl BlocksAndBlobsRequestInfo { - pub fn new(request_type: ByRangeRequestType, peer_id: PeerId) -> Self { +impl RangeBlockComponentsRequest { + pub fn new( + expects_blobs: bool, + expects_custody_columns: Option>, + num_custody_column_requests: Option, + peer_ids: Vec, + ) -> Self { Self { - accumulated_blocks: <_>::default(), - accumulated_sidecars: <_>::default(), - is_blocks_stream_terminated: <_>::default(), - is_sidecars_stream_terminated: <_>::default(), - request_type, - peer_id, + blocks: <_>::default(), + blobs: <_>::default(), + data_columns: <_>::default(), + is_blocks_stream_terminated: false, + is_sidecars_stream_terminated: false, + custody_columns_streams_terminated: 0, + expects_blobs, + expects_custody_columns, + num_custody_column_requests, + peer_ids, } } - pub fn get_request_type(&self) -> ByRangeRequestType { - self.request_type + // TODO: This function should be deprecated when simplying the retry mechanism of this range + // requests. 
+ pub fn get_requirements(&self) -> (bool, Option>) { + (self.expects_blobs, self.expects_custody_columns.clone()) } pub fn add_block_response(&mut self, block_opt: Option>>) { match block_opt { - Some(block) => self.accumulated_blocks.push_back(block), + Some(block) => self.blocks.push_back(block), None => self.is_blocks_stream_terminated = true, } } pub fn add_sidecar_response(&mut self, sidecar_opt: Option>>) { match sidecar_opt { - Some(sidecar) => self.accumulated_sidecars.push_back(sidecar), + Some(sidecar) => self.blobs.push_back(sidecar), None => self.is_sidecars_stream_terminated = true, } } - pub fn into_responses(self) -> Result>, String> { - let BlocksAndBlobsRequestInfo { - accumulated_blocks, - accumulated_sidecars, - .. - } = self; + pub fn add_data_column(&mut self, column_opt: Option>>) { + match column_opt { + Some(column) => self.data_columns.push_back(column), + // TODO(das): this mechanism is dangerous, if somehow there are two requests for the + // same column index it can terminate early. This struct should track that all requests + // for all custody columns terminate. + None => self.custody_columns_streams_terminated += 1, + } + } + + pub fn into_responses(self, spec: &ChainSpec) -> Result>, String> { + if let Some(expects_custody_columns) = self.expects_custody_columns.clone() { + self.into_responses_with_custody_columns(expects_custody_columns, spec) + } else { + self.into_responses_with_blobs() + } + } + + fn into_responses_with_blobs(self) -> Result>, String> { + let RangeBlockComponentsRequest { blocks, blobs, .. } = self; // There can't be more more blobs than blocks. i.e. sending any blob (empty // included) for a skipped slot is not permitted. 
- let mut responses = Vec::with_capacity(accumulated_blocks.len()); - let mut blob_iter = accumulated_sidecars.into_iter().peekable(); - for block in accumulated_blocks.into_iter() { + let mut responses = Vec::with_capacity(blocks.len()); + let mut blob_iter = blobs.into_iter().peekable(); + for block in blocks.into_iter() { let mut blob_list = Vec::with_capacity(E::max_blobs_per_block()); while { let pair_next_blob = blob_iter @@ -99,20 +135,110 @@ impl BlocksAndBlobsRequestInfo { Ok(responses) } + fn into_responses_with_custody_columns( + self, + expects_custody_columns: Vec, + spec: &ChainSpec, + ) -> Result>, String> { + let RangeBlockComponentsRequest { + blocks, + data_columns, + .. + } = self; + + // Group data columns by block_root and index + let mut data_columns_by_block = + HashMap::>>>::new(); + + for column in data_columns { + let block_root = column.block_root(); + let index = column.index; + if data_columns_by_block + .entry(block_root) + .or_default() + .insert(index, column) + .is_some() + { + return Err(format!( + "Repeated column block_root {block_root:?} index {index}" + )); + } + } + + // Now iterate all blocks ensuring that the block roots of each block and data column match, + // plus we have columns for our custody requirements + let mut rpc_blocks = Vec::with_capacity(blocks.len()); + + for block in blocks { + let block_root = get_block_root(&block); + rpc_blocks.push(if block.num_expected_blobs() > 0 { + let Some(mut data_columns_by_index) = data_columns_by_block.remove(&block_root) + else { + // This PR ignores the fix from https://github.com/sigp/lighthouse/pull/5675 + // which allows blobs to not match blocks. + // TODO(das): on the initial version of PeerDAS the beacon chain does not check + // rpc custody requirements and dropping this check can allow the block to have + // an inconsistent DB. 
+ return Err(format!("No columns for block {block_root:?} with data")); + }; + + let mut custody_columns = vec![]; + for index in &expects_custody_columns { + let Some(data_column) = data_columns_by_index.remove(index) else { + return Err(format!("No column for block {block_root:?} index {index}")); + }; + // Safe to convert to `CustodyDataColumn`: we have asserted that the index of + // this column is in the set of `expects_custody_columns` and with the expected + // block root, so for the expected epoch of this batch. + custody_columns.push(CustodyDataColumn::from_asserted_custody(data_column)); + } + + // Assert that there are no columns left + if !data_columns_by_index.is_empty() { + let remaining_indices = data_columns_by_index.keys().collect::>(); + return Err(format!( + "Not all columns consumed for block {block_root:?}: {remaining_indices:?}" + )); + } + + RpcBlock::new_with_custody_columns(Some(block_root), block, custody_columns, spec) + .map_err(|e| format!("{e:?}"))? + } else { + RpcBlock::new_without_blobs(Some(block_root), block) + }); + } + + // Assert that there are no columns left for other blocks + if !data_columns_by_block.is_empty() { + let remaining_roots = data_columns_by_block.keys().collect::>(); + return Err(format!("Not all columns consumed: {remaining_roots:?}")); + } + + Ok(rpc_blocks) + } + pub fn is_finished(&self) -> bool { - let blobs_requested = match self.request_type { - ByRangeRequestType::Blocks => false, - ByRangeRequestType::BlocksAndBlobs => true, - }; - self.is_blocks_stream_terminated && (!blobs_requested || self.is_sidecars_stream_terminated) + if !self.is_blocks_stream_terminated { + return false; + } + if self.expects_blobs && !self.is_sidecars_stream_terminated { + return false; + } + if let Some(expects_custody_column_responses) = self.num_custody_column_requests { + if self.custody_columns_streams_terminated < expects_custody_column_responses { + return false; + } + } + true } } #[cfg(test)] mod tests { - use 
super::BlocksAndBlobsRequestInfo; - use crate::sync::range_sync::ByRangeRequestType; - use beacon_chain::test_utils::{generate_rand_block_and_blobs, NumBlobs}; + use super::RangeBlockComponentsRequest; + use beacon_chain::test_utils::{ + generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, NumBlobs, + }; use lighthouse_network::PeerId; use rand::SeedableRng; use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E}; @@ -120,7 +246,7 @@ mod tests { #[test] fn no_blobs_into_responses() { let peer_id = PeerId::random(); - let mut info = BlocksAndBlobsRequestInfo::::new(ByRangeRequestType::Blocks, peer_id); + let mut info = RangeBlockComponentsRequest::::new(false, None, None, vec![peer_id]); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng).0) @@ -134,14 +260,13 @@ mod tests { // Assert response is finished and RpcBlocks can be constructed assert!(info.is_finished()); - info.into_responses().unwrap(); + info.into_responses(&test_spec::()).unwrap(); } #[test] fn empty_blobs_into_responses() { let peer_id = PeerId::random(); - let mut info = - BlocksAndBlobsRequestInfo::::new(ByRangeRequestType::BlocksAndBlobs, peer_id); + let mut info = RangeBlockComponentsRequest::::new(true, None, None, vec![peer_id]); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { @@ -162,6 +287,123 @@ mod tests { // This makes sure we don't expect blobs here when they have expired. Checking this logic should // be hendled elsewhere. 
assert!(info.is_finished()); - info.into_responses().unwrap(); + info.into_responses(&test_spec::()).unwrap(); + } + + #[test] + fn rpc_block_with_custody_columns() { + let spec = test_spec::(); + let expects_custody_columns = vec![1, 2, 3, 4]; + let mut info = RangeBlockComponentsRequest::::new( + false, + Some(expects_custody_columns.clone()), + Some(expects_custody_columns.len()), + vec![PeerId::random()], + ); + let mut rng = XorShiftRng::from_seed([42; 16]); + let blocks = (0..4) + .map(|_| { + generate_rand_block_and_data_columns::( + ForkName::Deneb, + NumBlobs::Number(1), + &mut rng, + &spec, + ) + }) + .collect::>(); + + // Send blocks and complete terminate response + for block in &blocks { + info.add_block_response(Some(block.0.clone().into())); + } + info.add_block_response(None); + // Assert response is not finished + assert!(!info.is_finished()); + + // Send data columns interleaved + for block in &blocks { + for column in &block.1 { + if expects_custody_columns.contains(&column.index) { + info.add_data_column(Some(column.clone())); + } + } + } + + // Terminate the requests + for (i, _column_index) in expects_custody_columns.iter().enumerate() { + info.add_data_column(None); + + if i < expects_custody_columns.len() - 1 { + assert!( + !info.is_finished(), + "requested should not be finished at loop {i}" + ); + } else { + assert!( + info.is_finished(), + "request should be finishied at loop {i}" + ); + } + } + + // All completed construct response + info.into_responses(&spec).unwrap(); + } + + #[test] + fn rpc_block_with_custody_columns_batched() { + let spec = test_spec::(); + let expects_custody_columns = vec![1, 2, 3, 4]; + let num_of_data_column_requests = 2; + let mut info = RangeBlockComponentsRequest::::new( + false, + Some(expects_custody_columns.clone()), + Some(num_of_data_column_requests), + vec![PeerId::random()], + ); + let mut rng = XorShiftRng::from_seed([42; 16]); + let blocks = (0..4) + .map(|_| { + 
generate_rand_block_and_data_columns::( + ForkName::Deneb, + NumBlobs::Number(1), + &mut rng, + &spec, + ) + }) + .collect::>(); + + // Send blocks and complete terminate response + for block in &blocks { + info.add_block_response(Some(block.0.clone().into())); + } + info.add_block_response(None); + // Assert response is not finished + assert!(!info.is_finished()); + + // Send data columns interleaved + for block in &blocks { + for column in &block.1 { + if expects_custody_columns.contains(&column.index) { + info.add_data_column(Some(column.clone())); + } + } + } + + // Terminate the requests + for i in 0..num_of_data_column_requests { + info.add_data_column(None); + if i < num_of_data_column_requests - 1 { + assert!( + !info.is_finished(), + "requested should not be finished at loop {i}" + ); + } else { + assert!(info.is_finished(), "request should be finished at loop {i}"); + } + } + + // All completed construct response + info.into_responses(&spec).unwrap(); } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 7149395839b..ed91c73d8bf 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -38,22 +38,26 @@ use super::block_lookups::BlockLookups; use super::network_context::{BlockOrBlob, RangeRequestId, RpcEvent, SyncNetworkContext}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; +use super::sampling::{Sampling, SamplingConfig, SamplingResult}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::block_lookups::{ - BlobRequestState, BlockComponent, BlockRequestState, DownloadResult, + BlobRequestState, BlockComponent, BlockRequestState, CustodyRequestState, DownloadResult, }; -use crate::sync::block_sidecar_coupling::BlocksAndBlobsRequestInfo; +use 
crate::sync::block_sidecar_coupling::RangeBlockComponentsRequest; +use crate::sync::network_context::PeerGroup; use beacon_chain::block_verification_types::AsBlock; -use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState, }; use futures::StreamExt; use lighthouse_network::rpc::RPCError; -use lighthouse_network::service::api_types::{Id, SingleLookupReqId, SyncRequestId}; +use lighthouse_network::service::api_types::{ + DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SamplingId, SamplingRequester, + SingleLookupReqId, SyncRequestId, +}; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; @@ -101,8 +105,16 @@ pub enum SyncMessage { seen_timestamp: Duration, }, + /// A data columns has been received from the RPC + RpcDataColumn { + request_id: SyncRequestId, + peer_id: PeerId, + data_column: Option>>, + seen_timestamp: Duration, + }, + /// A block with an unknown parent has been received. - UnknownParentBlock(PeerId, RpcBlock, Hash256), + UnknownParentBlock(PeerId, Arc>, Hash256), /// A blob with an unknown parent has been received. UnknownParentBlob(PeerId, Arc>), @@ -114,6 +126,10 @@ pub enum SyncMessage { /// manager to attempt to find the block matching the unknown hash. UnknownBlockHashFromAttestation(PeerId, Hash256), + /// Request to start sampling a block. Caller should ensure that block has data before sending + /// the request. + SampleBlock(Hash256, Slot), + /// A peer has disconnected. 
Disconnect(PeerId), @@ -133,7 +149,13 @@ pub enum SyncMessage { /// Block processed BlockComponentProcessed { process_type: BlockProcessType, - result: BlockProcessingResult, + result: BlockProcessingResult, + }, + + /// Sample data column verified + SampleVerified { + id: SamplingId, + result: Result<(), String>, }, /// A block from gossip has completed processing, @@ -145,12 +167,23 @@ pub enum SyncMessage { pub enum BlockProcessType { SingleBlock { id: Id }, SingleBlob { id: Id }, + SingleCustodyColumn(Id), +} + +impl BlockProcessType { + pub fn id(&self) -> Id { + match self { + BlockProcessType::SingleBlock { id } + | BlockProcessType::SingleBlob { id } + | BlockProcessType::SingleCustodyColumn(id) => *id, + } + } } #[derive(Debug)] -pub enum BlockProcessingResult { +pub enum BlockProcessingResult { Ok(AvailabilityProcessingStatus), - Err(BlockError), + Err(BlockError), Ignored, } @@ -196,6 +229,8 @@ pub struct SyncManager { /// one event is useful, the rest generating log noise and wasted cycles notified_unknown_roots: LRUTimeCache<(PeerId, Hash256)>, + sampling: Sampling, + /// The logger for the import manager. 
log: Logger, } @@ -222,6 +257,7 @@ pub fn spawn( network_send, beacon_processor, sync_recv, + SamplingConfig::Default, log.clone(), ); @@ -236,6 +272,7 @@ impl SyncManager { network_send: mpsc::UnboundedSender>, beacon_processor: Arc>, sync_recv: mpsc::UnboundedReceiver>, + sampling_config: SamplingConfig, log: slog::Logger, ) -> Self { let network_globals = beacon_processor.network_globals.clone(); @@ -261,6 +298,7 @@ impl SyncManager { notified_unknown_roots: LRUTimeCache::new(Duration::from_secs( NOTIFIED_UNKNOWN_ROOT_EXPIRY_SECONDS, )), + sampling: Sampling::new(sampling_config, log.new(o!("service" => "sampling"))), log: log.clone(), } } @@ -289,6 +327,11 @@ impl SyncManager { self.block_lookups.insert_failed_chain(block_root); } + #[cfg(test)] + pub(crate) fn active_sampling_requests(&self) -> Vec { + self.sampling.active_sampling_requests() + } + fn network_globals(&self) -> &NetworkGlobals { self.network.network_globals() } @@ -337,6 +380,13 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => { self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) } + SyncRequestId::DataColumnsByRoot(req_id, requester) => self + .on_data_columns_by_root_response( + req_id, + requester, + peer_id, + RpcEvent::RPCError(error), + ), SyncRequestId::RangeBlockAndBlobs { id } => { if let Some(sender_id) = self.network.range_request_failed(id) { match sender_id { @@ -614,6 +664,12 @@ impl SyncManager { blob_sidecar, seen_timestamp, } => self.rpc_blob_received(request_id, peer_id, blob_sidecar, seen_timestamp), + SyncMessage::RpcDataColumn { + request_id, + peer_id, + data_column, + seen_timestamp, + } => self.rpc_data_column_received(request_id, peer_id, data_column, seen_timestamp), SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { let block_slot = block.slot(); let parent_root = block.parent_root(); @@ -627,7 +683,7 @@ impl SyncManager { value: block.block_cloned(), block_root, seen_timestamp: timestamp_now(), - peer_id, + peer_group: 
PeerGroup::from_single(peer_id), }), ); } @@ -645,12 +701,27 @@ impl SyncManager { value: blob, block_root, seen_timestamp: timestamp_now(), - peer_id, + peer_group: PeerGroup::from_single(peer_id), }), ); } - SyncMessage::UnknownParentDataColumn(_peer_id, _data_column) => { - // TODO(das): data column parent lookup to be implemented + SyncMessage::UnknownParentDataColumn(peer_id, data_column) => { + let data_column_slot = data_column.slot(); + let block_root = data_column.block_root(); + let parent_root = data_column.block_parent_root(); + debug!(self.log, "Received unknown parent data column message"; "block_root" => %block_root, "parent_root" => %parent_root); + self.handle_unknown_parent( + peer_id, + block_root, + parent_root, + data_column_slot, + BlockComponent::DataColumn(DownloadResult { + value: data_column, + block_root, + seen_timestamp: timestamp_now(), + peer_group: PeerGroup::from_single(peer_id), + }), + ); } SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_root) => { if !self.notified_unknown_roots.contains(&(peer_id, block_root)) { @@ -659,6 +730,15 @@ impl SyncManager { self.handle_unknown_block_root(peer_id, block_root); } } + SyncMessage::SampleBlock(block_root, block_slot) => { + debug!(self.log, "Received SampleBlock message"; "block_root" => %block_root, "slot" => block_slot); + if let Some((requester, result)) = self + .sampling + .on_new_sample_request(block_root, &mut self.network) + { + self.on_sampling_result(requester, result) + } + } SyncMessage::Disconnect(peer_id) => { debug!(self.log, "Received disconnected message"; "peer_id" => %peer_id); self.peer_disconnect(&peer_id); @@ -708,6 +788,14 @@ impl SyncManager { } } }, + SyncMessage::SampleVerified { id, result } => { + if let Some((requester, result)) = + self.sampling + .on_sample_verified(id, result, &mut self.network) + { + self.on_sampling_result(requester, result) + } + } } } @@ -843,12 +931,12 @@ impl SyncManager { None => RpcEvent::StreamTermination, }, ), - 
SyncRequestId::SingleBlob { .. } => { - crit!(self.log, "Block received during blob request"; "peer_id" => %peer_id ); - } SyncRequestId::RangeBlockAndBlobs { id } => { self.range_block_and_blobs_response(id, peer_id, block.into()) } + _ => { + crit!(self.log, "bad request id for block"; "peer_id" => %peer_id ); + } } } @@ -862,8 +950,9 @@ impl SyncManager { self.block_lookups .on_download_response::>( id, - peer_id, - resp, + resp.map(|(value, seen_timestamp)| { + (value, PeerGroup::from_single(peer_id), seen_timestamp) + }), &mut self.network, ) } @@ -877,9 +966,6 @@ impl SyncManager { seen_timestamp: Duration, ) { match request_id { - SyncRequestId::SingleBlock { .. } => { - crit!(self.log, "Single blob received during block request"; "peer_id" => %peer_id ); - } SyncRequestId::SingleBlob { id } => self.on_single_blob_response( id, peer_id, @@ -891,6 +977,41 @@ impl SyncManager { SyncRequestId::RangeBlockAndBlobs { id } => { self.range_block_and_blobs_response(id, peer_id, blob.into()) } + _ => { + crit!(self.log, "bad request id for blob"; "peer_id" => %peer_id); + } + } + } + + fn rpc_data_column_received( + &mut self, + request_id: SyncRequestId, + peer_id: PeerId, + data_column: Option>>, + seen_timestamp: Duration, + ) { + match request_id { + SyncRequestId::DataColumnsByRoot(req_id, requester) => { + self.on_data_columns_by_root_response( + req_id, + requester, + peer_id, + match data_column { + Some(data_column) => RpcEvent::Response(data_column, seen_timestamp), + None => RpcEvent::StreamTermination, + }, + ); + } + SyncRequestId::RangeBlockAndBlobs { id } => { + self.range_block_and_blobs_response( + id, + peer_id, + BlockOrBlob::CustodyColumns(data_column), + ); + } + _ => { + crit!(self.log, "bad request id for data_column"; "peer_id" => %peer_id); + } } } @@ -904,13 +1025,88 @@ impl SyncManager { self.block_lookups .on_download_response::>( id, - peer_id, - resp, + resp.map(|(value, seen_timestamp)| { + (value, PeerGroup::from_single(peer_id), 
seen_timestamp) + }), &mut self.network, ) } } + fn on_data_columns_by_root_response( + &mut self, + req_id: DataColumnsByRootRequestId, + requester: DataColumnsByRootRequester, + peer_id: PeerId, + data_column: RpcEvent>>, + ) { + if let Some(resp) = + self.network + .on_data_columns_by_root_response(req_id, peer_id, data_column) + { + match requester { + DataColumnsByRootRequester::Sampling(id) => { + if let Some((requester, result)) = + self.sampling + .on_sample_downloaded(id, peer_id, resp, &mut self.network) + { + self.on_sampling_result(requester, result) + } + } + DataColumnsByRootRequester::Custody(custody_id) => { + if let Some(custody_columns) = self + .network + .on_custody_by_root_response(custody_id, req_id, peer_id, resp) + { + // TODO(das): get proper timestamp + let seen_timestamp = timestamp_now(); + self.block_lookups + .on_download_response::>( + custody_id.requester.0, + custody_columns.map(|(columns, peer_group)| { + (columns, peer_group, seen_timestamp) + }), + &mut self.network, + ); + } + } + } + } + } + + fn on_sampling_result(&mut self, requester: SamplingRequester, result: SamplingResult) { + // TODO(das): How is a consumer of sampling results? + // - Fork-choice for trailing DA + // - Single lookups to complete import requirements + // - Range sync to complete import requirements? Can sampling for syncing lag behind and + // accumulate in fork-choice? + + match requester { + SamplingRequester::ImportedBlock(block_root) => { + debug!(self.log, "Sampling result"; "block_root" => %block_root, "result" => ?result); + + // TODO(das): Consider moving SamplingResult to the beacon_chain crate and import + // here. No need to add too much enum variants, just whatever the beacon_chain or + // fork-choice needs to make a decision. Currently the fork-choice only needs to + // be notified of successful samplings, i.e. 
sampling failures don't trigger pruning + match result { + Ok(_) => { + if let Err(e) = self + .network + .beacon_processor() + .send_sampling_completed(block_root) + { + warn!(self.log, "Error sending sampling result"; "block_root" => ?block_root, "reason" => ?e); + } + } + Err(e) => { + warn!(self.log, "Sampling failed"; "block_root" => %block_root, "reason" => ?e); + } + } + } + } + } + /// Handles receiving a response for a range sync request that should have both blocks and /// blobs. fn range_block_and_blobs_response( @@ -961,7 +1157,12 @@ impl SyncManager { self.network.insert_range_blocks_and_blobs_request( id, resp.sender_id, - BlocksAndBlobsRequestInfo::new(resp.request_type, peer_id), + RangeBlockComponentsRequest::new( + resp.expects_blobs, + resp.expects_custody_columns, + None, + vec![], + ), ); // inform range that the request needs to be treated as failed // With time we will want to downgrade this log @@ -985,10 +1186,8 @@ impl SyncManager { } } -impl From>> - for BlockProcessingResult -{ - fn from(result: Result>) -> Self { +impl From> for BlockProcessingResult { + fn from(result: Result) -> Self { match result { Ok(status) => BlockProcessingResult::Ok(status), Err(e) => BlockProcessingResult::Err(e), @@ -996,8 +1195,8 @@ impl From>> } } -impl From> for BlockProcessingResult { - fn from(e: BlockError) -> Self { +impl From for BlockProcessingResult { + fn from(e: BlockError) -> Self { BlockProcessingResult::Err(e) } } diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 7b244bceceb..6669add4453 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -8,6 +8,8 @@ pub mod manager; mod network_context; mod peer_sync_info; mod range_sync; +mod sampling; +pub use lighthouse_network::service::api_types::SamplingId; pub use manager::{BatchProcessResult, SyncMessage}; pub use range_sync::{BatchOperationOutcome, ChainId}; diff --git 
a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index df8be9f6d59..1cf028dbcd8 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1,38 +1,52 @@ //! Provides network functionality for the Syncing thread. This fundamentally wraps a network //! channel and stores a global RPC ID to perform requests. +use self::custody::{ActiveCustodyRequest, Error as CustodyRequestError}; use self::requests::{ActiveBlobsByRootRequest, ActiveBlocksByRootRequest}; -pub use self::requests::{BlobsByRootSingleBlockRequest, BlocksByRootSingleRequest}; -use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo; +pub use self::requests::{BlocksByRootSingleRequest, DataColumnsByRootSingleBlockRequest}; +use super::block_sidecar_coupling::RangeBlockComponentsRequest; +use super::manager::BlockProcessType; use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; use crate::metrics; use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::block_lookups::SingleLookupId; -use crate::sync::manager::BlockProcessType; +use crate::sync::network_context::requests::BlobsByRootSingleBlockRequest; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest}; use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError}; -use lighthouse_network::service::api_types::{AppRequestId, Id, SingleLookupReqId, SyncRequestId}; +use lighthouse_network::service::api_types::{ + AppRequestId, CustodyId, CustodyRequester, DataColumnsByRootRequestId, + DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, +}; use 
lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; +use rand::seq::SliceRandom; +use rand::thread_rng; +use requests::ActiveDataColumnsByRootRequest; pub use requests::LookupVerifyError; -use slog::{debug, error, trace, warn}; +use slog::{debug, error, warn}; use std::collections::hash_map::Entry; +use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; -use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock}; +use types::{ + BlobSidecar, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, Hash256, + SignedBeaconBlock, Slot, +}; +pub mod custody; mod requests; pub struct BlocksAndBlobsByRangeResponse { pub sender_id: RangeRequestId, pub responses: Result>, String>, - pub request_type: ByRangeRequestType, + pub expects_blobs: bool, + pub expects_custody_columns: Option>, } #[derive(Debug, Clone, Copy)] @@ -55,15 +69,20 @@ pub enum RpcEvent { pub type RpcResponseResult = Result<(T, Duration), RpcResponseError>; +#[derive(Debug)] pub enum RpcResponseError { RpcError(RPCError), VerifyError(LookupVerifyError), + CustodyRequestError(CustodyRequestError), } #[derive(Debug, PartialEq, Eq)] pub enum RpcRequestSendError { /// Network channel send failed NetworkSendError, + NoCustodyPeers, + CustodyRequestError(custody::Error), + SlotClockError, } #[derive(Debug, PartialEq, Eq)] @@ -77,6 +96,7 @@ impl std::fmt::Display for RpcResponseError { match self { RpcResponseError::RpcError(e) => write!(f, "RPC Error: {:?}", e), RpcResponseError::VerifyError(e) => write!(f, "Lookup Verify Error: {:?}", e), + RpcResponseError::CustodyRequestError(e) => write!(f, "Custody Request Error: {:?}", e), } } } @@ -93,15 +113,41 @@ impl From for RpcResponseError { } } +/// Represents a group of peers that served a block component. 
+#[derive(Clone, Debug)] +pub struct PeerGroup { + /// Peers group by which indexed section of the block component they served. For example: + /// - PeerA served = [blob index 0, blob index 2] + /// - PeerA served = [blob index 1] + peers: HashMap>, +} + +impl PeerGroup { + /// Return a peer group where a single peer returned all parts of a block component. For + /// example, a block has a single component (the block = index 0/1). + pub fn from_single(peer: PeerId) -> Self { + Self { + peers: HashMap::from_iter([(peer, vec![0])]), + } + } + pub fn from_set(peers: HashMap>) -> Self { + Self { peers } + } + pub fn all(&self) -> impl Iterator + '_ { + self.peers.keys() + } +} + /// Sequential ID that uniquely identifies ReqResp outgoing requests pub type ReqId = u32; -pub enum LookupRequestResult { +pub enum LookupRequestResult { /// A request is sent. Sync MUST receive an event from the network in the future for either: /// completed response or failed request - RequestSent(ReqId), - /// No request is sent, and no further action is necessary to consider this request completed - NoRequestNeeded, + RequestSent(I), + /// No request is sent, and no further action is necessary to consider this request completed. + /// Includes a reason why this request is not needed. + NoRequestNeeded(&'static str), /// No request is sent, but the request is not completed. Sync MUST receive some future event /// that makes progress on the request. For example: request is processing from a different /// source (i.e. block received from gossip) and sync MUST receive an event with that processing @@ -123,9 +169,16 @@ pub struct SyncNetworkContext { /// A mapping of active BlobsByRoot requests, including both current slot and parent lookups. 
blobs_by_root_requests: FnvHashMap>, + /// Mapping of active custody column requests for a block root + custody_by_root_requests: FnvHashMap>, + + /// A mapping of active DataColumnsByRoot requests + data_columns_by_root_requests: + FnvHashMap>, + /// BlocksByRange requests paired with BlobsByRange - range_blocks_and_blobs_requests: - FnvHashMap)>, + range_block_components_requests: + FnvHashMap)>, /// Whether the ee is online. If it's not, we don't allow access to the /// `beacon_processor_send`. @@ -144,6 +197,7 @@ pub struct SyncNetworkContext { pub enum BlockOrBlob { Block(Option>>), Blob(Option>>), + CustodyColumns(Option>>), } impl From>>> for BlockOrBlob { @@ -171,7 +225,9 @@ impl SyncNetworkContext { request_id: 1, blocks_by_root_requests: <_>::default(), blobs_by_root_requests: <_>::default(), - range_blocks_and_blobs_requests: FnvHashMap::default(), + data_columns_by_root_requests: <_>::default(), + custody_by_root_requests: <_>::default(), + range_block_components_requests: FnvHashMap::default(), network_beacon_processor, chain, log, @@ -181,10 +237,10 @@ impl SyncNetworkContext { /// Returns the ids of all the requests made to the given peer_id. 
pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Vec { let failed_range_ids = - self.range_blocks_and_blobs_requests + self.range_block_components_requests .iter() .filter_map(|(id, request)| { - if request.1.peer_id == *peer_id { + if request.1.peer_ids.contains(peer_id) { Some(SyncRequestId::RangeBlockAndBlobs { id: *id }) } else { None @@ -211,13 +267,35 @@ impl SyncNetworkContext { None } }); + let failed_data_column_by_root_ids = + self.data_columns_by_root_requests + .iter() + .filter_map(|(req_id, request)| { + if request.peer_id == *peer_id { + Some(SyncRequestId::DataColumnsByRoot(*req_id, request.requester)) + } else { + None + } + }); failed_range_ids .chain(failed_block_ids) .chain(failed_blob_ids) + .chain(failed_data_column_by_root_ids) .collect() } + pub fn get_custodial_peers(&self, column_index: ColumnIndex) -> Vec { + self.network_globals() + .custody_peers_for_column(column_index) + } + + pub fn get_random_custodial_peer(&self, column_index: ColumnIndex) -> Option { + self.get_custodial_peers(column_index) + .choose(&mut thread_rng()) + .cloned() + } + pub fn network_globals(&self) -> &NetworkGlobals { &self.network_beacon_processor.network_globals } @@ -256,19 +334,23 @@ impl SyncNetworkContext { } } - /// A blocks by range request for the range sync algorithm. 
- pub fn blocks_by_range_request( + /// A blocks by range request sent by the range sync algorithm + pub fn block_components_by_range_request( &mut self, peer_id: PeerId, batch_type: ByRangeRequestType, request: BlocksByRangeRequest, + sender_id: RangeRequestId, ) -> Result { + let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()); let id = self.next_id(); - trace!( + let mut requested_peers = vec![peer_id]; + debug!( self.log, "Sending BlocksByRange request"; "method" => "BlocksByRange", "count" => request.count(), + "epoch" => epoch, "peer" => %peer_id, ); self.network_send @@ -279,12 +361,13 @@ impl SyncNetworkContext { }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; - if matches!(batch_type, ByRangeRequestType::BlocksAndBlobs) { + let expected_blobs = if matches!(batch_type, ByRangeRequestType::BlocksAndBlobs) { debug!( self.log, "Sending BlobsByRange requests"; "method" => "BlobsByRange", "count" => request.count(), + "epoch" => epoch, "peer" => %peer_id, ); @@ -299,33 +382,94 @@ impl SyncNetworkContext { request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; - } + true + } else { + false + }; + + let (expects_custody_columns, num_of_custody_column_req) = + if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { + let custody_indexes = self.network_globals().custody_columns(); + let mut num_of_custody_column_req = 0; + + for (peer_id, columns_by_range_request) in + self.make_columns_by_range_requests(request, &custody_indexes)? 
+ { + requested_peers.push(peer_id); + + debug!( + self.log, + "Sending DataColumnsByRange requests"; + "method" => "DataColumnsByRange", + "count" => columns_by_range_request.count, + "epoch" => epoch, + "columns" => ?columns_by_range_request.columns, + "peer" => %peer_id, + ); + + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: Request::DataColumnsByRange(columns_by_range_request), + request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; + + num_of_custody_column_req += 1; + } + (Some(custody_indexes), Some(num_of_custody_column_req)) + } else { + (None, None) + }; + + let info = RangeBlockComponentsRequest::new( + expected_blobs, + expects_custody_columns, + num_of_custody_column_req, + requested_peers, + ); + self.range_block_components_requests + .insert(id, (sender_id, info)); Ok(id) } - /// A blocks by range request sent by the range sync algorithm - pub fn blocks_and_blobs_by_range_request( - &mut self, - peer_id: PeerId, - batch_type: ByRangeRequestType, + fn make_columns_by_range_requests( + &self, request: BlocksByRangeRequest, - sender_id: RangeRequestId, - ) -> Result { - let id = self.blocks_by_range_request(peer_id, batch_type, request)?; - self.range_blocks_and_blobs_requests.insert( - id, - ( - sender_id, - BlocksAndBlobsRequestInfo::new(batch_type, peer_id), - ), - ); - Ok(id) + custody_indexes: &Vec, + ) -> Result, RpcRequestSendError> { + let mut peer_id_to_request_map = HashMap::new(); + + for column_index in custody_indexes { + // TODO(das): The peer selection logic here needs to be improved - we should probably + // avoid retrying from failed peers, however `BatchState` currently only tracks the peer + // serving the blocks. + let Some(custody_peer) = self.get_random_custodial_peer(*column_index) else { + // TODO(das): this will be pretty bad UX. 
To improve we should: + // - Attempt to fetch custody requests first, before requesting blocks + // - Handle the no peers case gracefully, maybe add some timeout and give a few + // minutes / seconds to the peer manager to locate peers on this subnet before + // abandoing progress on the chain completely. + return Err(RpcRequestSendError::NoCustodyPeers); + }; + + let columns_by_range_request = peer_id_to_request_map + .entry(custody_peer) + .or_insert_with(|| DataColumnsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + columns: vec![], + }); + + columns_by_range_request.columns.push(*column_index); + } + + Ok(peer_id_to_request_map) } pub fn range_request_failed(&mut self, request_id: Id) -> Option { let sender_id = self - .range_blocks_and_blobs_requests + .range_block_components_requests .remove(&request_id) .map(|(sender_id, _info)| sender_id); if let Some(sender_id) = sender_id { @@ -349,7 +493,7 @@ impl SyncNetworkContext { request_id: Id, block_or_blob: BlockOrBlob, ) -> Option> { - let Entry::Occupied(mut entry) = self.range_blocks_and_blobs_requests.entry(request_id) + let Entry::Occupied(mut entry) = self.range_block_components_requests.entry(request_id) else { metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["range_blocks"]); return None; @@ -359,15 +503,17 @@ impl SyncNetworkContext { match block_or_blob { BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block), BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), + BlockOrBlob::CustodyColumns(column) => info.add_data_column(column), } if info.is_finished() { // If the request is finished, dequeue everything let (sender_id, info) = entry.remove(); - let request_type = info.get_request_type(); + let (expects_blobs, expects_custody_columns) = info.get_requirements(); Some(BlocksAndBlobsByRangeResponse { sender_id, - request_type, - responses: info.into_responses(), + responses: 
info.into_responses(&self.chain.spec), + expects_blobs, + expects_custody_columns, }) } else { None @@ -398,7 +544,9 @@ impl SyncNetworkContext { // Block is fully validated. If it's not yet imported it's waiting for missing block // components. Consider this request completed and do nothing. BlockProcessStatus::ExecutionValidated { .. } => { - return Ok(LookupRequestResult::NoRequestNeeded) + return Ok(LookupRequestResult::NoRequestNeeded( + "block execution validated", + )) } } @@ -447,16 +595,16 @@ impl SyncNetworkContext { lookup_id: SingleLookupId, peer_id: PeerId, block_root: Hash256, - downloaded_block_expected_blobs: Option, + downloaded_block: Option>>, ) -> Result { - let Some(expected_blobs) = downloaded_block_expected_blobs.or_else(|| { + let Some(block) = downloaded_block.or_else(|| { // If the block is already being processed or fully validated, retrieve how many blobs // it expects. Consider any stage of the block. If the block root has been validated, we // can assert that this is the correct value of `blob_kzg_commitments_count`. match self.chain.get_block_process_status(&block_root) { BlockProcessStatus::Unknown => None, BlockProcessStatus::NotValidated(block) - | BlockProcessStatus::ExecutionValidated(block) => Some(block.num_expected_blobs()), + | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), } }) else { // Wait to download the block before downloading blobs. Then we can be sure that the @@ -473,6 +621,18 @@ impl SyncNetworkContext { // get dropped as completed. 
return Ok(LookupRequestResult::Pending("waiting for block download")); }; + let expected_blobs = block.num_expected_blobs(); + let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + + // Check if we are in deneb, before peerdas and inside da window + if !self.chain.should_fetch_blobs(block_epoch) { + return Ok(LookupRequestResult::NoRequestNeeded("blobs not required")); + } + + // No data required for this block + if expected_blobs == 0 { + return Ok(LookupRequestResult::NoRequestNeeded("no data")); + } let imported_blob_indexes = self .chain @@ -486,7 +646,7 @@ impl SyncNetworkContext { if indices.is_empty() { // No blobs required, do not issue any request - return Ok(LookupRequestResult::NoRequestNeeded); + return Ok(LookupRequestResult::NoRequestNeeded("no indices to fetch")); } let req_id = self.next_id(); @@ -522,6 +682,130 @@ impl SyncNetworkContext { Ok(LookupRequestResult::RequestSent(req_id)) } + /// Request to send a single `data_columns_by_root` request to the network. 
+ pub fn data_column_lookup_request( + &mut self, + requester: DataColumnsByRootRequester, + peer_id: PeerId, + request: DataColumnsByRootSingleBlockRequest, + ) -> Result, &'static str> { + let req_id = DataColumnsByRootRequestId(self.next_id()); + debug!( + self.log, + "Sending DataColumnsByRoot Request"; + "method" => "DataColumnsByRoot", + "block_root" => ?request.block_root, + "indices" => ?request.indices, + "peer" => %peer_id, + "requester" => ?requester, + "req_id" => %req_id, + ); + + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: Request::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), + request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id, requester)), + })?; + + self.data_columns_by_root_requests.insert( + req_id, + ActiveDataColumnsByRootRequest::new(request, peer_id, requester), + ); + + Ok(LookupRequestResult::RequestSent(req_id)) + } + + /// Request to fetch all needed custody columns of a specific block. This function may not send + /// any request to the network if no columns have to be fetched based on the import state of the + /// node. A custody request is a "super request" that may trigger 0 or more `data_columns_by_root` + /// requests. + pub fn custody_lookup_request( + &mut self, + lookup_id: SingleLookupId, + block_root: Hash256, + downloaded_block: Option>>, + ) -> Result { + let Some(block) = + downloaded_block.or_else(|| match self.chain.get_block_process_status(&block_root) { + BlockProcessStatus::Unknown => None, + BlockProcessStatus::NotValidated(block) + | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), + }) + else { + // Wait to download the block before downloading columns. Then we can be sure that the + // block has data, so there's no need to do "blind" requests for all possible columns and + // latter handle the case where if the peer sent no columns, penalize. 
+ // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing. + // - if `num_expected_blobs` returns Some = block is processed. + return Ok(LookupRequestResult::Pending("waiting for block download")); + }; + let expected_blobs = block.num_expected_blobs(); + let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + + // Check if we are into peerdas and inside da window + if !self.chain.should_fetch_custody_columns(block_epoch) { + return Ok(LookupRequestResult::NoRequestNeeded("columns not required")); + } + + // No data required for this block + if expected_blobs == 0 { + return Ok(LookupRequestResult::NoRequestNeeded("no data")); + } + + let custody_indexes_imported = self + .chain + .data_availability_checker + .imported_custody_column_indexes(&block_root) + .unwrap_or_default(); + + let custody_indexes_duty = self.network_globals().custody_columns(); + + // Include only the blob indexes not yet imported (received through gossip) + let custody_indexes_to_fetch = custody_indexes_duty + .into_iter() + .filter(|index| !custody_indexes_imported.contains(index)) + .collect::>(); + + if custody_indexes_to_fetch.is_empty() { + // No indexes required, do not issue any request + return Ok(LookupRequestResult::NoRequestNeeded("no indices to fetch")); + } + + let req_id = self.next_id(); + let id = SingleLookupReqId { lookup_id, req_id }; + + debug!( + self.log, + "Starting custody columns request"; + "block_root" => ?block_root, + "indices" => ?custody_indexes_to_fetch, + "id" => ?id + ); + + let requester = CustodyRequester(id); + let mut request = ActiveCustodyRequest::new( + block_root, + // TODO(das): req_id is duplicated here, also present in id + CustodyId { requester, req_id }, + &custody_indexes_to_fetch, + self.log.clone(), + ); + + // TODO(das): start request + // Note that you can only send, but not handle a response here + match request.continue_requests(self) { + Ok(_) => { + // Ignoring the result of 
`continue_requests` is okay. A request that has just been + // created cannot return data immediately, it must send some request to the network + // first. And there must exist some request, `custody_indexes_to_fetch` is not empty. + self.custody_by_root_requests.insert(requester, request); + Ok(LookupRequestResult::RequestSent(req_id)) + } + // TODO(das): handle this error properly + Err(e) => Err(RpcRequestSendError::CustodyRequestError(e)), + } + } + pub fn is_execution_engine_online(&self) -> bool { self.execution_engine_state == EngineState::Online } @@ -603,12 +887,18 @@ impl SyncNetworkContext { "To deal with alignment with deneb boundaries, batches need to be of just one epoch" ); - if let Some(data_availability_boundary) = self.chain.data_availability_boundary() { - if epoch >= data_availability_boundary { - ByRangeRequestType::BlocksAndBlobs - } else { - ByRangeRequestType::Blocks - } + if self + .chain + .data_availability_checker + .data_columns_required_for_epoch(epoch) + { + ByRangeRequestType::BlocksAndColumns + } else if self + .chain + .data_availability_checker + .blobs_required_for_epoch(epoch) + { + ByRangeRequestType::BlocksAndBlobs } else { ByRangeRequestType::Blocks } @@ -618,9 +908,9 @@ impl SyncNetworkContext { &mut self, id: Id, sender_id: RangeRequestId, - info: BlocksAndBlobsRequestInfo, + info: RangeBlockComponentsRequest, ) { - self.range_blocks_and_blobs_requests + self.range_block_components_requests .insert(id, (sender_id, info)); } @@ -630,14 +920,14 @@ impl SyncNetworkContext { &mut self, request_id: SingleLookupReqId, peer_id: PeerId, - block: RpcEvent>>, + rpc_event: RpcEvent>>, ) -> Option>>> { let Entry::Occupied(mut request) = self.blocks_by_root_requests.entry(request_id) else { metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blocks_by_root"]); return None; }; - let resp = match block { + let resp = match rpc_event { RpcEvent::Response(block, seen_timestamp) => { match 
request.get_mut().add_response(block) { Ok(block) => Ok((block, seen_timestamp)), @@ -668,14 +958,14 @@ impl SyncNetworkContext { &mut self, request_id: SingleLookupReqId, peer_id: PeerId, - blob: RpcEvent>>, + rpc_event: RpcEvent>>, ) -> Option>> { let Entry::Occupied(mut request) = self.blobs_by_root_requests.entry(request_id) else { metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blobs_by_root"]); return None; }; - let resp = match blob { + let resp = match rpc_event { RpcEvent::Response(blob, seen_timestamp) => { let request = request.get_mut(); match request.add_response(blob) { @@ -714,6 +1004,103 @@ impl SyncNetworkContext { } } + #[allow(clippy::type_complexity)] + pub fn on_data_columns_by_root_response( + &mut self, + id: DataColumnsByRootRequestId, + _peer_id: PeerId, + rpc_event: RpcEvent>>, + ) -> Option>>>> { + let Entry::Occupied(mut request) = self.data_columns_by_root_requests.entry(id) else { + return None; + }; + + let resp = match rpc_event { + RpcEvent::Response(data_column, seen_timestamp) => { + let request = request.get_mut(); + match request.add_response(data_column) { + Ok(Some(data_columns)) => Ok((data_columns, seen_timestamp)), + Ok(None) => return None, + Err(e) => Err((e.into(), request.resolve())), + } + } + RpcEvent::StreamTermination => match request.remove().terminate() { + Ok(_) => return None, + // (err, false = not resolved) because terminate returns Ok() if resolved + Err(e) => Err((e.into(), false)), + }, + RpcEvent::RPCError(e) => Err((e.into(), request.remove().resolve())), + }; + + match resp { + Ok(resp) => Some(Ok(resp)), + // Track if this request has already returned some value downstream. Ensure that + // downstream code only receives a single Result per request. If the serving peer does + // multiple penalizable actions per request, downscore and return None. This allows to + // catch if a peer is returning more columns than requested or if the excess blobs are + // invalid. 
+ Err((e, resolved)) => { + if let RpcResponseError::VerifyError(_e) = &e { + // TODO(das): this is a bug, we should not penalise peer in this case. + // confirm this can be removed. + // self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); + } + if resolved { + None + } else { + Some(Err(e)) + } + } + } + } + + /// Insert a downloaded column into an active custody request. Then make progress on the + /// entire request. + /// + /// ### Returns + /// + /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. + /// - `None`: Request still active, requester should do no action + #[allow(clippy::type_complexity)] + pub fn on_custody_by_root_response( + &mut self, + id: CustodyId, + req_id: DataColumnsByRootRequestId, + peer_id: PeerId, + resp: RpcResponseResult>>>, + ) -> Option, PeerGroup), RpcResponseError>> { + // Note: need to remove the request to borrow self again below. Otherwise we can't + // do nested requests + let Some(mut request) = self.custody_by_root_requests.remove(&id.requester) else { + // TOOD(das): This log can happen if the request is error'ed early and dropped + debug!(self.log, "Custody column downloaded event for unknown request"; "id" => ?id); + return None; + }; + + let result = request + .on_data_column_downloaded(peer_id, req_id, resp, self) + .map_err(RpcResponseError::CustodyRequestError) + .transpose(); + + // Convert a result from internal format of `ActiveCustodyRequest` (error first to use ?) to + // an Option first to use in an `if let Some() { act on result }` block. 
+ if let Some(result) = result { + match result.as_ref() { + Ok((columns, peer_group)) => { + debug!(self.log, "Custody request success, removing"; "id" => ?id, "count" => columns.len(), "peers" => ?peer_group) + } + Err(e) => { + debug!(self.log, "Custody request failure, removing"; "id" => ?id, "error" => ?e) + } + } + + Some(result) + } else { + self.custody_by_root_requests.insert(id.requester, request); + None + } + } + pub fn send_block_for_processing( &self, id: Id, @@ -776,6 +1163,32 @@ impl SyncNetworkContext { }) } + pub fn send_custody_columns_for_processing( + &self, + _id: Id, + block_root: Hash256, + custody_columns: DataColumnSidecarList, + duration: Duration, + process_type: BlockProcessType, + ) -> Result<(), SendErrorProcessor> { + let beacon_processor = self + .beacon_processor_if_enabled() + .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + + debug!(self.log, "Sending custody columns for processing"; "block" => ?block_root, "process_type" => ?process_type); + + beacon_processor + .send_rpc_custody_columns(block_root, custody_columns, duration, process_type) + .map_err(|e| { + error!( + self.log, + "Failed to send sync custody columns to processor"; + "error" => ?e + ); + SendErrorProcessor::SendError + }) + } + pub(crate) fn register_metrics(&self) { metrics::set_gauge_vec( &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, @@ -790,7 +1203,7 @@ impl SyncNetworkContext { metrics::set_gauge_vec( &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, &["range_blocks"], - self.range_blocks_and_blobs_requests.len() as i64, + self.range_block_components_requests.len() as i64, ); } } @@ -803,7 +1216,7 @@ fn to_fixed_blob_sidecar_list( let index = blob.index as usize; *fixed_list .get_mut(index) - .ok_or(LookupVerifyError::UnrequestedBlobIndex(index as u64))? = Some(blob) + .ok_or(LookupVerifyError::UnrequestedIndex(index as u64))? 
= Some(blob) } Ok(fixed_list) } diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs new file mode 100644 index 00000000000..dfe409f043d --- /dev/null +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -0,0 +1,415 @@ +use crate::sync::network_context::{ + DataColumnsByRootRequestId, DataColumnsByRootSingleBlockRequest, +}; + +use beacon_chain::BeaconChainTypes; +use fnv::FnvHashMap; +use lighthouse_network::service::api_types::{CustodyId, DataColumnsByRootRequester}; +use lighthouse_network::PeerId; +use lru_cache::LRUTimeCache; +use rand::Rng; +use slog::{debug, warn}; +use std::time::Duration; +use std::{collections::HashMap, marker::PhantomData, sync::Arc}; +use types::EthSpec; +use types::{data_column_sidecar::ColumnIndex, DataColumnSidecar, Hash256}; + +use super::{LookupRequestResult, PeerGroup, RpcResponseResult, SyncNetworkContext}; + +const FAILED_PEERS_CACHE_EXPIRY_SECONDS: u64 = 5; + +type DataColumnSidecarList = Vec>>; + +pub struct ActiveCustodyRequest { + block_root: Hash256, + custody_id: CustodyId, + /// List of column indices this request needs to download to complete successfully + column_requests: FnvHashMap>, + /// Active requests for 1 or more columns each + active_batch_columns_requests: + FnvHashMap, + /// Peers that have recently failed to successfully respond to a columns by root request. + /// Having a LRUTimeCache allows this request to not have to track disconnecting peers. + failed_peers: LRUTimeCache, + /// Logger for the `SyncNetworkContext`. + pub log: slog::Logger, + _phantom: PhantomData, +} + +#[derive(Debug, Eq, PartialEq)] +pub enum Error { + SendFailed(&'static str), + TooManyFailures, + BadState(String), + NoPeers(ColumnIndex), + /// Received a download result for a different request id than the in-flight request. + /// There should only exist a single request at a time. 
Having multiple requests is a bug and + /// can result in undefined state, so it's treated as a hard error and the lookup is dropped. + UnexpectedRequestId { + expected_req_id: DataColumnsByRootRequestId, + req_id: DataColumnsByRootRequestId, + }, +} + +struct ActiveBatchColumnsRequest { + peer_id: PeerId, + indices: Vec, +} + +type CustodyRequestResult = Result, PeerGroup)>, Error>; + +impl ActiveCustodyRequest { + pub(crate) fn new( + block_root: Hash256, + custody_id: CustodyId, + column_indices: &[ColumnIndex], + log: slog::Logger, + ) -> Self { + Self { + block_root, + custody_id, + column_requests: HashMap::from_iter( + column_indices + .iter() + .map(|index| (*index, ColumnRequest::new())), + ), + active_batch_columns_requests: <_>::default(), + failed_peers: LRUTimeCache::new(Duration::from_secs(FAILED_PEERS_CACHE_EXPIRY_SECONDS)), + log, + _phantom: PhantomData, + } + } + + /// Insert a downloaded column into an active custody request. Then make progress on the + /// entire request. 
+ /// + /// ### Returns + /// + /// - `Err`: Custody request has failed and will be dropped + /// - `Ok(Some)`: Custody request has successfully completed and will be dropped + /// - `Ok(None)`: Custody request still active + pub(crate) fn on_data_column_downloaded( + &mut self, + peer_id: PeerId, + req_id: DataColumnsByRootRequestId, + resp: RpcResponseResult>, + cx: &mut SyncNetworkContext, + ) -> CustodyRequestResult { + // TODO(das): Should downscore peers for verify errors here + + let Some(batch_request) = self.active_batch_columns_requests.get_mut(&req_id) else { + warn!(self.log, + "Received custody column response for unrequested index"; + "id" => ?self.custody_id, + "block_root" => ?self.block_root, + "req_id" => %req_id, + ); + return Ok(None); + }; + + match resp { + Ok((data_columns, _seen_timestamp)) => { + debug!(self.log, + "Custody column download success"; + "id" => ?self.custody_id, + "block_root" => ?self.block_root, + "req_id" => %req_id, + "peer" => %peer_id, + "count" => data_columns.len() + ); + + // Map columns by index as an optimization to not loop the returned list on each + // requested index. The worse case is 128 loops over a 128 item vec + mutation to + // drop the consumed columns. + let mut data_columns = HashMap::::from_iter( + data_columns.into_iter().map(|d| (d.index, d)), + ); + // Accumulate columns that the peer does not have to issue a single log per request + let mut missing_column_indexes = vec![]; + + for column_index in &batch_request.indices { + let column_request = self + .column_requests + .get_mut(column_index) + .ok_or(Error::BadState("unknown column_index".to_owned()))?; + + if let Some(data_column) = data_columns.remove(column_index) { + column_request.on_download_success(req_id, peer_id, data_column)?; + } else { + // Peer does not have the requested data. + // TODO(das) do not consider this case a success. We know for sure the block has + // data. 
However we allow the peer to return empty as we can't attribute fault. + // TODO(das): Should track which columns are missing and eventually give up + // TODO(das): If the peer is in the lookup peer set it claims to have imported + // the block AND its custody columns. So in this case we can downscore + column_request.on_download_error(req_id)?; + missing_column_indexes.push(column_index); + } + } + + // Note: no need to check data_columns is empty, SyncNetworkContext ensures that + // successful responses only contain requested data. + + if !missing_column_indexes.is_empty() { + // Note: Batch logging that columns are missing to not spam logger + debug!(self.log, + "Custody column peer claims to not have some data"; + "id" => ?self.custody_id, + "block_root" => ?self.block_root, + "req_id" => %req_id, + "peer" => %peer_id, + // TODO(das): this property can become very noisy, being the full range 0..128 + "missing_column_indexes" => ?missing_column_indexes + ); + + self.failed_peers.insert(peer_id); + } + } + Err(err) => { + debug!(self.log, + "Custody column download error"; + "id" => ?self.custody_id, + "block_root" => ?self.block_root, + "req_id" => %req_id, + "peer" => %peer_id, + "error" => ?err + ); + + // TODO(das): Should mark peer as failed and try from another peer + for column_index in &batch_request.indices { + self.column_requests + .get_mut(column_index) + .ok_or(Error::BadState("unknown column_index".to_owned()))? + .on_download_error_and_mark_failure(req_id)?; + } + + self.failed_peers.insert(peer_id); + } + }; + + self.continue_requests(cx) + } + + pub(crate) fn continue_requests( + &mut self, + cx: &mut SyncNetworkContext, + ) -> CustodyRequestResult { + if self.column_requests.values().all(|r| r.is_downloaded()) { + // All requests have completed successfully. 
+ let mut peers = HashMap::>::new(); + let columns = std::mem::take(&mut self.column_requests) + .into_values() + .map(|request| { + let (peer, data_column) = request.complete()?; + peers + .entry(peer) + .or_default() + .push(data_column.index as usize); + Ok(data_column) + }) + .collect::, _>>()?; + + let peer_group = PeerGroup::from_set(peers); + return Ok(Some((columns, peer_group))); + } + + let mut columns_to_request_by_peer = HashMap::>::new(); + + // Need to: + // - track how many active requests a peer has for load balancing + // - which peers have failures to attempt others + // - which peer returned what to have PeerGroup attributability + + for (column_index, request) in self.column_requests.iter_mut() { + if request.is_awaiting_download() { + if request.download_failures > MAX_CUSTODY_COLUMN_DOWNLOAD_ATTEMPTS { + return Err(Error::TooManyFailures); + } + + // TODO: When there is a fork and only a subset of your peers know about a block, we should only + // query the peers on that fork. Should this case be handled? How to handle it?
+ let custodial_peers = cx.get_custodial_peers(*column_index); + + // TODO(das): cache this computation in a OneCell or similar to prevent having to + // run it every loop + let mut active_requests_by_peer = HashMap::::new(); + for batch_request in self.active_batch_columns_requests.values() { + *active_requests_by_peer + .entry(batch_request.peer_id) + .or_default() += 1; + } + + let mut priorized_peers = custodial_peers + .iter() + .map(|peer| { + ( + // De-prioritize peers that have failed to successfully respond to + // requests recently + self.failed_peers.contains(peer), + // Prefer peers with less requests to load balance across peers + active_requests_by_peer.get(peer).copied().unwrap_or(0), + // Final random factor to give all peers a shot in each retry + rand::thread_rng().gen::(), + *peer, + ) + }) + .collect::>(); + priorized_peers.sort_unstable(); + + let Some((_, _, _, peer_id)) = priorized_peers.first() else { + // Do not tolerate not having custody peers, hard error. + // TODO(das): we might implement some grace period. The request will pause for X + // seconds expecting the peer manager to find peers before failing the request. 
+ return Err(Error::NoPeers(*column_index)); + }; + + columns_to_request_by_peer + .entry(*peer_id) + .or_default() + .push(*column_index); + } + } + + for (peer_id, indices) in columns_to_request_by_peer.into_iter() { + let request_result = cx + .data_column_lookup_request( + DataColumnsByRootRequester::Custody(self.custody_id), + peer_id, + DataColumnsByRootSingleBlockRequest { + block_root: self.block_root, + indices: indices.clone(), + }, + ) + .map_err(Error::SendFailed)?; + + match request_result { + LookupRequestResult::RequestSent(req_id) => { + for column_index in &indices { + let column_request = self + .column_requests + .get_mut(column_index) + .ok_or(Error::BadState("unknown column_index".to_owned()))?; + + column_request.on_download_start(req_id)?; + } + + self.active_batch_columns_requests + .insert(req_id, ActiveBatchColumnsRequest { indices, peer_id }); + } + LookupRequestResult::NoRequestNeeded(_) => unreachable!(), + LookupRequestResult::Pending(_) => unreachable!(), + } + } + + Ok(None) + } +} + +/// TODO(das): this attempt count is nested into the existing lookup request count. +const MAX_CUSTODY_COLUMN_DOWNLOAD_ATTEMPTS: usize = 3; + +struct ColumnRequest { + status: Status, + download_failures: usize, +} + +#[derive(Debug, Clone)] +enum Status { + NotStarted, + Downloading(DataColumnsByRootRequestId), + Downloaded(PeerId, Arc>), +} + +impl ColumnRequest { + fn new() -> Self { + Self { + status: Status::NotStarted, + download_failures: 0, + } + } + + fn is_awaiting_download(&self) -> bool { + match self.status { + Status::NotStarted => true, + Status::Downloading { .. } | Status::Downloaded { .. } => false, + } + } + + fn is_downloaded(&self) -> bool { + match self.status { + Status::NotStarted | Status::Downloading { .. } => false, + Status::Downloaded { .. 
} => true, + } + } + + fn on_download_start(&mut self, req_id: DataColumnsByRootRequestId) -> Result<(), Error> { + match &self.status { + Status::NotStarted => { + self.status = Status::Downloading(req_id); + Ok(()) + } + other => Err(Error::BadState(format!( + "bad state on_download_start expected NotStarted got {other:?}" + ))), + } + } + + fn on_download_error(&mut self, req_id: DataColumnsByRootRequestId) -> Result<(), Error> { + match &self.status { + Status::Downloading(expected_req_id) => { + if req_id != *expected_req_id { + return Err(Error::UnexpectedRequestId { + expected_req_id: *expected_req_id, + req_id, + }); + } + self.status = Status::NotStarted; + Ok(()) + } + other => Err(Error::BadState(format!( + "bad state on_download_error expected Downloading got {other:?}" + ))), + } + } + + fn on_download_error_and_mark_failure( + &mut self, + req_id: DataColumnsByRootRequestId, + ) -> Result<(), Error> { + // TODO(das): Should track which peers don't have data + self.download_failures += 1; + self.on_download_error(req_id) + } + + fn on_download_success( + &mut self, + req_id: DataColumnsByRootRequestId, + peer_id: PeerId, + data_column: Arc>, + ) -> Result<(), Error> { + match &self.status { + Status::Downloading(expected_req_id) => { + if req_id != *expected_req_id { + return Err(Error::UnexpectedRequestId { + expected_req_id: *expected_req_id, + req_id, + }); + } + self.status = Status::Downloaded(peer_id, data_column); + Ok(()) + } + other => Err(Error::BadState(format!( + "bad state on_download_success expected Downloading got {other:?}" + ))), + } + } + + fn complete(self) -> Result<(PeerId, Arc>), Error> { + match self.status { + Status::Downloaded(peer_id, data_column) => Ok((peer_id, data_column)), + other => Err(Error::BadState(format!( + "bad state complete expected Downloaded got {other:?}" + ))), + } + } +} diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 
8387e9b0e1a..94eecff42d3 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -9,13 +9,19 @@ use types::{ blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, }; +pub use data_columns_by_root::{ + ActiveDataColumnsByRootRequest, DataColumnsByRootSingleBlockRequest, +}; + +mod data_columns_by_root; + #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { NoResponseReturned, NotEnoughResponsesReturned { expected: usize, actual: usize }, TooManyResponses, UnrequestedBlockRoot(Hash256), - UnrequestedBlobIndex(u64), + UnrequestedIndex(u64), InvalidInclusionProof, DuplicateData, } @@ -131,7 +137,7 @@ impl ActiveBlobsByRootRequest { return Err(LookupVerifyError::InvalidInclusionProof); } if !self.request.indices.contains(&blob.index) { - return Err(LookupVerifyError::UnrequestedBlobIndex(blob.index)); + return Err(LookupVerifyError::UnrequestedIndex(blob.index)); } if self.blobs.iter().any(|b| b.index == blob.index) { return Err(LookupVerifyError::DuplicateData); diff --git a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs new file mode 100644 index 00000000000..a42ae7ca41f --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs @@ -0,0 +1,103 @@ +use lighthouse_network::service::api_types::DataColumnsByRootRequester; +use lighthouse_network::{rpc::methods::DataColumnsByRootRequest, PeerId}; +use std::sync::Arc; +use types::{ChainSpec, DataColumnIdentifier, DataColumnSidecar, EthSpec, Hash256}; + +use super::LookupVerifyError; + +#[derive(Debug, Clone)] +pub struct DataColumnsByRootSingleBlockRequest { + pub block_root: Hash256, + pub indices: Vec, +} + +impl DataColumnsByRootSingleBlockRequest { + pub fn into_request(self, spec: &ChainSpec) -> DataColumnsByRootRequest { + 
DataColumnsByRootRequest::new( + self.indices + .into_iter() + .map(|index| DataColumnIdentifier { + block_root: self.block_root, + index, + }) + .collect(), + spec, + ) + } +} + +pub struct ActiveDataColumnsByRootRequest { + request: DataColumnsByRootSingleBlockRequest, + items: Vec>>, + resolved: bool, + pub(crate) peer_id: PeerId, + pub(crate) requester: DataColumnsByRootRequester, +} + +impl ActiveDataColumnsByRootRequest { + pub fn new( + request: DataColumnsByRootSingleBlockRequest, + peer_id: PeerId, + requester: DataColumnsByRootRequester, + ) -> Self { + Self { + request, + items: vec![], + resolved: false, + peer_id, + requester, + } + } + + /// Appends a chunk to this multi-item request. If all expected chunks are received, this + /// method returns `Some`, resolving the request before the stream terminator. + /// The active request SHOULD be dropped after `add_response` returns an error + pub fn add_response( + &mut self, + data_column: Arc>, + ) -> Result>>>, LookupVerifyError> { + if self.resolved { + return Err(LookupVerifyError::TooManyResponses); + } + + let block_root = data_column.block_root(); + if self.request.block_root != block_root { + return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); + } + if !data_column.verify_inclusion_proof() { + return Err(LookupVerifyError::InvalidInclusionProof); + } + if !self.request.indices.contains(&data_column.index) { + return Err(LookupVerifyError::UnrequestedIndex(data_column.index)); + } + if self.items.iter().any(|d| d.index == data_column.index) { + return Err(LookupVerifyError::DuplicateData); + } + + self.items.push(data_column); + if self.items.len() >= self.request.indices.len() { + // All expected chunks received, return result early + self.resolved = true; + Ok(Some(std::mem::take(&mut self.items))) + } else { + Ok(None) + } + } + + pub fn terminate(self) -> Result<(), LookupVerifyError> { + if self.resolved { + Ok(()) + } else { + Err(LookupVerifyError::NotEnoughResponsesReturned { + 
expected: self.request.indices.len(), + actual: self.items.len(), + }) + } + } + + /// Mark request as resolved (= has returned something downstream) while marking this status as + /// true for future calls. + pub fn resolve(&mut self) -> bool { + std::mem::replace(&mut self.resolved, true) + } +} diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 7f9629740bb..53fb55b14da 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -20,6 +20,7 @@ const MAX_BATCH_PROCESSING_ATTEMPTS: u8 = 3; #[derive(Debug, Copy, Clone, Display)] #[strum(serialize_all = "snake_case")] pub enum ByRangeRequestType { + BlocksAndColumns, BlocksAndBlobs, Blocks, } @@ -199,9 +200,9 @@ impl BatchInfo { } /// Verifies if an incoming block belongs to this batch. - pub fn is_expecting_block(&self, peer_id: &PeerId, request_id: &Id) -> bool { - if let BatchState::Downloading(expected_peer, expected_id) = &self.state { - return peer_id == expected_peer && expected_id == request_id; + pub fn is_expecting_block(&self, request_id: &Id) -> bool { + if let BatchState::Downloading(_, expected_id) = &self.state { + return expected_id == request_id; } false } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 556b4194dd8..1756fb513da 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,15 +1,18 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; use super::RangeSyncType; use crate::metrics; +use crate::metrics::PEERS_PER_COLUMN_SUBNET; use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::network_context::RangeRequestId; use crate::sync::{network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult}; use beacon_chain::block_verification_types::RpcBlock; use 
beacon_chain::BeaconChainTypes; use fnv::FnvHashMap; +use lighthouse_metrics::set_int_gauge; use lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerAction, PeerId}; -use rand::{seq::SliceRandom, Rng}; +use rand::seq::SliceRandom; +use rand::Rng; use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::hash::{Hash, Hasher}; @@ -111,9 +114,6 @@ pub struct SyncingChain { /// The current processing batch, if any. current_processing_batch: Option, - /// Batches validated by this chain. - validated_batches: u64, - /// The chain's log. log: slog::Logger, } @@ -161,7 +161,6 @@ impl SyncingChain { attempted_optimistic_starts: HashSet::default(), state: ChainSyncingState::Stopped, current_processing_batch: None, - validated_batches: 0, log: log.new(o!("chain" => id)), } } @@ -182,8 +181,10 @@ impl SyncingChain { } /// Progress in epochs made by the chain - pub fn validated_epochs(&self) -> u64 { - self.validated_batches * EPOCHS_PER_BATCH + pub fn processed_epochs(&self) -> u64 { + self.processing_target + .saturating_sub(self.start_epoch) + .into() } /// Returns the total count of pending blocks in all the batches of this chain @@ -258,7 +259,9 @@ impl SyncingChain { // sending an error /timeout) if the peer is removed from the chain for other // reasons. Check that this block belongs to the expected peer, and that the // request_id matches - if !batch.is_expecting_block(peer_id, &request_id) { + // TODO(das): removed peer_id matching as the node may request a different peer for data + // columns. + if !batch.is_expecting_block(&request_id) { return Ok(KeepChain); } batch @@ -441,6 +444,11 @@ impl SyncingChain { self.request_batches(network)?; } } + } else if !self.good_peers_on_custody_subnets(self.processing_target, network) { + // This is to handle the case where no batch was sent for the current processing + // target when there is no custody peers available. 
This is a valid state and should not + // return an error. + return Ok(KeepChain); } else { return Err(RemoveChain::WrongChainState(format!( "Batch not found for current processing target {}", @@ -655,7 +663,6 @@ impl SyncingChain { let removed_batches = std::mem::replace(&mut self.batches, remaining_batches); for (id, batch) in removed_batches.into_iter() { - self.validated_batches = self.validated_batches.saturating_add(1); // only for batches awaiting validation can we be sure the last attempt is // right, and thus, that any different attempt is wrong match batch.state() { @@ -865,7 +872,9 @@ impl SyncingChain { // A batch could be retried without the peer failing the request (disconnecting/ // sending an error /timeout) if the peer is removed from the chain for other // reasons. Check that this block belongs to the expected peer - if !batch.is_expecting_block(peer_id, &request_id) { + // TODO(das): removed peer_id matching as the node may request a different peer for data + // columns. + if !batch.is_expecting_block(&request_id) { debug!( self.log, "Batch not expecting block"; @@ -956,7 +965,7 @@ impl SyncingChain { let batch_state = self.visualize_batch_state(); if let Some(batch) = self.batches.get_mut(&batch_id) { let (request, batch_type) = batch.to_blocks_by_range_request(); - match network.blocks_and_blobs_by_range_request( + match network.block_components_by_range_request( peer, batch_type, request, @@ -1066,6 +1075,14 @@ impl SyncingChain { // check if we have the batch for our optimistic start. If not, request it first. // We wait for this batch before requesting any other batches. 
if let Some(epoch) = self.optimistic_start { + if !self.good_peers_on_custody_subnets(epoch, network) { + debug!( + self.log, + "Waiting for peers to be available on custody column subnets" + ); + return Ok(KeepChain); + } + if let Entry::Vacant(entry) = self.batches.entry(epoch) { if let Some(peer) = idle_peers.pop() { let batch_type = network.batch_type(epoch); @@ -1090,6 +1107,36 @@ impl SyncingChain { Ok(KeepChain) } + /// Checks all custody column subnets for peers. Returns `true` if there is at least one peer in + /// every custody column subnet. + fn good_peers_on_custody_subnets(&self, epoch: Epoch, network: &SyncNetworkContext) -> bool { + if network.chain.spec.is_peer_das_enabled_for_epoch(epoch) { + // Require peers on all custody column subnets before sending batches + let peers_on_all_custody_subnets = + network + .network_globals() + .custody_subnets() + .all(|subnet_id| { + let peer_count = network + .network_globals() + .peers + .read() + .good_custody_subnet_peer(subnet_id) + .count(); + + set_int_gauge( + &PEERS_PER_COLUMN_SUBNET, + &[&subnet_id.to_string()], + peer_count as i64, + ); + peer_count > 0 + }); + peers_on_all_custody_subnets + } else { + true + } + } + /// Creates the next required batch from the chain. If there are no more batches required, /// `false` is returned. fn include_next_batch(&mut self, network: &mut SyncNetworkContext) -> Option { @@ -1120,6 +1167,18 @@ impl SyncingChain { return None; } + // don't send batch requests until we have peers on custody subnets + // TODO(das): this is a workaround to avoid sending out excessive block requests because + // block and data column requests are currently coupled. This can be removed once we find a + // way to decouple the requests and do retries individually, see issue #6258. 
+ if !self.good_peers_on_custody_subnets(self.to_be_downloaded, network) { + debug!( + self.log, + "Waiting for peers to be available on custody column subnets" + ); + return None; + } + let batch_id = self.to_be_downloaded; // this batch could have been included already being an optimistic batch match self.batches.entry(batch_id) { @@ -1212,7 +1271,6 @@ impl slog::KV for SyncingChain { )?; serializer.emit_usize("batches", self.batches.len())?; serializer.emit_usize("peers", self.peers.len())?; - serializer.emit_u64("validated_batches", self.validated_batches)?; serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; slog::Result::Ok(()) } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 3621a6605af..1217fbf8fed 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -24,7 +24,7 @@ use types::{Epoch, Hash256, Slot}; const PARALLEL_HEAD_CHAINS: usize = 2; /// Minimum work we require a finalized chain to do before picking a chain with more peers. -const MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS: u64 = 10; +const MIN_FINALIZED_CHAIN_PROCESSED_EPOCHS: u64 = 10; /// The state of the long range/batch sync. 
#[derive(Clone)] @@ -273,8 +273,8 @@ impl ChainCollection { // chains are different, check that they don't have the same number of peers if let Some(syncing_chain) = self.finalized_chains.get_mut(&syncing_id) { if max_peers > syncing_chain.available_peers() - && syncing_chain.validated_epochs() - > MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS + && syncing_chain.processed_epochs() + > MIN_FINALIZED_CHAIN_PROCESSED_EPOCHS { syncing_chain.stop_syncing(); old_id = Some(Some(syncing_id)); diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 334c58090e2..beb04fac28b 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -406,7 +406,7 @@ mod tests { use std::collections::HashSet; use store::MemoryStore; use tokio::sync::mpsc; - use types::{ForkName, MinimalEthSpec as E}; + use types::{FixedBytesExtended, ForkName, MinimalEthSpec as E}; #[derive(Debug)] struct FakeStorage { @@ -689,7 +689,11 @@ mod tests { log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); - let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); + let globals = Arc::new(NetworkGlobals::new_test_globals( + Vec::new(), + &log, + chain.spec.clone(), + )); let (network_beacon_processor, beacon_processor_rx) = NetworkBeaconProcessor::null_for_testing( globals.clone(), diff --git a/beacon_node/network/src/sync/sampling.rs b/beacon_node/network/src/sync/sampling.rs new file mode 100644 index 00000000000..524fe86bee9 --- /dev/null +++ b/beacon_node/network/src/sync/sampling.rs @@ -0,0 +1,628 @@ +use self::request::ActiveColumnSampleRequest; +use super::network_context::{ + DataColumnsByRootSingleBlockRequest, RpcResponseError, SyncNetworkContext, +}; +use crate::metrics; +use beacon_chain::BeaconChainTypes; +use fnv::FnvHashMap; +use lighthouse_network::service::api_types::{ + DataColumnsByRootRequester, SamplingId, 
SamplingRequestId, SamplingRequester, +}; +use lighthouse_network::{PeerAction, PeerId}; +use rand::{seq::SliceRandom, thread_rng}; +use slog::{debug, error, warn}; +use std::{ + collections::hash_map::Entry, collections::HashMap, marker::PhantomData, sync::Arc, + time::Duration, +}; +use types::{data_column_sidecar::ColumnIndex, ChainSpec, DataColumnSidecar, Hash256}; + +pub type SamplingResult = Result<(), SamplingError>; + +type DataColumnSidecarList = Vec>>; + +pub struct Sampling { + // TODO(das): stalled sampling request are never cleaned up + requests: HashMap>, + sampling_config: SamplingConfig, + log: slog::Logger, +} + +impl Sampling { + pub fn new(sampling_config: SamplingConfig, log: slog::Logger) -> Self { + Self { + requests: <_>::default(), + sampling_config, + log, + } + } + + #[cfg(test)] + pub fn active_sampling_requests(&self) -> Vec { + self.requests.values().map(|r| r.block_root).collect() + } + + /// Create a new sampling request for a known block + /// + /// ### Returns + /// + /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. + /// - `None`: Request still active, requester should do no action + pub fn on_new_sample_request( + &mut self, + block_root: Hash256, + cx: &mut SyncNetworkContext, + ) -> Option<(SamplingRequester, SamplingResult)> { + let id = SamplingRequester::ImportedBlock(block_root); + + let request = match self.requests.entry(id) { + Entry::Vacant(e) => e.insert(ActiveSamplingRequest::new( + block_root, + id, + &self.sampling_config, + self.log.clone(), + &cx.chain.spec, + )), + Entry::Occupied(_) => { + // Sampling is triggered from multiple sources, duplicate sampling requests are + // likely (gossip block + gossip data column) + // TODO(das): Should track failed sampling request for some time? Otherwise there's + // a risk of a loop with multiple triggers creating the request, then failing, + // and repeat. 
+ debug!(self.log, "Ignoring duplicate sampling request"; "id" => ?id); + return None; + } + }; + + debug!(self.log, "Created new sample request"; "id" => ?id); + + // TODO(das): If a node has very little peers, continue_sampling() will attempt to find enough + // to sample here, immediately failing the sampling request. There should be some grace + // period to allow the peer manager to find custody peers. + let result = request.continue_sampling(cx); + self.handle_sampling_result(result, &id) + } + + /// Insert a downloaded column into an active sampling request. Then make progress on the + /// entire request. + /// + /// ### Returns + /// + /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. + /// - `None`: Request still active, requester should do no action + pub fn on_sample_downloaded( + &mut self, + id: SamplingId, + peer_id: PeerId, + resp: Result<(DataColumnSidecarList, Duration), RpcResponseError>, + cx: &mut SyncNetworkContext, + ) -> Option<(SamplingRequester, SamplingResult)> { + let Some(request) = self.requests.get_mut(&id.id) else { + // TODO(das): This log can happen if the request is error'ed early and dropped + debug!(self.log, "Sample downloaded event for unknown request"; "id" => ?id); + return None; + }; + + let result = request.on_sample_downloaded(peer_id, id.sampling_request_id, resp, cx); + self.handle_sampling_result(result, &id.id) + } + + /// Insert a downloaded column into an active sampling request. Then make progress on the + /// entire request. + /// + /// ### Returns + /// + /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result.
+ /// - `None`: Request still active, requester should do no action + pub fn on_sample_verified( + &mut self, + id: SamplingId, + result: Result<(), String>, + cx: &mut SyncNetworkContext, + ) -> Option<(SamplingRequester, SamplingResult)> { + let Some(request) = self.requests.get_mut(&id.id) else { + // TODO(das): This log can happen if the request is error'ed early and dropped + debug!(self.log, "Sample verified event for unknown request"; "id" => ?id); + return None; + }; + + let result = request.on_sample_verified(id.sampling_request_id, result, cx); + self.handle_sampling_result(result, &id.id) + } + + /// Converts a result from the internal format of `ActiveSamplingRequest` (error first to use ? + /// conveniently), to an Option first format to use an `if let Some() { act on result }` pattern + /// in the sync manager. + fn handle_sampling_result( + &mut self, + result: Result, SamplingError>, + id: &SamplingRequester, + ) -> Option<(SamplingRequester, SamplingResult)> { + let result = result.transpose(); + if let Some(result) = result { + debug!(self.log, "Sampling request completed, removing"; "id" => ?id, "result" => ?result); + metrics::inc_counter_vec( + &metrics::SAMPLING_REQUEST_RESULT, + &[metrics::from_result(&result)], + ); + self.requests.remove(id); + Some((*id, result)) + } else { + None + } + } +} + +pub struct ActiveSamplingRequest { + block_root: Hash256, + requester_id: SamplingRequester, + column_requests: FnvHashMap, + /// Mapping of column indexes for a sampling request. + column_indexes_by_sampling_request: FnvHashMap>, + /// Sequential ID for sampling requests. + current_sampling_request_id: SamplingRequestId, + column_shuffle: Vec, + required_successes: Vec, + /// Logger for the `SyncNetworkContext`.
+ pub log: slog::Logger, + _phantom: PhantomData, +} + +#[derive(Debug)] +pub enum SamplingError { + SendFailed(#[allow(dead_code)] &'static str), + ProcessorUnavailable, + TooManyFailures, + BadState(#[allow(dead_code)] String), + ColumnIndexOutOfBounds, +} + +/// Required success index by current failures, with p_target=5.00E-06 +/// Ref: https://colab.research.google.com/drive/18uUgT2i-m3CbzQ5TyP9XFKqTn1DImUJD#scrollTo=E82ITcgB5ATh +const REQUIRED_SUCCESSES: [usize; 11] = [16, 20, 23, 26, 29, 32, 34, 37, 39, 42, 44]; + +#[derive(Debug, Clone)] +pub enum SamplingConfig { + Default, + #[allow(dead_code)] + Custom { + required_successes: Vec, + }, +} + +impl ActiveSamplingRequest { + fn new( + block_root: Hash256, + requester_id: SamplingRequester, + sampling_config: &SamplingConfig, + log: slog::Logger, + spec: &ChainSpec, + ) -> Self { + // Select ahead of time the full list of to-sample columns + let mut column_shuffle = + (0..spec.number_of_columns as ColumnIndex).collect::>(); + let mut rng = thread_rng(); + column_shuffle.shuffle(&mut rng); + + Self { + block_root, + requester_id, + column_requests: <_>::default(), + column_indexes_by_sampling_request: <_>::default(), + current_sampling_request_id: SamplingRequestId(0), + column_shuffle, + required_successes: match sampling_config { + SamplingConfig::Default => REQUIRED_SUCCESSES.to_vec(), + SamplingConfig::Custom { required_successes } => required_successes.clone(), + }, + log, + _phantom: PhantomData, + } + } + + /// Insert a downloaded column into an active sampling request. Then make progress on the + /// entire request. 
+ /// + /// ### Returns + /// + /// - `Err`: Sampling request has failed and will be dropped + /// - `Ok(Some)`: Sampling request has successfully completed and will be dropped + /// - `Ok(None)`: Sampling request still active + pub(crate) fn on_sample_downloaded( + &mut self, + _peer_id: PeerId, + sampling_request_id: SamplingRequestId, + resp: Result<(DataColumnSidecarList, Duration), RpcResponseError>, + cx: &mut SyncNetworkContext, + ) -> Result, SamplingError> { + // Select columns to sample + // Create individual request per column + // Progress requests + // If request fails retry or expand search + // If all good return + let Some(column_indexes) = self + .column_indexes_by_sampling_request + .get(&sampling_request_id) + else { + error!(self.log, "Column indexes for the sampling request ID not found"; "sampling_request_id" => ?sampling_request_id); + return Ok(None); + }; + + match resp { + Ok((mut resp_data_columns, seen_timestamp)) => { + debug!(self.log, "Sample download success"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "count" => resp_data_columns.len()); + metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::SUCCESS]); + + // Filter the data received in the response using the requested column indexes. + let mut data_columns = vec![]; + for column_index in column_indexes { + let Some(request) = self.column_requests.get_mut(column_index) else { + warn!( + self.log, + "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index + ); + continue; + }; + + let Some(data_pos) = resp_data_columns + .iter() + .position(|data| &data.index == column_index) + else { + // Peer does not have the requested data. + // TODO(das) what to do? 
+ debug!(self.log, "Sampling peer claims to not have the data"; "block_root" => %self.block_root, "column_index" => column_index); + request.on_sampling_error()?; + continue; + }; + + data_columns.push(resp_data_columns.swap_remove(data_pos)); + } + + if !resp_data_columns.is_empty() { + let resp_column_indexes = resp_data_columns + .iter() + .map(|d| d.index) + .collect::>(); + debug!( + self.log, + "Received data that was not requested"; "block_root" => %self.block_root, "column_indexes" => ?resp_column_indexes + ); + } + + // Handle the downloaded data columns. + if data_columns.is_empty() { + debug!(self.log,"Received empty response"; "block_root" => %self.block_root); + self.column_indexes_by_sampling_request + .remove(&sampling_request_id); + } else { + // Overwrite `column_indexes` with the column indexes received in the response. + let column_indexes = data_columns.iter().map(|d| d.index).collect::>(); + self.column_indexes_by_sampling_request + .insert(sampling_request_id, column_indexes.clone()); + // Peer has data column, send to verify + let Some(beacon_processor) = cx.beacon_processor_if_enabled() else { + // If processor is not available, error the entire sampling + debug!(self.log, "Dropping sampling"; "block" => %self.block_root, "reason" => "beacon processor unavailable"); + return Err(SamplingError::ProcessorUnavailable); + }; + debug!(self.log, "Sending data_column for verification"; "block" => ?self.block_root, "column_indexes" => ?column_indexes); + if let Err(e) = beacon_processor.send_rpc_validate_data_columns( + self.block_root, + data_columns, + seen_timestamp, + SamplingId { + id: self.requester_id, + sampling_request_id, + }, + ) { + // TODO(das): Beacon processor is overloaded, what should we do? 
+ error!(self.log, "Dropping sampling"; "block" => %self.block_root, "reason" => e.to_string()); + return Err(SamplingError::SendFailed("beacon processor send failure")); + } + } + } + Err(err) => { + debug!(self.log, "Sample download error"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "error" => ?err); + metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::FAILURE]); + + // Error downloading, maybe penalize peer and retry again. + // TODO(das) with different peer or different peer? + for column_index in column_indexes { + let Some(request) = self.column_requests.get_mut(column_index) else { + warn!( + self.log, + "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index + ); + continue; + }; + request.on_sampling_error()?; + } + } + }; + + self.continue_sampling(cx) + } + + /// Insert a column verification result into an active sampling request. Then make progress + /// on the entire request. + /// + /// ### Returns + /// + /// - `Err`: Sampling request has failed and will be dropped + /// - `Ok(Some)`: Sampling request has successfully completed and will be dropped + /// - `Ok(None)`: Sampling request still active + pub(crate) fn on_sample_verified( + &mut self, + sampling_request_id: SamplingRequestId, + result: Result<(), String>, + cx: &mut SyncNetworkContext, + ) -> Result, SamplingError> { + let Some(column_indexes) = self + .column_indexes_by_sampling_request + .get(&sampling_request_id) + else { + error!(self.log, "Column indexes for the sampling request ID not found"; "sampling_request_id" => ?sampling_request_id); + return Ok(None); + }; + + match result { + Ok(_) => { + debug!(self.log, "Sample verification success"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes); + metrics::inc_counter_vec(&metrics::SAMPLE_VERIFY_RESULT, &[metrics::SUCCESS]); + + // Valid, continue_sampling will maybe consider sampling succees + for column_index in 
column_indexes { + let Some(request) = self.column_requests.get_mut(column_index) else { + warn!( + self.log, + "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index + ); + continue; + }; + request.on_sampling_success()?; + } + } + Err(err) => { + debug!(self.log, "Sample verification failure"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "reason" => ?err); + metrics::inc_counter_vec(&metrics::SAMPLE_VERIFY_RESULT, &[metrics::FAILURE]); + + // TODO(das): Peer sent invalid data, penalize and try again from different peer + // TODO(das): Count individual failures + for column_index in column_indexes { + let Some(request) = self.column_requests.get_mut(column_index) else { + warn!( + self.log, + "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index + ); + continue; + }; + let peer_id = request.on_sampling_error()?; + cx.report_peer( + peer_id, + PeerAction::LowToleranceError, + "invalid data column", + ); + } + } + } + + self.continue_sampling(cx) + } + + pub(crate) fn continue_sampling( + &mut self, + cx: &mut SyncNetworkContext, + ) -> Result, SamplingError> { + // First check if sampling is completed, by computing `required_successes` + let mut successes = 0; + let mut failures = 0; + let mut ongoings = 0; + + for request in self.column_requests.values() { + if request.is_completed() { + successes += 1; + } + if request.is_failed() { + failures += 1; + } + if request.is_ongoing() { + ongoings += 1; + } + } + + // If there are too many failures, consider the sampling failed + let Some(required_successes) = self.required_successes.get(failures) else { + return Err(SamplingError::TooManyFailures); + }; + + // If there are enough successes, consider the sampling complete + if successes >= *required_successes { + return Ok(Some(())); + } + + // First, attempt to progress sampling by requesting more columns, so that request failures + // are 
accounted for below. + + // Group the requested column indexes by the destination peer to batch sampling requests. + let mut column_indexes_to_request = FnvHashMap::default(); + for idx in 0..*required_successes { + // Re-request columns. Note: out of bounds error should never happen, inputs are hardcoded + let column_index = *self + .column_shuffle + .get(idx) + .ok_or(SamplingError::ColumnIndexOutOfBounds)?; + let request = self + .column_requests + .entry(column_index) + .or_insert(ActiveColumnSampleRequest::new(column_index)); + + if request.is_ready_to_request() { + if let Some(peer_id) = request.choose_peer(cx) { + let indexes = column_indexes_to_request.entry(peer_id).or_insert(vec![]); + indexes.push(column_index); + } + } + } + + // Send requests. + let mut sent_request = false; + for (peer_id, column_indexes) in column_indexes_to_request { + cx.data_column_lookup_request( + DataColumnsByRootRequester::Sampling(SamplingId { + id: self.requester_id, + sampling_request_id: self.current_sampling_request_id, + }), + peer_id, + DataColumnsByRootSingleBlockRequest { + block_root: self.block_root, + indices: column_indexes.clone(), + }, + ) + .map_err(SamplingError::SendFailed)?; + self.column_indexes_by_sampling_request + .insert(self.current_sampling_request_id, column_indexes.clone()); + self.current_sampling_request_id.0 += 1; + sent_request = true; + + // Update request status. + for column_index in column_indexes { + let Some(request) = self.column_requests.get_mut(&column_index) else { + continue; + }; + request.on_start_sampling(peer_id)?; + } + } + + // Make sure that sampling doesn't stall, by ensuring that this sampling request will + // receive a new event of some type. If there are no ongoing requests, and no new + // request was sent, loop to increase the required_successes until the sampling fails if + // there are no peers. 
+ if ongoings == 0 && !sent_request { + debug!(self.log, "Sampling request stalled"; "block_root" => %self.block_root); + } + + Ok(None) + } +} + +mod request { + use super::SamplingError; + use crate::sync::network_context::SyncNetworkContext; + use beacon_chain::BeaconChainTypes; + use lighthouse_network::PeerId; + use rand::seq::SliceRandom; + use rand::thread_rng; + use std::collections::HashSet; + use types::data_column_sidecar::ColumnIndex; + + pub(crate) struct ActiveColumnSampleRequest { + column_index: ColumnIndex, + status: Status, + // TODO(das): Should downscore peers that claim to not have the sample? + peers_dont_have: HashSet, + } + + #[derive(Debug, Clone)] + enum Status { + NoPeers, + NotStarted, + Sampling(PeerId), + Verified, + } + + impl ActiveColumnSampleRequest { + pub(crate) fn new(column_index: ColumnIndex) -> Self { + Self { + column_index, + status: Status::NotStarted, + peers_dont_have: <_>::default(), + } + } + + pub(crate) fn is_completed(&self) -> bool { + match self.status { + Status::NoPeers | Status::NotStarted | Status::Sampling(_) => false, + Status::Verified => true, + } + } + + pub(crate) fn is_failed(&self) -> bool { + match self.status { + Status::NotStarted | Status::Sampling(_) | Status::Verified => false, + Status::NoPeers => true, + } + } + + pub(crate) fn is_ongoing(&self) -> bool { + match self.status { + Status::NotStarted | Status::NoPeers | Status::Verified => false, + Status::Sampling(_) => true, + } + } + + pub(crate) fn is_ready_to_request(&self) -> bool { + match self.status { + Status::NoPeers | Status::NotStarted => true, + Status::Sampling(_) | Status::Verified => false, + } + } + + pub(crate) fn choose_peer( + &mut self, + cx: &SyncNetworkContext, + ) -> Option { + // TODO: When is a fork and only a subset of your peers know about a block, sampling should only + // be queried on the peers on that fork. Should this case be handled? How to handle it? 
+ let mut peer_ids = cx.get_custodial_peers(self.column_index); + + peer_ids.retain(|peer_id| !self.peers_dont_have.contains(peer_id)); + + if let Some(peer_id) = peer_ids.choose(&mut thread_rng()) { + Some(*peer_id) + } else { + self.status = Status::NoPeers; + None + } + } + + pub(crate) fn on_start_sampling(&mut self, peer_id: PeerId) -> Result<(), SamplingError> { + match self.status.clone() { + Status::NoPeers | Status::NotStarted => { + self.status = Status::Sampling(peer_id); + Ok(()) + } + other => Err(SamplingError::BadState(format!( + "bad state on_start_sampling expected NoPeers|NotStarted got {other:?}. column_index:{}", + self.column_index + ))), + } + } + + pub(crate) fn on_sampling_error(&mut self) -> Result { + match self.status.clone() { + Status::Sampling(peer_id) => { + self.peers_dont_have.insert(peer_id); + self.status = Status::NotStarted; + Ok(peer_id) + } + other => Err(SamplingError::BadState(format!( + "bad state on_sampling_error expected Sampling got {other:?}. column_index:{}", + self.column_index + ))), + } + } + + pub(crate) fn on_sampling_success(&mut self) -> Result<(), SamplingError> { + match &self.status { + Status::Sampling(_) => { + self.status = Status::Verified; + Ok(()) + } + other => Err(SamplingError::BadState(format!( + "bad state on_sampling_success expected Sampling got {other:?}. 
column_index:{}", + self.column_index + ))), + } + } + } +} diff --git a/beacon_node/operation_pool/src/reward_cache.rs b/beacon_node/operation_pool/src/reward_cache.rs index 9e4c424bd7d..dd9902353f8 100644 --- a/beacon_node/operation_pool/src/reward_cache.rs +++ b/beacon_node/operation_pool/src/reward_cache.rs @@ -1,6 +1,8 @@ use crate::OpPoolError; use bitvec::vec::BitVec; -use types::{BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, ParticipationFlags}; +use types::{ + BeaconState, BeaconStateError, Epoch, EthSpec, FixedBytesExtended, Hash256, ParticipationFlags, +}; #[derive(Debug, PartialEq, Eq, Clone)] struct Initialization { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 2e1b1c093c8..67bc9d7d407 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -54,6 +54,37 @@ pub fn cli_app() -> Command { /* * Network parameters. */ + .arg( + Arg::new("subscribe-all-data-column-subnets") + .long("subscribe-all-data-column-subnets") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("Subscribe to all data column subnets and participate in data custody for \ + all columns. This will also advertise the beacon node as being long-lived \ + subscribed to all data column subnets. \ + NOTE: this is an experimental flag and may change any time without notice!") + .display_order(0) + .hide(true) + ) + .arg( + // TODO(das): remove this before PeerDAS release + Arg::new("malicious-withhold-count") + .long("malicious-withhold-count") + .action(ArgAction::Set) + .help_heading(FLAG_HEADER) + .help("TESTING ONLY do not use this") + .hide(true) + .display_order(0) + ) + .arg( + Arg::new("enable-sampling") + .long("enable-sampling") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("Enable peer sampling on data columns. 
Disabled by default.") + .hide(true) + .display_order(0) + ) .arg( Arg::new("subscribe-all-subnets") .long("subscribe-all-subnets") @@ -907,7 +938,15 @@ pub fn cli_app() -> Command { .long("purge-db") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) - .help("If present, the chain database will be deleted. Use with caution.") + .help("If present, the chain database will be deleted. Requires manual confirmation.") + .display_order(0) + ) + .arg( + Arg::new("purge-db-force") + .long("purge-db-force") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("If present, the chain database will be deleted without confirmation. Use with caution.") .display_order(0) ) .arg( diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b4fa38da7d7..6f61748a2d3 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,3 +1,4 @@ +use account_utils::{read_input_from_user, STDIN_INPUTS_FLAG}; use beacon_chain::chain_config::{ DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, @@ -21,6 +22,7 @@ use slog::{info, warn, Logger}; use std::cmp::max; use std::fmt::Debug; use std::fs; +use std::io::IsTerminal; use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::num::NonZeroU16; @@ -30,6 +32,8 @@ use std::time::Duration; use types::graffiti::GraffitiString; use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes}; +const PURGE_DB_CONFIRMATION: &str = "confirm"; + /// Gets the fully-initialized global client. /// /// The top-level `clap` arguments should be provided as `cli_args`. @@ -50,26 +54,45 @@ pub fn get_config( client_config.set_data_dir(get_data_dir(cli_args)); // If necessary, remove any existing database and configuration - if client_config.data_dir().exists() && cli_args.get_flag("purge-db") { - // Remove the chain_db. 
- let chain_db = client_config.get_db_path(); - if chain_db.exists() { - fs::remove_dir_all(chain_db) - .map_err(|err| format!("Failed to remove chain_db: {}", err))?; - } - - // Remove the freezer db. - let freezer_db = client_config.get_freezer_db_path(); - if freezer_db.exists() { - fs::remove_dir_all(freezer_db) - .map_err(|err| format!("Failed to remove freezer_db: {}", err))?; - } - - // Remove the blobs db. - let blobs_db = client_config.get_blobs_db_path(); - if blobs_db.exists() { - fs::remove_dir_all(blobs_db) - .map_err(|err| format!("Failed to remove blobs_db: {}", err))?; + if client_config.data_dir().exists() { + if cli_args.get_flag("purge-db-force") { + let chain_db = client_config.get_db_path(); + let freezer_db = client_config.get_freezer_db_path(); + let blobs_db = client_config.get_blobs_db_path(); + purge_db(chain_db, freezer_db, blobs_db)?; + } else if cli_args.get_flag("purge-db") { + let stdin_inputs = cfg!(windows) || cli_args.get_flag(STDIN_INPUTS_FLAG); + if std::io::stdin().is_terminal() || stdin_inputs { + info!( + log, + "You are about to delete the chain database. This is irreversible \ + and you will need to resync the chain." + ); + info!( + log, + "Type 'confirm' to delete the database. Any other input will leave \ + the database intact and Lighthouse will exit." + ); + let confirmation = read_input_from_user(stdin_inputs)?; + + if confirmation == PURGE_DB_CONFIRMATION { + let chain_db = client_config.get_db_path(); + let freezer_db = client_config.get_freezer_db_path(); + let blobs_db = client_config.get_blobs_db_path(); + purge_db(chain_db, freezer_db, blobs_db)?; + info!(log, "Database was deleted."); + } else { + info!(log, "Database was not deleted. Lighthouse will now close."); + std::process::exit(1); + } + } else { + warn!( + log, + "The `--purge-db` flag was passed, but Lighthouse is not running \ + interactively. The database was not purged. Use `--purge-db-force` \ + to purge the database without requiring confirmation."
+ ); + } } } @@ -181,6 +204,10 @@ pub fn get_config( client_config.chain.shuffling_cache_size = cache_size; } + if cli_args.get_flag("enable-sampling") { + client_config.chain.enable_sampling = true; + } + /* * Prometheus metrics HTTP server */ @@ -454,6 +481,12 @@ pub fn get_config( client_config.store.blob_prune_margin_epochs = blob_prune_margin_epochs; } + if let Some(malicious_withhold_count) = + clap_utils::parse_optional(cli_args, "malicious-withhold-count")? + { + client_config.chain.malicious_withhold_count = malicious_withhold_count; + } + /* * Zero-ports * @@ -1130,6 +1163,10 @@ pub fn set_network_config( config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR); }; + if parse_flag(cli_args, "subscribe-all-data-column-subnets") { + config.subscribe_all_data_column_subnets = true; + } + if parse_flag(cli_args, "subscribe-all-subnets") { config.subscribe_all_subnets = true; } @@ -1522,3 +1559,26 @@ where .next() .ok_or(format!("Must provide at least one value to {}", flag_name)) } + +/// Remove chain, freezer and blobs db. +fn purge_db(chain_db: PathBuf, freezer_db: PathBuf, blobs_db: PathBuf) -> Result<(), String> { + // Remove the chain_db. + if chain_db.exists() { + fs::remove_dir_all(chain_db) + .map_err(|err| format!("Failed to remove chain_db: {}", err))?; + } + + // Remove the freezer db. + if freezer_db.exists() { + fs::remove_dir_all(freezer_db) + .map_err(|err| format!("Failed to remove freezer_db: {}", err))?; + } + + // Remove the blobs db. 
+ if blobs_db.exists() { + fs::remove_dir_all(blobs_db) + .map_err(|err| format!("Failed to remove blobs_db: {}", err))?; + } + + Ok(()) +} diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 8b144c1be93..a53b697ea77 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -18,11 +18,11 @@ use crate::metadata::{ STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::state_cache::{PutStateOutcome, StateCache}; -use crate::{get_data_column_key, metrics, parse_data_column_key}; use crate::{ - get_key_for_col, ChunkWriter, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, - PartialBeaconState, StoreItem, StoreOp, + get_data_column_key, get_key_for_col, ChunkWriter, DBColumn, DatabaseBlock, Error, ItemStore, + KeyValueStoreOp, PartialBeaconState, StoreItem, StoreOp, }; +use crate::{metrics, parse_data_column_key}; use itertools::process_results; use leveldb::iterator::LevelDBIterator; use lru::LruCache; @@ -390,7 +390,7 @@ impl HotColdDB, LevelDB> { pub fn iter_temporary_state_roots(&self) -> impl Iterator> + '_ { let column = DBColumn::BeaconStateTemporary; let start_key = - BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_bytes())); + BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_slice())); let keys_iter = self.hot_db.keys_iter(); keys_iter.seek(&start_key); @@ -473,7 +473,7 @@ impl, Cold: ItemStore> HotColdDB blinded_block: &SignedBeaconBlock>, ops: &mut Vec, ) { - let db_key = get_key_for_col(DBColumn::BeaconBlock.into(), key.as_bytes()); + let db_key = get_key_for_col(DBColumn::BeaconBlock.into(), key.as_slice()); ops.push(KeyValueStoreOp::PutKeyValue( db_key, blinded_block.as_ssz_bytes(), @@ -597,7 +597,7 @@ impl, Cold: ItemStore> HotColdDB decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, ) -> Result>, Error> { self.hot_db - .get_bytes(DBColumn::BeaconBlock.into(), block_root.as_bytes())? 
+ .get_bytes(DBColumn::BeaconBlock.into(), block_root.as_slice())? .map(|block_bytes| decoder(&block_bytes)) .transpose() .map_err(|e| e.into()) @@ -611,7 +611,7 @@ impl, Cold: ItemStore> HotColdDB fork_name: ForkName, ) -> Result>, Error> { let column = ExecutionPayload::::db_column().into(); - let key = block_root.as_bytes(); + let key = block_root.as_slice(); match self.hot_db.get_bytes(column, key)? { Some(bytes) => Ok(Some(ExecutionPayload::from_ssz_bytes(&bytes, fork_name)?)), @@ -637,30 +637,30 @@ impl, Cold: ItemStore> HotColdDB /// Check if the blobs for a block exists on disk. pub fn blobs_exist(&self, block_root: &Hash256) -> Result { self.blobs_db - .key_exists(DBColumn::BeaconBlob.into(), block_root.as_bytes()) + .key_exists(DBColumn::BeaconBlob.into(), block_root.as_slice()) } /// Determine whether a block exists in the database. pub fn block_exists(&self, block_root: &Hash256) -> Result { self.hot_db - .key_exists(DBColumn::BeaconBlock.into(), block_root.as_bytes()) + .key_exists(DBColumn::BeaconBlock.into(), block_root.as_slice()) } /// Delete a block from the store and the block cache. 
pub fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { self.block_cache.lock().delete(block_root); self.hot_db - .key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())?; + .key_delete(DBColumn::BeaconBlock.into(), block_root.as_slice())?; self.hot_db - .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes())?; + .key_delete(DBColumn::ExecPayload.into(), block_root.as_slice())?; self.blobs_db - .key_delete(DBColumn::BeaconBlob.into(), block_root.as_bytes()) + .key_delete(DBColumn::BeaconBlob.into(), block_root.as_slice()) } pub fn put_blobs(&self, block_root: &Hash256, blobs: BlobSidecarList) -> Result<(), Error> { self.blobs_db.put_bytes( DBColumn::BeaconBlob.into(), - block_root.as_bytes(), + block_root.as_slice(), &blobs.as_ssz_bytes(), )?; self.block_cache.lock().put_blobs(*block_root, blobs); @@ -673,7 +673,7 @@ impl, Cold: ItemStore> HotColdDB blobs: BlobSidecarList, ops: &mut Vec, ) { - let db_key = get_key_for_col(DBColumn::BeaconBlob.into(), key.as_bytes()); + let db_key = get_key_for_col(DBColumn::BeaconBlob.into(), key.as_slice()); ops.push(KeyValueStoreOp::PutKeyValue(db_key, blobs.as_ssz_bytes())); } @@ -996,17 +996,17 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteStateTemporaryFlag(state_root) => { let db_key = - get_key_for_col(TemporaryFlag::db_column().into(), state_root.as_bytes()); + get_key_for_col(TemporaryFlag::db_column().into(), state_root.as_slice()); key_value_batch.push(KeyValueStoreOp::DeleteKey(db_key)); } StoreOp::DeleteBlock(block_root) => { - let key = get_key_for_col(DBColumn::BeaconBlock.into(), block_root.as_bytes()); + let key = get_key_for_col(DBColumn::BeaconBlock.into(), block_root.as_slice()); key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); } StoreOp::DeleteBlobs(block_root) => { - let key = get_key_for_col(DBColumn::BeaconBlob.into(), block_root.as_bytes()); + let key = get_key_for_col(DBColumn::BeaconBlob.into(), block_root.as_slice()); 
key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); } @@ -1022,18 +1022,18 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteState(state_root, slot) => { let state_summary_key = - get_key_for_col(DBColumn::BeaconStateSummary.into(), state_root.as_bytes()); + get_key_for_col(DBColumn::BeaconStateSummary.into(), state_root.as_slice()); key_value_batch.push(KeyValueStoreOp::DeleteKey(state_summary_key)); if slot.map_or(true, |slot| slot % E::slots_per_epoch() == 0) { let state_key = - get_key_for_col(DBColumn::BeaconState.into(), state_root.as_bytes()); + get_key_for_col(DBColumn::BeaconState.into(), state_root.as_slice()); key_value_batch.push(KeyValueStoreOp::DeleteKey(state_key)); } } StoreOp::DeleteExecutionPayload(block_root) => { - let key = get_key_for_col(DBColumn::ExecPayload.into(), block_root.as_bytes()); + let key = get_key_for_col(DBColumn::ExecPayload.into(), block_root.as_slice()); key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); } @@ -1455,7 +1455,7 @@ impl, Cold: ItemStore> HotColdDB fn load_restore_point(&self, state_root: &Hash256) -> Result, Error> { let partial_state_bytes = self .cold_db - .get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? + .get_bytes(DBColumn::BeaconState.into(), state_root.as_slice())? .ok_or(HotColdDBError::MissingRestorePoint(*state_root))?; let mut partial_state: PartialBeaconState = PartialBeaconState::from_ssz_bytes(&partial_state_bytes, &self.spec)?; @@ -1666,7 +1666,7 @@ impl, Cold: ItemStore> HotColdDB match self .blobs_db - .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? + .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_slice())? 
{ Some(ref blobs_bytes) => { let blobs = BlobSidecarList::from_ssz_bytes(blobs_bytes)?; @@ -1682,7 +1682,7 @@ impl, Cold: ItemStore> HotColdDB /// Fetch all keys in the data_column column with prefix `block_root` pub fn get_data_column_keys(&self, block_root: Hash256) -> Result, Error> { self.blobs_db - .iter_raw_keys(DBColumn::BeaconDataColumn, block_root.as_bytes()) + .iter_raw_keys(DBColumn::BeaconDataColumn, block_root.as_slice()) .map(|key| key.and_then(|key| parse_data_column_key(key).map(|key| key.1))) .collect() } @@ -1787,7 +1787,7 @@ impl, Cold: ItemStore> HotColdDB mut ops: Vec, ) -> Result<(), Error> { let column = SchemaVersion::db_column().into(); - let key = SCHEMA_VERSION_KEY.as_bytes(); + let key = SCHEMA_VERSION_KEY.as_slice(); let db_key = get_key_for_col(column, key); let op = KeyValueStoreOp::PutKeyValue(db_key, schema_version.as_store_bytes()); ops.push(op); @@ -1882,7 +1882,7 @@ impl, Cold: ItemStore> HotColdDB } else { KeyValueStoreOp::DeleteKey(get_key_for_col( DBColumn::BeaconMeta.into(), - ANCHOR_INFO_KEY.as_bytes(), + ANCHOR_INFO_KEY.as_slice(), )) } } diff --git a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs index f752bf39795..48c289f2b2d 100644 --- a/beacon_node/store/src/impls/beacon_state.rs +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -13,7 +13,7 @@ pub fn store_full_state( }; metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as u64); metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT); - let key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_bytes()); + let key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_slice()); ops.push(KeyValueStoreOp::PutKeyValue(key, bytes)); Ok(()) } @@ -25,7 +25,7 @@ pub fn get_full_state, E: EthSpec>( ) -> Result>, Error> { let total_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); - match db.get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? 
{ + match db.get_bytes(DBColumn::BeaconState.into(), state_root.as_slice())? { Some(bytes) => { let overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_OVERHEAD_TIMES); let container = StorageContainer::from_ssz_bytes(&bytes, spec)?; diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 03090ca14c5..a7e0c09ed1f 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -385,6 +385,7 @@ mod test { use beacon_chain::test_utils::BeaconChainHarness; use beacon_chain::types::{ChainSpec, MainnetEthSpec}; use sloggers::{null::NullLoggerBuilder, Build}; + use types::FixedBytesExtended; fn get_state() -> BeaconState { let harness = BeaconChainHarness::builder(E::default()) diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 32ff942ddc7..28e04f56205 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -182,7 +182,6 @@ impl KeyValueStore for LevelDB { fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { let start_key = BytesKey::from_vec(get_key_for_col(column.into(), from)); - let iter = self.db.iter(self.read_options()); iter.seek(&start_key); diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 1f8cc8ca019..60dddeb1760 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -151,7 +151,7 @@ pub fn get_col_from_key(key: &[u8]) -> Option { } pub fn get_data_column_key(block_root: &Hash256, column_index: &ColumnIndex) -> Vec { - let mut result = block_root.as_bytes().to_vec(); + let mut result = block_root.as_slice().to_vec(); result.extend_from_slice(&column_index.to_le_bytes()); result } @@ -183,7 +183,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Store an item in `Self`. 
fn put(&self, key: &Hash256, item: &I) -> Result<(), Error> { let column = I::db_column().into(); - let key = key.as_bytes(); + let key = key.as_slice(); self.put_bytes(column, key, &item.as_store_bytes()) .map_err(Into::into) @@ -191,7 +191,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati fn put_sync(&self, key: &Hash256, item: &I) -> Result<(), Error> { let column = I::db_column().into(); - let key = key.as_bytes(); + let key = key.as_slice(); self.put_bytes_sync(column, key, &item.as_store_bytes()) .map_err(Into::into) @@ -200,7 +200,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Retrieve an item from `Self`. fn get(&self, key: &Hash256) -> Result, Error> { let column = I::db_column().into(); - let key = key.as_bytes(); + let key = key.as_slice(); match self.get_bytes(column, key)? { Some(bytes) => Ok(Some(I::from_store_bytes(&bytes[..])?)), @@ -211,7 +211,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Returns `true` if the given key represents an item in `Self`. fn exists(&self, key: &Hash256) -> Result { let column = I::db_column().into(); - let key = key.as_bytes(); + let key = key.as_slice(); self.key_exists(column, key) } @@ -219,7 +219,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Remove an item from `Self`. fn delete(&self, key: &Hash256) -> Result<(), Error> { let column = I::db_column().into(); - let key = key.as_bytes(); + let key = key.as_slice(); self.key_delete(column, key) } @@ -300,6 +300,9 @@ pub enum DBColumn { BeaconHistoricalSummaries, #[strum(serialize = "olc")] OverflowLRUCache, + /// For persisting eagerly computed light client data + #[strum(serialize = "lcu")] + LightClientUpdate, } /// A block from the database, which might have an execution payload or not. 
@@ -342,7 +345,8 @@ impl DBColumn { | Self::BeaconStateRoots | Self::BeaconHistoricalRoots | Self::BeaconHistoricalSummaries - | Self::BeaconRandaoMixes => 8, + | Self::BeaconRandaoMixes + | Self::LightClientUpdate => 8, Self::BeaconDataColumn => DATA_COLUMN_DB_KEY_SIZE, } } @@ -362,7 +366,7 @@ pub trait StoreItem: Sized { fn from_store_bytes(bytes: &[u8]) -> Result; fn as_kv_store_op(&self, key: Hash256) -> KeyValueStoreOp { - let db_key = get_key_for_col(Self::db_column().into(), key.as_bytes()); + let db_key = get_key_for_col(Self::db_column().into(), key.as_slice()); KeyValueStoreOp::PutKeyValue(db_key, self.as_store_bytes()) } } diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 8f40b4b9241..8a66ec121e1 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -323,7 +323,7 @@ impl PartialBeaconState { /// Prepare the partial state for storage in the KV database. pub fn as_kv_store_op(&self, state_root: Hash256) -> KeyValueStoreOp { - let db_key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_bytes()); + let db_key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_slice()); KeyValueStoreOp::PutKeyValue(db_key, self.as_ssz_bytes()) } diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index bbec70330b7..4be6536df9d 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -25,7 +25,7 @@ fn build_node(env: &mut Environment) -> LocalBeaconNode { #[test] fn http_server_genesis_state() { let mut env = env_builder() - .null_logger() + .test_logger() //.async_logger("debug", None) .expect("should build env logger") .multi_threaded_tokio_runtime() diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md index f2f9caf46b5..adde78270a6 100644 --- a/book/src/api-vc-auth-header.md +++ b/book/src/api-vc-auth-header.md @@ -9,10 +9,10 @@ HTTP header: - Value: `Bearer ` Where `` is a 
string that can be obtained from the validator client -host. Here is an example `Authorization` header: +host. Here is an example of the `Authorization` header: ```text -Authorization: Bearer api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123 +Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh ``` ## Obtaining the API token @@ -24,7 +24,7 @@ text editor will suffice: ```bash cat api-token.txt -api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123 +hGut6B8uEujufDXSmZsT0thnxvdvKFBvh ``` When starting the validator client it will output a log message containing the path @@ -54,7 +54,7 @@ Response: Here is an example `curl` command using the API token in the `Authorization` header: ```bash -curl localhost:5062/lighthouse/version -H "Authorization: Bearer api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123" +curl localhost:5062/lighthouse/version -H "Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh" ``` The server should respond with its version: diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index a36aa737083..6cb66859128 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -53,12 +53,12 @@ Example Response Body: } ``` -> Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/API-token.txt`. If your database is saved in another directory, modify the `DATADIR` accordingly. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. +> Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/api-token.txt`. 
If your database is saved in another directory, modify the `DATADIR` accordingly. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. -> As an alternative, you can also provide the API token directly, for example, `-H "Authorization: Bearer api-token-0x02dc2a13115cc8c83baf170f597f22b1eb2930542941ab902df3daadebcb8f8176`. In this case, you obtain the token from the file `API token.txt` and the command becomes: +> As an alternative, you can also provide the API token directly, for example, `-H "Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh`. In this case, you obtain the token from the file `api-token.txt` and the command becomes: ```bash -curl -X GET "http://localhost:5062/lighthouse/version" -H "Authorization: Bearer api-token-0x02dc2a13115cc8c83baf170f597f22b1eb2930542941ab902df3daadebcb8f8176" | jq +curl -X GET "http://localhost:5062/lighthouse/version" -H "Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh" | jq ``` ## `GET /lighthouse/health` diff --git a/book/src/api-vc-sig-header.md b/book/src/api-vc-sig-header.md deleted file mode 100644 index 468f714cfa9..00000000000 --- a/book/src/api-vc-sig-header.md +++ /dev/null @@ -1,108 +0,0 @@ -# Validator Client API: Signature Header - -## Overview - -The validator client HTTP server adds the following header to all responses: - -- Name: `Signature` -- Value: a secp256k1 signature across the SHA256 of the response body. - -Example `Signature` header: - -```text -Signature: 0x304402205b114366444112580bf455d919401e9c869f5af067cd496016ab70d428b5a99d0220067aede1eb5819eecfd5dd7a2b57c5ac2b98f25a7be214b05684b04523aef873 -``` - -## Verifying the Signature - -Below is a browser-ready example of signature verification. - -### HTML - -```html - - -``` - -### Javascript - -```javascript -// Helper function to turn a hex-string into bytes. 
-function hexStringToByte(str) { - if (!str) { - return new Uint8Array(); - } - - var a = []; - for (var i = 0, len = str.length; i < len; i+=2) { - a.push(parseInt(str.substr(i,2),16)); - } - - return new Uint8Array(a); -} - -// This example uses the secp256k1 curve from the "elliptic" library: -// -// https://github.com/indutny/elliptic -var ec = new elliptic.ec('secp256k1'); - -// The public key is contained in the API token: -// -// Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123 -var pk_bytes = hexStringToByte('03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123'); - -// The signature is in the `Signature` header of the response: -// -// Signature: 0x304402205b114366444112580bf455d919401e9c869f5af067cd496016ab70d428b5a99d0220067aede1eb5819eecfd5dd7a2b57c5ac2b98f25a7be214b05684b04523aef873 -var sig_bytes = hexStringToByte('304402205b114366444112580bf455d919401e9c869f5af067cd496016ab70d428b5a99d0220067aede1eb5819eecfd5dd7a2b57c5ac2b98f25a7be214b05684b04523aef873'); - -// The HTTP response body. -var response_body = "{\"data\":{\"version\":\"Lighthouse/v0.2.11-fc0654fbe+/x86_64-linux\"}}"; - -// The HTTP response body is hashed (SHA256) to determine the 32-byte message. -let hash = sha256.create(); -hash.update(response_body); -let message = hash.array(); - -// The 32-byte message hash, the signature and the public key are verified. -if (ec.verify(message, sig_bytes, pk_bytes)) { - console.log("The signature is valid") -} else { - console.log("The signature is invalid") -} -``` - -*This example is also available as a [JSFiddle](https://jsfiddle.net/wnqd74Lz/).* - -## Example - -The previous Javascript example was written using the output from the following -`curl` command: - -```bash -curl -v localhost:5062/lighthouse/version -H "Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123" -``` - -```text -* Trying ::1:5062... 
-* connect to ::1 port 5062 failed: Connection refused -* Trying 127.0.0.1:5062... -* Connected to localhost (127.0.0.1) port 5062 (#0) -> GET /lighthouse/version HTTP/1.1 -> Host: localhost:5062 -> User-Agent: curl/7.72.0 -> Accept: */* -> Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123 -> -* Mark bundle as not supporting multiuse -< HTTP/1.1 200 OK -< content-type: application/json -< signature: 0x304402205b114366444112580bf455d919401e9c869f5af067cd496016ab70d428b5a99d0220067aede1eb5819eecfd5dd7a2b57c5ac2b98f25a7be214b05684b04523aef873 -< server: Lighthouse/v0.2.11-fc0654fbe+/x86_64-linux -< access-control-allow-origin: -< content-length: 65 -< date: Tue, 29 Sep 2020 04:23:46 GMT -< -* Connection #0 to host localhost left intact -{"data":{"version":"Lighthouse/v0.2.11-fc0654fbe+/x86_64-linux"}} -``` diff --git a/book/src/cross-compiling.md b/book/src/cross-compiling.md index dfddcbc2945..c90001d561f 100644 --- a/book/src/cross-compiling.md +++ b/book/src/cross-compiling.md @@ -15,18 +15,10 @@ project. ### Targets -The `Makefile` in the project contains four targets for cross-compiling: +The `Makefile` in the project contains two targets for cross-compiling: - `build-x86_64`: builds an optimized version for x86_64 processors (suitable for most users). -- `build-x86_64-portable`: builds a version for x86_64 processors which avoids using some modern CPU - instructions that are incompatible with older CPUs. - `build-aarch64`: builds an optimized version for 64-bit ARM processors (suitable for Raspberry Pi 4). -- `build-aarch64-portable`: builds a version for 64-bit ARM processors which avoids using some - modern CPU instructions. In practice, very few ARM processors lack the instructions necessary to - run the faster non-portable build. - -For more information about optimized vs portable builds see -[Portability](./installation-binaries.md#portability). 
### Example diff --git a/book/src/docker.md b/book/src/docker.md index 16e685491ef..8ee0c56bb48 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -35,28 +35,23 @@ Lighthouse vx.x.xx-xxxxxxxxx BLS Library: xxxx-xxxxxxx ``` -> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker -> Images](#available-docker-images) below. - ### Available Docker Images There are several images available on Docker Hub. -Most users should use the `latest-modern` tag, which corresponds to the latest stable release of -Lighthouse with optimizations enabled. If you are running on older hardware then the default -`latest` image bundles a _portable_ version of Lighthouse which is slower but with better hardware -compatibility (see [Portability](./installation-binaries.md#portability)). +Most users should use the `latest` tag, which corresponds to the latest stable release of +Lighthouse with optimizations enabled. -To install a specific tag (in this case `latest-modern`), add the tag name to your `docker` commands: +To install a specific tag (in this case `latest`), add the tag name to your `docker` commands: ```bash -docker pull sigp/lighthouse:latest-modern +docker pull sigp/lighthouse:latest ``` Image tags follow this format: ```text -${version}${arch}${stability}${modernity}${features} +${version}${arch}${stability} ``` The `version` is: @@ -75,19 +70,9 @@ The `stability` is: * `-unstable` for the `unstable` branch * empty for a tagged release or the `stable` branch -The `modernity` is: - -* `-modern` for optimized builds -* empty for a `portable` unoptimized build - -The `features` is: - -* `-dev` for a development build with `minimal` preset enabled (`spec-minimal` feature). -* empty for a standard build with no custom feature enabled. 
- Examples: -* `latest-unstable-modern`: most recent `unstable` build for all modern CPUs (x86_64 or ARM) +* `latest-unstable`: most recent `unstable` build * `latest-amd64`: most recent Lighthouse release for older x86_64 CPUs * `latest-amd64-unstable`: most recent `unstable` build for older x86_64 CPUs diff --git a/book/src/help_bn.md b/book/src/help_bn.md index f9180b65832..733446e5d27 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -565,7 +565,11 @@ Flags: being referenced by validator client using the --proposer-node flag. This configuration is for enabling more secure setups. --purge-db - If present, the chain database will be deleted. Use with caution. + If present, the chain database will be deleted. Requires manual + confirmation. + --purge-db-force + If present, the chain database will be deleted without confirmation. + Use with caution. --reconstruct-historic-states After a checkpoint sync, reconstruct historic states in the database. This requires syncing all the way back to genesis. @@ -585,6 +589,8 @@ Flags: server on localhost:5052 and import deposit logs from the execution node. This is equivalent to `--http` on merge-ready networks, or `--http --eth1` pre-merge + --stdin-inputs + If present, read all user inputs from stdin instead of tty. --subscribe-all-subnets Subscribe to all subnets regardless of validator count. This will also advertise the beacon node as being long-lived subscribed to all diff --git a/book/src/help_general.md b/book/src/help_general.md index 84bc67a86e2..1c2d1266d08 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -136,6 +136,8 @@ Flags: contain sensitive information about your validator and so this flag should be used with caution. For Windows users, the log file permissions will be inherited from the parent folder. + --stdin-inputs + If present, read all user inputs from stdin instead of tty. 
``` diff --git a/book/src/help_vc.md b/book/src/help_vc.md index e205f4c3454..7f2cfab8e3a 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -266,6 +266,8 @@ Flags: by builders, regardless of payload value. --produce-block-v3 This flag is deprecated and is no longer in use. + --stdin-inputs + If present, read all user inputs from stdin instead of tty. --unencrypted-http-transport This is a safety flag to ensure that the user is aware that the http transport is unencrypted and using a custom HTTP address is unsafe. diff --git a/book/src/help_vm.md b/book/src/help_vm.md index 99a45c1a76d..f787985b215 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -123,6 +123,8 @@ Flags: contain sensitive information about your validator and so this flag should be used with caution. For Windows users, the log file permissions will be inherited from the parent folder. + --stdin-inputs + If present, read all user inputs from stdin instead of tty. ``` diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index e18aad79589..0883139ad21 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -121,6 +121,8 @@ Flags: contain sensitive information about your validator and so this flag should be used with caution. For Windows users, the log file permissions will be inherited from the parent folder. + --stdin-inputs + If present, read all user inputs from stdin instead of tty. ``` diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index faef0a5783d..12dd1e91402 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -88,8 +88,6 @@ Options: A HTTP(S) address of a validator client using the keymanager-API. This validator client is the "source" and contains the validators that are to be moved. - --stdin-inputs - If present, read all user inputs from stdin instead of tty. --suggested-fee-recipient All created validators will use this value for the suggested fee recipient. 
Omit this flag to use the default value from the VC. @@ -142,6 +140,8 @@ Flags: contain sensitive information about your validator and so this flag should be used with caution. For Windows users, the log file permissions will be inherited from the parent folder. + --stdin-inputs + If present, read all user inputs from stdin instead of tty. ``` diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md index 580b5c19d45..e3a2bfb8a00 100644 --- a/book/src/installation-binaries.md +++ b/book/src/installation-binaries.md @@ -13,13 +13,6 @@ Binaries are supplied for four platforms: - `x86_64-apple-darwin`: macOS with Intel chips - `x86_64-windows`: Windows with 64-bit processors -Additionally there is also a `-portable` suffix which indicates if the `portable` feature is used: - -- Without `portable`: uses modern CPU instructions to provide the fastest signature verification times (may cause `Illegal instruction` error on older CPUs) -- With `portable`: approx. 20% slower, but should work on all modern 64-bit processors. - -For details, see [Portability](#portability). - ## Usage Each binary is contained in a `.tar.gz` archive. For this example, lets assume the user needs @@ -41,30 +34,3 @@ a `x86_64` binary. 1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. For example, to copy `lighthouse` from the current directory to `usr/bin`, run `sudo cp lighthouse /usr/bin`. > Windows users will need to execute the commands in Step 2 from PowerShell. - -## Portability - -Portable builds of Lighthouse are designed to run on the widest range of hardware possible, but -sacrifice the ability to make use of modern CPU instructions. - -If you have a modern CPU then you should try running a non-portable build to get a 20-30% speed up. 
- -- For **x86_64**, any CPU supporting the [ADX](https://en.wikipedia.org/wiki/Intel_ADX) instruction set -extension is compatible with the optimized build. This includes Intel Broadwell (2014) -and newer, and AMD Ryzen (2017) and newer. -- For **ARMv8**, most CPUs are compatible with the optimized build, including the Cortex-A72 used by -the Raspberry Pi 4. - -## Troubleshooting - -If you get a SIGILL (exit code 132), then your CPU is incompatible with the optimized build -of Lighthouse and you should switch to the `-portable` build. In this case, you will see a -warning like this on start-up: - -``` -WARN CPU seems incompatible with optimized Lighthouse build, advice: If you get a SIGILL, please try Lighthouse portable build -``` - -On some VPS providers, the virtualization can make it appear as if CPU features are not available, -even when they are. In this case you might see the warning above, but so long as the client -continues to function, it's nothing to worry about. diff --git a/book/src/installation-source.md b/book/src/installation-source.md index be03a189de7..3c9f27d236a 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -160,8 +160,7 @@ FEATURES=gnosis,slasher-lmdb make Commonly used features include: - `gnosis`: support for the Gnosis Beacon Chain. -- `portable`: support for legacy hardware. -- `modern`: support for exclusively modern hardware. +- `portable`: the default feature as Lighthouse now uses runtime detection of hardware CPU features. - `slasher-lmdb`: support for the LMDB slasher backend. Enabled by default. - `slasher-mdbx`: support for the MDBX slasher backend. - `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. 
diff --git a/book/src/installation.md b/book/src/installation.md index a0df394bd2d..137a00b918b 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -23,7 +23,7 @@ There are also community-maintained installation methods: Before [The Merge](https://ethereum.org/en/roadmap/merge/), Lighthouse was able to run on its own with low to mid-range consumer hardware, but would perform best when provided with ample system resources. -After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, it is necessary to run Lighthouse together with an execution client ([Nethermind](https://nethermind.io/), [Besu](https://www.hyperledger.org/use/besu), [Erigon](https://github.com/ledgerwatch/erigon), [Geth](https://geth.ethereum.org/)). The following system requirements listed are therefore for running a Lighthouse beacon node combined with an execution client , and a validator client with a modest number of validator keys (less than 100): +After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, it is necessary to run Lighthouse together with an execution client ([Nethermind](https://nethermind.io/), [Besu](https://www.hyperledger.org/use/besu), [Erigon](https://github.com/ledgerwatch/erigon), [Geth](https://geth.ethereum.org/), [Reth](https://github.com/paradigmxyz/reth)). 
The following system requirements listed are therefore for running a Lighthouse beacon node combined with an execution client , and a validator client with a modest number of validator keys (less than 100): - CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer - Memory: 32 GB RAM* diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md index fc4530589d9..4a00f33aa44 100644 --- a/book/src/late-block-re-orgs.md +++ b/book/src/late-block-re-orgs.md @@ -50,10 +50,10 @@ A pair of messages at `INFO` level will be logged if a re-org opportunity is det > INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 -This should be followed shortly after by a `WARN` log indicating that a re-org occurred. This is +This should be followed shortly after by a `INFO` log indicating that a re-org occurred. This is expected and normal: -> WARN Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +> INFO Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 In case a re-org is not viable (which should be most of the time), Lighthouse will just propose a block as normal and log the reason the re-org was not attempted at debug level: diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 6de05cff2a4..7a123254bf7 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -7,7 +7,7 @@ There are two configuration changes required for a Lighthouse node to operate correctly throughout the merge: -1. You *must* run your own execution engine such as Besu, Erigon, Geth or Nethermind alongside Lighthouse. +1. 
You *must* run your own execution engine such as Besu, Erigon, Reth, Geth or Nethermind alongside Lighthouse. You *must* update your `lighthouse bn` configuration to connect to the execution engine using new flags which are documented on this page in the [Connecting to an execution engine](#connecting-to-an-execution-engine) section. @@ -65,6 +65,7 @@ Each execution engine has its own flags for configuring the engine API and JWT. the relevant page for your execution engine for the required flags: - [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/getting-started/consensus-clients) +- [Reth: Running the Consensus Layer](https://reth.rs/run/mainnet.html?highlight=consensus#running-the-consensus-layer) - [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/) - [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) @@ -123,7 +124,7 @@ a deprecation warning will be logged and Lighthouse *may* remove these flags in ### The relationship between `--eth1-endpoints` and `--execution-endpoint` Pre-merge users will be familiar with the `--eth1-endpoints` flag. This provides a list of Ethereum -"eth1" nodes (Besu, Erigon, Geth or Nethermind). Each beacon node (BN) can have multiple eth1 endpoints +"eth1" nodes (Besu, Erigon, Reth, Geth or Nethermind). Each beacon node (BN) can have multiple eth1 endpoints and each eth1 endpoint can have many BNs connection (many-to-many relationship). The eth1 node provides a source of truth for the [deposit contract](https://ethereum.org/en/staking/deposit-contract/) and beacon chain proposers include this @@ -134,7 +135,7 @@ achieve this. To progress through the Bellatrix upgrade nodes will need a *new* connection to an "eth1" node; `--execution-endpoint`. 
This connection has a few different properties. Firstly, the term "eth1 node" has been deprecated and replaced with "execution engine". Whilst "eth1 node" and "execution -engine" still refer to the same projects (Besu, Erigon, Geth or Nethermind), the former refers to the pre-merge +engine" still refer to the same projects (Besu, Erigon, Reth, Geth or Nethermind), the former refers to the pre-merge versions and the latter refers to post-merge versions. Secondly, there is a strict one-to-one relationship between Lighthouse and the execution engine; only one Lighthouse node can connect to one execution engine. Thirdly, it is impossible to fully verify the post-merge chain without an diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index 6c1f23d8e8e..9b9e0cba8e5 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -26,6 +26,7 @@ per beacon node. The reason for this is that the beacon node *controls* the exec - [Besu](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) - [Erigon](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) - [Geth](https://geth.ethereum.org/docs/getting-started/consensus-clients) +- [Reth](https://reth.rs/run/mainnet.html) > Note: Each execution engine has its own flags for configuring the engine API and JWT secret to connect to a beacon node. Please consult the relevant page of your execution engine as above for the required flags. diff --git a/book/src/validator-manager-move.md b/book/src/validator-manager-move.md index 10de1fe87c6..f7184ce1277 100644 --- a/book/src/validator-manager-move.md +++ b/book/src/validator-manager-move.md @@ -116,7 +116,7 @@ location of the file varies, but it is located in the "validator directory" of y alongside validator keystores. For example: `~/.lighthouse/mainnet/validators/api-token.txt`. If you are unsure of the `api-token.txt` path, you can run `curl http://localhost:5062/lighthouse/auth` which will show the path. 
Copy the contents of that file into a new file on the **destination host** at `~/src-token.txt`. The -API token should be similar to `api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123`. +API token is a random string, e.g., `hGut6B8uEujufDXSmZsT0thnxvdvKFBvh`. ### 4. Create an SSH Tunnel diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 4cde8ea2707..46ccd4566be 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "5.2.1" +version = "5.3.0" authors = ["Sigma Prime "] edition = { workspace = true } diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index 8707ae531f7..665953fa522 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -35,6 +35,8 @@ const DEFAULT_PASSWORD_LEN: usize = 48; pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:"; +pub const STDIN_INPUTS_FLAG: &str = "stdin-inputs"; + /// Returns the "default" path where a wallet should store its password file. 
pub fn default_wallet_password_path>(wallet_name: &str, secrets_dir: P) -> PathBuf { secrets_dir.as_ref().join(format!("{}.pass", wallet_name)) diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index e4dfb2a5560..73823ae24e9 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -7,12 +7,12 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +alloy-primitives = { workspace = true } clap = { workspace = true } hex = { workspace = true } dirs = { workspace = true } eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } -ethereum-types = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index ea56e7e672a..cba7399c9bf 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -1,9 +1,9 @@ //! A helper library for parsing values from `clap::ArgMatches`. +use alloy_primitives::U256 as Uint256; use clap::builder::styling::*; use clap::ArgMatches; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; -use ethereum_types::U256 as Uint256; use ssz::Decode; use std::path::PathBuf; use std::str::FromStr; @@ -36,7 +36,7 @@ pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result(cli_args, "terminal-total-difficulty-override")? 
{ let stripped = string.replace(',', ""); - let terminal_total_difficulty = Uint256::from_dec_str(&stripped).map_err(|e| { + let terminal_total_difficulty = Uint256::from_str(&stripped).map_err(|e| { format!( "Could not parse --terminal-total-difficulty-override as decimal value: {:?}", e diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 6d000f576f9..2805d36b90c 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -121,6 +121,7 @@ impl fmt::Display for Error { pub struct Timeouts { pub attestation: Duration, pub attester_duties: Duration, + pub attestation_subscriptions: Duration, pub liveness: Duration, pub proposal: Duration, pub proposer_duties: Duration, @@ -137,6 +138,7 @@ impl Timeouts { Timeouts { attestation: timeout, attester_duties: timeout, + attestation_subscriptions: timeout, liveness: timeout, proposal: timeout, proposer_duties: timeout, @@ -763,6 +765,31 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `GET beacon/light_client/updates` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_light_client_updates( + &self, + start_period: u64, + count: u64, + ) -> Result>>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("light_client") + .push("updates"); + + path.query_pairs_mut() + .append_pair("start_period", &start_period.to_string()); + + path.query_pairs_mut() + .append_pair("count", &count.to_string()); + + self.get_opt(path).await + } + /// `GET beacon/light_client/bootstrap` /// /// Returns `Ok(None)` on a 404 error. 
@@ -1114,7 +1141,8 @@ impl BeaconNodeHttpClient { &self, block_id: BlockId, indices: Option<&[u64]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let mut path = self.get_blobs_path(block_id)?; if let Some(indices) = indices { let indices_string = indices @@ -2515,7 +2543,12 @@ impl BeaconNodeHttpClient { .push("validator") .push("beacon_committee_subscriptions"); - self.post(path, &subscriptions).await?; + self.post_with_timeout( + path, + &subscriptions, + self.timeouts.attestation_subscriptions, + ) + .await?; Ok(()) } diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index ab90d336fa4..ee05c298399 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -8,6 +8,7 @@ pub use slashing_protection::interchange::Interchange; #[derive(Debug, Deserialize, Serialize, PartialEq)] pub struct GetFeeRecipientResponse { pub pubkey: PublicKeyBytes, + #[serde(with = "serde_utils::address_hex")] pub ethaddress: Address, } diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index d903d7b73d7..1921549bcb5 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -163,6 +163,7 @@ pub struct Web3SignerValidatorRequest { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct UpdateFeeRecipientRequest { + #[serde(with = "serde_utils::address_hex")] pub ethaddress: Address, } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index fa5fb654b72..3925d2deda8 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -563,6 +563,7 @@ pub struct BlockHeaderData { pub struct DepositContractData { #[serde(with = "serde_utils::quoted_u64")] pub chain_id: u64, + #[serde(with = "serde_utils::address_hex")] pub address: Address, } @@ -784,6 +785,24 @@ pub struct ValidatorAggregateAttestationQuery { pub committee_index: Option, } +#[derive(Clone, Deserialize)] 
+pub struct LightClientUpdatesQuery { + pub start_period: u64, + pub count: u64, +} + +#[derive(Encode, Decode)] +pub struct LightClientUpdateSszResponse { + pub response_chunk_len: Vec, + pub response_chunk: Vec, +} + +#[derive(Encode, Decode)] +pub struct LightClientUpdateResponseChunk { + pub context: [u8; 4], + pub payload: Vec, +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub struct BeaconCommitteeSubscription { #[serde(with = "serde_utils::quoted_u64")] @@ -1017,6 +1036,7 @@ pub struct SsePayloadAttributes { #[superstruct(getter(copy))] pub prev_randao: Hash256, #[superstruct(getter(copy))] + #[serde(with = "serde_utils::address_hex")] pub suggested_fee_recipient: Address, #[superstruct(only(V2, V3))] pub withdrawals: Vec, @@ -1032,6 +1052,7 @@ pub struct SseExtendedPayloadAttributesGeneric { pub parent_block_root: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub parent_block_number: u64, + pub parent_block_hash: ExecutionBlockHash, pub payload_attributes: T, } @@ -1772,12 +1793,12 @@ impl TryFrom<&HeaderMap> for ProduceBlockV3Metadata { })?; let execution_payload_value = parse_required_header(headers, EXECUTION_PAYLOAD_VALUE_HEADER, |s| { - Uint256::from_dec_str(s) + Uint256::from_str_radix(s, 10) .map_err(|e| format!("invalid {EXECUTION_PAYLOAD_VALUE_HEADER}: {e:?}")) })?; let consensus_block_value = parse_required_header(headers, CONSENSUS_BLOCK_VALUE_HEADER, |s| { - Uint256::from_dec_str(s) + Uint256::from_str_radix(s, 10) .map_err(|e| format!("invalid {CONSENSUS_BLOCK_VALUE_HEADER}: {e:?}")) })?; diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index 066b27795cd..74fca4c5010 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -138,6 +138,6 @@ MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384 
BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS -CUSTODY_REQUIREMENT: 1 -DATA_COLUMN_SIDECAR_SUBNET_COUNT: 32 +CUSTODY_REQUIREMENT: 4 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 23cf040b276..07bd21b35c2 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -121,6 +121,6 @@ MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384 BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS -CUSTODY_REQUIREMENT: 1 -DATA_COLUMN_SIDECAR_SUBNET_COUNT: 32 +CUSTODY_REQUIREMENT: 4 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index cec2b61f213..67f1e5b6831 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -125,6 +125,6 @@ MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS -CUSTODY_REQUIREMENT: 1 -DATA_COLUMN_SIDECAR_SUBNET_COUNT: 32 +CUSTODY_REQUIREMENT: 4 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 500b9e60a5c..acf4d83f323 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -147,6 +147,6 @@ MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS -CUSTODY_REQUIREMENT: 1 -DATA_COLUMN_SIDECAR_SUBNET_COUNT: 32 +CUSTODY_REQUIREMENT: 
4 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 2a1809d6ce9..8b84d870103 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -121,6 +121,6 @@ MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS -CUSTODY_REQUIREMENT: 1 -DATA_COLUMN_SIDECAR_SUBNET_COUNT: 32 +CUSTODY_REQUIREMENT: 4 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 \ No newline at end of file diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index fb8c6938cdb..472ac55ca09 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -462,7 +462,7 @@ mod tests { use super::*; use ssz::Encode; use tempfile::Builder as TempBuilder; - use types::{Eth1Data, GnosisEthSpec, MainnetEthSpec}; + use types::{Eth1Data, FixedBytesExtended, GnosisEthSpec, MainnetEthSpec}; type E = MainnetEthSpec; diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index d32d7994689..f988dd86b1f 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v5.2.1-", - fallback = "Lighthouse/v5.2.1" + prefix = "Lighthouse/v5.3.0-", + fallback = "Lighthouse/v5.3.0" ); /// Returns the first eight characters of the latest commit hash for this build. 
diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 5fc473c8539..d3d91497ccb 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -289,10 +289,10 @@ pub fn test_logger() -> Logger { sloggers::terminal::TerminalLoggerBuilder::new() .level(sloggers::types::Severity::Debug) .build() - .expect("Should build test_logger") + .expect("Should build TerminalLoggerBuilder") } else { sloggers::null::NullLoggerBuilder .build() - .expect("Should build null_logger") + .expect("Should build NullLoggerBuilder") } } diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs index ec8f45d850e..46fbff7eacd 100644 --- a/common/task_executor/src/test_utils.rs +++ b/common/task_executor/src/test_utils.rs @@ -1,7 +1,6 @@ use crate::TaskExecutor; -use logging::test_logger; +pub use logging::test_logger; use slog::Logger; -use sloggers::{null::NullLoggerBuilder, Build}; use std::sync::Arc; use tokio::runtime; @@ -67,10 +66,3 @@ impl TestRuntime { self.task_executor.log = log; } } - -pub fn null_logger() -> Result { - let log_builder = NullLoggerBuilder; - log_builder - .build() - .map_err(|e| format!("Failed to start null logger: {:?}", e)) -} diff --git a/consensus/fixed_bytes/Cargo.toml b/consensus/fixed_bytes/Cargo.toml new file mode 100644 index 00000000000..e5201a04551 --- /dev/null +++ b/consensus/fixed_bytes/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "fixed_bytes" +version = "0.1.0" +authors = ["Eitan Seri-Levi "] +edition = { workspace = true } + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +alloy-primitives = { workspace = true } +safe_arith = { workspace = true } diff --git a/consensus/fixed_bytes/src/lib.rs b/consensus/fixed_bytes/src/lib.rs new file mode 100644 index 00000000000..efd3e593b93 --- /dev/null +++ b/consensus/fixed_bytes/src/lib.rs @@ -0,0 +1,160 @@ +use alloy_primitives::FixedBytes; +use safe_arith::SafeArith; + +pub 
type Hash64 = alloy_primitives::B64; +pub type Hash256 = alloy_primitives::B256; +pub type Uint256 = alloy_primitives::U256; +pub type Address = alloy_primitives::Address; + +pub trait UintExtended { + fn to_i64(self) -> i64; +} + +pub trait FixedBytesExtended { + fn from_low_u64_be(value: u64) -> Self; + fn from_low_u64_le(value: u64) -> Self; + fn to_low_u64_le(&self) -> u64; + fn zero() -> Self; +} + +impl FixedBytesExtended for FixedBytes { + fn from_low_u64_be(value: u64) -> Self { + let value_bytes = value.to_be_bytes(); + let mut buffer = [0x0; N]; + let bytes_to_copy = value_bytes.len().min(buffer.len()); + // Panic-free because bytes_to_copy <= buffer.len() + let start_index = buffer + .len() + .safe_sub(bytes_to_copy) + .expect("bytes_to_copy <= buffer.len()"); + // Panic-free because start_index <= buffer.len() + // and bytes_to_copy <= value_bytes.len() + buffer + .get_mut(start_index..) + .expect("start_index <= buffer.len()") + .copy_from_slice( + value_bytes + .get(..bytes_to_copy) + .expect("bytes_to_copy <= value_byte.len()"), + ); + Self::from(buffer) + } + + fn from_low_u64_le(value: u64) -> Self { + let value_bytes = value.to_le_bytes(); + let mut buffer = [0x0; N]; + let bytes_to_copy = value_bytes.len().min(buffer.len()); + // Panic-free because bytes_to_copy <= buffer.len(), + // and bytes_to_copy <= value_bytes.len() + buffer + .get_mut(..bytes_to_copy) + .expect("bytes_to_copy <= buffer.len()") + .copy_from_slice( + value_bytes + .get(..bytes_to_copy) + .expect("bytes_to_copy <= value_byte.len()"), + ); + Self::from(buffer) + } + + fn zero() -> Self { + Self::ZERO + } + + /// Trims FixedBytes to its first 8 bytes and converts to u64 + fn to_low_u64_le(&self) -> u64 { + let mut result = [0u8; 8]; + let bytes = self.as_slice(); + // Panic-free because result.len() == bytes[0..8].len() + result.copy_from_slice(&bytes[0..8]); + u64::from_le_bytes(result) + } +} + +impl FixedBytesExtended for alloy_primitives::Address { + fn 
from_low_u64_be(value: u64) -> Self { + FixedBytes::<20>::from_low_u64_be(value).into() + } + + fn from_low_u64_le(value: u64) -> Self { + FixedBytes::<20>::from_low_u64_le(value).into() + } + + fn zero() -> Self { + FixedBytes::<20>::zero().into() + } + + fn to_low_u64_le(&self) -> u64 { + FixedBytes::<20>::to_low_u64_le(self) + } +} + +impl UintExtended for Uint256 { + /// Trims the Uint256 to its first 8 bytes and converts to i64 + fn to_i64(self) -> i64 { + let mut result = [0u8; 8]; + let bytes = self.to_le_bytes::<32>(); + // Panic-free because result.len() == bytes[0..8].len() + result.copy_from_slice(&bytes[0..8]); + i64::from_le_bytes(result) + } +} + +#[cfg(test)] +mod test { + use super::*; + use alloy_primitives::bytes::Buf; + + #[test] + fn from_low_u64_be() { + let values = [0, 1, 0xff, 1 << 16, u64::MAX, u64::MAX - 1]; + for value in values { + assert_eq!( + (&Hash256::from_low_u64_be(value).as_slice()[24..]).get_u64(), + value + ); + } + } + + #[test] + fn from_low_u64_le() { + let values = [0, 1, 0xff, 1 << 16, u64::MAX, u64::MAX - 1]; + for value in values { + assert_eq!( + u64::from_le_bytes( + Hash256::from_low_u64_le(value).as_slice()[0..8] + .try_into() + .unwrap() + ), + value + ); + } + } + + #[test] + fn to_low_u64_le() { + let values = [0, 1, 0xff, 1 << 16, u64::MAX, u64::MAX - 1]; + for value in values { + assert_eq!(Hash256::from_low_u64_le(value).to_low_u64_le(), value); + } + } + + #[test] + fn to_i64_in_range() { + let values = [0, 1, 0xff, 1 << 16, i64::MAX, i64::MAX - 1]; + for value in values { + assert_eq!(Uint256::from(value).to_i64(), value); + } + } + + #[test] + fn to_i64_out_of_range() { + let values = [u128::MAX, 1 << 70, 1 << 80, i64::MAX as u128 + 1]; + for value in values { + assert_eq!( + Uint256::from(value).to_i64(), + i64::from_le_bytes(value.to_le_bytes()[0..8].try_into().unwrap()) + ); + } + } +} diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 7a06d7352b7..4a4f6e9086a 100644 
--- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -12,6 +12,7 @@ state_processing = { workspace = true } proto_array = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +lighthouse_metrics = { workspace = true } slog = { workspace = true } [dev-dependencies] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index c55219a6761..ca59a6adfb6 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,3 +1,4 @@ +use crate::metrics::{self, scrape_for_metrics}; use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError, @@ -15,8 +16,8 @@ use std::time::Duration; use types::{ consts::bellatrix::INTERVALS_PER_SLOT, AbstractExecPayload, AttestationShufflingId, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, - Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestationRef, RelativeEpoch, - SignedBeaconBlock, Slot, + Epoch, EthSpec, ExecPayload, ExecutionBlockHash, FixedBytesExtended, Hash256, + IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -262,6 +263,11 @@ fn dequeue_attestations( .unwrap_or(queued_attestations.len()), ); + metrics::inc_counter_by( + &metrics::FORK_CHOICE_DEQUEUED_ATTESTATIONS, + queued_attestations.len() as u64, + ); + std::mem::replace(queued_attestations, remaining) } @@ -649,6 +655,8 @@ where payload_verification_status: PayloadVerificationStatus, spec: &ChainSpec, ) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_ON_BLOCK_TIMES); + // If this block has already been processed we do not need to reprocess it. // We check this immediately in case re-processing the block mutates some property of the // global fork choice store, e.g. 
the justified checkpoints or the proposer boost root. @@ -1040,6 +1048,8 @@ where attestation: IndexedAttestationRef, is_from_block: AttestationFromBlock, ) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_ON_ATTESTATION_TIMES); + self.update_time(system_time_current_slot)?; // Ignore any attestations to the zero hash. @@ -1087,6 +1097,8 @@ where /// /// We assume that the attester slashing provided to this function has already been verified. pub fn on_attester_slashing(&mut self, slashing: AttesterSlashingRef<'_, E>) { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_ON_ATTESTER_SLASHING_TIMES); + let attesting_indices_set = |att: IndexedAttestationRef<'_, E>| { att.attesting_indices_iter() .copied() @@ -1502,6 +1514,11 @@ where queued_attestations: self.queued_attestations().to_vec(), } } + + /// Update the global metrics `DEFAULT_REGISTRY` with info from the fork choice + pub fn scrape_for_metrics(&self) { + scrape_for_metrics(self); + } } /// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes. 
diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 5e8cfb1ee49..17f1dc38a6b 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -1,5 +1,6 @@ mod fork_choice; mod fork_choice_store; +mod metrics; pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, diff --git a/consensus/fork_choice/src/metrics.rs b/consensus/fork_choice/src/metrics.rs new file mode 100644 index 00000000000..eb0dbf435e3 --- /dev/null +++ b/consensus/fork_choice/src/metrics.rs @@ -0,0 +1,62 @@ +pub use lighthouse_metrics::*; +use std::sync::LazyLock; +use types::EthSpec; + +use crate::{ForkChoice, ForkChoiceStore}; + +pub static FORK_CHOICE_QUEUED_ATTESTATIONS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "fork_choice_queued_attestations", + "Current count of queued attestations", + ) +}); +pub static FORK_CHOICE_NODES: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("fork_choice_nodes", "Current count of proto array nodes") +}); +pub static FORK_CHOICE_INDICES: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "fork_choice_indices", + "Current count of proto array indices", + ) +}); +pub static FORK_CHOICE_DEQUEUED_ATTESTATIONS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "fork_choice_dequeued_attestations_total", + "Total count of dequeued attestations", + ) +}); +pub static FORK_CHOICE_ON_BLOCK_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_fork_choice_process_block_seconds", + "The duration in seconds of on_block runs", + ) +}); +pub static FORK_CHOICE_ON_ATTESTATION_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_fork_choice_process_attestation_seconds", + "The duration in seconds of on_attestation runs", + ) +}); +pub static FORK_CHOICE_ON_ATTESTER_SLASHING_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_fork_choice_on_attester_slashing_seconds", + 
"The duration in seconds on on_attester_slashing runs", + ) + }); + +/// Update the global metrics `DEFAULT_REGISTRY` with info from the fork choice. +pub fn scrape_for_metrics, E: EthSpec>(fork_choice: &ForkChoice) { + set_gauge( + &FORK_CHOICE_QUEUED_ATTESTATIONS, + fork_choice.queued_attestations().len() as i64, + ); + set_gauge( + &FORK_CHOICE_NODES, + fork_choice.proto_array().core_proto_array().nodes.len() as i64, + ); + set_gauge( + &FORK_CHOICE_INDICES, + fork_choice.proto_array().core_proto_array().indices.len() as i64, + ); +} diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index d2935dbca45..b1ef833be0f 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -16,8 +16,8 @@ use std::time::Duration; use store::MemoryStore; use types::{ test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, - Epoch, EthSpec, ForkName, Hash256, IndexedAttestation, MainnetEthSpec, RelativeEpoch, - SignedBeaconBlock, Slot, SubnetId, + Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, IndexedAttestation, MainnetEthSpec, + RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, }; pub type E = MainnetEthSpec; @@ -1344,7 +1344,7 @@ async fn progressive_balances_cache_attester_slashing() { // (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip // the slot in this scenario rather than panic-ing. The same applies to // `progressive_balances_cache_proposer_slashing`. 
- .apply_blocks(1) + .apply_blocks(2) .await .add_previous_epoch_attester_slashing() .await diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 15f65dfe4f6..c2c6bf270a6 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -5,13 +5,14 @@ authors = ["Michael Sproul "] edition = { workspace = true } [dependencies] -ethereum-types = { workspace = true } +alloy-primitives = { workspace = true } ethereum_hashing = { workspace = true } safe_arith = { workspace = true } +fixed_bytes = { workspace = true } [dev-dependencies] quickcheck = { workspace = true } quickcheck_macros = { workspace = true } [features] -arbitrary = ["ethereum-types/arbitrary"] +arbitrary = ["alloy-primitives/arbitrary"] diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index b9457fffab8..b01f3f4429f 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -1,8 +1,10 @@ use ethereum_hashing::{hash, hash32_concat, ZERO_HASHES}; -use ethereum_types::H256; use safe_arith::ArithError; use std::sync::LazyLock; +type H256 = fixed_bytes::Hash256; +pub use fixed_bytes::FixedBytesExtended; + const MAX_TREE_DEPTH: usize = 32; const EMPTY_SLICE: &[H256] = &[]; @@ -86,8 +88,8 @@ impl MerkleTree { let left_subtree = MerkleTree::create(left_leaves, depth - 1); let right_subtree = MerkleTree::create(right_leaves, depth - 1); let hash = H256::from_slice(&hash32_concat( - left_subtree.hash().as_bytes(), - right_subtree.hash().as_bytes(), + left_subtree.hash().as_slice(), + right_subtree.hash().as_slice(), )); Node(hash, Box::new(left_subtree), Box::new(right_subtree)) @@ -143,9 +145,9 @@ impl MerkleTree { // All other possibilities are invalid MerkleTrees (_, _) => return Err(MerkleTreeError::Invalid), }; - hash.assign_from_slice(&hash32_concat( - left.hash().as_bytes(), - right.hash().as_bytes(), + hash.copy_from_slice(&hash32_concat( + left.hash().as_slice(), + 
right.hash().as_slice(), )); } Finalized(_) => return Err(MerkleTreeError::FinalizedNodePushed), @@ -274,8 +276,8 @@ impl MerkleTree { }; let hash = H256::from_slice(&hash32_concat( - left.hash().as_bytes(), - right.hash().as_bytes(), + left.hash().as_slice(), + right.hash().as_slice(), )); Ok(MerkleTree::Node(hash, Box::new(left), Box::new(right))) } @@ -369,15 +371,15 @@ pub fn verify_merkle_proof( pub fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usize) -> H256 { assert_eq!(branch.len(), depth, "proof length should equal depth"); - let mut merkle_root = leaf.as_bytes().to_vec(); + let mut merkle_root = leaf.as_slice().to_vec(); for (i, leaf) in branch.iter().enumerate().take(depth) { let ith_bit = (index >> i) & 0x01; if ith_bit == 1 { - merkle_root = hash32_concat(leaf.as_bytes(), &merkle_root)[..].to_vec(); + merkle_root = hash32_concat(leaf.as_slice(), &merkle_root)[..].to_vec(); } else { let mut input = merkle_root; - input.extend_from_slice(leaf.as_bytes()); + input.extend_from_slice(leaf.as_slice()); merkle_root = hash(&input); } } @@ -433,7 +435,6 @@ mod tests { } let leaves_iter = int_leaves.into_iter().map(H256::from_low_u64_be); - let mut merkle_tree = MerkleTree::create(&[], depth); let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| { @@ -465,10 +466,10 @@ mod tests { let leaf_b10 = H256::from([0xCC; 32]); let leaf_b11 = H256::from([0xDD; 32]); - let node_b0x = H256::from_slice(&hash32_concat(leaf_b00.as_bytes(), leaf_b01.as_bytes())); - let node_b1x = H256::from_slice(&hash32_concat(leaf_b10.as_bytes(), leaf_b11.as_bytes())); + let node_b0x = H256::from_slice(&hash32_concat(leaf_b00.as_slice(), leaf_b01.as_slice())); + let node_b1x = H256::from_slice(&hash32_concat(leaf_b10.as_slice(), leaf_b11.as_slice())); - let root = H256::from_slice(&hash32_concat(node_b0x.as_bytes(), node_b1x.as_bytes())); + let root = H256::from_slice(&hash32_concat(node_b0x.as_slice(), node_b1x.as_slice())); let tree = 
MerkleTree::create(&[leaf_b00, leaf_b01, leaf_b10, leaf_b11], 2); assert_eq!(tree.hash(), root); @@ -482,10 +483,10 @@ mod tests { let leaf_b10 = H256::from([0xCC; 32]); let leaf_b11 = H256::from([0xDD; 32]); - let node_b0x = H256::from_slice(&hash32_concat(leaf_b00.as_bytes(), leaf_b01.as_bytes())); - let node_b1x = H256::from_slice(&hash32_concat(leaf_b10.as_bytes(), leaf_b11.as_bytes())); + let node_b0x = H256::from_slice(&hash32_concat(leaf_b00.as_slice(), leaf_b01.as_slice())); + let node_b1x = H256::from_slice(&hash32_concat(leaf_b10.as_slice(), leaf_b11.as_slice())); - let root = H256::from_slice(&hash32_concat(node_b0x.as_bytes(), node_b1x.as_bytes())); + let root = H256::from_slice(&hash32_concat(node_b0x.as_slice(), node_b1x.as_slice())); // Run some proofs assert!(verify_merkle_proof( diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 57648499753..d99ace05f95 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -8,8 +8,8 @@ use crate::{InvalidationOperation, JustifiedBalances}; use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use types::{ - AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, - MainnetEthSpec, Slot, + AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, FixedBytesExtended, + Hash256, MainnetEthSpec, Slot, }; pub use execution_status::*; diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index 27a7969e49b..de84fbdd128 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -1,3 +1,5 @@ +use types::FixedBytesExtended; + use super::*; pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { diff --git 
a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 74f3a986c9d..38ea1411994 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -7,8 +7,8 @@ use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; use superstruct::superstruct; use types::{ - AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, - Slot, + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, + FixedBytesExtended, Hash256, Slot, }; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 606269aee08..88d46603117 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -15,8 +15,8 @@ use std::{ fmt, }; use types::{ - AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, - Slot, + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, + FixedBytesExtended, Hash256, Slot, }; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -993,7 +993,7 @@ fn compute_deltas( #[cfg(test)] mod test_compute_deltas { use super::*; - use types::MainnetEthSpec; + use types::{FixedBytesExtended, MainnetEthSpec}; /// Gives a hash that is not the zero hash (unless i is `usize::MAX)`. 
fn hash_from_index(i: usize) -> Hash256 { diff --git a/consensus/state_processing/src/all_caches.rs b/consensus/state_processing/src/all_caches.rs index b915091405b..e49eb395c40 100644 --- a/consensus/state_processing/src/all_caches.rs +++ b/consensus/state_processing/src/all_caches.rs @@ -1,6 +1,8 @@ use crate::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use crate::epoch_cache::initialize_epoch_cache; -use types::{BeaconState, ChainSpec, EpochCacheError, EthSpec, Hash256, RelativeEpoch}; +use types::{ + BeaconState, ChainSpec, EpochCacheError, EthSpec, FixedBytesExtended, Hash256, RelativeEpoch, +}; /// Mixin trait for the beacon state that provides operations on *all* caches. /// diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs index 0e940fabe4f..5af5e639fd2 100644 --- a/consensus/state_processing/src/epoch_cache.rs +++ b/consensus/state_processing/src/epoch_cache.rs @@ -3,7 +3,9 @@ use crate::common::base::SqrtTotalActiveBalance; use crate::common::{altair, base}; use safe_arith::SafeArith; use types::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; -use types::{ActivationQueue, BeaconState, ChainSpec, EthSpec, ForkName, Hash256}; +use types::{ + ActivationQueue, BeaconState, ChainSpec, EthSpec, FixedBytesExtended, ForkName, Hash256, +}; /// Precursor to an `EpochCache`. pub struct PreEpochCache { diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 049599ea945..00697def5d2 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -7,6 +7,7 @@ use crate::upgrade::{ upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, }; use safe_arith::{ArithError, SafeArith}; +use std::sync::Arc; use tree_hash::TreeHash; use types::*; @@ -122,6 +123,16 @@ pub fn initialize_beacon_state_from_eth1( // Remove intermediate Deneb fork from `state.fork`. 
state.fork_mut().previous_version = spec.electra_fork_version; + // TODO(electra): think about this more and determine the best way to + // do this. The spec tests will expect that the sync committees are + // calculated using the electra value for MAX_EFFECTIVE_BALANCE when + // calling `initialize_beacon_state_from_eth1()`. But the sync committees + // are actually calcuated back in `upgrade_to_altair()`. We need to + // re-calculate the sync committees here now that the state is `Electra` + let sync_committee = Arc::new(state.get_next_sync_committee(spec)?); + *state.current_sync_committee_mut()? = sync_committee.clone(); + *state.next_sync_committee_mut()? = sync_committee; + // Override latest execution payload header. // See https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#testing if let Some(ExecutionPayloadHeader::Electra(header)) = execution_payload_header { diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 223d7a4b891..24cb51d7557 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -170,7 +170,6 @@ where self.include_exits(block)?; self.include_sync_aggregate(block)?; self.include_bls_to_execution_changes(block)?; - self.include_consolidations(block)?; Ok(()) } @@ -359,27 +358,6 @@ where Ok(()) } - /// Includes all signatures in `self.block.body.consolidations` for verification. 
- pub fn include_consolidations>( - &mut self, - block: &'a SignedBeaconBlock, - ) -> Result<()> { - if let Ok(consolidations) = block.message().body().consolidations() { - self.sets.sets.reserve(consolidations.len()); - for consolidation in consolidations { - let set = consolidation_signature_set( - self.state, - self.get_pubkey.clone(), - consolidation, - self.spec, - )?; - - self.sets.push(set); - } - } - Ok(()) - } - /// Verify all the signatures that have been included in `self`, returning `true` if and only if /// all the signatures are valid. /// diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index cebb10b6071..fdeec6f08c3 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -89,46 +89,6 @@ pub enum BlockProcessingError { found: Hash256, }, WithdrawalCredentialsInvalid, - TooManyPendingConsolidations { - consolidations: usize, - limit: usize, - }, - ConsolidationChurnLimitTooLow { - churn_limit: u64, - minimum: u64, - }, - MatchingSourceTargetConsolidation { - index: u64, - }, - InactiveConsolidationSource { - index: u64, - current_epoch: Epoch, - }, - InactiveConsolidationTarget { - index: u64, - current_epoch: Epoch, - }, - SourceValidatorExiting { - index: u64, - }, - TargetValidatorExiting { - index: u64, - }, - FutureConsolidationEpoch { - current_epoch: Epoch, - consolidation_epoch: Epoch, - }, - NoSourceExecutionWithdrawalCredential { - index: u64, - }, - NoTargetExecutionWithdrawalCredential { - index: u64, - }, - MismatchedWithdrawalCredentials { - source_address: Address, - target_address: Address, - }, - InavlidConsolidationSignature, PendingAttestationInElectra, } diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 17607f7f337..74166f67130 100644 --- 
a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -4,7 +4,6 @@ use crate::common::{ slash_validator, }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; -use crate::signature_sets::consolidation_signature_set; use crate::VerifySignatures; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; use types::typenum::U33; @@ -40,15 +39,16 @@ pub fn process_operations>( } if state.fork_name_unchecked().electra_enabled() { - let requests = block_body.execution_payload()?.withdrawal_requests()?; - if let Some(requests) = requests { - process_execution_layer_withdrawal_requests(state, &requests, spec)?; + state.update_pubkey_cache()?; + if let Some(deposit_requests) = block_body.execution_payload()?.deposit_requests()? { + process_deposit_requests(state, &deposit_requests, spec)?; } - let receipts = block_body.execution_payload()?.deposit_requests()?; - if let Some(receipts) = receipts { - process_deposit_requests(state, &receipts, spec)?; + if let Some(withdrawal_requests) = block_body.execution_payload()?.withdrawal_requests()? { + process_withdrawal_requests(state, &withdrawal_requests, spec)?; + } + if let Some(consolidations) = block_body.execution_payload()?.consolidation_requests()? { + process_consolidation_requests(state, &consolidations, spec)?; } - process_consolidations(state, block_body.consolidations()?, verify_signatures, spec)?; } Ok(()) @@ -371,10 +371,11 @@ pub fn process_deposits( ) -> Result<(), BlockProcessingError> { // [Modified in Electra:EIP6110] // Disable former deposit mechanism once all prior deposits are processed - // - // If `deposit_requests_start_index` does not exist as a field on `state`, electra is disabled - // which means we always want to use the old check, so this field defaults to `u64::MAX`. 
- let eth1_deposit_index_limit = state.deposit_requests_start_index().unwrap_or(u64::MAX); + let deposit_requests_start_index = state.deposit_requests_start_index().unwrap_or(u64::MAX); + let eth1_deposit_index_limit = std::cmp::min( + deposit_requests_start_index, + state.eth1_data().deposit_count, + ); if state.eth1_deposit_index() < eth1_deposit_index_limit { let expected_deposit_len = std::cmp::min( @@ -528,9 +529,10 @@ pub fn apply_deposit( Ok(()) } -pub fn process_execution_layer_withdrawal_requests( +// Make sure to build the pubkey cache before calling this function +pub fn process_withdrawal_requests( state: &mut BeaconState, - requests: &[ExecutionLayerWithdrawalRequest], + requests: &[WithdrawalRequest], spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { for request in requests { @@ -545,13 +547,11 @@ pub fn process_execution_layer_withdrawal_requests( } // Verify pubkey exists - let index_opt = state.get_validator_index(&request.validator_pubkey)?; - let Some(index) = index_opt else { + let Some(index) = state.pubkey_cache().get(&request.validator_pubkey) else { continue; }; let validator = state.get_validator(index)?; - // Verify withdrawal credentials let has_correct_credential = validator.has_execution_withdrawal_credential(spec); let is_correct_source_address = validator @@ -627,19 +627,19 @@ pub fn process_execution_layer_withdrawal_requests( pub fn process_deposit_requests( state: &mut BeaconState, - receipts: &[DepositRequest], + deposit_requests: &[DepositRequest], spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - for receipt in receipts { + for request in deposit_requests { // Set deposit receipt start index if state.deposit_requests_start_index()? == spec.unset_deposit_requests_start_index { - *state.deposit_requests_start_index_mut()? = receipt.index + *state.deposit_requests_start_index_mut()? 
= request.index } let deposit_data = DepositData { - pubkey: receipt.pubkey, - withdrawal_credentials: receipt.withdrawal_credentials, - amount: receipt.amount, - signature: receipt.signature.clone().into(), + pubkey: request.pubkey, + withdrawal_credentials: request.withdrawal_credentials, + amount: request.amount, + signature: request.signature.clone().into(), }; apply_deposit(state, deposit_data, None, false, spec)? } @@ -647,149 +647,96 @@ pub fn process_deposit_requests( Ok(()) } -pub fn process_consolidations( +// Make sure to build the pubkey cache before calling this function +pub fn process_consolidation_requests( state: &mut BeaconState, - consolidations: &[SignedConsolidation], - verify_signatures: VerifySignatures, + consolidation_requests: &[ConsolidationRequest], spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - if consolidations.is_empty() { - return Ok(()); + for request in consolidation_requests { + process_consolidation_request(state, request, spec)?; } - // If the pending consolidations queue is full, no consolidations are allowed in the block - let pending_consolidations = state.pending_consolidations()?.len(); - let pending_consolidations_limit = E::pending_consolidations_limit(); - block_verify! { - pending_consolidations < pending_consolidations_limit, - BlockProcessingError::TooManyPendingConsolidations { - consolidations: pending_consolidations, - limit: pending_consolidations_limit - } - } + Ok(()) +} - // If there is too little available consolidation churn limit, no consolidations are allowed in the block - let churn_limit = state.get_consolidation_churn_limit(spec)?; - block_verify! 
{ - churn_limit > spec.min_activation_balance, - BlockProcessingError::ConsolidationChurnLimitTooLow { - churn_limit, - minimum: spec.min_activation_balance - } +pub fn process_consolidation_request( + state: &mut BeaconState, + consolidation_request: &ConsolidationRequest, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + // If the pending consolidations queue is full, consolidation requests are ignored + if state.pending_consolidations()?.len() == E::PendingConsolidationsLimit::to_usize() { + return Ok(()); + } + // If there is too little available consolidation churn limit, consolidation requests are ignored + if state.get_consolidation_churn_limit(spec)? <= spec.min_activation_balance { + return Ok(()); } - for signed_consolidation in consolidations { - let consolidation = signed_consolidation.message.clone(); - - // Verify that source != target, so a consolidation cannot be used as an exit. - block_verify! { - consolidation.source_index != consolidation.target_index, - BlockProcessingError::MatchingSourceTargetConsolidation { - index: consolidation.source_index - } - } - - let source_validator = state.get_validator(consolidation.source_index as usize)?; - let target_validator = state.get_validator(consolidation.target_index as usize)?; - - // Verify the source and the target are active - let current_epoch = state.current_epoch(); - block_verify! { - source_validator.is_active_at(current_epoch), - BlockProcessingError::InactiveConsolidationSource{ - index: consolidation.source_index, - current_epoch - } - } - block_verify! { - target_validator.is_active_at(current_epoch), - BlockProcessingError::InactiveConsolidationTarget{ - index: consolidation.target_index, - current_epoch - } - } - - // Verify exits for source and target have not been initiated - block_verify! { - source_validator.exit_epoch == spec.far_future_epoch, - BlockProcessingError::SourceValidatorExiting{ - index: consolidation.source_index, - } - } - block_verify! 
{ - target_validator.exit_epoch == spec.far_future_epoch, - BlockProcessingError::TargetValidatorExiting{ - index: consolidation.target_index, - } - } - - // Consolidations must specify an epoch when they become valid; they are not valid before then - block_verify! { - current_epoch >= consolidation.epoch, - BlockProcessingError::FutureConsolidationEpoch { - current_epoch, - consolidation_epoch: consolidation.epoch - } - } + let Some(source_index) = state + .pubkey_cache() + .get(&consolidation_request.source_pubkey) + else { + // source validator doesn't exist + return Ok(()); + }; + let Some(target_index) = state + .pubkey_cache() + .get(&consolidation_request.target_pubkey) + else { + // target validator doesn't exist + return Ok(()); + }; + // Verify that source != target, so a consolidation cannot be used as an exit. + if source_index == target_index { + return Ok(()); + } - // Verify the source and the target have Execution layer withdrawal credentials - block_verify! { - source_validator.has_execution_withdrawal_credential(spec), - BlockProcessingError::NoSourceExecutionWithdrawalCredential { - index: consolidation.source_index, - } - } - block_verify! 
{ - target_validator.has_execution_withdrawal_credential(spec), - BlockProcessingError::NoTargetExecutionWithdrawalCredential { - index: consolidation.target_index, - } + let source_validator = state.get_validator(source_index)?; + // Verify the source withdrawal credentials + if let Some(withdrawal_address) = source_validator.get_execution_withdrawal_address(spec) { + if withdrawal_address != consolidation_request.source_address { + return Ok(()); } + } else { + // Source doen't have execution withdrawal credentials + return Ok(()); + } - // Verify the same withdrawal address - let source_address = source_validator - .get_execution_withdrawal_address(spec) - .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?; - let target_address = target_validator - .get_execution_withdrawal_address(spec) - .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?; - block_verify! { - source_address == target_address, - BlockProcessingError::MismatchedWithdrawalCredentials { - source_address, - target_address - } - } + let target_validator = state.get_validator(target_index)?; + // Verify the target has execution withdrawal credentials + if !target_validator.has_execution_withdrawal_credential(spec) { + return Ok(()); + } - if verify_signatures.is_true() { - let signature_set = consolidation_signature_set( - state, - |i| get_pubkey_from_state(state, i), - signed_consolidation, - spec, - )?; - block_verify! 
{ - signature_set.verify(), - BlockProcessingError::InavlidConsolidationSignature - } - } - let exit_epoch = state.compute_consolidation_epoch_and_update_churn( - source_validator.effective_balance, - spec, - )?; - let source_validator = state.get_validator_mut(consolidation.source_index as usize)?; - // Initiate source validator exit and append pending consolidation - source_validator.exit_epoch = exit_epoch; - source_validator.withdrawable_epoch = source_validator - .exit_epoch - .safe_add(spec.min_validator_withdrawability_delay)?; - state - .pending_consolidations_mut()? - .push(PendingConsolidation { - source_index: consolidation.source_index, - target_index: consolidation.target_index, - })?; + // Verify the source and target are active + let current_epoch = state.current_epoch(); + if !source_validator.is_active_at(current_epoch) + || !target_validator.is_active_at(current_epoch) + { + return Ok(()); + } + // Verify exits for source and target have not been initiated + if source_validator.exit_epoch != spec.far_future_epoch + || target_validator.exit_epoch != spec.far_future_epoch + { + return Ok(()); } + // Initiate source validator exit and append pending consolidation + let source_exit_epoch = state + .compute_consolidation_epoch_and_update_churn(source_validator.effective_balance, spec)?; + let source_validator = state.get_validator_mut(source_index)?; + source_validator.exit_epoch = source_exit_epoch; + source_validator.withdrawable_epoch = + source_exit_epoch.safe_add(spec.min_validator_withdrawability_delay)?; + state + .pending_consolidations_mut()? 
+ .push(PendingConsolidation { + source_index: source_index as u64, + target_index: target_index as u64, + })?; + Ok(()) } diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 3c683766adb..2e00ee03418 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -11,8 +11,8 @@ use types::{ BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation, IndexedAttestationRef, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, - SignedBlsToExecutionChange, SignedConsolidation, SignedContributionAndProof, SignedRoot, - SignedVoluntaryExit, SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, + SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, }; pub type Result = std::result::Result; @@ -664,37 +664,3 @@ where message, ))) } - -/// Returns two signature sets, one for the source and one for the target validator -/// in the `SignedConsolidation`. 
-pub fn consolidation_signature_set<'a, E, F>( - state: &'a BeaconState, - get_pubkey: F, - consolidation: &'a SignedConsolidation, - spec: &'a ChainSpec, -) -> Result> -where - E: EthSpec, - F: Fn(usize) -> Option>, -{ - let source_index = consolidation.message.source_index as usize; - let target_index = consolidation.message.target_index as usize; - - let domain = spec.compute_domain( - Domain::Consolidation, - spec.genesis_fork_version, - state.genesis_validators_root(), - ); - - let message = consolidation.message.signing_root(domain); - let source_pubkey = - get_pubkey(source_index).ok_or(Error::ValidatorUnknown(source_index as u64))?; - let target_pubkey = - get_pubkey(target_index).ok_or(Error::ValidatorUnknown(target_index as u64))?; - - Ok(SignatureSet::multiple_pubkeys( - &consolidation.signature, - vec![source_pubkey, target_pubkey], - message, - )) -} diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index 1e8f25ed10b..24024fa8990 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -30,7 +30,7 @@ pub fn verify_bls_to_execution_change( verify!( validator .withdrawal_credentials - .as_bytes() + .as_slice() .first() .map(|byte| *byte == spec.bls_withdrawal_prefix_byte) .unwrap_or(false), @@ -41,7 +41,7 @@ pub fn verify_bls_to_execution_change( // future. let pubkey_hash = hash(address_change.from_bls_pubkey.as_serialized()); verify!( - validator.withdrawal_credentials.as_bytes().get(1..) == pubkey_hash.get(1..), + validator.withdrawal_credentials.as_slice().get(1..) 
== pubkey_hash.get(1..), Invalid::WithdrawalCredentialsMismatch ); diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 514cf639360..51f45b87e80 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -17,8 +17,8 @@ use types::{ }, milhouse::Cow, ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, - ExitCache, ForkName, List, ParticipationFlags, ProgressiveBalancesCache, RelativeEpoch, - Unsigned, Validator, + ExitCache, ForkName, List, ParticipationFlags, PendingBalanceDeposit, ProgressiveBalancesCache, + RelativeEpoch, Unsigned, Validator, }; pub struct SinglePassConfig { @@ -91,6 +91,8 @@ struct PendingBalanceDepositsContext { deposit_balance_to_consume: u64, /// Total balance increases for each validator due to pending balance deposits. validator_deposits_to_process: HashMap, + /// The deposits to append to `pending_balance_deposits` after processing all applicable deposits. + deposits_to_postpone: Vec, } struct EffectiveBalancesContext { @@ -342,12 +344,15 @@ pub fn process_epoch_single_pass( // of the `pending_balance_deposits` list. But we may as well preserve the write ordering used // by the spec and do this first. if let Some(ctxt) = pending_balance_deposits_ctxt { - let new_pending_balance_deposits = List::try_from_iter( + let mut new_pending_balance_deposits = List::try_from_iter( state .pending_balance_deposits()? .iter_from(ctxt.next_deposit_index)? .cloned(), )?; + for deposit in ctxt.deposits_to_postpone { + new_pending_balance_deposits.push(deposit)?; + } *state.pending_balance_deposits_mut()? = new_pending_balance_deposits; *state.deposit_balance_to_consume_mut()? 
= ctxt.deposit_balance_to_consume; } @@ -805,22 +810,61 @@ impl PendingBalanceDepositsContext { let available_for_processing = state .deposit_balance_to_consume()? .safe_add(state.get_activation_exit_churn_limit(spec)?)?; + let current_epoch = state.current_epoch(); + let next_epoch = state.next_epoch()?; let mut processed_amount = 0; let mut next_deposit_index = 0; let mut validator_deposits_to_process = HashMap::new(); + let mut deposits_to_postpone = vec![]; let pending_balance_deposits = state.pending_balance_deposits()?; for deposit in pending_balance_deposits.iter() { - if processed_amount.safe_add(deposit.amount)? > available_for_processing { - break; + // We have to do a bit of indexing into `validators` here, but I can't see any way + // around that without changing the spec. + // + // We need to work out if `validator.exit_epoch` will be set to a non-default value + // *after* changes applied by `process_registry_updates`, which in our implementation + // does not happen until after this (but in the spec happens before). However it's not + // hard to work out: we don't need to know exactly what value the `exit_epoch` will + // take, just whether it is non-default. Nor do we need to know the value of + // `withdrawable_epoch`, because `next_epoch <= withdrawable_epoch` will evaluate to + // `true` both for the actual value & the default placeholder value (`FAR_FUTURE_EPOCH`). + let validator = state.get_validator(deposit.index as usize)?; + let already_exited = validator.exit_epoch < spec.far_future_epoch; + // In the spec process_registry_updates is called before process_pending_balance_deposits + // so we must account for process_registry_updates ejecting the validator for low balance + // and setting the exit_epoch to < far_future_epoch. 
Note that in the spec the effective + // balance update does not happen until *after* the registry update, so we don't need to + // account for changes to the effective balance that would push it below the ejection + // balance here. + let will_be_exited = validator.is_active_at(current_epoch) + && validator.effective_balance <= spec.ejection_balance; + if already_exited || will_be_exited { + if next_epoch <= validator.withdrawable_epoch { + deposits_to_postpone.push(deposit.clone()); + } else { + // Deposited balance will never become active. Increase balance but do not + // consume churn. + validator_deposits_to_process + .entry(deposit.index as usize) + .or_insert(0) + .safe_add_assign(deposit.amount)?; + } + } else { + // Deposit does not fit in the churn, no more deposit processing in this epoch. + if processed_amount.safe_add(deposit.amount)? > available_for_processing { + break; + } + // Deposit fits in the churn, process it. Increase balance and consume churn. + validator_deposits_to_process + .entry(deposit.index as usize) + .or_insert(0) + .safe_add_assign(deposit.amount)?; + processed_amount.safe_add_assign(deposit.amount)?; } - validator_deposits_to_process - .entry(deposit.index as usize) - .or_insert(0) - .safe_add_assign(deposit.amount)?; - processed_amount.safe_add_assign(deposit.amount)?; + // Regardless of how the deposit was handled, we move on in the queue. 
next_deposit_index.safe_add_assign(1)?; } @@ -834,6 +878,7 @@ impl PendingBalanceDepositsContext { next_deposit_index, deposit_balance_to_consume, validator_deposits_to_process, + deposits_to_postpone, }) } } @@ -865,7 +910,7 @@ fn process_pending_consolidations( spec: &ChainSpec, ) -> Result<(), Error> { let mut next_pending_consolidation: usize = 0; - let current_epoch = state.current_epoch(); + let next_epoch = state.next_epoch()?; let pending_consolidations = state.pending_consolidations()?.clone(); let mut affected_validators = BTreeSet::new(); @@ -878,7 +923,7 @@ fn process_pending_consolidations( next_pending_consolidation.safe_add_assign(1)?; continue; } - if source_validator.withdrawable_epoch > current_epoch { + if source_validator.withdrawable_epoch > next_epoch { break; } diff --git a/consensus/state_processing/src/per_epoch_processing/tests.rs b/consensus/state_processing/src/per_epoch_processing/tests.rs index 14bbfbc071d..8c240548b04 100644 --- a/consensus/state_processing/src/per_epoch_processing/tests.rs +++ b/consensus/state_processing/src/per_epoch_processing/tests.rs @@ -2,7 +2,7 @@ use crate::per_epoch_processing::process_epoch; use beacon_chain::test_utils::BeaconChainHarness; use beacon_chain::types::{EthSpec, MinimalEthSpec}; -use bls::Hash256; +use bls::{FixedBytesExtended, Hash256}; use env_logger::{Builder, Env}; use types::Slot; diff --git a/consensus/state_processing/src/state_advance.rs b/consensus/state_processing/src/state_advance.rs index 721907cac93..4d38e7797e6 100644 --- a/consensus/state_processing/src/state_advance.rs +++ b/consensus/state_processing/src/state_advance.rs @@ -5,7 +5,7 @@ //! duplication and protect against some easy-to-make mistakes when performing state advances. 
use crate::*; -use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot}; +use types::{BeaconState, ChainSpec, EthSpec, FixedBytesExtended, Hash256, Slot}; #[derive(Debug, PartialEq)] pub enum Error { diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index ea9b603c5bc..aff0225edd4 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -12,8 +12,10 @@ harness = false criterion = { workspace = true } [dependencies] +alloy-primitives = { workspace = true } ethereum_hashing = { workspace = true } -ethereum-types = { workspace = true } +fixed_bytes = { workspace = true } [features] -arbitrary = ["ethereum-types/arbitrary"] +arbitrary = ["alloy-primitives/arbitrary"] +getrandom = ["alloy-primitives/getrandom"] diff --git a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs index 5f25c517b0e..a7f25ea65f7 100644 --- a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs +++ b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs @@ -87,7 +87,7 @@ fn bytes_to_int64(slice: &[u8]) -> u64 { #[cfg(test)] mod tests { use super::*; - use ethereum_types::H256 as Hash256; + use alloy_primitives::B256 as Hash256; #[test] #[ignore] diff --git a/consensus/swap_or_not_shuffle/src/lib.rs b/consensus/swap_or_not_shuffle/src/lib.rs index e9a131ab059..c6af6b77516 100644 --- a/consensus/swap_or_not_shuffle/src/lib.rs +++ b/consensus/swap_or_not_shuffle/src/lib.rs @@ -20,4 +20,4 @@ mod shuffle_list; pub use compute_shuffled_index::compute_shuffled_index; pub use shuffle_list::shuffle_list; -type Hash256 = ethereum_types::H256; +type Hash256 = fixed_bytes::Hash256; diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index c6c89de5704..c1559a407cf 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -9,14 +9,13 @@ name = "benches" harness = false [dependencies] 
-alloy-primitives = { workspace = true, features = ["rlp"] } +alloy-primitives = { workspace = true, features = ["rlp", "getrandom"] } merkle_proof = { workspace = true } bls = { workspace = true, features = ["arbitrary"] } kzg = { workspace = true } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } -ethereum-types = { workspace = true, features = ["arbitrary"] } ethereum_hashing = { workspace = true } hex = { workspace = true } int_to_bytes = { workspace = true } @@ -31,7 +30,7 @@ ethereum_ssz_derive = { workspace = true } ssz_types = { workspace = true, features = ["arbitrary"] } swap_or_not_shuffle = { workspace = true, features = ["arbitrary"] } test_random_derive = { path = "../../common/test_random_derive" } -tree_hash = { workspace = true, features = ["arbitrary"] } +tree_hash = { workspace = true } tree_hash_derive = { workspace = true } rand_xorshift = "0.3.0" serde_yaml = { workspace = true } @@ -53,6 +52,7 @@ maplit = { workspace = true } alloy-rlp = { version = "0.3.4", features = ["derive"] } milhouse = { workspace = true } rpds = { workspace = true } +fixed_bytes = { workspace = true } [dev-dependencies] criterion = { workspace = true } diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index 56c48e6cb1c..effc6a21068 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -4,8 +4,8 @@ use rayon::prelude::*; use ssz::Encode; use std::sync::Arc; use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Epoch, Eth1Data, EthSpec, Hash256, - MainnetEthSpec, Validator, + test_utils::generate_deterministic_keypair, BeaconState, Epoch, Eth1Data, EthSpec, + FixedBytesExtended, Hash256, MainnetEthSpec, Validator, }; fn get_state(validator_count: usize) -> BeaconState { diff --git a/consensus/types/presets/gnosis/electra.yaml b/consensus/types/presets/gnosis/electra.yaml index 
38f6960bac8..660ed9b64cf 100644 --- a/consensus/types/presets/gnosis/electra.yaml +++ b/consensus/types/presets/gnosis/electra.yaml @@ -30,7 +30,7 @@ MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 # `uint64(2**0)` (= 1) -MAX_CONSOLIDATIONS: 1 +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 # Execution # --------------------------------------------------------------- diff --git a/consensus/types/presets/mainnet/electra.yaml b/consensus/types/presets/mainnet/electra.yaml index 38f6960bac8..660ed9b64cf 100644 --- a/consensus/types/presets/mainnet/electra.yaml +++ b/consensus/types/presets/mainnet/electra.yaml @@ -30,7 +30,7 @@ MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 # `uint64(2**0)` (= 1) -MAX_CONSOLIDATIONS: 1 +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 # Execution # --------------------------------------------------------------- diff --git a/consensus/types/presets/minimal/electra.yaml b/consensus/types/presets/minimal/electra.yaml index cf726e004b1..ef1ce494d8e 100644 --- a/consensus/types/presets/minimal/electra.yaml +++ b/consensus/types/presets/minimal/electra.yaml @@ -30,7 +30,7 @@ MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 # `uint64(2**0)` (= 1) -MAX_CONSOLIDATIONS: 1 +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 # Execution # --------------------------------------------------------------- diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 7b53a98caa1..3801a2b5d2b 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -77,9 +77,9 @@ pub struct Attestation { #[superstruct(only(Electra), partial_getter(rename = "aggregation_bits_electra"))] pub aggregation_bits: BitList, pub data: AttestationData, + pub signature: AggregateSignature, #[superstruct(only(Electra))] pub committee_bits: BitVector, - pub signature: AggregateSignature, } impl Hash for Attestation { @@ -412,13 
+412,7 @@ impl AttestationBase { pub fn extend_aggregation_bits( &self, ) -> Result, ssz_types::Error> { - let mut extended_aggregation_bits: BitList = - BitList::with_capacity(self.aggregation_bits.len())?; - - for (i, bit) in self.aggregation_bits.iter().enumerate() { - extended_aggregation_bits.set(i, bit)?; - } - Ok(extended_aggregation_bits) + self.aggregation_bits.resize::() } } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index f67a965955c..4a6816c024d 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -507,7 +507,7 @@ impl> BeaconBlockCapella message: BlsToExecutionChange { validator_index: 0, from_bls_pubkey: PublicKeyBytes::empty(), - to_execution_address: Address::zero(), + to_execution_address: Address::ZERO, }, signature: Signature::empty() }; @@ -637,7 +637,7 @@ impl> BeaconBlockElectra message: BlsToExecutionChange { validator_index: 0, from_bls_pubkey: PublicKeyBytes::empty(), - to_execution_address: Address::zero(), + to_execution_address: Address::ZERO, }, signature: Signature::empty() }; @@ -670,7 +670,6 @@ impl> BeaconBlockElectra graffiti: Graffiti::default(), execution_payload: Payload::Electra::default(), blob_kzg_commitments: VariableList::empty(), - consolidations: VariableList::empty(), }, } } @@ -701,7 +700,6 @@ impl> EmptyBlock for BeaconBlockElec execution_payload: Payload::Electra::default(), bls_to_execution_changes: VariableList::empty(), blob_kzg_commitments: VariableList::empty(), - consolidations: VariableList::empty(), }, } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 373e165e0bb..305ef105445 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -114,8 +114,6 @@ pub struct BeaconBlockBody = FullPay VariableList, #[superstruct(only(Deneb, Electra))] pub blob_kzg_commitments: KzgCommitments, - #[superstruct(only(Electra))] - pub 
consolidations: VariableList, #[superstruct(only(Base, Altair))] #[metastruct(exclude_from(fields))] #[ssz(skip_serializing, skip_deserializing)] @@ -664,7 +662,6 @@ impl From>> execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, - consolidations, } = body; ( @@ -683,7 +680,6 @@ impl From>> }, bls_to_execution_changes, blob_kzg_commitments: blob_kzg_commitments.clone(), - consolidations, }, Some(execution_payload), ) @@ -822,7 +818,6 @@ impl BeaconBlockBodyElectra> { execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, - consolidations, } = self; BeaconBlockBodyElectra { @@ -840,7 +835,6 @@ impl BeaconBlockBodyElectra> { }, bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), - consolidations: consolidations.clone(), } } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 054e5dbe271..a08f6d720c7 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1,6 +1,7 @@ use self::committee_cache::get_active_validator_indices; use crate::historical_summary::HistoricalSummary; use crate::test_utils::TestRandom; +use crate::FixedBytesExtended; use crate::*; use compare_fields::CompareFields; use compare_fields_derive::CompareFields; @@ -486,11 +487,7 @@ where // Electra #[superstruct(only(Electra), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] - #[serde( - with = "serde_utils::quoted_u64", - //TODO(electra) remove alias when ef tests are updated - alias = "deposit_receipts_start_index" - )] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_requests_start_index: u64, #[superstruct(only(Electra), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] @@ -897,6 +894,8 @@ impl BeaconState { return Err(Error::InsufficientValidators); } + let max_effective_balance = 
spec.max_effective_balance_for_fork(self.fork_name_unchecked()); + let mut i = 0; loop { let shuffled_index = compute_shuffled_index( @@ -912,9 +911,7 @@ impl BeaconState { let random_byte = Self::shuffling_random_byte(i, seed)?; let effective_balance = self.get_effective_balance(candidate_index)?; if effective_balance.safe_mul(MAX_RANDOM_BYTE)? - >= spec - .max_effective_balance - .safe_mul(u64::from(random_byte))? + >= max_effective_balance.safe_mul(u64::from(random_byte))? { return Ok(candidate_index); } @@ -1042,7 +1039,7 @@ impl BeaconState { let epoch = slot.epoch(E::slots_per_epoch()); let mut preimage = self .get_seed(epoch, Domain::BeaconProposer, spec)? - .as_bytes() + .as_slice() .to_vec(); preimage.append(&mut int_to_bytes8(slot.as_u64())); Ok(hash(&preimage)) @@ -1095,6 +1092,7 @@ impl BeaconState { let active_validator_count = active_validator_indices.len(); let seed = self.get_seed(epoch, Domain::SyncCommittee, spec)?; + let max_effective_balance = spec.max_effective_balance_for_fork(self.fork_name_unchecked()); let mut i = 0; let mut sync_committee_indices = Vec::with_capacity(E::SyncCommitteeSize::to_usize()); @@ -1102,19 +1100,17 @@ impl BeaconState { let shuffled_index = compute_shuffled_index( i.safe_rem(active_validator_count)?, active_validator_count, - seed.as_bytes(), + seed.as_slice(), spec.shuffle_round_count, ) .ok_or(Error::UnableToShuffle)?; let candidate_index = *active_validator_indices .get(shuffled_index) .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; - let random_byte = Self::shuffling_random_byte(i, seed.as_bytes())?; + let random_byte = Self::shuffling_random_byte(i, seed.as_slice())?; let effective_balance = self.get_validator(candidate_index)?.effective_balance; if effective_balance.safe_mul(MAX_RANDOM_BYTE)? - >= spec - .max_effective_balance - .safe_mul(u64::from(random_byte))? + >= max_effective_balance.safe_mul(u64::from(random_byte))? 
{ sync_committee_indices.push(candidate_index); } @@ -1533,7 +1529,7 @@ impl BeaconState { let mut preimage = [0; NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES + NUM_MIX_BYTES]; preimage[0..NUM_DOMAIN_BYTES].copy_from_slice(&domain_bytes); preimage[NUM_DOMAIN_BYTES..MIX_OFFSET].copy_from_slice(&epoch_bytes); - preimage[MIX_OFFSET..].copy_from_slice(mix.as_bytes()); + preimage[MIX_OFFSET..].copy_from_slice(mix.as_slice()); Ok(Hash256::from_slice(&hash(&preimage))) } @@ -2219,8 +2215,9 @@ impl BeaconState { .get_mut(validator_index) .ok_or(Error::UnknownValidator(validator_index))?; if validator.has_eth1_withdrawal_credential(spec) { - validator.withdrawal_credentials.as_fixed_bytes_mut()[0] = + AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = spec.compounding_withdrawal_prefix_byte; + self.queue_excess_active_balance(validator_index, spec)?; } Ok(()) diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 7d67e96bbc8..3ad3ccf5617 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -3,8 +3,8 @@ use crate::test_utils::*; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_chain::types::{ test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError, - ChainSpec, Domain, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, - RelativeEpoch, Slot, Vector, + ChainSpec, Domain, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, + MinimalEthSpec, RelativeEpoch, Slot, Vector, }; use ssz::Encode; use std::ops::Mul; diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 6b32523c35f..0f7dbb2673c 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,9 +1,10 @@ use crate::test_utils::TestRandom; +use crate::ForkName; use crate::{ beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, 
BeaconStateError, Blob, Epoch, EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList, }; -use crate::{KzgProofs, SignedBeaconBlock}; +use crate::{ForkVersionDeserialize, KzgProofs, SignedBeaconBlock}; use bls::Signature; use derivative::Derivative; use kzg::{Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT}; @@ -273,3 +274,12 @@ pub type BlobSidecarList = VariableList>, :: pub type FixedBlobSidecarList = FixedVector>>, ::MaxBlobsPerBlock>; pub type BlobsList = VariableList, ::MaxBlobCommitmentsPerBlock>; + +impl ForkVersionDeserialize for BlobSidecarList { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + _: ForkName, + ) -> Result { + serde_json::from_value::>(value).map_err(serde::de::Error::custom) + } +} diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index e6426e125ff..07d71b360f9 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -23,6 +23,7 @@ pub struct BlsToExecutionChange { #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub from_bls_pubkey: PublicKeyBytes, + #[serde(with = "serde_utils::address_hex")] pub to_execution_address: Address, } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ca4df32d1e5..e31427121ec 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -426,6 +426,13 @@ impl ChainSpec { }) } + /// Returns true if `EIP7594_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`. + pub fn is_peer_das_scheduled(&self) -> bool { + self.eip7594_fork_epoch.map_or(false, |eip7594_fork_epoch| { + eip7594_fork_epoch != self.far_future_epoch + }) + } + /// Returns a full `Fork` struct for a given epoch. 
pub fn fork_at_epoch(&self, epoch: Epoch) -> Fork { let current_fork_name = self.fork_name_at_epoch(epoch); @@ -537,7 +544,7 @@ impl ChainSpec { let mut result = [0; 4]; let root = Self::compute_fork_data_root(current_version, genesis_validators_root); result.copy_from_slice( - root.as_bytes() + root.as_slice() .get(0..4) .expect("root hash is at least 4 bytes"), ); @@ -557,7 +564,7 @@ impl ChainSpec { domain[0..4].copy_from_slice(&int_to_bytes4(domain_constant)); domain[4..].copy_from_slice( Self::compute_fork_data_root(fork_version, genesis_validators_root) - .as_bytes() + .as_slice() .get(..28) .expect("fork has is 32 bytes so first 28 bytes should exist"), ); @@ -747,7 +754,8 @@ impl ChainSpec { proportional_slashing_multiplier_bellatrix: 3, bellatrix_fork_version: [0x02, 0x00, 0x00, 0x00], bellatrix_fork_epoch: Some(Epoch::new(144896)), - terminal_total_difficulty: Uint256::from_dec_str("58750000000000000000000") + terminal_total_difficulty: "58750000000000000000000" + .parse() .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), @@ -800,8 +808,8 @@ impl ChainSpec { * DAS params */ eip7594_fork_epoch: None, - custody_requirement: 1, - data_column_sidecar_subnet_count: 32, + custody_requirement: 4, + data_column_sidecar_subnet_count: 128, number_of_columns: 128, /* @@ -893,7 +901,7 @@ impl ChainSpec { .expect("subtraction does not overflow") // Add 1 since the spec declares `2**256 - 2**10` and we use // `Uint256::MAX` which is `2*256- 1`. 
- .checked_add(Uint256::one()) + .checked_add(Uint256::from(2u64.pow(0))) .expect("addition does not overflow"), // Capella capella_fork_version: [0x03, 0x00, 0x00, 0x01], @@ -1067,10 +1075,9 @@ impl ChainSpec { proportional_slashing_multiplier_bellatrix: 3, bellatrix_fork_version: [0x02, 0x00, 0x00, 0x64], bellatrix_fork_epoch: Some(Epoch::new(385536)), - terminal_total_difficulty: Uint256::from_dec_str( - "8626000000000000000000058750000000000000000000", - ) - .expect("terminal_total_difficulty is a valid integer"), + terminal_total_difficulty: "8626000000000000000000058750000000000000000000" + .parse() + .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, @@ -1122,8 +1129,8 @@ impl ChainSpec { * DAS params */ eip7594_fork_epoch: None, - custody_requirement: 1, - data_column_sidecar_subnet_count: 32, + custody_requirement: 4, + data_column_sidecar_subnet_count: 128, number_of_columns: 128, /* * Network specific @@ -1298,6 +1305,7 @@ pub struct Config { deposit_chain_id: u64, #[serde(with = "serde_utils::quoted_u64")] deposit_network_id: u64, + #[serde(with = "serde_utils::address_hex")] deposit_contract_address: Address, #[serde(default = "default_gossip_max_size")] @@ -1365,10 +1373,13 @@ pub struct Config { #[serde(with = "serde_utils::quoted_u64")] max_per_epoch_activation_exit_churn_limit: u64, + #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] custody_requirement: u64, + #[serde(default = "default_data_column_sidecar_subnet_count")] #[serde(with = "serde_utils::quoted_u64")] data_column_sidecar_subnet_count: u64, + #[serde(default = "default_number_of_columns")] #[serde(with = "serde_utils::quoted_u64")] number_of_columns: u64, } @@ -1397,7 +1408,7 @@ fn default_electra_fork_version() -> [u8; 4] { /// /// Taken from 
https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16 const fn default_terminal_total_difficulty() -> Uint256 { - ethereum_types::U256([ + Uint256::from_limbs([ 18446744073709550592, 18446744073709551615, 18446744073709551615, @@ -1509,6 +1520,18 @@ const fn default_maximum_gossip_clock_disparity_millis() -> u64 { 500 } +const fn default_custody_requirement() -> u64 { + 1 +} + +const fn default_data_column_sidecar_subnet_count() -> u64 { + 32 +} + +const fn default_number_of_columns() -> u64 { + 128 +} + fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { let max_request_blocks = max_request_blocks as usize; RuntimeVariableList::::from_vec( @@ -1903,7 +1926,7 @@ mod tests { let domain2 = spec.compute_domain(domain_type, version, genesis_validators_root); assert_eq!(domain1, domain2); - assert_eq!(&domain1.as_bytes()[0..4], &int_to_bytes4(raw_domain)[..]); + assert_eq!(&domain1.as_slice()[0..4], &int_to_bytes4(raw_domain)[..]); } } @@ -2100,7 +2123,7 @@ mod yaml_tests { DEPOSIT_NETWORK_ID: 1 DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa CUSTODY_REQUIREMENT: 1 - DATA_COLUMN_SIDECAR_SUBNET_COUNT: 32 + DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 "#; @@ -2141,9 +2164,8 @@ mod yaml_tests { fn test_total_terminal_difficulty() { assert_eq!( Ok(default_terminal_total_difficulty()), - Uint256::from_dec_str( - "115792089237316195423570985008687907853269984665640564039457584007913129638912" - ) + "115792089237316195423570985008687907853269984665640564039457584007913129638912" + .parse() ); } diff --git a/consensus/types/src/consolidation.rs b/consensus/types/src/consolidation.rs deleted file mode 100644 index 6cc4aa90f27..00000000000 --- a/consensus/types/src/consolidation.rs +++ /dev/null @@ -1,37 +0,0 @@ -use crate::Epoch; -use crate::{test_utils::TestRandom, SignedRoot}; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use 
test_random_derive::TestRandom; -use tree_hash_derive::TreeHash; - -#[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Eq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, -)] -pub struct Consolidation { - #[serde(with = "serde_utils::quoted_u64")] - pub source_index: u64, - #[serde(with = "serde_utils::quoted_u64")] - pub target_index: u64, - pub epoch: Epoch, -} - -impl SignedRoot for Consolidation {} - -#[cfg(test)] -mod tests { - use super::*; - - ssz_and_tree_hash_tests!(Consolidation); -} diff --git a/consensus/types/src/signed_consolidation.rs b/consensus/types/src/consolidation_request.rs similarity index 53% rename from consensus/types/src/signed_consolidation.rs rename to consensus/types/src/consolidation_request.rs index f004ec23bd4..b21f34e7bba 100644 --- a/consensus/types/src/signed_consolidation.rs +++ b/consensus/types/src/consolidation_request.rs @@ -1,5 +1,4 @@ -use crate::test_utils::TestRandom; -use crate::{Consolidation, Signature}; +use crate::{test_utils::TestRandom, Address, PublicKeyBytes, SignedRoot}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -19,14 +18,17 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] -pub struct SignedConsolidation { - pub message: Consolidation, - pub signature: Signature, +pub struct ConsolidationRequest { + pub source_address: Address, + pub source_pubkey: PublicKeyBytes, + pub target_pubkey: PublicKeyBytes, } +impl SignedRoot for ConsolidationRequest {} + #[cfg(test)] mod tests { use super::*; - ssz_and_tree_hash_tests!(SignedConsolidation); + ssz_and_tree_hash_tests!(ConsolidationRequest); } diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index a0e3ca6cce3..90c05aea1f7 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -1,17 +1,12 @@ use crate::beacon_block_body::{KzgCommitments, 
BLOB_KZG_COMMITMENTS_INDEX}; use crate::test_utils::TestRandom; -use crate::{ - BeaconBlockHeader, ChainSpec, EthSpec, Hash256, KzgProofs, SignedBeaconBlock, - SignedBeaconBlockHeader, Slot, -}; -use crate::{BeaconStateError, BlobsList}; +use crate::BeaconStateError; +use crate::{BeaconBlockHeader, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; use bls::Signature; use derivative::Derivative; -use kzg::Kzg; -use kzg::{Blob as KzgBlob, Cell as KzgCell, Error as KzgError}; +use kzg::Error as KzgError; use kzg::{KzgCommitment, KzgProof}; use merkle_proof::verify_merkle_proof; -use rayon::prelude::*; use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz::Encode; @@ -60,7 +55,7 @@ pub struct DataColumnSidecar { pub index: ColumnIndex, #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] pub column: DataColumn, - /// All of the KZG commitments and proofs associated with the block, used for verifying sample cells. + /// All the KZG commitments and proofs associated with the block, used for verifying sample cells. 
pub kzg_commitments: KzgCommitments, pub kzg_proofs: KzgProofs, pub signed_block_header: SignedBeaconBlockHeader, @@ -98,197 +93,6 @@ impl DataColumnSidecar { ) } - pub fn build_sidecars( - blobs: &BlobsList, - block: &SignedBeaconBlock, - kzg: &Kzg, - spec: &ChainSpec, - ) -> Result, DataColumnSidecarError> { - let number_of_columns = spec.number_of_columns; - if blobs.is_empty() { - return Ok(vec![]); - } - let kzg_commitments = block - .message() - .body() - .blob_kzg_commitments() - .map_err(|_err| DataColumnSidecarError::PreDeneb)?; - let kzg_commitments_inclusion_proof = - block.message().body().kzg_commitments_merkle_proof()?; - let signed_block_header = block.signed_block_header(); - - let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - let mut column_kzg_proofs = - vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - - // NOTE: assumes blob sidecars are ordered by index - let blob_cells_and_proofs_vec = blobs - .into_par_iter() - .map(|blob| { - let blob = KzgBlob::from_bytes(blob).map_err(KzgError::from)?; - kzg.compute_cells_and_proofs(&blob) - }) - .collect::, KzgError>>()?; - - for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { - // we iterate over each column, and we construct the column from "top to bottom", - // pushing on the cell and the corresponding proof at each column index. we do this for - // each blob (i.e. the outer loop). 
- for col in 0..number_of_columns { - let cell = - blob_cells - .get(col) - .ok_or(DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing blob cell at index {col}" - )))?; - let cell: Vec = cell.into_inner().into_iter().collect(); - let cell = Cell::::from(cell); - - let proof = blob_cell_proofs.get(col).ok_or( - DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing blob cell KZG proof at index {col}" - )), - )?; - - let column = - columns - .get_mut(col) - .ok_or(DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing data column at index {col}" - )))?; - let column_proofs = column_kzg_proofs.get_mut(col).ok_or( - DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing data column proofs at index {col}" - )), - )?; - - column.push(cell); - column_proofs.push(*proof); - } - } - - let sidecars: Vec>> = columns - .into_iter() - .zip(column_kzg_proofs) - .enumerate() - .map(|(index, (col, proofs))| { - Arc::new(DataColumnSidecar { - index: index as u64, - column: DataColumn::::from(col), - kzg_commitments: kzg_commitments.clone(), - kzg_proofs: KzgProofs::::from(proofs), - signed_block_header: signed_block_header.clone(), - kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), - }) - }) - .collect(); - - Ok(sidecars) - } - - pub fn reconstruct( - kzg: &Kzg, - data_columns: &[Arc], - spec: &ChainSpec, - ) -> Result>, KzgError> { - let number_of_columns = spec.number_of_columns; - let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - let mut column_kzg_proofs = - vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - - let first_data_column = data_columns - .first() - .ok_or(KzgError::InconsistentArrayLength( - "data_columns should have at least one element".to_string(), - ))?; - let num_of_blobs = first_data_column.kzg_commitments.len(); - - let blob_cells_and_proofs_vec = (0..num_of_blobs) - .into_par_iter() - .map(|row_index| { - let mut 
cells: Vec = vec![]; - let mut cell_ids: Vec = vec![]; - for data_column in data_columns { - let cell = data_column.column.get(row_index).ok_or( - KzgError::InconsistentArrayLength(format!( - "Missing data column at index {row_index}" - )), - )?; - - cells.push(ssz_cell_to_crypto_cell::(cell)?); - cell_ids.push(data_column.index); - } - // recover_all_cells does not expect sorted - let all_cells = kzg.recover_all_cells(&cell_ids, &cells)?; - let blob = kzg.cells_to_blob(&all_cells)?; - - // Note: This function computes all cells and proofs. According to Justin this is okay, - // computing a partial set may be more expensive and requires code paths that don't exist. - // Computing the blobs cells is technically unnecessary but very cheap. It's done here again - // for simplicity. - kzg.compute_cells_and_proofs(&blob) - }) - .collect::, KzgError>>()?; - - for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { - // we iterate over each column, and we construct the column from "top to bottom", - // pushing on the cell and the corresponding proof at each column index. we do this for - // each blob (i.e. the outer loop). 
- for col in 0..number_of_columns { - let cell = blob_cells - .get(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing blob cell at index {col}" - )))?; - let cell: Vec = cell.into_inner().into_iter().collect(); - let cell = Cell::::from(cell); - - let proof = blob_cell_proofs - .get(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing blob cell KZG proof at index {col}" - )))?; - - let column = columns - .get_mut(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing data column at index {col}" - )))?; - let column_proofs = - column_kzg_proofs - .get_mut(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing data column proofs at index {col}" - )))?; - - column.push(cell); - column_proofs.push(*proof); - } - } - - // Clone sidecar elements from existing data column, no need to re-compute - let kzg_commitments = &first_data_column.kzg_commitments; - let signed_block_header = &first_data_column.signed_block_header; - let kzg_commitments_inclusion_proof = &first_data_column.kzg_commitments_inclusion_proof; - - let sidecars: Vec>> = columns - .into_iter() - .zip(column_kzg_proofs) - .enumerate() - .map(|(index, (col, proofs))| { - Arc::new(DataColumnSidecar { - index: index as u64, - column: DataColumn::::from(col), - kzg_commitments: kzg_commitments.clone(), - kzg_proofs: KzgProofs::::from(proofs), - signed_block_header: signed_block_header.clone(), - kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), - }) - }) - .collect(); - Ok(sidecars) - } - pub fn min_size() -> usize { // min size is one cell Self { @@ -360,7 +164,7 @@ pub enum DataColumnSidecarError { MissingBlobSidecars, PreDeneb, SszError(SszError), - InconsistentArrayLength(String), + BuildSidecarFailed(String), } impl From for DataColumnSidecarError { @@ -386,9 +190,3 @@ impl From for DataColumnSidecarError { Self::SszError(e) } } - -/// Converts a cell ssz List object to an array to be used with the kzg -/// crypto library. 
-fn ssz_cell_to_crypto_cell(cell: &Cell) -> Result { - KzgCell::from_bytes(cell.as_ref()).map_err(Into::into) -} diff --git a/consensus/types/src/data_column_subnet_id.rs b/consensus/types/src/data_column_subnet_id.rs index dd58c6c36b4..df964cf8de7 100644 --- a/consensus/types/src/data_column_subnet_id.rs +++ b/consensus/types/src/data_column_subnet_id.rs @@ -1,7 +1,7 @@ //! Identifies each data column subnet by an integer identifier. use crate::data_column_sidecar::ColumnIndex; use crate::{ChainSpec, EthSpec}; -use ethereum_types::U256; +use alloy_primitives::U256; use itertools::Itertools; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; @@ -38,7 +38,7 @@ impl DataColumnSubnetId { /// Compute required subnets to subscribe to given the node id. #[allow(clippy::arithmetic_side_effects)] pub fn compute_custody_subnets( - node_id: U256, + raw_node_id: [u8; 32], custody_subnet_count: u64, spec: &ChainSpec, ) -> impl Iterator { @@ -46,10 +46,10 @@ impl DataColumnSubnetId { // value, but here we assume it is valid. 
let mut subnets: HashSet = HashSet::new(); - let mut current_id = node_id; + let mut current_id = U256::from_be_slice(&raw_node_id); while (subnets.len() as u64) < custody_subnet_count { let mut node_id_bytes = [0u8; 32]; - current_id.to_little_endian(&mut node_id_bytes); + node_id_bytes.copy_from_slice(current_id.as_le_slice()); let hash = ethereum_hashing::hash_fixed(&node_id_bytes); let hash_prefix: [u8; 8] = hash[0..8] .try_into() @@ -62,19 +62,19 @@ impl DataColumnSubnetId { } if current_id == U256::MAX { - current_id = U256::zero() + current_id = U256::ZERO } - current_id += U256::one() + current_id += U256::from(1u64) } subnets.into_iter().map(DataColumnSubnetId::new) } pub fn compute_custody_columns( - node_id: U256, + raw_node_id: [u8; 32], custody_subnet_count: u64, spec: &ChainSpec, ) -> impl Iterator { - Self::compute_custody_subnets::(node_id, custody_subnet_count, spec) + Self::compute_custody_subnets::(raw_node_id, custody_subnet_count, spec) .flat_map(|subnet| subnet.columns::(spec)) .sorted() } @@ -134,6 +134,7 @@ mod test { use crate::data_column_subnet_id::DataColumnSubnetId; use crate::EthSpec; use crate::MainnetEthSpec; + use crate::Uint256; type E = MainnetEthSpec; @@ -153,7 +154,7 @@ mod test { "103822458477361691467064888613019442068586830412598673713899771287914656699997", ] .into_iter() - .map(|v| ethereum_types::U256::from_dec_str(v).unwrap()) + .map(|v| Uint256::from_str_radix(v, 10).unwrap().to_be_bytes::<32>()) .collect::>(); let custody_requirement = 4; diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs index 1793be1c7c8..df1064daba0 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -40,7 +40,7 @@ impl Default for DepositTreeSnapshot { fn default() -> Self { let mut result = Self { finalized: vec![], - deposit_root: Hash256::default(), + deposit_root: Hash256::zero(), deposit_count: 0, execution_block_hash: 
Hash256::zero(), execution_block_height: 0, @@ -60,7 +60,7 @@ impl DepositTreeSnapshot { for height in 0..DEPOSIT_TREE_DEPTH { deposit_root = if (size & 1) == 1 { index = index.checked_sub(1)?; - hash32_concat(self.finalized.get(index)?.as_bytes(), &deposit_root) + hash32_concat(self.finalized.get(index)?.as_slice(), &deposit_root) } else { hash32_concat(&deposit_root, ZERO_HASHES.get(height)?) }; diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 15084cb14c4..09ef8e3c1a7 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -154,7 +154,7 @@ pub trait EthSpec: type PendingBalanceDepositsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type PendingPartialWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type PendingConsolidationsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; - type MaxConsolidations: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxConsolidationRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxDepositRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxAttesterSlashingsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxAttestationsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; @@ -346,9 +346,9 @@ pub trait EthSpec: Self::PendingConsolidationsLimit::to_usize() } - /// Returns the `MAX_CONSOLIDATIONS` constant for this specification. - fn max_consolidations() -> usize { - Self::MaxConsolidations::to_usize() + /// Returns the `MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD` constant for this specification. + fn max_consolidation_requests_per_payload() -> usize { + Self::MaxConsolidationRequestsPerPayload::to_usize() } /// Returns the `MAX_DEPOSIT_REQUESTS_PER_PAYLOAD` constant for this specification. 
@@ -433,7 +433,7 @@ impl EthSpec for MainnetEthSpec { type PendingBalanceDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; - type MaxConsolidations = U1; + type MaxConsolidationRequestsPerPayload = U1; type MaxDepositRequestsPerPayload = U8192; type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; @@ -501,7 +501,7 @@ impl EthSpec for MinimalEthSpec { MaxBlobsPerBlock, BytesPerFieldElement, PendingBalanceDepositsLimit, - MaxConsolidations, + MaxConsolidationRequestsPerPayload, MaxAttesterSlashingsElectra, MaxAttestationsElectra }); @@ -560,7 +560,7 @@ impl EthSpec for GnosisEthSpec { type PendingBalanceDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; - type MaxConsolidations = U1; + type MaxConsolidationRequestsPerPayload = U1; type MaxDepositRequestsPerPayload = U8192; type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index b2401f0c0f1..677b3d3408d 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -1,4 +1,5 @@ use crate::test_utils::TestRandom; +use crate::FixedBytesExtended; use crate::Hash256; use derivative::Derivative; use rand::RngCore; @@ -20,7 +21,7 @@ use std::fmt; )] #[derivative(Debug = "transparent")] #[serde(transparent)] -pub struct ExecutionBlockHash(pub Hash256); +pub struct ExecutionBlockHash(#[serde(with = "serde_utils::b256_hex")] pub Hash256); impl ExecutionBlockHash { pub fn zero() -> Self { diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution_block_header.rs index 2e5a498214c..694162d6ffd 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution_block_header.rs @@ -74,14 +74,14 @@ impl ExecutionBlockHeader { transactions_root: 
rlp_transactions_root, receipts_root: payload.receipts_root(), logs_bloom: payload.logs_bloom().clone().into(), - difficulty: Uint256::zero(), - number: payload.block_number().into(), - gas_limit: payload.gas_limit().into(), - gas_used: payload.gas_used().into(), + difficulty: Uint256::ZERO, + number: Uint256::saturating_from(payload.block_number()), + gas_limit: Uint256::saturating_from(payload.gas_limit()), + gas_used: Uint256::saturating_from(payload.gas_used()), timestamp: payload.timestamp(), extra_data: payload.extra_data().clone().into(), mix_hash: payload.prev_randao(), - nonce: Hash64::zero(), + nonce: Hash64::ZERO, base_fee_per_gas: payload.base_fee_per_gas(), withdrawals_root: rlp_withdrawals_root, blob_gas_used: rlp_blob_gas_used, @@ -101,15 +101,15 @@ pub struct EncodableExecutionBlockHeader<'a> { pub transactions_root: &'a [u8], pub receipts_root: &'a [u8], pub logs_bloom: &'a [u8], - pub difficulty: alloy_primitives::U256, - pub number: alloy_primitives::U256, - pub gas_limit: alloy_primitives::U256, - pub gas_used: alloy_primitives::U256, + pub difficulty: Uint256, + pub number: Uint256, + pub gas_limit: Uint256, + pub gas_used: Uint256, pub timestamp: u64, pub extra_data: &'a [u8], pub mix_hash: &'a [u8], pub nonce: &'a [u8], - pub base_fee_per_gas: alloy_primitives::U256, + pub base_fee_per_gas: Uint256, pub withdrawals_root: Option<&'a [u8]>, pub blob_gas_used: Option, pub excess_blob_gas: Option, @@ -119,45 +119,33 @@ pub struct EncodableExecutionBlockHeader<'a> { impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { fn from(header: &'a ExecutionBlockHeader) -> Self { let mut encodable = Self { - parent_hash: header.parent_hash.as_bytes(), - ommers_hash: header.ommers_hash.as_bytes(), - beneficiary: header.beneficiary.as_bytes(), - state_root: header.state_root.as_bytes(), - transactions_root: header.transactions_root.as_bytes(), - receipts_root: header.receipts_root.as_bytes(), + parent_hash: 
header.parent_hash.as_slice(), + ommers_hash: header.ommers_hash.as_slice(), + beneficiary: header.beneficiary.as_slice(), + state_root: header.state_root.as_slice(), + transactions_root: header.transactions_root.as_slice(), + receipts_root: header.receipts_root.as_slice(), logs_bloom: header.logs_bloom.as_slice(), - difficulty: U256Shim(header.difficulty).into(), - number: U256Shim(header.number).into(), - gas_limit: U256Shim(header.gas_limit).into(), - gas_used: U256Shim(header.gas_used).into(), + difficulty: header.difficulty, + number: header.number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, timestamp: header.timestamp, extra_data: header.extra_data.as_slice(), - mix_hash: header.mix_hash.as_bytes(), - nonce: header.nonce.as_bytes(), - base_fee_per_gas: U256Shim(header.base_fee_per_gas).into(), + mix_hash: header.mix_hash.as_slice(), + nonce: header.nonce.as_slice(), + base_fee_per_gas: header.base_fee_per_gas, withdrawals_root: None, blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, parent_beacon_block_root: None, }; if let Some(withdrawals_root) = &header.withdrawals_root { - encodable.withdrawals_root = Some(withdrawals_root.as_bytes()); + encodable.withdrawals_root = Some(withdrawals_root.as_slice()); } if let Some(parent_beacon_block_root) = &header.parent_beacon_block_root { - encodable.parent_beacon_block_root = Some(parent_beacon_block_root.as_bytes()) + encodable.parent_beacon_block_root = Some(parent_beacon_block_root.as_slice()) } encodable } } - -// TODO(alloy) this shim can be removed once we fully migrate -// from ethereum types to alloy primitives -struct U256Shim(Uint256); - -impl From for alloy_primitives::U256 { - fn from(value: U256Shim) -> Self { - let mut buffer: [u8; 32] = [0; 32]; - value.0.to_little_endian(&mut buffer); - Self::from_le_slice(&buffer) - } -} diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 02300cc1927..4d41d568308 100644 
--- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -16,7 +16,9 @@ pub type Withdrawals = VariableList::MaxWithdrawal pub type DepositRequests = VariableList::MaxDepositRequestsPerPayload>; pub type WithdrawalRequests = - VariableList::MaxWithdrawalRequestsPerPayload>; + VariableList::MaxWithdrawalRequestsPerPayload>; +pub type ConsolidationRequests = + VariableList::MaxConsolidationRequestsPerPayload>; #[superstruct( variants(Bellatrix, Capella, Deneb, Electra), @@ -55,6 +57,7 @@ pub struct ExecutionPayload { #[superstruct(getter(copy))] pub parent_hash: ExecutionBlockHash, #[superstruct(getter(copy))] + #[serde(with = "serde_utils::address_hex")] pub fee_recipient: Address, #[superstruct(getter(copy))] pub state_root: Hash256, @@ -94,12 +97,12 @@ pub struct ExecutionPayload { #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, #[superstruct(only(Electra))] - //TODO(electra) remove alias once EF tests are updates with correct name - #[serde(alias = "deposit_receipts")] pub deposit_requests: VariableList, #[superstruct(only(Electra))] - pub withdrawal_requests: - VariableList, + pub withdrawal_requests: VariableList, + #[superstruct(only(Electra))] + pub consolidation_requests: + VariableList, } impl<'a, E: EthSpec> ExecutionPayloadRef<'a, E> { diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 149cc286ae9..90dd8c54e21 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -47,6 +47,7 @@ pub struct ExecutionPayloadHeader { #[superstruct(getter(copy))] pub parent_hash: ExecutionBlockHash, #[superstruct(getter(copy))] + #[serde(with = "serde_utils::address_hex")] pub fee_recipient: Address, #[superstruct(getter(copy))] pub state_root: Hash256, @@ -86,11 +87,11 @@ pub struct ExecutionPayloadHeader { #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, 
#[superstruct(only(Electra), partial_getter(copy))] - //TODO(electra) remove alias once EF tests are updates with correct name - #[serde(alias = "deposit_receipts_root")] pub deposit_requests_root: Hash256, #[superstruct(only(Electra), partial_getter(copy))] pub withdrawal_requests_root: Hash256, + #[superstruct(only(Electra), partial_getter(copy))] + pub consolidation_requests_root: Hash256, } impl ExecutionPayloadHeader { @@ -127,6 +128,15 @@ impl ExecutionPayloadHeader { } } } + + pub fn fork_name_unchecked(&self) -> ForkName { + match self { + ExecutionPayloadHeader::Bellatrix(_) => ForkName::Bellatrix, + ExecutionPayloadHeader::Capella(_) => ForkName::Capella, + ExecutionPayloadHeader::Deneb(_) => ForkName::Deneb, + ExecutionPayloadHeader::Electra(_) => ForkName::Electra, + } + } } impl<'a, E: EthSpec> ExecutionPayloadHeaderRef<'a, E> { @@ -206,6 +216,7 @@ impl ExecutionPayloadHeaderDeneb { excess_blob_gas: self.excess_blob_gas, deposit_requests_root: Hash256::zero(), withdrawal_requests_root: Hash256::zero(), + consolidation_requests_root: Hash256::zero(), } } } @@ -299,6 +310,7 @@ impl<'a, E: EthSpec> From<&'a ExecutionPayloadElectra> for ExecutionPayloadHe excess_blob_gas: payload.excess_blob_gas, deposit_requests_root: payload.deposit_requests.tree_hash_root(), withdrawal_requests_root: payload.withdrawal_requests.tree_hash_root(), + consolidation_requests_root: payload.consolidation_requests.tree_hash_root(), } } } diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 3d8f411cafb..08f8573c6d1 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -90,7 +90,11 @@ impl From for Graffiti { graffiti .get_mut(..graffiti_len) .expect("graffiti_len <= GRAFFITI_BYTES_LEN") - .copy_from_slice(graffiti_bytes); + .copy_from_slice( + graffiti_bytes + .get(..graffiti_len) + .expect("graffiti_len <= GRAFFITI_BYTES_LEN"), + ); graffiti.into() } } @@ -180,6 +184,6 @@ impl TreeHash for Graffiti { impl 
TestRandom for Graffiti { fn random_for_test(rng: &mut impl RngCore) -> Self { - Self::from(Hash256::random_for_test(rng).to_fixed_bytes()) + Self::from(Hash256::random_for_test(rng).0) } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 2afd7261102..afc64e86a82 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -27,7 +27,7 @@ pub mod bls_to_execution_change; pub mod builder_bid; pub mod chain_spec; pub mod checkpoint; -pub mod consolidation; +pub mod consolidation_request; pub mod consts; pub mod contribution_and_proof; pub mod deposit; @@ -39,7 +39,6 @@ pub mod enr_fork_id; pub mod eth1_data; pub mod eth_spec; pub mod execution_block_hash; -pub mod execution_layer_withdrawal_request; pub mod execution_payload; pub mod execution_payload_header; pub mod fork; @@ -67,7 +66,6 @@ pub mod signed_aggregate_and_proof; pub mod signed_beacon_block; pub mod signed_beacon_block_header; pub mod signed_bls_to_execution_change; -pub mod signed_consolidation; pub mod signed_contribution_and_proof; pub mod signed_voluntary_exit; pub mod signing_data; @@ -77,6 +75,7 @@ pub mod validator; pub mod validator_subscription; pub mod voluntary_exit; pub mod withdrawal_credentials; +pub mod withdrawal_request; #[macro_use] pub mod slot_epoch_macros; pub mod activation_queue; @@ -110,8 +109,6 @@ pub mod light_client_header; pub mod non_zero_usize; pub mod runtime_var_list; -use ethereum_types::{H160, H256}; - pub use crate::activation_queue::ActivationQueue; pub use crate::aggregate_and_proof::{ AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, @@ -139,14 +136,14 @@ pub use crate::beacon_block_body::{ pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{Error as BeaconStateError, *}; -pub use crate::blob_sidecar::{BlobSidecar, BlobSidecarList, BlobsList}; +pub use 
crate::blob_sidecar::{BlobIdentifier, BlobSidecar, BlobSidecarList, BlobsList}; pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ ConfigAndPreset, ConfigAndPresetCapella, ConfigAndPresetDeneb, ConfigAndPresetElectra, }; -pub use crate::consolidation::Consolidation; +pub use crate::consolidation_request::ConsolidationRequest; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::data_column_sidecar::{ ColumnIndex, DataColumnIdentifier, DataColumnSidecar, DataColumnSidecarList, @@ -163,7 +160,6 @@ pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; -pub use crate::execution_layer_withdrawal_request::ExecutionLayerWithdrawalRequest; pub use crate::execution_payload::{ ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadRef, Transaction, Transactions, Withdrawals, @@ -235,7 +231,6 @@ pub use crate::signed_beacon_block::{ }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; -pub use crate::signed_consolidation::SignedConsolidation; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_data::{SignedRoot, SigningData}; @@ -256,17 +251,19 @@ pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; pub use crate::withdrawal::Withdrawal; pub use crate::withdrawal_credentials::WithdrawalCredentials; +pub use crate::withdrawal_request::WithdrawalRequest; +pub use fixed_bytes::FixedBytesExtended; pub type CommitteeIndex = u64; 
-pub type Hash256 = H256; -pub type Uint256 = ethereum_types::U256; -pub type Address = H160; +pub type Hash256 = fixed_bytes::Hash256; +pub type Uint256 = fixed_bytes::Uint256; +pub type Address = fixed_bytes::Address; pub type ForkVersion = [u8; 4]; pub type BLSFieldElement = Uint256; pub type Blob = FixedVector::BytesPerBlob>; pub type KzgProofs = VariableList::MaxBlobCommitmentsPerBlock>; pub type VersionedHash = Hash256; -pub type Hash64 = ethereum_types::H64; +pub type Hash64 = alloy_primitives::B64; pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index e65b0572923..dc7561f5fcc 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -192,6 +192,19 @@ impl LightClientFinalityUpdate { // `2 *` because there are two headers in the update fixed_size + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } + + // Implements spec prioritization rules: + // > Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) + // + // ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_finality_update + pub fn is_latest(&self, attested_slot: Slot, signature_slot: Slot) -> bool { + let prev_slot = self.get_attested_header_slot(); + if attested_slot > prev_slot { + true + } else { + attested_slot == prev_slot && signature_slot > *self.signature_slot() + } + } } impl ForkVersionDeserialize for LightClientFinalityUpdate { diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index 1feb748fae1..a1d5f85eac0 100644 --- a/consensus/types/src/light_client_header.rs +++ 
b/consensus/types/src/light_client_header.rs @@ -149,6 +149,15 @@ impl LightClientHeaderAltair { } } +impl Default for LightClientHeaderAltair { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + _phantom_data: PhantomData, + } + } +} + impl LightClientHeaderCapella { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, @@ -180,6 +189,17 @@ impl LightClientHeaderCapella { } } +impl Default for LightClientHeaderCapella { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + execution: ExecutionPayloadHeaderCapella::default(), + execution_branch: FixedVector::default(), + _phantom_data: PhantomData, + } + } +} + impl LightClientHeaderDeneb { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, @@ -211,6 +231,17 @@ impl LightClientHeaderDeneb { } } +impl Default for LightClientHeaderDeneb { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + execution: ExecutionPayloadHeaderDeneb::default(), + execution_branch: FixedVector::default(), + _phantom_data: PhantomData, + } + } +} + impl LightClientHeaderElectra { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, @@ -242,6 +273,17 @@ impl LightClientHeaderElectra { } } +impl Default for LightClientHeaderElectra { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + execution: ExecutionPayloadHeaderElectra::default(), + execution_branch: FixedVector::default(), + _phantom_data: PhantomData, + } + } +} + impl ForkVersionDeserialize for LightClientHeader { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index f5b749be706..3cae31edf80 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -178,6 +178,19 @@ impl LightClientOptimisticUpdate { }; 
fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } + + // Implements spec prioritization rules: + // > Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) + // + // ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_optimistic_update + pub fn is_latest(&self, attested_slot: Slot, signature_slot: Slot) -> bool { + let prev_slot = self.get_slot(); + if attested_slot > prev_slot { + true + } else { + attested_slot == prev_slot && signature_slot > *self.signature_slot() + } + } } impl ForkVersionDeserialize for LightClientOptimisticUpdate { diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 8a3eaff487f..3b48a68df31 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,12 +1,13 @@ use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; use crate::light_client_header::LightClientHeaderElectra; use crate::{ - beacon_state, test_utils::TestRandom, BeaconBlock, BeaconBlockHeader, BeaconState, ChainSpec, - ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, SignedBlindedBeaconBlock, + beacon_state, test_utils::TestRandom, ChainSpec, Epoch, ForkName, ForkVersionDeserialize, + LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + SignedBlindedBeaconBlock, }; use derivative::Derivative; use safe_arith::ArithError; +use safe_arith::SafeArith; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; use ssz::Decode; @@ -16,7 +17,6 @@ use ssz_types::typenum::{U4, U5, U6}; use std::sync::Arc; use superstruct::superstruct; use test_random_derive::TestRandom; -use tree_hash::TreeHash; use tree_hash_derive::TreeHash; pub const 
FINALIZED_ROOT_INDEX: usize = 105; @@ -35,6 +35,9 @@ pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; +type FinalityBranch = FixedVector; +type NextSyncCommitteeBranch = FixedVector; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), @@ -117,7 +120,7 @@ pub struct LightClientUpdate { /// The `SyncCommittee` used in the next period. pub next_sync_committee: Arc>, /// Merkle proof for next sync committee - pub next_sync_committee_branch: FixedVector, + pub next_sync_committee_branch: NextSyncCommitteeBranch, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). #[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -128,7 +131,7 @@ pub struct LightClientUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. 
- pub finality_branch: FixedVector, + pub finality_branch: FinalityBranch, /// current sync aggreggate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -152,45 +155,17 @@ impl ForkVersionDeserialize for LightClientUpdate { } impl LightClientUpdate { + #[allow(clippy::too_many_arguments)] pub fn new( - beacon_state: BeaconState, - block: BeaconBlock, - attested_state: &mut BeaconState, + sync_aggregate: &SyncAggregate, + block_slot: Slot, + next_sync_committee: Arc>, + next_sync_committee_branch: FixedVector, + finality_branch: FixedVector, attested_block: &SignedBlindedBeaconBlock, - finalized_block: &SignedBlindedBeaconBlock, + finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, ) -> Result { - let sync_aggregate = block.body().sync_aggregate()?; - if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { - return Err(Error::NotEnoughSyncCommitteeParticipants); - } - - let signature_period = block.epoch().sync_committee_period(chain_spec)?; - // Compute and validate attested header. 
- let mut attested_header = attested_state.latest_block_header().clone(); - attested_header.state_root = attested_state.update_tree_hash_cache()?; - let attested_period = attested_header - .slot - .epoch(E::slots_per_epoch()) - .sync_committee_period(chain_spec)?; - if attested_period != signature_period { - return Err(Error::MismatchingPeriods); - } - // Build finalized header from finalized block - let finalized_header = BeaconBlockHeader { - slot: finalized_block.slot(), - proposer_index: finalized_block.message().proposer_index(), - parent_root: finalized_block.parent_root(), - state_root: finalized_block.state_root(), - body_root: finalized_block.message().body_root(), - }; - if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { - return Err(Error::InvalidFinalizedBlock); - } - let next_sync_committee_branch = - attested_state.compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)?; - let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; - let light_client_update = match attested_block .fork_name(chain_spec) .map_err(|_| Error::InconsistentFork)? @@ -199,71 +174,91 @@ impl LightClientUpdate { ForkName::Altair | ForkName::Bellatrix => { let attested_header = LightClientHeaderAltair::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderAltair::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderAltair::block_to_light_client_header(finalized_block)? 
+ } else { + LightClientHeaderAltair::default() + }; + Self::Altair(LightClientUpdateAltair { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } ForkName::Capella => { let attested_header = LightClientHeaderCapella::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderCapella::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderCapella::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderCapella::default() + }; + Self::Capella(LightClientUpdateCapella { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } ForkName::Deneb => { let attested_header = LightClientHeaderDeneb::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderDeneb::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderDeneb::block_to_light_client_header(finalized_block)? 
+ } else { + LightClientHeaderDeneb::default() + }; + Self::Deneb(LightClientUpdateDeneb { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } ForkName::Electra => { let attested_header = LightClientHeaderElectra::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderElectra::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderElectra::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderElectra::default() + }; + Self::Electra(LightClientUpdateElectra { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } // To add a new fork, just append the new fork variant on the latest fork. 
Forks that - // have a distinct execution header will need a new LightClientUdpate variant only + // have a distinct execution header will need a new LightClientUpdate variant only // if you need to test or support lightclient usages }; Ok(light_client_update) } - pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + pub fn from_ssz_bytes(bytes: &[u8], fork_name: &ForkName) -> Result { let update = match fork_name { ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientUpdateAltair::from_ssz_bytes(bytes)?) @@ -280,6 +275,142 @@ impl LightClientUpdate { Ok(update) } + + pub fn attested_header_slot(&self) -> Slot { + match self { + LightClientUpdate::Altair(update) => update.attested_header.beacon.slot, + LightClientUpdate::Capella(update) => update.attested_header.beacon.slot, + LightClientUpdate::Deneb(update) => update.attested_header.beacon.slot, + LightClientUpdate::Electra(update) => update.attested_header.beacon.slot, + } + } + + pub fn finalized_header_slot(&self) -> Slot { + match self { + LightClientUpdate::Altair(update) => update.finalized_header.beacon.slot, + LightClientUpdate::Capella(update) => update.finalized_header.beacon.slot, + LightClientUpdate::Deneb(update) => update.finalized_header.beacon.slot, + LightClientUpdate::Electra(update) => update.finalized_header.beacon.slot, + } + } + + fn attested_header_sync_committee_period( + &self, + chain_spec: &ChainSpec, + ) -> Result { + compute_sync_committee_period_at_slot::(self.attested_header_slot(), chain_spec) + .map_err(Error::ArithError) + } + + fn signature_slot_sync_committee_period(&self, chain_spec: &ChainSpec) -> Result { + compute_sync_committee_period_at_slot::(*self.signature_slot(), chain_spec) + .map_err(Error::ArithError) + } + + pub fn is_sync_committee_update(&self, chain_spec: &ChainSpec) -> Result { + Ok(!self.is_next_sync_committee_branch_empty() + && (self.attested_header_sync_committee_period(chain_spec)? 
+ == self.signature_slot_sync_committee_period(chain_spec)?)) + } + + pub fn has_sync_committee_finality(&self, chain_spec: &ChainSpec) -> Result { + Ok( + compute_sync_committee_period_at_slot::(self.finalized_header_slot(), chain_spec)? + == self.attested_header_sync_committee_period(chain_spec)?, + ) + } + + // Implements spec prioritization rules: + // Full nodes SHOULD provide the best derivable LightClientUpdate for each sync committee period + // ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_update + pub fn is_better_light_client_update( + &self, + new: &Self, + chain_spec: &ChainSpec, + ) -> Result { + // Compare super majority (> 2/3) sync committee participation + let max_active_participants = new.sync_aggregate().sync_committee_bits.len(); + + let new_active_participants = new.sync_aggregate().sync_committee_bits.num_set_bits(); + let prev_active_participants = self.sync_aggregate().sync_committee_bits.num_set_bits(); + + let new_has_super_majority = + new_active_participants.safe_mul(3)? >= max_active_participants.safe_mul(2)?; + let prev_has_super_majority = + prev_active_participants.safe_mul(3)? 
>= max_active_participants.safe_mul(2)?; + + if new_has_super_majority != prev_has_super_majority { + return Ok(new_has_super_majority); + } + + if !new_has_super_majority && new_active_participants != prev_active_participants { + return Ok(new_active_participants > prev_active_participants); + } + + // Compare presence of relevant sync committee + let new_has_relevant_sync_committee = new.is_sync_committee_update(chain_spec)?; + let prev_has_relevant_sync_committee = self.is_sync_committee_update(chain_spec)?; + if new_has_relevant_sync_committee != prev_has_relevant_sync_committee { + return Ok(new_has_relevant_sync_committee); + } + + // Compare indication of any finality + let new_has_finality = !new.is_finality_branch_empty(); + let prev_has_finality = !self.is_finality_branch_empty(); + if new_has_finality != prev_has_finality { + return Ok(new_has_finality); + } + + // Compare sync committee finality + if new_has_finality { + let new_has_sync_committee_finality = new.has_sync_committee_finality(chain_spec)?; + let prev_has_sync_committee_finality = self.has_sync_committee_finality(chain_spec)?; + if new_has_sync_committee_finality != prev_has_sync_committee_finality { + return Ok(new_has_sync_committee_finality); + } + } + + // Tiebreaker 1: Sync committee participation beyond super majority + if new_active_participants != prev_active_participants { + return Ok(new_active_participants > prev_active_participants); + } + + let new_attested_header_slot = new.attested_header_slot(); + let prev_attested_header_slot = self.attested_header_slot(); + + // Tiebreaker 2: Prefer older data (fewer changes to best) + if new_attested_header_slot != prev_attested_header_slot { + return Ok(new_attested_header_slot < prev_attested_header_slot); + } + + return Ok(new.signature_slot() < self.signature_slot()); + } + + fn is_next_sync_committee_branch_empty(&self) -> bool { + for index in self.next_sync_committee_branch().iter() { + if *index != Hash256::default() { + return 
false; + } + } + true + } + + pub fn is_finality_branch_empty(&self) -> bool { + for index in self.finality_branch().iter() { + if *index != Hash256::default() { + return false; + } + } + true + } +} + +fn compute_sync_committee_period_at_slot( + slot: Slot, + chain_spec: &ChainSpec, +) -> Result { + slot.epoch(E::slots_per_epoch()) + .safe_div(chain_spec.epochs_per_sync_committee_period) } #[cfg(test)] diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 362cb6d3864..cee8b8cc219 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -41,13 +41,16 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn blob_gas_used(&self) -> Result; fn withdrawal_requests( &self, - ) -> Result< - Option>, - Error, - >; + ) -> Result>, Error>; fn deposit_requests( &self, ) -> Result>, Error>; + fn consolidation_requests( + &self, + ) -> Result< + Option>, + Error, + >; /// Is this a default payload with 0x0 roots for transactions and withdrawals? 
fn is_default_with_zero_roots(&self) -> bool; @@ -289,10 +292,8 @@ impl ExecPayload for FullPayload { fn withdrawal_requests( &self, - ) -> Result< - Option>, - Error, - > { + ) -> Result>, Error> + { match self { FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { Err(Error::IncorrectStateVariant) @@ -316,6 +317,24 @@ impl ExecPayload for FullPayload { } } + fn consolidation_requests( + &self, + ) -> Result< + Option< + VariableList::MaxConsolidationRequestsPerPayload>, + >, + Error, + > { + match self { + FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { + Err(Error::IncorrectStateVariant) + } + FullPayload::Electra(inner) => { + Ok(Some(inner.execution_payload.consolidation_requests.clone())) + } + } + } + fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -450,10 +469,8 @@ impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { fn withdrawal_requests( &self, - ) -> Result< - Option>, - Error, - > { + ) -> Result>, Error> + { match self { FullPayloadRef::Bellatrix(_) | FullPayloadRef::Capella(_) @@ -477,6 +494,24 @@ impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { } } + fn consolidation_requests( + &self, + ) -> Result< + Option< + VariableList::MaxConsolidationRequestsPerPayload>, + >, + Error, + > { + match self { + FullPayloadRef::Bellatrix(_) + | FullPayloadRef::Capella(_) + | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Electra(inner) => { + Ok(Some(inner.execution_payload.consolidation_requests.clone())) + } + } + } + fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -659,10 +694,8 @@ impl ExecPayload for BlindedPayload { fn withdrawal_requests( &self, - ) -> Result< - Option>, - Error, - > { + ) -> Result>, Error> + { Ok(None) } @@ -672,6 +705,17 @@ impl ExecPayload for 
BlindedPayload { Ok(None) } + fn consolidation_requests( + &self, + ) -> Result< + Option< + VariableList::MaxConsolidationRequestsPerPayload>, + >, + Error, + > { + Ok(None) + } + fn is_default_with_zero_roots(&self) -> bool { self.to_ref().is_default_with_zero_roots() } @@ -775,10 +819,8 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { fn withdrawal_requests( &self, - ) -> Result< - Option>, - Error, - > { + ) -> Result>, Error> + { Ok(None) } @@ -788,6 +830,17 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { Ok(None) } + fn consolidation_requests( + &self, + ) -> Result< + Option< + VariableList::MaxConsolidationRequestsPerPayload>, + >, + Error, + > { + Ok(None) + } + fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_blinded_payload_ref!(&'b _, self, move |payload, cons| { cons(payload); @@ -816,7 +869,8 @@ macro_rules! impl_exec_payload_common { $g:block, $h:block, $i:block, - $j:block) => { + $j:block, + $k:block) => { impl ExecPayload for $wrapper_type { fn block_type() -> BlockType { BlockType::$block_type_variant @@ -883,7 +937,7 @@ macro_rules! impl_exec_payload_common { fn withdrawal_requests( &self, ) -> Result< - Option>, + Option>, Error, > { let i = $i; @@ -896,6 +950,13 @@ macro_rules! impl_exec_payload_common { let j = $j; j(self) } + + fn consolidation_requests( + &self, + ) -> Result::MaxConsolidationRequestsPerPayload>>, Error> { + let k = $k; + k(self) + } } impl From<$wrapped_type> for $wrapper_type { @@ -943,6 +1004,7 @@ macro_rules! impl_exec_payload_for_fork { c }, { |_| { Ok(None) } }, + { |_| { Ok(None) } }, { |_| { Ok(None) } } ); @@ -1035,12 +1097,7 @@ macro_rules! 
impl_exec_payload_for_fork { let c: for<'a> fn( &'a $wrapper_type_full, ) -> Result< - Option< - VariableList< - ExecutionLayerWithdrawalRequest, - E::MaxWithdrawalRequestsPerPayload, - >, - >, + Option>, Error, > = |payload: &$wrapper_type_full| { let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); @@ -1059,6 +1116,23 @@ macro_rules! impl_exec_payload_for_fork { wrapper_ref_type.deposit_requests() }; c + }, + { + let c: for<'a> fn( + &'a $wrapper_type_full, + ) -> Result< + Option< + VariableList< + ConsolidationRequest, + ::MaxConsolidationRequestsPerPayload, + >, + >, + Error, + > = |payload: &$wrapper_type_full| { + let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); + wrapper_ref_type.consolidation_requests() + }; + c } ); diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index b51928d165e..2c576ed332c 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -246,7 +246,7 @@ pub struct ElectraPreset { #[serde(with = "serde_utils::quoted_u64")] pub pending_consolidations_limit: u64, #[serde(with = "serde_utils::quoted_u64")] - pub max_consolidations: u64, + pub max_consolidation_requests_per_payload: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_deposit_requests_per_payload: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -269,7 +269,8 @@ impl ElectraPreset { pending_balance_deposits_limit: E::pending_balance_deposits_limit() as u64, pending_partial_withdrawals_limit: E::pending_partial_withdrawals_limit() as u64, pending_consolidations_limit: E::pending_consolidations_limit() as u64, - max_consolidations: E::max_consolidations() as u64, + max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() + as u64, max_deposit_requests_per_payload: E::max_deposit_requests_per_payload() as u64, max_attester_slashings_electra: E::max_attester_slashings_electra() as u64, max_attestations_electra: E::max_attestations_electra() as u64, diff --git 
a/consensus/types/src/proposer_preparation_data.rs b/consensus/types/src/proposer_preparation_data.rs index 2828b0d4d55..477fb3b9d15 100644 --- a/consensus/types/src/proposer_preparation_data.rs +++ b/consensus/types/src/proposer_preparation_data.rs @@ -9,5 +9,6 @@ pub struct ProposerPreparationData { #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, /// The fee-recipient address. + #[serde(with = "serde_utils::address_hex")] pub fee_recipient: Address, } diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs index 84ad5d074e7..af4ee87c158 100644 --- a/consensus/types/src/runtime_var_list.rs +++ b/consensus/types/src/runtime_var_list.rs @@ -1,20 +1,58 @@ -use ssz::{Decode, Encode}; -use ssz_derive::Encode; +use derivative::Derivative; +use serde::{Deserialize, Serialize}; +use ssz::Decode; +use ssz_types::Error; +use std::ops::{Deref, DerefMut, Index, IndexMut}; +use std::slice::SliceIndex; -#[derive(Debug, Clone, PartialEq, Encode)] -#[ssz(struct_behaviour = "transparent")] -pub struct RuntimeVariableList { +/// Emulates a SSZ `List`. +/// +/// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than +/// `max_len` values. +/// +/// ## Example +/// +/// ``` +/// use ssz_types::{RuntimeVariableList}; +/// +/// let base: Vec = vec![1, 2, 3, 4]; +/// +/// // Create a `RuntimeVariableList` from a `Vec` that has the expected length. +/// let exact: RuntimeVariableList<_> = RuntimeVariableList::from_vec(base.clone(), 4); +/// assert_eq!(&exact[..], &[1, 2, 3, 4]); +/// +/// // Create a `RuntimeVariableList` from a `Vec` that is too long and the `Vec` is truncated. +/// let short: RuntimeVariableList<_> = RuntimeVariableList::from_vec(base.clone(), 3); +/// assert_eq!(&short[..], &[1, 2, 3]); +/// +/// // Create a `RuntimeVariableList` from a `Vec` that is shorter than the maximum. 
+/// let mut long: RuntimeVariableList<_> = RuntimeVariableList::from_vec(base, 5); +/// assert_eq!(&long[..], &[1, 2, 3, 4]); +/// +/// // Push a value if it does not exceed the maximum +/// long.push(5).unwrap(); +/// assert_eq!(&long[..], &[1, 2, 3, 4, 5]); +/// +/// // Push a value if it _does_ exceed the maximum. +/// assert!(long.push(6).is_err()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] +#[serde(transparent)] +pub struct RuntimeVariableList { vec: Vec, - #[ssz(skip_serializing, skip_deserializing)] + #[serde(skip)] max_len: usize, } -impl RuntimeVariableList { - pub fn new(vec: Vec, max_len: usize) -> Result { +impl RuntimeVariableList { + /// Returns `Ok` if the given `vec` contains no more than `max_len` values. Otherwise returns + /// `Err(OutOfBounds { .. })`. + pub fn new(vec: Vec, max_len: usize) -> Result { if vec.len() <= max_len { Ok(Self { vec, max_len }) } else { - Err(ssz_types::Error::OutOfBounds { + Err(Error::OutOfBounds { i: vec.len(), len: max_len, }) @@ -27,22 +65,50 @@ impl RuntimeVariableList { Self { vec, max_len } } - pub fn to_vec(&self) -> Vec { - self.vec.clone() + /// Create an empty list. + pub fn empty(max_len: usize) -> Self { + Self { + vec: vec![], + max_len, + } } pub fn as_slice(&self) -> &[T] { self.vec.as_slice() } + /// Returns the number of values presently in `self`. pub fn len(&self) -> usize { self.vec.len() } + /// True if `self` does not contain any values. pub fn is_empty(&self) -> bool { - self.vec.is_empty() + self.len() == 0 + } + + /// Returns the runtime maximum length. + pub fn max_len(&self) -> usize { + self.max_len + } + + /// Appends `value` to the back of `self`. + /// + /// Returns `Err(Error::OutOfBounds { .. })` when appending `value` would exceed the maximum length. 
+ pub fn push(&mut self, value: T) -> Result<(), Error> { + if self.vec.len() < self.max_len { + self.vec.push(value); + Ok(()) + } else { + Err(Error::OutOfBounds { + i: self.vec.len().saturating_add(1), + len: self.max_len, + }) + } } +} +impl RuntimeVariableList { pub fn from_ssz_bytes(bytes: &[u8], max_len: usize) -> Result { let vec = if bytes.is_empty() { vec![] @@ -54,7 +120,7 @@ impl RuntimeVariableList { if num_items > max_len { return Err(ssz::DecodeError::BytesInvalid(format!( - "VariableList of {} items exceeds maximum of {}", + "RuntimeVariableList of {} items exceeds maximum of {}", num_items, max_len ))); } @@ -73,65 +139,162 @@ impl RuntimeVariableList { } } +impl From> for Vec { + fn from(list: RuntimeVariableList) -> Vec { + list.vec + } +} + +impl> Index for RuntimeVariableList { + type Output = I::Output; + + #[inline] + fn index(&self, index: I) -> &Self::Output { + Index::index(&self.vec, index) + } +} + +impl> IndexMut for RuntimeVariableList { + #[inline] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + IndexMut::index_mut(&mut self.vec, index) + } +} + +impl Deref for RuntimeVariableList { + type Target = [T]; + + fn deref(&self) -> &[T] { + &self.vec[..] + } +} + +impl DerefMut for RuntimeVariableList { + fn deref_mut(&mut self) -> &mut [T] { + &mut self.vec[..] 
+ } +} + +impl<'a, T> IntoIterator for &'a RuntimeVariableList { + type Item = &'a T; + type IntoIter = std::slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl IntoIterator for RuntimeVariableList { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.vec.into_iter() + } +} + +impl ssz::Encode for RuntimeVariableList +where + T: ssz::Encode, +{ + fn is_ssz_fixed_len() -> bool { + >::is_ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.vec.ssz_append(buf) + } + + fn ssz_fixed_len() -> usize { + >::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.vec.ssz_bytes_len() + } +} + #[cfg(test)] mod test { - use ssz_types::{typenum::U4, VariableList}; - use super::*; + use ssz::*; + use std::fmt::Debug; #[test] fn new() { let vec = vec![42; 5]; - let runtime_var_list: Result, _> = - RuntimeVariableList::new(vec, 4); - assert!(runtime_var_list.is_err()); + let fixed: Result, _> = RuntimeVariableList::new(vec, 4); + assert!(fixed.is_err()); let vec = vec![42; 3]; - let runtime_var_list: Result, _> = - RuntimeVariableList::new(vec, 4); - assert!(runtime_var_list.is_ok()); + let fixed: Result, _> = RuntimeVariableList::new(vec, 4); + assert!(fixed.is_ok()); let vec = vec![42; 4]; - let runtime_var_list: Result, _> = - RuntimeVariableList::new(vec, 4); - assert!(runtime_var_list.is_ok()); + let fixed: Result, _> = RuntimeVariableList::new(vec, 4); + assert!(fixed.is_ok()); + } + + #[test] + fn indexing() { + let vec = vec![1, 2]; + + let mut fixed: RuntimeVariableList = RuntimeVariableList::from_vec(vec.clone(), 8192); + + assert_eq!(fixed[0], 1); + assert_eq!(&fixed[0..1], &vec[0..1]); + assert_eq!(fixed[..].len(), 2); + + fixed[1] = 3; + assert_eq!(fixed[1], 3); } #[test] fn length() { + let vec = vec![42; 5]; + let fixed: RuntimeVariableList = RuntimeVariableList::from_vec(vec.clone(), 4); + assert_eq!(&fixed[..], &vec[0..4]); + let vec = vec![42; 
3]; - let runtime_var_list: RuntimeVariableList = - RuntimeVariableList::new(vec.clone(), 4).unwrap(); - let var_list: VariableList = VariableList::from(vec.clone()); - assert_eq!(&runtime_var_list.as_slice()[0..3], &vec[..]); - assert_eq!(runtime_var_list.as_slice(), &vec![42, 42, 42][..]); - assert_eq!(runtime_var_list.len(), var_list.len()); + let fixed: RuntimeVariableList = RuntimeVariableList::from_vec(vec.clone(), 4); + assert_eq!(&fixed[0..3], &vec[..]); + assert_eq!(&fixed[..], &vec![42, 42, 42][..]); let vec = vec![]; - let runtime_var_list: RuntimeVariableList = RuntimeVariableList::new(vec, 4).unwrap(); - assert_eq!(runtime_var_list.as_slice(), &[] as &[u64]); - assert!(runtime_var_list.is_empty()); + let fixed: RuntimeVariableList = RuntimeVariableList::from_vec(vec, 4); + assert_eq!(&fixed[..], &[] as &[u64]); } #[test] - fn encode() { - let runtime_var_list: RuntimeVariableList = - RuntimeVariableList::new(vec![0; 2], 2).unwrap(); + fn deref() { + let vec = vec![0, 2, 4, 6]; + let fixed: RuntimeVariableList = RuntimeVariableList::from_vec(vec, 4); - assert_eq!(runtime_var_list.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); + assert_eq!(fixed.first(), Some(&0)); + assert_eq!(fixed.get(3), Some(&6)); + assert_eq!(fixed.get(4), None); } #[test] - fn round_trip() { - let item = RuntimeVariableList::::new(vec![42; 8], 8).unwrap(); - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(RuntimeVariableList::from_ssz_bytes(encoded, 8), Ok(item)); + fn encode() { + let vec: RuntimeVariableList = RuntimeVariableList::from_vec(vec![0; 2], 2); + assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); + assert_eq!( as Encode>::ssz_fixed_len(), 4); + } - let item = RuntimeVariableList::::new(vec![0; 8], 8).unwrap(); + fn round_trip(item: RuntimeVariableList) { + let max_len = item.max_len(); let encoded = &item.as_ssz_bytes(); assert_eq!(item.ssz_bytes_len(), encoded.len()); - 
assert_eq!(RuntimeVariableList::from_ssz_bytes(encoded, 8), Ok(item)); + assert_eq!( + RuntimeVariableList::from_ssz_bytes(encoded, max_len), + Ok(item) + ); + } + + #[test] + fn u16_len_8() { + round_trip::(RuntimeVariableList::from_vec(vec![42; 8], 8)); + round_trip::(RuntimeVariableList::from_vec(vec![0; 8], 8)); } } diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index a22df49ad7b..4d3279a7f77 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -498,7 +498,6 @@ impl SignedBeaconBlockElectra> { execution_payload: BlindedPayloadElectra { .. }, bls_to_execution_changes, blob_kzg_commitments, - consolidations, }, }, signature, @@ -522,7 +521,6 @@ impl SignedBeaconBlockElectra> { execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, - consolidations, }, }, signature, diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index b2a6b6a2a00..9bfe6fb261c 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,5 +1,6 @@ //! Identifies each shard by an integer identifier. use crate::{AttestationRef, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot}; +use alloy_primitives::{bytes::Buf, U256}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; @@ -77,7 +78,7 @@ impl SubnetId { /// along with the first epoch in which these subscriptions are no longer valid. 
#[allow(clippy::arithmetic_side_effects)] pub fn compute_subnets_for_epoch( - node_id: ethereum_types::U256, + raw_node_id: [u8; 32], epoch: Epoch, spec: &ChainSpec, ) -> Result<(impl Iterator, Epoch), &'static str> { @@ -85,10 +86,13 @@ impl SubnetId { let subscription_duration = spec.epochs_per_subnet_subscription; let prefix_bits = spec.attestation_subnet_prefix_bits as u64; let shuffling_prefix_bits = spec.attestation_subnet_shuffling_prefix_bits as u64; + let node_id = U256::from_be_slice(&raw_node_id); // calculate the prefixes used to compute the subnet and shuffling - let node_id_prefix = (node_id >> (256 - prefix_bits)).as_u64(); - let shuffling_prefix = (node_id >> (256 - (prefix_bits + shuffling_prefix_bits))).as_u64(); + let node_id_prefix = (node_id >> (256 - prefix_bits)).as_le_slice().get_u64_le(); + let shuffling_prefix = (node_id >> (256 - (prefix_bits + shuffling_prefix_bits))) + .as_le_slice() + .get_u64_le(); // number of groups the shuffling creates let shuffling_groups = 1 << shuffling_prefix_bits; @@ -170,6 +174,8 @@ impl AsRef for SubnetId { #[cfg(test)] mod tests { + use crate::Uint256; + use super::*; /// A set of tests compared to the python specification @@ -188,7 +194,7 @@ mod tests { "60930578857433095740782970114409273483106482059893286066493409689627770333527", "103822458477361691467064888613019442068586830412598673713899771287914656699997", ] - .map(|v| ethereum_types::U256::from_dec_str(v).unwrap()); + .map(|v| Uint256::from_str_radix(v, 10).unwrap().to_be_bytes::<32>()); let epochs = [ 54321u64, 1017090249, 1827566880, 846255942, 766597383, 1204990115, 1616209495, @@ -222,7 +228,7 @@ mod tests { for x in 0..node_ids.len() { println!("Test: {}", x); println!( - "NodeId: {}\n Epoch: {}\n, expected_update_time: {}\n, expected_subnets: {:?}", + "NodeId: {:?}\n Epoch: {}\n, expected_update_time: {}\n, expected_subnets: {:?}", node_ids[x], epochs[x], expected_valid_time[x], expected_subnets[x] ); diff --git 
a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_selection_proof.rs index 0b32c6981b6..4adb90b26e2 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_selection_proof.rs @@ -105,7 +105,7 @@ impl From for SyncSelectionProof { #[cfg(test)] mod test { use super::*; - use crate::MainnetEthSpec; + use crate::{FixedBytesExtended, MainnetEthSpec}; use eth2_interop_keypairs::keypair; #[test] diff --git a/consensus/types/src/test_utils/test_random/uint256.rs b/consensus/types/src/test_utils/test_random/uint256.rs index 5eccc0a9fa5..30077f0e0f6 100644 --- a/consensus/types/src/test_utils/test_random/uint256.rs +++ b/consensus/types/src/test_utils/test_random/uint256.rs @@ -4,6 +4,6 @@ impl TestRandom for Uint256 { fn random_for_test(rng: &mut impl RngCore) -> Self { let mut key_bytes = [0; 32]; rng.fill_bytes(&mut key_bytes); - Self::from_little_endian(&key_bytes[..]) + Self::from_le_slice(&key_bytes[..]) } } diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index b5e92d1f5d8..3c6037e23e3 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, ForkName, - Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, + FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -129,7 +129,7 @@ impl Validator { /// Returns `true` if the validator has eth1 withdrawal credential. 
pub fn has_eth1_withdrawal_credential(&self, spec: &ChainSpec) -> bool { self.withdrawal_credentials - .as_bytes() + .as_slice() .first() .map(|byte| *byte == spec.eth1_address_withdrawal_prefix_byte) .unwrap_or(false) @@ -145,7 +145,7 @@ impl Validator { self.has_execution_withdrawal_credential(spec) .then(|| { self.withdrawal_credentials - .as_bytes() + .as_slice() .get(12..) .map(Address::from_slice) }) @@ -158,7 +158,7 @@ impl Validator { pub fn change_withdrawal_credentials(&mut self, execution_address: &Address, spec: &ChainSpec) { let mut bytes = [0u8; 32]; bytes[0] = spec.eth1_address_withdrawal_prefix_byte; - bytes[12..].copy_from_slice(execution_address.as_bytes()); + bytes[12..].copy_from_slice(execution_address.as_slice()); self.withdrawal_credentials = Hash256::from(bytes); } @@ -219,6 +219,7 @@ impl Validator { } } + /// TODO(electra): refactor these functions and make it simpler.. this is a mess /// Returns `true` if the validator is partially withdrawable. fn is_partially_withdrawable_validator_capella(&self, balance: u64, spec: &ChainSpec) -> bool { self.has_eth1_withdrawal_credential(spec) @@ -282,7 +283,7 @@ impl Default for Validator { fn default() -> Self { Self { pubkey: PublicKeyBytes::empty(), - withdrawal_credentials: Hash256::default(), + withdrawal_credentials: Hash256::zero(), activation_eligibility_epoch: Epoch::from(u64::MAX), activation_epoch: Epoch::from(u64::MAX), exit_epoch: Epoch::from(u64::MAX), @@ -298,7 +299,7 @@ pub fn is_compounding_withdrawal_credential( spec: &ChainSpec, ) -> bool { withdrawal_credentials - .as_bytes() + .as_slice() .first() .map(|prefix_byte| *prefix_byte == spec.compounding_withdrawal_prefix_byte) .unwrap_or(false) diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs index 174014df8ec..cdafd355e7c 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator_registration_data.rs @@ -12,6 +12,7 @@ pub 
struct SignedValidatorRegistrationData { #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, TreeHash)] pub struct ValidatorRegistrationData { + #[serde(with = "serde_utils::address_hex")] pub fee_recipient: Address, #[serde(with = "serde_utils::quoted_u64")] pub gas_limit: u64, diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index 3e611565541..7f98ff1e60a 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -24,6 +24,7 @@ pub struct Withdrawal { pub index: u64, #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, + #[serde(with = "serde_utils::address_hex")] pub address: Address, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, diff --git a/consensus/types/src/withdrawal_credentials.rs b/consensus/types/src/withdrawal_credentials.rs index 8d42d4eafd4..52d51ed559c 100644 --- a/consensus/types/src/withdrawal_credentials.rs +++ b/consensus/types/src/withdrawal_credentials.rs @@ -13,7 +13,7 @@ impl WithdrawalCredentials { pub fn eth1(withdrawal_address: Address, spec: &ChainSpec) -> Self { let mut withdrawal_credentials = [0; 32]; withdrawal_credentials[0] = spec.eth1_address_withdrawal_prefix_byte; - withdrawal_credentials[12..].copy_from_slice(withdrawal_address.as_bytes()); + withdrawal_credentials[12..].copy_from_slice(withdrawal_address.as_slice()); Self(Hash256::from_slice(&withdrawal_credentials)) } } @@ -39,7 +39,7 @@ mod test { get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte); let hash: Hash256 = credentials.into(); assert_eq!(hash[0], spec.bls_withdrawal_prefix_byte); - assert_eq!(hash.as_bytes(), &manually_generated_credentials); + assert_eq!(hash.as_slice(), &manually_generated_credentials); } #[test] diff --git a/consensus/types/src/execution_layer_withdrawal_request.rs b/consensus/types/src/withdrawal_request.rs similarity index 82% rename from consensus/types/src/execution_layer_withdrawal_request.rs rename to 
consensus/types/src/withdrawal_request.rs index b1d814c2834..b6db0efb26d 100644 --- a/consensus/types/src/execution_layer_withdrawal_request.rs +++ b/consensus/types/src/withdrawal_request.rs @@ -19,7 +19,8 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] -pub struct ExecutionLayerWithdrawalRequest { +pub struct WithdrawalRequest { + #[serde(with = "serde_utils::address_hex")] pub source_address: Address, pub validator_pubkey: PublicKeyBytes, #[serde(with = "serde_utils::quoted_u64")] @@ -30,5 +31,5 @@ pub struct ExecutionLayerWithdrawalRequest { mod tests { use super::*; - ssz_and_tree_hash_tests!(ExecutionLayerWithdrawalRequest); + ssz_and_tree_hash_tests!(WithdrawalRequest); } diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index 7aa8e02dcab..b65b51230c3 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] +alloy-primitives = { workspace = true } ethereum_ssz = { workspace = true } tree_hash = { workspace = true } rand = { workspace = true } @@ -12,10 +13,11 @@ serde = { workspace = true } ethereum_serde_utils = { workspace = true } hex = { workspace = true } ethereum_hashing = { workspace = true } -ethereum-types = { workspace = true } arbitrary = { workspace = true } zeroize = { workspace = true } blst = { version = "0.3.3", optional = true } +safe_arith = { workspace = true } +fixed_bytes = { workspace = true } [features] arbitrary = [] diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index 54c7ad2944e..baa704e05a9 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -68,7 +68,7 @@ pub fn verify_signature_sets<'a>( } // Grab a slice of the message, to satisfy the blst API. 
- msgs_refs.push(set.message.as_bytes()); + msgs_refs.push(set.message.as_slice()); if let Some(point) = set.signature.point() { // Subgroup check the signature @@ -196,7 +196,7 @@ impl TSignature for blst_core::Signature { fn verify(&self, pubkey: &blst_core::PublicKey, msg: Hash256) -> bool { // Public keys have already been checked for subgroup and infinity // Check Signature inside function for subgroup - self.verify(true, msg.as_bytes(), DST, &[], pubkey, false) == BLST_ERROR::BLST_SUCCESS + self.verify(true, msg.as_slice(), DST, &[], pubkey, false) == BLST_ERROR::BLST_SUCCESS } } @@ -256,7 +256,7 @@ impl TAggregateSignature], ) -> bool { let pubkeys = pubkeys.iter().map(|pk| pk.point()).collect::>(); - let msgs = msgs.iter().map(|hash| hash.as_bytes()).collect::>(); + let msgs = msgs.iter().map(|hash| hash.as_slice()).collect::>(); let signature = self.0.clone().to_signature(); // Public keys have already been checked for subgroup and infinity // Check Signature inside function for subgroup @@ -287,7 +287,7 @@ impl TSecretKey for blst_core::Secre } fn sign(&self, msg: Hash256) -> blst_core::Signature { - self.sign(msg.as_bytes(), DST, &[]) + self.sign(msg.as_slice(), DST, &[]) } fn serialize(&self) -> ZeroizeHash { diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index af269b943d7..6ea85548c0d 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -44,7 +44,8 @@ pub use zeroize_hash::ZeroizeHash; #[cfg(feature = "supranational")] use blst::BLST_ERROR as BlstError; -pub type Hash256 = ethereum_types::H256; +pub type Hash256 = fixed_bytes::Hash256; +pub use fixed_bytes::FixedBytesExtended; #[derive(Clone, Debug, PartialEq)] pub enum Error { diff --git a/crypto/bls/tests/tests.rs b/crypto/bls/tests/tests.rs index dac2e97f407..26215771b5f 100644 --- a/crypto/bls/tests/tests.rs +++ b/crypto/bls/tests/tests.rs @@ -1,4 +1,4 @@ -use bls::{Hash256, INFINITY_SIGNATURE, SECRET_KEY_BYTES_LEN}; +use bls::{FixedBytesExtended, Hash256, 
INFINITY_SIGNATURE, SECRET_KEY_BYTES_LEN}; use ssz::{Decode, Encode}; use std::borrow::Cow; use std::fmt::Debug; diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index d26dfe4992a..e940fe2e20c 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -17,3 +17,13 @@ ethereum_serde_utils = { workspace = true } hex = { workspace = true } ethereum_hashing = { workspace = true } c-kzg = { workspace = true } +rust_eth_kzg = { workspace = true } + +[dev-dependencies] +criterion = { workspace = true } +serde_json = { workspace = true } +eth2_network_config = { workspace = true } + +[[bench]] +name = "benchmark" +harness = false diff --git a/crypto/kzg/benches/benchmark.rs b/crypto/kzg/benches/benchmark.rs new file mode 100644 index 00000000000..35e370cd0fd --- /dev/null +++ b/crypto/kzg/benches/benchmark.rs @@ -0,0 +1,35 @@ +use c_kzg::KzgSettings; +use criterion::{criterion_group, criterion_main, Criterion}; +use eth2_network_config::TRUSTED_SETUP_BYTES; +use kzg::TrustedSetup; +use rust_eth_kzg::{DASContext, TrustedSetup as PeerDASTrustedSetup}; + +pub fn bench_init_context(c: &mut Criterion) { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + + c.bench_function(&format!("Initialize context rust_eth_kzg"), |b| { + b.iter(|| { + let trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); + DASContext::new( + &trusted_setup, + rust_eth_kzg::UsePrecomp::Yes { + width: rust_eth_kzg::constants::RECOMMENDED_PRECOMP_WIDTH, + }, + ) + }) + }); + c.bench_function(&format!("Initialize context c-kzg (4844)"), |b| { + b.iter(|| { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + KzgSettings::load_trusted_setup(&trusted_setup.g1_points(), &trusted_setup.g2_points()) + .unwrap() + }) + 
}); +} + +criterion_group!(benches, bench_init_context); +criterion_main!(benches); diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 181642df390..ebe93934fd7 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -2,6 +2,7 @@ mod kzg_commitment; mod kzg_proof; mod trusted_setup; +use rust_eth_kzg::{CellIndex, DASContext}; use std::fmt::Debug; pub use crate::{ @@ -9,18 +10,35 @@ pub use crate::{ kzg_proof::KzgProof, trusted_setup::TrustedSetup, }; + pub use c_kzg::{ Blob, Bytes32, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF, FIELD_ELEMENTS_PER_BLOB, }; + +pub use rust_eth_kzg::{ + constants::{BYTES_PER_CELL, CELLS_PER_EXT_BLOB}, + Cell, CellIndex as CellID, CellRef, TrustedSetup as PeerDASTrustedSetup, +}; + +pub type CellsAndKzgProofs = ([Cell; CELLS_PER_EXT_BLOB], [KzgProof; CELLS_PER_EXT_BLOB]); + +pub type KzgBlobRef<'a> = &'a [u8; BYTES_PER_BLOB]; + #[derive(Debug)] pub enum Error { /// An error from the underlying kzg library. Kzg(c_kzg::Error), + /// A prover/verifier error from the rust-eth-kzg library. + PeerDASKZG(rust_eth_kzg::Error), /// The kzg verification failed KzgVerificationFailed, /// Misc indexing error InconsistentArrayLength(String), + /// Error reconstructing data columns. + ReconstructFailed(String), + /// Kzg was not initialized with PeerDAS enabled. 
+ DASContextUninitialized, } impl From for Error { @@ -29,32 +47,11 @@ impl From for Error { } } -pub const CELLS_PER_EXT_BLOB: usize = 128; - -// TODO(das): use proper crypto once ckzg merges das branch -#[allow(dead_code)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct Cell { - bytes: [u8; 2048usize], -} - -impl Cell { - pub fn from_bytes(b: &[u8]) -> Result { - Ok(Self { - bytes: b - .try_into() - .map_err(|_| Error::Kzg(c_kzg::Error::MismatchLength("".to_owned())))?, - }) - } - pub fn into_inner(self) -> [u8; 2048usize] { - self.bytes - } -} - /// A wrapper over a kzg library that holds the trusted setup parameters. #[derive(Debug)] pub struct Kzg { trusted_setup: KzgSettings, + context: Option, } impl Kzg { @@ -65,9 +62,40 @@ impl Kzg { &trusted_setup.g1_points(), &trusted_setup.g2_points(), )?, + context: None, + }) + } + + pub fn new_from_trusted_setup_das_enabled(trusted_setup: TrustedSetup) -> Result { + // Initialize the trusted setup using default parameters + // + // Note: One can also use `from_json` to initialize it from the consensus-specs + // json string. + let peerdas_trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); + + // It's not recommended to change the config parameter for precomputation as storage + // grows exponentially, but the speedup is exponential - after a while the speedup + // starts to become sublinear. + let context = DASContext::new( + &peerdas_trusted_setup, + rust_eth_kzg::UsePrecomp::Yes { + width: rust_eth_kzg::constants::RECOMMENDED_PRECOMP_WIDTH, + }, + ); + + Ok(Self { + trusted_setup: KzgSettings::load_trusted_setup( + &trusted_setup.g1_points(), + &trusted_setup.g2_points(), + )?, + context: Some(context), }) } + fn context(&self) -> Result<&DASContext, Error> { + self.context.as_ref().ok_or(Error::DASContextUninitialized) + } + /// Compute the kzg proof given a blob and its kzg commitment. 
pub fn compute_blob_kzg_proof( &self, @@ -167,21 +195,18 @@ impl Kzg { } /// Computes the cells and associated proofs for a given `blob` at index `index`. - #[allow(clippy::type_complexity)] pub fn compute_cells_and_proofs( &self, - _blob: &Blob, - ) -> Result< - ( - Box<[Cell; CELLS_PER_EXT_BLOB]>, - Box<[KzgProof; CELLS_PER_EXT_BLOB]>, - ), - Error, - > { - // TODO(das): use proper crypto once ckzg merges das branch - let cells = Box::new(core::array::from_fn(|_| Cell { bytes: [0u8; 2048] })); - let proofs = Box::new([KzgProof([0u8; BYTES_PER_PROOF]); CELLS_PER_EXT_BLOB]); - Ok((cells, proofs)) + blob: KzgBlobRef<'_>, + ) -> Result { + let (cells, proofs) = self + .context()? + .compute_cells_and_kzg_proofs(blob) + .map_err(Error::PeerDASKZG)?; + + // Convert the proof type to a c-kzg proof type + let c_kzg_proof = proofs.map(KzgProof); + Ok((cells, c_kzg_proof)) } /// Verifies a batch of cell-proof-commitment triplets. @@ -191,35 +216,43 @@ impl Kzg { /// to the data column index. pub fn verify_cell_proof_batch( &self, - _cells: &[Cell], - _kzg_proofs: &[Bytes48], - _coordinates: &[(u64, u64)], - _kzg_commitments: &[Bytes48], + cells: &[CellRef<'_>], + kzg_proofs: &[Bytes48], + columns: Vec, + kzg_commitments: &[Bytes48], ) -> Result<(), Error> { - // TODO(das): use proper crypto once ckzg merges das branch - Ok(()) - } + let proofs: Vec<_> = kzg_proofs.iter().map(|proof| proof.as_ref()).collect(); + let commitments: Vec<_> = kzg_commitments + .iter() + .map(|commitment| commitment.as_ref()) + .collect(); + let verification_result = self.context()?.verify_cell_kzg_proof_batch( + commitments.to_vec(), + columns, + cells.to_vec(), + proofs.to_vec(), + ); - pub fn cells_to_blob(&self, _cells: &[Cell; CELLS_PER_EXT_BLOB]) -> Result { - // TODO(das): use proper crypto once ckzg merges das branch - Ok(Blob::new([0u8; 131072usize])) + // Modify the result so it matches roughly what the previous method was doing. 
+ match verification_result { + Ok(_) => Ok(()), + Err(e) if e.invalid_proof() => Err(Error::KzgVerificationFailed), + Err(e) => Err(Error::PeerDASKZG(e)), + } } - pub fn recover_all_cells( + pub fn recover_cells_and_compute_kzg_proofs( &self, - _cell_ids: &[u64], - _cells: &[Cell], - ) -> Result, Error> { - // TODO(das): use proper crypto once ckzg merges das branch - let cells = Box::new(core::array::from_fn(|_| Cell { bytes: [0u8; 2048] })); - Ok(cells) - } -} - -impl TryFrom for Kzg { - type Error = Error; + cell_ids: &[u64], + cells: &[CellRef<'_>], + ) -> Result { + let (cells, proofs) = self + .context()? + .recover_cells_and_kzg_proofs(cell_ids.to_vec(), cells.to_vec()) + .map_err(Error::PeerDASKZG)?; - fn try_from(trusted_setup: TrustedSetup) -> Result { - Kzg::new_from_trusted_setup(trusted_setup) + // Convert the proof type to a c-kzg proof type + let c_kzg_proof = proofs.map(KzgProof); + Ok((cells, c_kzg_proof)) } } diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs index d930eabe224..6ddc33df5ab 100644 --- a/crypto/kzg/src/trusted_setup.rs +++ b/crypto/kzg/src/trusted_setup.rs @@ -1,3 +1,4 @@ +use crate::PeerDASTrustedSetup; use c_kzg::{BYTES_PER_G1_POINT, BYTES_PER_G2_POINT}; use serde::{ de::{self, Deserializer, Visitor}, @@ -43,6 +44,28 @@ impl TrustedSetup { } } +impl From<&TrustedSetup> for PeerDASTrustedSetup { + fn from(trusted_setup: &TrustedSetup) -> Self { + Self { + g1_monomial: trusted_setup + .g1_monomial_points + .iter() + .map(|g1_point| format!("0x{}", hex::encode(g1_point.0))) + .collect::>(), + g1_lagrange: trusted_setup + .g1_points + .iter() + .map(|g1_point| format!("0x{}", hex::encode(g1_point.0))) + .collect::>(), + g2_monomial: trusted_setup + .g2_points + .iter() + .map(|g2_point| format!("0x{}", hex::encode(g2_point.0))) + .collect::>(), + } + } +} + impl Serialize for G1Point { fn serialize(&self, serializer: S) -> Result where diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 
3cddd8ee60b..77d122efb79 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "5.2.1" +version = "5.3.0" authors = ["Paul Hauner "] edition = { workspace = true } @@ -14,6 +14,7 @@ jemalloc = ["malloc_utils/jemalloc"] bls = { workspace = true } clap = { workspace = true } log = { workspace = true } +sloggers = { workspace = true } serde = { workspace = true } serde_yaml = { workspace = true } serde_json = { workspace = true } diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 52960b929d8..e1acac12dfe 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -8,9 +8,9 @@ use std::io::Write; use std::path::PathBuf; use std::{fs, net::Ipv4Addr}; use std::{fs::File, num::NonZeroU16}; -use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; +use types::{ChainSpec, EnrForkId, Epoch, EthSpec, FixedBytesExtended, Hash256}; -pub fn run(matches: &ArgMatches) -> Result<(), String> { +pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), String> { let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; let udp_port: NonZeroU16 = clap_utils::parse_required(matches, "udp-port")?; let tcp_port: NonZeroU16 = clap_utils::parse_required(matches, "tcp-port")?; @@ -37,7 +37,7 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { next_fork_version: genesis_fork_version, next_fork_epoch: Epoch::max_value(), // FAR_FUTURE_EPOCH }; - let enr = build_enr::(&enr_key, &config, &enr_fork_id) + let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec) .map_err(|e| format!("Unable to create ENR: {:?}", e))?; fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?; diff --git a/lcli/src/http_sync.rs b/lcli/src/http_sync.rs new file mode 100644 index 00000000000..1ef40e63978 --- /dev/null +++ b/lcli/src/http_sync.rs @@ -0,0 +1,152 @@ +use clap::ArgMatches; +use 
clap_utils::{parse_optional, parse_required}; +use environment::Environment; +use eth2::{ + types::{BlockId, ChainSpec, ForkName, PublishBlockRequest, SignedBlockContents}, + BeaconNodeHttpClient, Error, SensitiveUrl, Timeouts, +}; +use eth2_network_config::Eth2NetworkConfig; +use ssz::Encode; +use std::fs; +use std::fs::File; +use std::io::{Read, Write}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::time::Duration; +use types::EthSpec; + +const HTTP_TIMEOUT: Duration = Duration::from_secs(3600); +const DEFAULT_CACHE_DIR: &str = "./cache"; + +pub fn run( + env: Environment, + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let executor = env.core_context().executor; + executor + .handle() + .ok_or("shutdown in progress")? + .block_on(async move { run_async::(network_config, matches).await }) +} + +pub async fn run_async( + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let spec = &network_config.chain_spec::()?; + let source_url: SensitiveUrl = parse_required(matches, "source-url")?; + let target_url: SensitiveUrl = parse_required(matches, "target-url")?; + let start_block: BlockId = parse_required(matches, "start-block")?; + let maybe_common_ancestor_block: Option = + parse_optional(matches, "known-common-ancestor")?; + let cache_dir_path: PathBuf = + parse_optional(matches, "block-cache-dir")?.unwrap_or(DEFAULT_CACHE_DIR.into()); + + let source = BeaconNodeHttpClient::new(source_url, Timeouts::set_all(HTTP_TIMEOUT)); + let target = BeaconNodeHttpClient::new(target_url, Timeouts::set_all(HTTP_TIMEOUT)); + + if !cache_dir_path.exists() { + fs::create_dir_all(&cache_dir_path) + .map_err(|e| format!("Unable to create block cache dir: {:?}", e))?; + } + + // 1. Download blocks back from head, looking for common ancestor.
+ let mut blocks = vec![]; + let mut next_block_id = start_block; + loop { + println!("downloading {next_block_id:?}"); + + let publish_block_req = + get_block_from_source::(&source, next_block_id, spec, &cache_dir_path).await; + let block = publish_block_req.signed_block(); + + next_block_id = BlockId::Root(block.parent_root()); + blocks.push((block.slot(), publish_block_req)); + + if let Some(ref common_ancestor_block) = maybe_common_ancestor_block { + if common_ancestor_block == &next_block_id { + println!("reached known common ancestor: {next_block_id:?}"); + break; + } + } + + let block_exists_in_target = target + .get_beacon_blocks_ssz::(next_block_id, spec) + .await + .unwrap() + .is_some(); + if block_exists_in_target { + println!("common ancestor found: {next_block_id:?}"); + break; + } + } + + // 2. Apply blocks to target. + for (slot, block) in blocks.iter().rev() { + println!("posting block at slot {slot}"); + if let Err(e) = target.post_beacon_blocks(block).await { + if let Error::ServerMessage(ref e) = e { + if e.code == 202 { + println!("duplicate block detected while posting block at slot {slot}"); + continue; + } + } + return Err(format!("error posting {slot}: {e:?}")); + } else { + println!("success"); + } + } + + println!("SYNCED!!!!"); + + Ok(()) +} + +async fn get_block_from_source( + source: &BeaconNodeHttpClient, + block_id: BlockId, + spec: &ChainSpec, + cache_dir_path: &Path, +) -> PublishBlockRequest { + let mut cache_path = cache_dir_path.join(format!("block_{block_id}")); + + if cache_path.exists() { + let mut f = File::open(&cache_path).unwrap(); + let mut bytes = vec![]; + f.read_to_end(&mut bytes).unwrap(); + PublishBlockRequest::from_ssz_bytes(&bytes, ForkName::Deneb).unwrap() + } else { + let block_from_source = source + .get_beacon_blocks_ssz::(block_id, spec) + .await + .unwrap() + .unwrap(); + let blobs_from_source = source + .get_blobs::(block_id, None) + .await + .unwrap() + .unwrap() + .data; + + let (kzg_proofs, blobs): 
(Vec<_>, Vec<_>) = blobs_from_source + .iter() + .cloned() + .map(|sidecar| (sidecar.kzg_proof, sidecar.blob.clone())) + .unzip(); + + let block_root = block_from_source.canonical_root(); + let block_contents = SignedBlockContents { + signed_block: Arc::new(block_from_source), + kzg_proofs: kzg_proofs.into(), + blobs: blobs.into(), + }; + let publish_block_req = PublishBlockRequest::BlockContents(block_contents); + + cache_path = cache_dir_path.join(format!("block_{block_root:?}")); + let mut f = File::create(&cache_path).unwrap(); + f.write_all(&publish_block_req.as_ssz_bytes()).unwrap(); + + publish_block_req + } +} diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 85898b60ee4..f055a23b362 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -1,6 +1,7 @@ mod block_root; mod check_deposit_data; mod generate_bootnode_enr; +mod http_sync; mod indexed_attestations; mod mnemonic_validators; mod mock_el; @@ -552,6 +553,74 @@ fn main() { .display_order(0) ) ) + .subcommand( + Command::new("http-sync") + .about("Manual sync") + .arg( + Arg::new("start-block") + .long("start-block") + .value_name("BLOCK_ID") + .action(ArgAction::Set) + .help("Block ID of source's head") + .default_value("head") + .required(true) + .display_order(0) + ) + .arg( + Arg::new("source-url") + .long("source-url") + .value_name("URL") + .action(ArgAction::Set) + .help("URL to a synced beacon-API provider") + .required(true) + .display_order(0) + ) + .arg( + Arg::new("target-url") + .long("target-url") + .value_name("URL") + .action(ArgAction::Set) + .help("URL to an unsynced beacon-API provider") + .required(true) + .display_order(0) + ) + .arg( + Arg::new("testnet-dir") + .short('d') + .long("testnet-dir") + .value_name("PATH") + .action(ArgAction::Set) + .global(true) + .help("The testnet dir.") + .display_order(0) + ) + .arg( + Arg::new("network") + .long("network") + .value_name("NAME") + .action(ArgAction::Set) + .global(true) + .help("The network to use. 
Defaults to mainnet.") + .conflicts_with("testnet-dir") + .display_order(0) + ) + .arg( + Arg::new("known-common-ancestor") + .long("known-common-ancestor") + .value_name("BLOCK_ID") + .action(ArgAction::Set) + .help("Block ID of common ancestor, if known.") + .display_order(0) + ) + .arg( + Arg::new("block-cache-dir") + .long("block-cache-dir") + .value_name("PATH") + .action(ArgAction::Set) + .help("Directory to keep a cache of the downloaded SSZ blocks.") + .display_order(0) + ) + ) .get_matches(); let result = matches @@ -638,8 +707,10 @@ fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> } Some(("check-deposit-data", matches)) => check_deposit_data::run(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), - Some(("generate-bootnode-enr", matches)) => generate_bootnode_enr::run::(matches) - .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), + Some(("generate-bootnode-enr", matches)) => { + generate_bootnode_enr::run::(matches, &env.eth2_config.spec) + .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)) + } Some(("mnemonic-validators", matches)) => mnemonic_validators::run(matches) .map_err(|e| format!("Failed to run mnemonic-validators command: {}", e)), Some(("indexed-attestations", matches)) => indexed_attestations::run::(matches) @@ -656,6 +727,11 @@ fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> } Some(("mock-el", matches)) => mock_el::run::(env, matches) .map_err(|e| format!("Failed to run mock-el command: {}", e)), + Some(("http-sync", matches)) => { + let network_config = get_network_config()?; + http_sync::run::(env, network_config, matches) + .map_err(|e| format!("Failed to run http-sync command: {}", e)) + } Some((other, _)) => Err(format!("Unknown subcommand {}. See --help.", other)), _ => Err("No subcommand provided. 
See --help.".to_string()), } diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 62ae602187b..ec3bb5b9edb 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -66,13 +66,14 @@ use beacon_chain::{ }; use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; -use environment::{null_logger, Environment}; +use environment::Environment; use eth2::{ types::{BlockId, StateId}, BeaconNodeHttpClient, SensitiveUrl, Timeouts, }; use eth2_network_config::Eth2NetworkConfig; use log::{debug, info}; +use sloggers::{null::NullLoggerBuilder, Build}; use ssz::Encode; use state_processing::state_advance::complete_state_advance; use state_processing::{ @@ -196,7 +197,9 @@ pub fn run( let store = HotColdDB::open_ephemeral( <_>::default(), spec.clone(), - null_logger().map_err(|e| format!("Failed to create null_logger: {:?}", e))?, + NullLoggerBuilder + .build() + .map_err(|e| format!("Error on NullLoggerBuilder: {:?}", e))?, ) .map_err(|e| format!("Failed to create ephemeral store: {:?}", e))?; let store = Arc::new(store); diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index b720601e70f..7c37aa6d67d 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "5.2.1" +version = "5.3.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index a83a7a91571..aa2caa23507 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -12,7 +12,7 @@ use eth2_network_config::Eth2NetworkConfig; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; -use logging::SSELoggingComponents; +use logging::{test_logger, SSELoggingComponents}; use serde::{Deserialize, Serialize}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; use sloggers::{file::FileLoggerBuilder, 
types::Format, types::Severity, Build}; @@ -34,8 +34,6 @@ use { #[cfg(not(target_family = "unix"))] use {futures::channel::oneshot, std::cell::RefCell}; -pub use task_executor::test_utils::null_logger; - const LOG_CHANNEL_SIZE: usize = 16384; const SSE_LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds the client will wait for all internal tasks to shutdown. @@ -184,9 +182,9 @@ impl EnvironmentBuilder { Ok(self) } - /// Specifies that all logs should be sent to `null` (i.e., ignored). - pub fn null_logger(mut self) -> Result { - self.log = Some(null_logger()?); + /// Sets a logger suitable for test usage. + pub fn test_logger(mut self) -> Result { + self.log = Some(test_logger()); Ok(self) } diff --git a/lighthouse/environment/tests/environment_builder.rs b/lighthouse/environment/tests/environment_builder.rs index ad775c99f5e..b0c847612a5 100644 --- a/lighthouse/environment/tests/environment_builder.rs +++ b/lighthouse/environment/tests/environment_builder.rs @@ -9,7 +9,7 @@ fn builder() -> EnvironmentBuilder { EnvironmentBuilder::mainnet() .multi_threaded_tokio_runtime() .expect("should set runtime") - .null_logger() + .test_logger() .expect("should set logger") } diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 4fc7bc2dcff..84e8274f06e 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -100,6 +100,6 @@ ATTESTATION_SUBNET_PREFIX_BITS: 6 ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # DAS -CUSTODY_REQUIREMENT: 1 -DATA_COLUMN_SIDECAR_SUBNET_COUNT: 32 +CUSTODY_REQUIREMENT: 4 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 \ No newline at end of file diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 1a1da459912..e865fbd272e 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -1,6 +1,7 @@ mod cli; mod metrics; +use account_utils::STDIN_INPUTS_FLAG; use 
beacon_node::ProductionBeaconNode; use clap::FromArgMatches; use clap::Subcommand; @@ -104,6 +105,16 @@ fn main() { ) .long_version(LONG_VERSION.as_str()) .display_order(0) + .arg( + Arg::new(STDIN_INPUTS_FLAG) + .long(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help("If present, read all user inputs from stdin instead of tty.") + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) + .global(true) + .display_order(0), + ) .arg( Arg::new("env_log") .short('l') diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index f82e3ec713b..4d155937140 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -15,7 +15,7 @@ use account_manager::{ use account_utils::{ eth2_keystore::KeystoreBuilder, validator_definitions::{SigningDefinition, ValidatorDefinition, ValidatorDefinitions}, - ZeroizeString, + ZeroizeString, STDIN_INPUTS_FLAG, }; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use std::env; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4fdd967c65c..f3832a1a1e5 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -825,6 +825,26 @@ fn network_target_peers_flag() { }); } #[test] +fn network_subscribe_all_data_column_subnets_flag() { + CommandLineTest::new() + .flag("subscribe-all-data-column-subnets", None) + .run_with_zero_port() + .with_config(|config| assert!(config.network.subscribe_all_data_column_subnets)); +} +#[test] +fn network_enable_sampling_flag() { + CommandLineTest::new() + .flag("enable-sampling", None) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.enable_sampling)); +} +#[test] +fn network_enable_sampling_flag_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.enable_sampling)); +} +#[test] fn network_subscribe_all_subnets_flag() { CommandLineTest::new() .flag("subscribe-all-subnets", None) @@ -2022,6 
+2042,13 @@ fn epochs_per_migration_override() { .run_with_zero_port() .with_config(|config| assert_eq!(config.chain.epochs_per_migration, 128)); } +#[test] +fn malicious_withhold_count_flag() { + CommandLineTest::new() + .flag("malicious-withhold-count", Some("128")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.malicious_withhold_count, 128)); +} // Tests for Slasher flags. // Using `--slasher-max-db-size` to work around https://github.com/sigp/lighthouse/issues/2342 diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml index 1c25c30f060..b53d88e52c5 100644 --- a/scripts/local_testnet/network_params.yaml +++ b/scripts/local_testnet/network_params.yaml @@ -1,4 +1,4 @@ -# Full configuration reference [here](https://github.com/kurtosis-tech/ethereum-package?tab=readme-ov-file#configuration). +# Full configuration reference [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). participants: - el_type: geth el_image: ethereum/client-go:latest @@ -14,4 +14,4 @@ global_log_level: debug snooper_enabled: false additional_services: - dora - - prometheus_grafana \ No newline at end of file + - prometheus_grafana diff --git a/scripts/local_testnet/network_params_das.yaml b/scripts/local_testnet/network_params_das.yaml new file mode 100644 index 00000000000..ab2f07a24ec --- /dev/null +++ b/scripts/local_testnet/network_params_das.yaml @@ -0,0 +1,21 @@ +participants: + - cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --subscribe-all-data-column-subnets + - --target-peers=3 + count: 2 + - cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --target-peers=3 + count: 2 +network_params: + eip7594_fork_epoch: 0 + seconds_per_slot: 6 +snooper_enabled: false +global_log_level: debug +additional_services: + - dora + - goomy_blob + - prometheus_grafana diff --git a/scripts/local_testnet/start_local_testnet.sh 
b/scripts/local_testnet/start_local_testnet.sh index 330df76d813..f90132764e4 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -7,6 +7,7 @@ set -Eeuo pipefail SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" ENCLAVE_NAME=local-testnet NETWORK_PARAMS_FILE=$SCRIPT_DIR/network_params.yaml +ETHEREUM_PKG_VERSION=4.2.0 BUILD_IMAGE=true BUILDER_PROPOSALS=false @@ -80,6 +81,6 @@ if [ "$KEEP_ENCLAVE" = false ]; then kurtosis enclave rm -f $ENCLAVE_NAME 2>/dev/null || true fi -kurtosis run --enclave $ENCLAVE_NAME github.com/ethpandaops/ethereum-package --args-file $NETWORK_PARAMS_FILE +kurtosis run --enclave $ENCLAVE_NAME github.com/ethpandaops/ethereum-package@$ETHEREUM_PKG_VERSION --args-file $NETWORK_PARAMS_FILE echo "Started!" diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 5c22c609828..20b4a337711 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -4,8 +4,8 @@ mod mdbx_impl; mod redb_impl; use crate::{ - metrics, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Error, - ProposerSlashingStatus, + metrics, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Database, + Error, ProposerSlashingStatus, }; use byteorder::{BigEndian, ByteOrder}; use interface::{Environment, OpenDatabases, RwTransaction}; @@ -174,7 +174,7 @@ impl IndexedAttestationIdKey { let mut data = [0; INDEXED_ATTESTATION_ID_KEY_SIZE]; data[0..8].copy_from_slice(&target_epoch.as_u64().to_be_bytes()); data[8..INDEXED_ATTESTATION_ID_KEY_SIZE] - .copy_from_slice(indexed_attestation_root.as_bytes()); + .copy_from_slice(indexed_attestation_root.as_slice()); Self { target_and_root: data, } @@ -350,6 +350,18 @@ impl SlasherDB { Ok(()) } + pub fn get_config(&self) -> &Config { + &self.config + } + + /// TESTING ONLY. + /// + /// Replace the config for this database. 
This is only a sane thing to do if the database + /// is empty (has been `reset`). + pub fn update_config(&mut self, config: Arc) { + self.config = config; + } + /// Load a config from disk. /// /// This is generic in order to allow loading of configs for different schema versions. @@ -799,6 +811,50 @@ impl SlasherDB { Ok(()) } + + /// Delete all data from the database, essentially re-initialising it. + /// + /// We use this reset pattern in tests instead of leaking tonnes of file descriptors and + /// exhausting our allocation by creating (and leaking) databases. + /// + /// THIS FUNCTION SHOULD ONLY BE USED IN TESTS. + pub fn reset(&self) -> Result<(), Error> { + // Clear the cache(s) first. + self.attestation_root_cache.lock().clear(); + + // Pattern match to avoid missing any database. + let OpenDatabases { + indexed_attestation_db, + indexed_attestation_id_db, + attesters_db, + attesters_max_targets_db, + min_targets_db, + max_targets_db, + current_epochs_db, + proposers_db, + metadata_db, + } = &self.databases; + let mut txn = self.begin_rw_txn()?; + self.reset_db(&mut txn, indexed_attestation_db)?; + self.reset_db(&mut txn, indexed_attestation_id_db)?; + self.reset_db(&mut txn, attesters_db)?; + self.reset_db(&mut txn, attesters_max_targets_db)?; + self.reset_db(&mut txn, min_targets_db)?; + self.reset_db(&mut txn, max_targets_db)?; + self.reset_db(&mut txn, current_epochs_db)?; + self.reset_db(&mut txn, proposers_db)?; + self.reset_db(&mut txn, metadata_db)?; + txn.commit() + } + + fn reset_db(&self, txn: &mut RwTransaction<'_>, db: &Database<'static>) -> Result<(), Error> { + let mut cursor = txn.cursor(db)?; + if cursor.first_key()?.is_none() { + return Ok(()); + } + cursor.delete_while(|_| Ok(true))?; + Ok(()) + } } #[cfg(test)] diff --git a/slasher/src/database/lmdb_impl.rs b/slasher/src/database/lmdb_impl.rs index 20d89a36fb0..74342968cfa 100644 --- a/slasher/src/database/lmdb_impl.rs +++ b/slasher/src/database/lmdb_impl.rs @@ -165,8 +165,12 @@ 
impl<'env> Cursor<'env> { } pub fn get_current(&mut self) -> Result, Value<'env>)>, Error> { + // FIXME: lmdb has an extremely broken API which can mutate the SHARED REFERENCE + // `value` after `get_current` is called. We need to convert it to a Vec here in order + // to avoid `value` changing after another cursor operation. I think this represents a bug + // in the LMDB bindings, as shared references should be immutable. if let Some((Some(key), value)) = self.cursor.get(None, None, MDB_GET_CURRENT).optional()? { - Ok(Some((Cow::Borrowed(key), Cow::Borrowed(value)))) + Ok(Some((Cow::Borrowed(key), Cow::Owned(value.to_vec())))) } else { Ok(None) } diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 0bb7c9c3ffe..19f2cd138de 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -33,6 +33,19 @@ impl Slasher { config.validate()?; let config = Arc::new(config); let db = SlasherDB::open(config.clone(), spec, log.clone())?; + Self::from_config_and_db(config, db, log) + } + + /// TESTING ONLY. + /// + /// Initialise a slasher database from an existing `db`. The caller must ensure that the + /// database's config matches the one provided. + pub fn from_config_and_db( + config: Arc, + db: SlasherDB, + log: Logger, + ) -> Result { + config.validate()?; let attester_slashings = Mutex::new(HashSet::new()); let proposer_slashings = Mutex::new(HashSet::new()); let attestation_queue = AttestationQueue::default(); @@ -48,6 +61,11 @@ impl Slasher { }) } + pub fn into_reset_db(self) -> Result, Error> { + self.db.reset()?; + Ok(self.db) + } + /// Harvest all attester slashings found, removing them from the slasher. 
pub fn get_attester_slashings(&self) -> HashSet> { std::mem::take(&mut self.attester_slashings.lock()) diff --git a/slasher/src/test_utils.rs b/slasher/src/test_utils.rs index 453d0e66670..8054c0ad59a 100644 --- a/slasher/src/test_utils.rs +++ b/slasher/src/test_utils.rs @@ -3,8 +3,9 @@ use std::sync::Arc; use types::{ indexed_attestation::{IndexedAttestationBase, IndexedAttestationElectra}, AggregateSignature, AttestationData, AttesterSlashing, AttesterSlashingBase, - AttesterSlashingElectra, BeaconBlockHeader, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, - IndexedAttestation, MainnetEthSpec, Signature, SignedBeaconBlockHeader, Slot, + AttesterSlashingElectra, BeaconBlockHeader, ChainSpec, Checkpoint, Epoch, EthSpec, + FixedBytesExtended, Hash256, IndexedAttestation, MainnetEthSpec, Signature, + SignedBeaconBlockHeader, Slot, }; pub type E = MainnetEthSpec; diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 0aaaa63f65c..ff234dff3fe 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -7,10 +7,11 @@ use slasher::{ block, chain_spec, indexed_att, slashed_validators_from_attestations, slashed_validators_from_slashings, E, }, - Config, Slasher, + Config, Slasher, SlasherDB, }; use std::cmp::max; -use tempfile::tempdir; +use std::sync::Arc; +use tempfile::{tempdir, TempDir}; use types::{Epoch, EthSpec}; #[derive(Debug)] @@ -32,7 +33,16 @@ impl Default for TestConfig { } } -fn random_test(seed: u64, test_config: TestConfig) { +fn make_db() -> (TempDir, SlasherDB) { + let tempdir = tempdir().unwrap(); + let initial_config = Arc::new(Config::new(tempdir.path().into())); + let logger = test_logger(); + let spec = chain_spec(); + let db = SlasherDB::open(initial_config.clone(), spec, logger).unwrap(); + (tempdir, db) +} + +fn random_test(seed: u64, mut db: SlasherDB, test_config: TestConfig) -> SlasherDB { let check_slashings = test_config.check_slashings; let num_validators = test_config.num_validators; let max_attestations = 
test_config.max_attestations; @@ -40,18 +50,17 @@ fn random_test(seed: u64, test_config: TestConfig) { println!("Running with seed {}", seed); let mut rng = StdRng::seed_from_u64(seed); - let tempdir = tempdir().unwrap(); - - let mut config = Config::new(tempdir.path().into()); + let mut config = Config::new(db.get_config().database_path.clone()); config.validator_chunk_size = 1 << rng.gen_range(1..4); let chunk_size_exponent = rng.gen_range(1..4); config.chunk_size = 1 << chunk_size_exponent; config.history_length = 1 << rng.gen_range(chunk_size_exponent..chunk_size_exponent + 3); - let spec = chain_spec(); + let config = Arc::new(config); + db.update_config(config.clone()); - let slasher = Slasher::::open(config.clone(), spec, test_logger()).unwrap(); + let slasher = Slasher::::from_config_and_db(config.clone(), db, test_logger()).unwrap(); let validators = (0..num_validators as u64).collect::>(); @@ -121,7 +130,7 @@ fn random_test(seed: u64, test_config: TestConfig) { } if !check_slashings { - return; + return slasher.into_reset_db().unwrap(); } slasher.process_queued(current_epoch).unwrap(); @@ -131,6 +140,9 @@ fn random_test(seed: u64, test_config: TestConfig) { let slashed_validators = slashed_validators_from_slashings(&slashings); let expected_slashed_validators = slashed_validators_from_attestations(&attestations); assert_eq!(slashed_validators, expected_slashed_validators); + + // Return the database for reuse. + slasher.into_reset_db().unwrap() } // Fuzz-like test that runs forever on different seeds looking for crashes. 
@@ -138,8 +150,9 @@ fn random_test(seed: u64, test_config: TestConfig) { #[ignore] fn no_crash() { let mut rng = thread_rng(); + let (_tempdir, mut db) = make_db(); loop { - random_test(rng.gen(), TestConfig::default()); + db = random_test(rng.gen(), db, TestConfig::default()); } } @@ -148,9 +161,11 @@ fn no_crash() { #[ignore] fn no_crash_with_blocks() { let mut rng = thread_rng(); + let (_tempdir, mut db) = make_db(); loop { - random_test( + db = random_test( rng.gen(), + db, TestConfig { add_blocks: true, ..TestConfig::default() @@ -164,9 +179,11 @@ fn no_crash_with_blocks() { #[ignore] fn check_slashings() { let mut rng = thread_rng(); + let (_tempdir, mut db) = make_db(); loop { - random_test( + db = random_test( rng.gen(), + db, TestConfig { check_slashings: true, ..TestConfig::default() @@ -177,8 +194,10 @@ fn check_slashings() { #[test] fn check_slashings_example1() { + let (_tempdir, db) = make_db(); random_test( 1, + db, TestConfig { check_slashings: true, ..TestConfig::default() @@ -188,8 +207,10 @@ fn check_slashings_example1() { #[test] fn check_slashings_example2() { + let (_tempdir, db) = make_db(); random_test( 2, + db, TestConfig { check_slashings: true, max_attestations: 3, @@ -200,8 +221,10 @@ fn check_slashings_example2() { #[test] fn check_slashings_example3() { + let (_tempdir, db) = make_db(); random_test( 3, + db, TestConfig { check_slashings: true, max_attestations: 100, @@ -212,26 +235,37 @@ fn check_slashings_example3() { #[test] fn no_crash_example1() { - random_test(1, TestConfig::default()); + let (_tempdir, db) = make_db(); + random_test(1, db, TestConfig::default()); } #[test] fn no_crash_example2() { - random_test(2, TestConfig::default()); + let (_tempdir, db) = make_db(); + random_test(2, db, TestConfig::default()); } #[test] fn no_crash_example3() { - random_test(3, TestConfig::default()); + let (_tempdir, db) = make_db(); + random_test(3, db, TestConfig::default()); } #[test] fn no_crash_blocks_example1() { + let (_tempdir, db) 
= make_db(); random_test( 1, + db, TestConfig { add_blocks: true, ..TestConfig::default() }, ); } + +#[test] +fn no_crash_aug_24() { + let (_tempdir, db) = make_db(); + random_test(13519442335106054152, db, TestConfig::default()); +} diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index fc4614f5d45..6012283e111 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -11,11 +11,11 @@ fake_crypto = ["bls/fake_crypto"] portable = ["beacon_chain/portable"] [dependencies] +alloy-primitives = { workspace = true } bls = { workspace = true } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } derivative = { workspace = true } -ethereum-types = { workspace = true } hex = { workspace = true } kzg = { workspace = true } rayon = { workspace = true } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 5dc3d2a0404..0aa5f1d38db 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-alpha.2 +TESTS_TAG := v1.5.0-alpha.5 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index e1a308f7a40..9495047e7f9 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -26,13 +26,15 @@ "tests/.*/.*/ssz_static/Eth1Block/", "tests/.*/.*/ssz_static/PowBlock/", # light_client - "tests/.*/.*/light_client", + "tests/.*/.*/light_client/single_merkle_proof", + "tests/.*/.*/light_client/sync", + "tests/.*/electra/light_client/update_ranking", # LightClientStore "tests/.*/.*/ssz_static/LightClientStore", # LightClientSnapshot "tests/.*/.*/ssz_static/LightClientSnapshot", # One of the EF researchers likes to pack the tarballs on a Mac - ".*\.DS_Store.*", + ".*\\.DS_Store.*", # More Mac weirdness. 
"tests/mainnet/bellatrix/operations/deposit/pyspec_tests/deposit_with_previous_fork_version__valid_ineffective/._meta.yaml", # bls tests are moved to bls12-381-tests directory @@ -48,9 +50,7 @@ "tests/.*/electra/ssz_static/LightClientUpdate", "tests/.*/electra/ssz_static/LightClientFinalityUpdate", "tests/.*/electra/ssz_static/LightClientBootstrap", - # TODO(electra) re-enable as DepositRequest when EF tests are updated - "tests/.*/electra/operations/deposit_receipt", - "tests/.*/electra/ssz_static/DepositReceipt" + "tests/.*/electra/merkle_proof", ] diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index f328fa64047..63274ee0c03 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -1,6 +1,6 @@ use super::*; use rayon::prelude::*; -use std::fmt::Debug; +use std::fmt::{Debug, Display, Formatter}; use std::path::{Path, PathBuf}; use types::ForkName; @@ -18,12 +18,17 @@ mod fork; mod fork_choice; mod genesis_initialization; mod genesis_validity; +mod get_custody_columns; mod kzg_blob_to_kzg_commitment; mod kzg_compute_blob_kzg_proof; +mod kzg_compute_cells_and_kzg_proofs; mod kzg_compute_kzg_proof; +mod kzg_recover_cells_and_kzg_proofs; mod kzg_verify_blob_kzg_proof; mod kzg_verify_blob_kzg_proof_batch; +mod kzg_verify_cell_kzg_proof_batch; mod kzg_verify_kzg_proof; +mod light_client_verify_is_better_update; mod merkle_proof_validity; mod operations; mod rewards; @@ -48,12 +53,17 @@ pub use epoch_processing::*; pub use fork::ForkTest; pub use genesis_initialization::*; pub use genesis_validity::*; +pub use get_custody_columns::*; pub use kzg_blob_to_kzg_commitment::*; pub use kzg_compute_blob_kzg_proof::*; +pub use kzg_compute_cells_and_kzg_proofs::*; pub use kzg_compute_kzg_proof::*; +pub use kzg_recover_cells_and_kzg_proofs::*; pub use kzg_verify_blob_kzg_proof::*; pub use kzg_verify_blob_kzg_proof_batch::*; +pub use kzg_verify_cell_kzg_proof_batch::*; pub use kzg_verify_kzg_proof::*; +pub use 
light_client_verify_is_better_update::*; pub use merkle_proof_validity::*; pub use operations::*; pub use rewards::RewardsTest; @@ -64,6 +74,19 @@ pub use ssz_generic::*; pub use ssz_static::*; pub use transition::TransitionTest; +#[derive(Debug, PartialEq)] +pub enum FeatureName { + Eip7594, +} + +impl Display for FeatureName { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + FeatureName::Eip7594 => f.write_str("eip7594"), + } + } +} + pub trait LoadCase: Sized { /// Load the test case from a test case directory. fn load_from_dir(_path: &Path, _fork_name: ForkName) -> Result; @@ -84,6 +107,13 @@ pub trait Case: Debug + Sync { true } + /// Whether or not this test exists for the given `feature_name`. + /// + /// Returns `true` by default. + fn is_enabled_for_feature(_feature_name: FeatureName) -> bool { + true + } + /// Execute a test and return the result. /// /// `case_index` reports the index of the case in the set of test cases. It is not strictly diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index 8b253919805..e16f5b257f9 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -17,7 +17,7 @@ macro_rules! uint_wrapper { type Error = String; fn try_from(s: String) -> Result { - <$wrapped_type>::from_dec_str(&s) + <$wrapped_type>::from_str_radix(&s, 10) .map(|x| Self { x }) .map_err(|e| format!("{:?}", e)) } @@ -43,8 +43,8 @@ macro_rules! uint_wrapper { }; } -uint_wrapper!(TestU128, ethereum_types::U128); -uint_wrapper!(TestU256, ethereum_types::U256); +uint_wrapper!(DecimalU128, alloy_primitives::U128); +uint_wrapper!(DecimalU256, alloy_primitives::U256); /// Trait for types that can be used in SSZ static tests. 
pub trait SszStaticType: diff --git a/testing/ef_tests/src/cases/get_custody_columns.rs b/testing/ef_tests/src/cases/get_custody_columns.rs new file mode 100644 index 00000000000..d31e72a473d --- /dev/null +++ b/testing/ef_tests/src/cases/get_custody_columns.rs @@ -0,0 +1,44 @@ +use super::*; +use alloy_primitives::U256; +use serde::Deserialize; +use std::marker::PhantomData; +use types::DataColumnSubnetId; + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct GetCustodyColumns { + pub node_id: String, + pub custody_subnet_count: u64, + pub result: Vec, + #[serde(skip)] + _phantom: PhantomData, +} + +impl LoadCase for GetCustodyColumns { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + decode::yaml_decode_file(path.join("meta.yaml").as_path()) + } +} + +impl Case for GetCustodyColumns { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let spec = E::default_spec(); + let node_id = U256::from_str_radix(&self.node_id, 10) + .map_err(|e| Error::FailedToParseTest(format!("{e:?}")))?; + let raw_node_id = node_id.to_be_bytes::<32>(); + let computed = DataColumnSubnetId::compute_custody_columns::( + raw_node_id, + self.custody_subnet_count, + &spec, + ) + .collect::>(); + let expected = &self.result; + if computed == *expected { + Ok(()) + } else { + Err(Error::NotEqual(format!( + "Got {computed:?}\nExpected {expected:?}" + ))) + } + } +} diff --git a/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs index aa48c127b20..fa16a5fcb7a 100644 --- a/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs +++ b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs @@ -31,9 +31,12 @@ impl Case for KZGBlobToKZGCommitment { fork_name == ForkName::Deneb } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { - let kzg = get_kzg()?; + fn is_enabled_for_feature(feature_name: 
FeatureName) -> bool { + feature_name != FeatureName::Eip7594 + } + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let kzg = get_kzg(); let commitment = parse_blob::(&self.input.blob).and_then(|blob| { blob_to_kzg_commitment::(&kzg, &blob).map_err(|e| { Error::InternalError(format!("Failed to compute kzg commitment: {:?}", e)) diff --git a/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs index 71e1ff8e23d..694013e2513 100644 --- a/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs @@ -32,6 +32,10 @@ impl Case for KZGComputeBlobKZGProof { fork_name == ForkName::Deneb } + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name != FeatureName::Eip7594 + } + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGComputeBlobKZGProofInput| -> Result<_, Error> { let blob = parse_blob::(&input.blob)?; @@ -39,7 +43,7 @@ impl Case for KZGComputeBlobKZGProof { Ok((blob, commitment)) }; - let kzg = get_kzg()?; + let kzg = get_kzg(); let proof = parse_input(&self.input).and_then(|(blob, commitment)| { compute_blob_kzg_proof::(&kzg, &blob, commitment) .map_err(|e| Error::InternalError(format!("Failed to compute kzg proof: {:?}", e))) diff --git a/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs new file mode 100644 index 00000000000..2a9f8ceeef3 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs @@ -0,0 +1,67 @@ +use super::*; +use crate::case_result::compare_result; +use kzg::CellsAndKzgProofs; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGComputeCellsAndKzgProofsInput { + pub blob: String, +} + +#[derive(Debug, Clone, 
Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGComputeCellsAndKZGProofs { + pub input: KZGComputeCellsAndKzgProofsInput, + pub output: Option<(Vec, Vec)>, + #[serde(skip)] + _phantom: PhantomData, +} + +impl LoadCase for KZGComputeCellsAndKZGProofs { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl Case for KZGComputeCellsAndKZGProofs { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let cells_and_proofs = parse_blob::(&self.input.blob).and_then(|blob| { + let blob = blob.as_ref().try_into().map_err(|e| { + Error::InternalError(format!("Failed to convert blob to kzg blob: {e:?}")) + })?; + let kzg = get_kzg(); + kzg.compute_cells_and_proofs(blob).map_err(|e| { + Error::InternalError(format!("Failed to compute cells and kzg proofs: {e:?}")) + }) + }); + + let expected = self.output.as_ref().and_then(|(cells, proofs)| { + parse_cells_and_proofs(cells, proofs) + .map(|(cells, proofs)| { + ( + cells + .try_into() + .map_err(|e| { + Error::FailedToParseTest(format!("Failed to parse cells: {e:?}")) + }) + .unwrap(), + proofs + .try_into() + .map_err(|e| { + Error::FailedToParseTest(format!("Failed to parse proofs: {e:?}")) + }) + .unwrap(), + ) + }) + .ok() + }); + + compare_result::(&cells_and_proofs, &expected) + } +} diff --git a/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs index 98bb7492491..6f53038f28e 100644 --- a/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs @@ -39,6 +39,10 @@ impl Case for KZGComputeKZGProof { fork_name == ForkName::Deneb } + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name != FeatureName::Eip7594 + } + fn result(&self, _case_index: usize, 
_fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGComputeKZGProofInput| -> Result<_, Error> { let blob = parse_blob::(&input.blob)?; @@ -46,7 +50,7 @@ impl Case for KZGComputeKZGProof { Ok((blob, z)) }; - let kzg = get_kzg()?; + let kzg = get_kzg(); let proof = parse_input(&self.input).and_then(|(blob, z)| { compute_kzg_proof::(&kzg, &blob, z) .map_err(|e| Error::InternalError(format!("Failed to compute kzg proof: {:?}", e))) diff --git a/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs new file mode 100644 index 00000000000..10cc866fbe0 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs @@ -0,0 +1,71 @@ +use super::*; +use crate::case_result::compare_result; +use kzg::CellsAndKzgProofs; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGRecoverCellsAndKzgProofsInput { + pub cell_indices: Vec, + pub cells: Vec, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGRecoverCellsAndKZGProofs { + pub input: KZGRecoverCellsAndKzgProofsInput, + pub output: Option<(Vec, Vec)>, + #[serde(skip)] + _phantom: PhantomData, +} + +impl LoadCase for KZGRecoverCellsAndKZGProofs { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl Case for KZGRecoverCellsAndKZGProofs { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: &KZGRecoverCellsAndKzgProofsInput| { + let cells = input + .cells + .iter() + .map(|s| parse_cell(s)) + .collect::, Error>>()?; + + Ok((cells, input.cell_indices.clone())) + }; + + let result: Result<_, Error> = + 
parse_input(&self.input).and_then(|(input_cells, cell_indices)| { + let input_cells_ref: Vec<_> = input_cells.iter().map(|cell| &**cell).collect(); + let kzg = get_kzg(); + let (cells, proofs) = kzg + .recover_cells_and_compute_kzg_proofs( + cell_indices.as_slice(), + input_cells_ref.as_slice(), + ) + .map_err(|e| { + Error::InternalError(format!( + "Failed to recover cells and kzg proofs: {e:?}" + )) + })?; + + Ok((cells, proofs)) + }); + + let expected = self + .output + .as_ref() + .and_then(|(cells, proofs)| parse_cells_and_proofs(cells, proofs).ok()) + .map(|(cells, proofs)| (cells.try_into().unwrap(), proofs.try_into().unwrap())); + + compare_result::(&result, &expected) + } +} diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs index f68f0fd7ed0..f9b3009fded 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs @@ -2,16 +2,52 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::validate_blob; use eth2_network_config::TRUSTED_SETUP_BYTES; -use kzg::{Error as KzgError, Kzg, KzgCommitment, KzgProof, TrustedSetup}; +use kzg::{Cell, Error as KzgError, Kzg, KzgCommitment, KzgProof, TrustedSetup}; use serde::Deserialize; use std::marker::PhantomData; +use std::sync::Arc; +use std::sync::LazyLock; use types::Blob; -pub fn get_kzg() -> Result { +static KZG: LazyLock> = LazyLock::new(|| { let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) - .map_err(|e| Error::InternalError(format!("Failed to initialize kzg: {:?}", e)))?; - Kzg::new_from_trusted_setup(trusted_setup) + .map_err(|e| Error::InternalError(format!("Failed to initialize trusted setup: {:?}", e))) + .expect("failed to initialize trusted setup"); + let kzg = Kzg::new_from_trusted_setup_das_enabled(trusted_setup) .map_err(|e| Error::InternalError(format!("Failed to initialize kzg: {:?}", e))) + 
.expect("failed to initialize kzg"); + Arc::new(kzg) +}); + +pub fn get_kzg() -> Arc { + Arc::clone(&KZG) +} + +pub fn parse_cells_and_proofs( + cells: &[String], + proofs: &[String], +) -> Result<(Vec, Vec), Error> { + let cells = cells + .iter() + .map(|s| parse_cell(s.as_str())) + .collect::, Error>>()?; + + let proofs = proofs + .iter() + .map(|s| parse_proof(s.as_str())) + .collect::, Error>>()?; + + Ok((cells, proofs)) +} + +pub fn parse_cell(cell: &str) -> Result { + hex::decode(strip_0x(cell)?) + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse cell: {:?}", e))) + .and_then(|bytes| { + bytes + .try_into() + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse cell: {:?}", e))) + }) } pub fn parse_proof(proof: &str) -> Result { @@ -80,6 +116,10 @@ impl Case for KZGVerifyBlobKZGProof { fork_name == ForkName::Deneb } + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name != FeatureName::Eip7594 + } + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGVerifyBlobKZGProofInput| -> Result<(Blob, KzgCommitment, KzgProof), Error> { let blob = parse_blob::(&input.blob)?; @@ -88,7 +128,7 @@ impl Case for KZGVerifyBlobKZGProof { Ok((blob, commitment, proof)) }; - let kzg = get_kzg()?; + let kzg = get_kzg(); let result = parse_input(&self.input).and_then(|(blob, commitment, proof)| { match validate_blob::(&kzg, &blob, commitment, proof) { Ok(_) => Ok(true), diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs index ae5caedf069..80cd0a28496 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs @@ -33,6 +33,10 @@ impl Case for KZGVerifyBlobKZGProofBatch { fork_name == ForkName::Deneb } + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name != FeatureName::Eip7594 + } 
+ fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGVerifyBlobKZGProofBatchInput| -> Result<_, Error> { let blobs = input @@ -53,8 +57,7 @@ impl Case for KZGVerifyBlobKZGProofBatch { Ok((commitments, blobs, proofs)) }; - let kzg = get_kzg()?; - + let kzg = get_kzg(); let result = parse_input(&self.input).and_then( |(commitments, blobs, proofs)| match validate_blobs::( diff --git a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs new file mode 100644 index 00000000000..5887d764cae --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs @@ -0,0 +1,66 @@ +use super::*; +use crate::case_result::compare_result; +use kzg::{Bytes48, Error as KzgError}; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGVerifyCellKZGProofBatchInput { + pub commitments: Vec, + pub cell_indices: Vec, + pub cells: Vec, + pub proofs: Vec, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGVerifyCellKZGProofBatch { + pub input: KZGVerifyCellKZGProofBatchInput, + pub output: Option, + #[serde(skip)] + _phantom: PhantomData, +} + +impl LoadCase for KZGVerifyCellKZGProofBatch { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl Case for KZGVerifyCellKZGProofBatch { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: &KZGVerifyCellKZGProofBatchInput| -> Result<_, Error> { + let (cells, proofs) = parse_cells_and_proofs(&input.cells, &input.proofs)?; + let commitments = input + .commitments + .iter() + .map(|s| parse_commitment(s)) + .collect::, 
_>>()?; + + Ok((cells, proofs, input.cell_indices.clone(), commitments)) + }; + + let result = + parse_input(&self.input).and_then(|(cells, proofs, cell_indices, commitments)| { + let proofs: Vec = proofs.iter().map(|&proof| proof.into()).collect(); + let commitments: Vec = commitments.iter().map(|&c| c.into()).collect(); + let cells = cells.iter().map(|c| c.as_ref()).collect::>(); + let kzg = get_kzg(); + match kzg.verify_cell_proof_batch(&cells, &proofs, cell_indices, &commitments) { + Ok(_) => Ok(true), + Err(KzgError::KzgVerificationFailed) => Ok(false), + Err(e) => Err(Error::InternalError(format!( + "Failed to validate cells: {:?}", + e + ))), + } + }); + + compare_result::(&result, &self.output) + } +} diff --git a/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs index e395558e0e1..ed7583dbd0a 100644 --- a/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs @@ -33,6 +33,10 @@ impl Case for KZGVerifyKZGProof { fork_name == ForkName::Deneb } + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name != FeatureName::Eip7594 + } + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGVerifyKZGProofInput| -> Result<_, Error> { let commitment = parse_commitment(&input.commitment)?; @@ -42,7 +46,7 @@ impl Case for KZGVerifyKZGProof { Ok((commitment, z, y, proof)) }; - let kzg = get_kzg()?; + let kzg = get_kzg(); let result = parse_input(&self.input).and_then(|(commitment, z, y, proof)| { verify_kzg_proof::(&kzg, commitment, proof, z, y) .map_err(|e| Error::InternalError(format!("Failed to validate proof: {:?}", e))) diff --git a/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs b/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs new file mode 100644 index 00000000000..de281d906c1 --- /dev/null +++ 
b/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs @@ -0,0 +1,110 @@ +use super::*; +use decode::ssz_decode_light_client_update; +use serde::Deserialize; +use types::{LightClientUpdate, Slot}; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct LightClientVerifyIsBetterUpdate { + light_client_updates: Vec>, +} + +#[derive(Debug, Clone, Default, Deserialize)] +pub struct Metadata { + updates_count: u64, +} + +impl LoadCase for LightClientVerifyIsBetterUpdate { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let mut light_client_updates = vec![]; + let metadata: Metadata = decode::yaml_decode_file(path.join("meta.yaml").as_path())?; + for index in 0..metadata.updates_count { + let light_client_update = ssz_decode_light_client_update( + &path.join(format!("updates_{}.ssz_snappy", index)), + &fork_name, + )?; + light_client_updates.push(light_client_update); + } + + Ok(Self { + light_client_updates, + }) + } +} + +impl Case for LightClientVerifyIsBetterUpdate { + // Light client updates in `self.light_client_updates` are ordered in descending precedence + // where the update at index = 0 is considered the best update. This test iterates through + // all light client updates in a nested loop to make all possible comparisons. If a light client update + // at index `i`` is considered 'better' than a light client update at index `j`` when `i > j`, this test fails. 
+ fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let spec = fork_name.make_genesis_spec(E::default_spec()); + for (i, ith_light_client_update) in self.light_client_updates.iter().enumerate() { + for (j, jth_light_client_update) in self.light_client_updates.iter().enumerate() { + eprintln!("{i} {j}"); + if i == j { + continue; + } + + let is_better_update = ith_light_client_update + .is_better_light_client_update(jth_light_client_update, &spec) + .unwrap(); + + let ith_summary = + LightClientUpdateSummary::from_update(ith_light_client_update, &spec); + let jth_summary = + LightClientUpdateSummary::from_update(jth_light_client_update, &spec); + + let (best_index, other_index, best_update, other_update, failed) = if i < j { + // i is better, so is_better_update must return false + (i, j, ith_summary, jth_summary, is_better_update) + } else { + // j is better, so is_better must return true + (j, i, jth_summary, ith_summary, !is_better_update) + }; + + if failed { + eprintln!("is_better_update: {is_better_update}"); + eprintln!("index {best_index} update {best_update:?}"); + eprintln!("index {other_index} update {other_update:?}"); + eprintln!( + "update at index {best_index} must be considered better than update at index {other_index}" + ); + return Err(Error::FailedComparison(format!( + "update at index {best_index} must be considered better than update at index {other_index}" + ))); + } + } + } + + Ok(()) + } +} + +#[derive(Debug)] +#[allow(dead_code)] +struct LightClientUpdateSummary { + participants: usize, + supermajority: bool, + relevant_sync_committee: bool, + has_finality: bool, + has_sync_committee_finality: bool, + header_slot: Slot, + signature_slot: Slot, +} + +impl LightClientUpdateSummary { + fn from_update(update: &LightClientUpdate, spec: &ChainSpec) -> Self { + let max_participants = update.sync_aggregate().sync_committee_bits.len(); + let participants = update.sync_aggregate().sync_committee_bits.num_set_bits(); + 
Self { + participants, + supermajority: participants * 3 > max_participants * 2, + relevant_sync_committee: update.is_sync_committee_update(spec).unwrap(), + has_finality: !update.is_finality_branch_empty(), + has_sync_committee_finality: update.has_sync_committee_finality(spec).unwrap(), + header_slot: update.attested_header_slot(), + signature_slot: *update.signature_slot(), + } + } +} diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 8d5c0687753..b68bbdc5d39 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -3,7 +3,8 @@ use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use tree_hash::Hash256; use types::{ - BeaconBlockBody, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconState, FullPayload, + BeaconBlockBody, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconState, FixedVector, + FullPayload, Unsigned, }; #[derive(Debug, Clone, Deserialize)] @@ -81,12 +82,18 @@ impl Case for MerkleProofValidity { } } -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] +#[derive(Debug, Clone)] pub struct KzgInclusionMerkleProofValidity { pub metadata: Option, pub block: BeaconBlockBody, pub merkle_proof: MerkleProof, + pub proof_type: KzgInclusionProofType, +} + +#[derive(Debug, Clone)] +pub enum KzgInclusionProofType { + Single, + List, } impl LoadCase for KzgInclusionMerkleProofValidity { @@ -115,21 +122,33 @@ impl LoadCase for KzgInclusionMerkleProofValidity { None }; + let file_name = path + .file_name() + .and_then(|file_name| file_name.to_str()) + .ok_or(Error::InternalError( + "failed to read file name from path".to_string(), + ))?; + + let proof_type = if file_name.starts_with("blob_kzg_commitments") { + KzgInclusionProofType::List + } else { + KzgInclusionProofType::Single + }; + Ok(Self { metadata, block, merkle_proof, + proof_type, }) } } -impl Case 
for KzgInclusionMerkleProofValidity { - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { - let Ok(proof) = self.block.to_ref().kzg_commitment_merkle_proof(0) else { - return Err(Error::FailedToParseTest( - "Could not retrieve merkle proof".to_string(), - )); - }; +impl KzgInclusionMerkleProofValidity { + fn verify_kzg_inclusion_proof( + &self, + proof: FixedVector, + ) -> Result<(), Error> { let proof_len = proof.len(); let branch_len = self.merkle_proof.branch.len(); if proof_len != branch_len { @@ -153,3 +172,29 @@ impl Case for KzgInclusionMerkleProofValidity { Ok(()) } } +impl Case for KzgInclusionMerkleProofValidity { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + match self.proof_type { + KzgInclusionProofType::Single => { + let proof = self + .block + .to_ref() + .kzg_commitment_merkle_proof(0) + .map_err(|e| { + Error::FailedToParseTest(format!("Could not retrieve merkle proof: {e:?}")) + })?; + self.verify_kzg_inclusion_proof(proof) + } + KzgInclusionProofType::List => { + let proof = self + .block + .to_ref() + .kzg_commitments_merkle_proof() + .map_err(|e| { + Error::FailedToParseTest(format!("Could not retrieve merkle proof: {e:?}")) + })?; + self.verify_kzg_inclusion_proof(proof) + } + } + } +} diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 0af2c818271..24184441047 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -7,7 +7,7 @@ use ssz::Decode; use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::per_block_processing::process_operations::{ - process_consolidations, process_deposit_requests, process_execution_layer_withdrawal_requests, + process_consolidation_requests, process_deposit_requests, process_withdrawal_requests, }; use 
state_processing::{ per_block_processing::{ @@ -25,9 +25,9 @@ use std::fmt::Debug; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconState, - BlindedPayload, Deposit, DepositRequest, ExecutionLayerWithdrawalRequest, ExecutionPayload, - FullPayload, ProposerSlashing, SignedBlsToExecutionChange, SignedConsolidation, - SignedVoluntaryExit, SyncAggregate, + BlindedPayload, ConsolidationRequest, Deposit, DepositRequest, ExecutionPayload, FullPayload, + ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, SyncAggregate, + WithdrawalRequest, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -445,9 +445,9 @@ impl Operation for SignedBlsToExecutionChange { } } -impl Operation for ExecutionLayerWithdrawalRequest { +impl Operation for WithdrawalRequest { fn handler_name() -> String { - "execution_layer_withdrawal_request".into() + "withdrawal_request".into() } fn is_enabled_for_fork(fork_name: ForkName) -> bool { @@ -464,7 +464,8 @@ impl Operation for ExecutionLayerWithdrawalRequest { spec: &ChainSpec, _extra: &Operations, ) -> Result<(), BlockProcessingError> { - process_execution_layer_withdrawal_requests(state, &[self.clone()], spec) + state.update_pubkey_cache()?; + process_withdrawal_requests(state, &[self.clone()], spec) } } @@ -491,9 +492,9 @@ impl Operation for DepositRequest { } } -impl Operation for SignedConsolidation { +impl Operation for ConsolidationRequest { fn handler_name() -> String { - "consolidation".into() + "consolidation_request".into() } fn is_enabled_for_fork(fork_name: ForkName) -> bool { @@ -510,7 +511,8 @@ impl Operation for SignedConsolidation { spec: &ChainSpec, _extra: &Operations, ) -> Result<(), BlockProcessingError> { - process_consolidations(state, &[self.clone()], VerifySignatures::True, spec) + state.update_pubkey_cache()?; + process_consolidation_requests(state, &[self.clone()], spec) } } diff --git 
a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 7933fc65c70..3dc2f179684 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -1,7 +1,7 @@ #![allow(non_snake_case)] use super::*; -use crate::cases::common::{SszStaticType, TestU128, TestU256}; +use crate::cases::common::{DecimalU128, DecimalU256, SszStaticType}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::{log_file_access, snappy_decode_file, yaml_decode_file}; use serde::{de::Error as SerdeError, Deserialize, Deserializer}; @@ -56,8 +56,8 @@ macro_rules! type_dispatch { "uint16" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u16>, $($rest)*), "uint32" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u32>, $($rest)*), "uint64" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u64>, $($rest)*), - "uint128" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* TestU128>, $($rest)*), - "uint256" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* TestU256>, $($rest)*), + "uint128" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* DecimalU128>, $($rest)*), + "uint256" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* DecimalU256>, $($rest)*), _ => Err(Error::FailedToParseTest(format!("unsupported: {}", $value))), } }; @@ -231,7 +231,7 @@ fn ssz_generic_test(path: &Path) -> R check_serialization(&value, &serialized, T::from_ssz_bytes)?; if let Some(ref meta) = meta { - check_tree_hash(&meta.root, value.tree_hash_root().as_bytes())?; + check_tree_hash(&meta.root, value.tree_hash_root().as_slice())?; } } // Invalid diff --git a/testing/ef_tests/src/cases/ssz_static.rs b/testing/ef_tests/src/cases/ssz_static.rs index e17aa469bfc..c80977a8ac7 100644 --- a/testing/ef_tests/src/cases/ssz_static.rs +++ b/testing/ef_tests/src/cases/ssz_static.rs @@ -104,7 +104,7 @@ 
pub fn check_tree_hash(expected_str: &str, actual_root: &[u8]) -> Result<(), Err impl Case for SszStatic { fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { check_serialization(&self.value, &self.serialized, T::from_ssz_bytes)?; - check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?; + check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_slice())?; Ok(()) } } @@ -118,7 +118,7 @@ impl Case for SszStaticTHC> { let mut state = self.value.clone(); let cached_tree_hash_root = state.update_tree_hash_cache().unwrap(); - check_tree_hash(&self.roots.root, cached_tree_hash_root.as_bytes())?; + check_tree_hash(&self.roots.root, cached_tree_hash_root.as_slice())?; Ok(()) } @@ -130,7 +130,7 @@ impl Case for SszStaticWithSpec> { check_serialization(&self.value, &self.serialized, |bytes| { BeaconBlock::from_ssz_bytes(bytes, spec) })?; - check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?; + check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_slice())?; Ok(()) } } @@ -141,7 +141,7 @@ impl Case for SszStaticWithSpec> { check_serialization(&self.value, &self.serialized, |bytes| { SignedBeaconBlock::from_ssz_bytes(bytes, spec) })?; - check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?; + check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_slice())?; Ok(()) } } diff --git a/testing/ef_tests/src/decode.rs b/testing/ef_tests/src/decode.rs index 51ab682f3dc..757b9bf3c43 100644 --- a/testing/ef_tests/src/decode.rs +++ b/testing/ef_tests/src/decode.rs @@ -5,7 +5,7 @@ use std::fs::{self}; use std::io::Write; use std::path::Path; use std::path::PathBuf; -use types::BeaconState; +use types::{BeaconState, LightClientUpdate}; /// See `log_file_access` for details. 
const ACCESSED_FILE_LOG_FILENAME: &str = ".accessed_file_log.txt"; @@ -95,3 +95,13 @@ pub fn ssz_decode_state( log_file_access(path); ssz_decode_file_with(path, |bytes| BeaconState::from_ssz_bytes(bytes, spec)) } + +pub fn ssz_decode_light_client_update( + path: &Path, + fork_name: &ForkName, +) -> Result, Error> { + log_file_access(path); + ssz_decode_file_with(path, |bytes| { + LightClientUpdate::from_ssz_bytes(bytes, fork_name) + }) +} diff --git a/testing/ef_tests/src/error.rs b/testing/ef_tests/src/error.rs index c5795777ada..389308377c7 100644 --- a/testing/ef_tests/src/error.rs +++ b/testing/ef_tests/src/error.rs @@ -14,6 +14,8 @@ pub enum Error { SkippedKnownFailure, /// The test failed due to some internal error preventing the test from running. InternalError(String), + /// The test failed while making some comparison. + FailedComparison(String), } impl Error { @@ -26,6 +28,7 @@ impl Error { Error::SkippedBls => "SkippedBls", Error::SkippedKnownFailure => "SkippedKnownFailure", Error::InternalError(_) => "InternalError", + Error::FailedComparison(_) => "FailedComparison", } } diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 410a37e7682..dacaba1dcab 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -1,12 +1,15 @@ use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; -use crate::type_name; use crate::type_name::TypeName; +use crate::{type_name, FeatureName}; use derivative::Derivative; use std::fs::{self, DirEntry}; use std::marker::PhantomData; use std::path::PathBuf; use types::{BeaconState, EthSpec, ForkName}; +const EIP7594_FORK: ForkName = ForkName::Deneb; +const EIP7594_TESTS: [&str; 4] = ["ssz_static", "merkle_proof", "networking", "kzg"]; + pub trait Handler { type Case: Case + LoadCase; @@ -28,10 +31,21 @@ pub trait Handler { Self::Case::is_enabled_for_fork(fork_name) } + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + 
Self::Case::is_enabled_for_feature(feature_name) + } + fn run(&self) { for fork_name in ForkName::list_all() { if !self.disabled_forks().contains(&fork_name) && self.is_enabled_for_fork(fork_name) { - self.run_for_fork(fork_name) + self.run_for_fork(fork_name); + + if fork_name == EIP7594_FORK + && EIP7594_TESTS.contains(&Self::runner_name()) + && self.is_enabled_for_feature(FeatureName::Eip7594) + { + self.run_for_feature(EIP7594_FORK, FeatureName::Eip7594); + } } } } @@ -81,6 +95,47 @@ pub trait Handler { ); crate::results::assert_tests_pass(&name, &handler_path, &results); } + + fn run_for_feature(&self, fork_name: ForkName, feature_name: FeatureName) { + let feature_name_str = feature_name.to_string(); + + let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("consensus-spec-tests") + .join("tests") + .join(Self::config_name()) + .join(&feature_name_str) + .join(Self::runner_name()) + .join(self.handler_name()); + + // Iterate through test suites + let as_directory = |entry: Result| -> Option { + entry + .ok() + .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap()) + }; + + let test_cases = fs::read_dir(&handler_path) + .unwrap_or_else(|e| panic!("handler dir {} exists: {:?}", handler_path.display(), e)) + .filter_map(as_directory) + .flat_map(|suite| fs::read_dir(suite.path()).expect("suite dir exists")) + .filter_map(as_directory) + .map(|test_case_dir| { + let path = test_case_dir.path(); + let case = Self::Case::load_from_dir(&path, fork_name).expect("test should load"); + (path, case) + }) + .collect(); + + let results = Cases { test_cases }.test_results(fork_name, Self::use_rayon()); + + let name = format!( + "{}/{}/{}", + feature_name_str, + Self::runner_name(), + self.handler_name() + ); + crate::results::assert_tests_pass(&name, &handler_path, &results); + } } macro_rules! 
bls_eth_handler { @@ -784,6 +839,86 @@ impl Handler for KZGVerifyKZGProofHandler { } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct GetCustodyColumnsHandler(PhantomData); + +impl Handler for GetCustodyColumnsHandler { + type Case = cases::GetCustodyColumns; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "networking" + } + + fn handler_name(&self) -> String { + "get_custody_columns".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGComputeCellsAndKZGProofHandler(PhantomData); + +impl Handler for KZGComputeCellsAndKZGProofHandler { + type Case = cases::KZGComputeCellsAndKZGProofs; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "compute_cells_and_kzg_proofs".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGVerifyCellKZGProofBatchHandler(PhantomData); + +impl Handler for KZGVerifyCellKZGProofBatchHandler { + type Case = cases::KZGVerifyCellKZGProofBatch; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "verify_cell_kzg_proof_batch".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGRecoverCellsAndKZGProofHandler(PhantomData); + +impl Handler for KZGRecoverCellsAndKZGProofHandler { + type Case = cases::KZGRecoverCellsAndKZGProofs; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "recover_cells_and_kzg_proofs".into() + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct MerkleProofValidityHandler(PhantomData); @@ -837,6 +972,32 @@ impl Handler for KzgInclusionMerkleProofValidityHandler(PhantomData); + +impl Handler for LightClientUpdateHandler { + type Case = 
cases::LightClientVerifyIsBetterUpdate; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "light_client" + } + + fn handler_name(&self) -> String { + "update_ranking".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Enabled in Altair + // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved + fork_name != ForkName::Base && fork_name != ForkName::Electra + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct OperationsHandler(PhantomData<(E, O)>); diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index e55551be701..e7367719d72 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,10 +1,11 @@ pub use case_result::CaseResult; pub use cases::WithdrawalsPayload; pub use cases::{ - Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, HistoricalSummariesUpdate, - InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates, - ParticipationRecordUpdates, PendingBalanceDeposits, PendingConsolidations, RandaoMixesReset, - RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, SyncCommitteeUpdates, + Case, EffectiveBalanceUpdates, Eth1DataReset, FeatureName, HistoricalRootsUpdate, + HistoricalSummariesUpdate, InactivityUpdates, JustificationAndFinalization, + ParticipationFlagUpdates, ParticipationRecordUpdates, PendingBalanceDeposits, + PendingConsolidations, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, + SlashingsReset, SyncCommitteeUpdates, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index d6ef873ead4..49de073d6ae 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -1,5 +1,4 @@ //! Mapping from types to canonical string identifiers used in testing. 
-use types::blob_sidecar::BlobIdentifier; use types::historical_summary::HistoricalSummary; use types::*; @@ -58,16 +57,18 @@ type_name_generic!(BeaconBlockBodyElectra, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); type_name!(BlobIdentifier); +type_name!(DataColumnIdentifier); type_name_generic!(BlobSidecar); +type_name_generic!(DataColumnSidecar); type_name!(Checkpoint); -type_name!(Consolidation); +type_name!(ConsolidationRequest); type_name_generic!(ContributionAndProof); type_name!(Deposit); type_name!(DepositData); type_name!(DepositMessage); type_name!(DepositRequest); type_name!(Eth1Data); -type_name!(ExecutionLayerWithdrawalRequest); +type_name!(WithdrawalRequest); type_name_generic!(ExecutionPayload); type_name_generic!(ExecutionPayloadBellatrix, "ExecutionPayload"); type_name_generic!(ExecutionPayloadCapella, "ExecutionPayload"); @@ -139,7 +140,6 @@ type_name_generic!(SignedAggregateAndProofBase, "SignedAggregateAndProof"); type_name_generic!(SignedAggregateAndProofElectra, "SignedAggregateAndProof"); type_name_generic!(SignedBeaconBlock); type_name!(SignedBeaconBlockHeader); -type_name!(SignedConsolidation); type_name_generic!(SignedContributionAndProof); type_name!(SignedVoluntaryExit); type_name!(SigningData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 10a57a6b45e..a677736d519 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1,7 +1,7 @@ #![cfg(feature = "ef_tests")] use ef_tests::*; -use types::{ExecutionLayerWithdrawalRequest, MainnetEthSpec, MinimalEthSpec, *}; +use types::*; // Check that the hand-computed multiplications on EthSpec are correctly computed. // This test lives here because one is most likely to muck these up during a spec update. 
@@ -93,24 +93,22 @@ fn operations_withdrawals() { } #[test] -fn operations_execution_layer_withdrawal_reqeusts() { - OperationsHandler::::default().run(); - OperationsHandler::::default().run(); +fn operations_withdrawal_requests() { + OperationsHandler::::default().run(); + OperationsHandler::::default().run(); } #[test] #[cfg(not(feature = "fake_crypto"))] fn operations_deposit_requests() { - //TODO(electra): re-enable mainnet once they update the name for this - // OperationsHandler::::default().run(); - // OperationsHandler::::default().run(); + OperationsHandler::::default().run(); + OperationsHandler::::default().run(); } #[test] fn operations_consolidations() { - OperationsHandler::::default().run(); - //TODO(electra): re-enable mainnet once they make tests for this - //OperationsHandler::::default().run(); + OperationsHandler::::default().run(); + OperationsHandler::::default().run(); } #[test] @@ -239,13 +237,14 @@ macro_rules! ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { - use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; - use types::blob_sidecar::BlobIdentifier; + use ef_tests::{ + FeatureName, Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler, + }; use types::historical_summary::HistoricalSummary; use types::{ - AttesterSlashingBase, AttesterSlashingElectra, Consolidation, - ExecutionLayerWithdrawalRequest, LightClientBootstrapAltair, PendingBalanceDeposit, - PendingPartialWithdrawal, *, + AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DepositRequest, + LightClientBootstrapAltair, PendingBalanceDeposit, PendingPartialWithdrawal, + WithdrawalRequest, *, }; ssz_static_test!(attestation_data, AttestationData); @@ -629,25 +628,38 @@ mod ssz_static { SszStaticHandler::::capella_and_later().run(); } + #[test] + fn data_column_sidecar() { + SszStaticHandler::, MinimalEthSpec>::deneb_only() + .run_for_feature(ForkName::Deneb, FeatureName::Eip7594);
+ SszStaticHandler::, MainnetEthSpec>::deneb_only() + .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + } + + #[test] + fn data_column_identifier() { + SszStaticHandler::::deneb_only() + .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + SszStaticHandler::::deneb_only() + .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + } + #[test] fn consolidation() { - SszStaticHandler::::electra_and_later().run(); - SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); } - // TODO(electra) re-enable when EF tests are updated - // #[test] - // fn deposit_request() { - // SszStaticHandler::::electra_and_later().run(); - // SszStaticHandler::::electra_and_later().run(); - // } + #[test] + fn deposit_request() { + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + } #[test] - fn execution_layer_withdrawal_request() { - SszStaticHandler::::electra_and_later() - .run(); - SszStaticHandler::::electra_and_later() - .run(); + fn withdrawal_request() { + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); } #[test] @@ -667,12 +679,6 @@ mod ssz_static { SszStaticHandler::::electra_and_later().run(); SszStaticHandler::::electra_and_later().run(); } - - #[test] - fn signed_consolidation() { - SszStaticHandler::::electra_and_later().run(); - SszStaticHandler::::electra_and_later().run(); - } } #[test] @@ -895,11 +901,34 @@ fn kzg_verify_kzg_proof() { KZGVerifyKZGProofHandler::::default().run(); } +#[test] +fn kzg_compute_cells_and_proofs() { + KZGComputeCellsAndKZGProofHandler::::default() + .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); +} + +#[test] +fn kzg_verify_cell_proof_batch() { + KZGVerifyCellKZGProofBatchHandler::::default() + .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); +} + +#[test] +fn kzg_recover_cells_and_proofs() { + 
KZGRecoverCellsAndKZGProofHandler::::default() + .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); +} + #[test] fn merkle_proof_validity() { MerkleProofValidityHandler::::default().run(); } +#[test] +fn light_client_update() { + LightClientUpdateHandler::::default().run(); +} + #[test] #[cfg(feature = "fake_crypto")] fn kzg_inclusion_merkle_proof_validity() { @@ -914,3 +943,11 @@ fn rewards() { RewardsHandler::::new(handler).run(); } } + +#[test] +fn get_custody_columns() { + GetCustodyColumnsHandler::::default() + .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + GetCustodyColumnsHandler::::default() + .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); +} diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 55a71605940..015a632ff40 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -19,8 +19,8 @@ use ethers_core::{ pub use ethers_providers::{Http, Middleware, Provider}; use std::time::Duration; use tokio::time::sleep; -use types::DepositData; use types::{test_utils::generate_deterministic_keypair, EthSpec, Hash256, Keypair, Signature}; +use types::{DepositData, FixedBytesExtended}; pub const DEPLOYER_ACCOUNTS_INDEX: usize = 0; pub const DEPOSIT_ACCOUNTS_INDEX: usize = 0; diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 43d24cd1237..159561d5dd8 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -14,8 +14,8 @@ execution_layer = { workspace = true } sensitive_url = { workspace = true } types = { workspace = true } unused_port = { workspace = true } -ethers-core = { workspace = true } ethers-providers = { workspace = true } +ethers-core = { workspace = true } deposit_contract = { workspace = true } reqwest = { workspace = true } hex = { workspace = true } diff --git a/testing/execution_engine_integration/src/test_rig.rs 
b/testing/execution_engine_integration/src/test_rig.rs index c7d5e704524..f3f5a72cb60 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -18,7 +18,7 @@ use tokio::time::sleep; use types::payload::BlockProductionVersion; use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, - ForkName, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, + FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(60); @@ -115,7 +115,7 @@ impl TestRig { let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); let mut spec = TEST_FORK.make_genesis_spec(MainnetEthSpec::default_spec()); - spec.terminal_total_difficulty = Uint256::zero(); + spec.terminal_total_difficulty = Uint256::ZERO; let fee_recipient = None; diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs index 62b77d5024b..fd458ad205d 100644 --- a/testing/execution_engine_integration/src/transactions.rs +++ b/testing/execution_engine_integration/src/transactions.rs @@ -3,7 +3,7 @@ use ethers_core::types::{ transaction::{eip2718::TypedTransaction, eip2930::AccessList}, Address, Bytes, Eip1559TransactionRequest, TransactionRequest, U256, }; -use types::{DepositData, EthSpec, Hash256, Keypair, Signature}; +use types::{DepositData, EthSpec, FixedBytesExtended, Hash256, Keypair, Signature}; /// Hardcoded deposit contract address based on sender address and nonce pub const DEPOSIT_CONTRACT_ADDRESS: &str = "64f43BEc7F86526686C931d65362bB8698872F90"; diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index 58637b92d9e..7f0f697d61d 100644 --- a/testing/state_transition_vectors/src/main.rs 
+++ b/testing/state_transition_vectors/src/main.rs @@ -13,7 +13,7 @@ use std::sync::LazyLock; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, EthSpec, Keypair, SignedBeaconBlock, }; -use types::{Hash256, MainnetEthSpec, Slot}; +use types::{FixedBytesExtended, Hash256, MainnetEthSpec, Slot}; type E = MainnetEthSpec; diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index 7321fc13849..db5c53e0ac6 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -27,3 +27,4 @@ eth2_network_config = { workspace = true } serde_json = { workspace = true } zip = { workspace = true } parking_lot = { workspace = true } +logging = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 13d92d2d855..f6ee01a4ba1 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -22,6 +22,7 @@ mod tests { }; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; + use logging::test_logger; use parking_lot::Mutex; use reqwest::Client; use serde::Serialize; @@ -318,7 +319,7 @@ mod tests { using_web3signer: bool, spec: ChainSpec, ) -> Self { - let log = environment::null_logger().unwrap(); + let log = test_logger(); let validator_dir = TempDir::new().unwrap(); let config = validator_client::Config::default(); diff --git a/validator_client/slashing_protection/src/attestation_tests.rs b/validator_client/slashing_protection/src/attestation_tests.rs index a162c4e150e..b577ccd9d85 100644 --- a/validator_client/slashing_protection/src/attestation_tests.rs +++ b/validator_client/slashing_protection/src/attestation_tests.rs @@ -2,7 +2,7 @@ use crate::test_utils::*; use crate::*; -use types::{AttestationData, Checkpoint, Epoch, Slot}; +use types::{AttestationData, Checkpoint, Epoch, FixedBytesExtended, Slot}; pub fn build_checkpoint(epoch_num: u64) -> Checkpoint { Checkpoint { diff --git 
a/validator_client/slashing_protection/src/bin/test_generator.rs b/validator_client/slashing_protection/src/bin/test_generator.rs index c95cb6917c5..ff5866f9866 100644 --- a/validator_client/slashing_protection/src/bin/test_generator.rs +++ b/validator_client/slashing_protection/src/bin/test_generator.rs @@ -7,7 +7,7 @@ use slashing_protection::SUPPORTED_INTERCHANGE_FORMAT_VERSION; use std::fs::{self, File}; use std::io::Write; use std::path::Path; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, FixedBytesExtended, Hash256, Slot}; fn metadata(genesis_validators_root: Hash256) -> InterchangeMetadata { InterchangeMetadata { diff --git a/validator_client/slashing_protection/src/block_tests.rs b/validator_client/slashing_protection/src/block_tests.rs index abd452a0b67..b3273015f42 100644 --- a/validator_client/slashing_protection/src/block_tests.rs +++ b/validator_client/slashing_protection/src/block_tests.rs @@ -2,7 +2,7 @@ use super::*; use crate::test_utils::*; -use types::{BeaconBlockHeader, Slot}; +use types::{BeaconBlockHeader, FixedBytesExtended, Slot}; pub fn block(slot: u64) -> BeaconBlockHeader { BeaconBlockHeader { diff --git a/validator_client/slashing_protection/src/extra_interchange_tests.rs b/validator_client/slashing_protection/src/extra_interchange_tests.rs index dd1c1882158..0f88ec8b1dc 100644 --- a/validator_client/slashing_protection/src/extra_interchange_tests.rs +++ b/validator_client/slashing_protection/src/extra_interchange_tests.rs @@ -3,6 +3,7 @@ use crate::test_utils::pubkey; use crate::*; use tempfile::tempdir; +use types::FixedBytesExtended; #[test] fn export_non_existent_key() { diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index d99647bc936..e1ac841905f 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -6,7 +6,7 @@ use crate::{ use 
serde::{Deserialize, Serialize}; use std::collections::HashSet; use tempfile::tempdir; -use types::{Epoch, Hash256, PublicKeyBytes, Slot}; +use types::{Epoch, FixedBytesExtended, Hash256, PublicKeyBytes, Slot}; #[derive(Debug, Clone, Deserialize, Serialize)] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] diff --git a/validator_client/slashing_protection/src/lib.rs b/validator_client/slashing_protection/src/lib.rs index e5606d4042a..51dd3e31642 100644 --- a/validator_client/slashing_protection/src/lib.rs +++ b/validator_client/slashing_protection/src/lib.rs @@ -130,6 +130,8 @@ impl Display for NotSafe { #[cfg(test)] mod test { + use types::FixedBytesExtended; + use super::*; #[test] diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index 04554786f6f..baaf930c68b 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -513,7 +513,7 @@ impl SlashingDatabase { txn.execute( "INSERT INTO signed_blocks (validator_id, slot, signing_root) VALUES (?1, ?2, ?3)", - params![validator_id, slot, signing_root.to_hash256_raw().as_bytes()], + params![validator_id, slot, signing_root.to_hash256_raw().as_slice()], )?; Ok(()) } @@ -539,7 +539,7 @@ impl SlashingDatabase { validator_id, att_source_epoch, att_target_epoch, - att_signing_root.to_hash256_raw().as_bytes() + att_signing_root.to_hash256_raw().as_slice() ], )?; Ok(()) diff --git a/validator_client/slashing_protection/src/test_utils.rs b/validator_client/slashing_protection/src/test_utils.rs index efdeb9bc6ba..8cbca12a10b 100644 --- a/validator_client/slashing_protection/src/test_utils.rs +++ b/validator_client/slashing_protection/src/test_utils.rs @@ -3,8 +3,8 @@ use tempfile::{tempdir, TempDir}; use types::{test_utils::generate_deterministic_keypair, AttestationData, BeaconBlockHeader}; pub const DEFAULT_VALIDATOR_INDEX: usize = 
0; -pub const DEFAULT_DOMAIN: Hash256 = Hash256::zero(); -pub const DEFAULT_GENESIS_VALIDATORS_ROOT: Hash256 = Hash256::zero(); +pub const DEFAULT_DOMAIN: Hash256 = Hash256::ZERO; +pub const DEFAULT_GENESIS_VALIDATORS_ROOT: Hash256 = Hash256::ZERO; pub fn pubkey(index: usize) -> PublicKeyBytes { generate_deterministic_keypair(index).pk.compress() diff --git a/validator_client/slashing_protection/tests/migration.rs b/validator_client/slashing_protection/tests/migration.rs index cd3561f2114..3d4ec7ea9a8 100644 --- a/validator_client/slashing_protection/tests/migration.rs +++ b/validator_client/slashing_protection/tests/migration.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use std::fs; use std::path::{Path, PathBuf}; use tempfile::tempdir; -use types::Hash256; +use types::{FixedBytesExtended, Hash256}; fn test_data_dir() -> PathBuf { Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap()).join("migration-tests") diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 4467b807865..58d7f9d8eef 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -134,6 +134,12 @@ impl fmt::Display for Errors { } } +impl Errors { + pub fn num_errors(&self) -> usize { + self.0.len() + } +} + /// Reasons why a candidate might not be ready. #[derive(Debug, Clone, Copy)] pub enum CandidateError { @@ -599,46 +605,41 @@ impl BeaconNodeFallback { F: Fn(&'a BeaconNodeHttpClient) -> R, R: Future>, { - let mut results = vec![]; let mut to_retry = vec![]; let mut retry_unsynced = vec![]; // Run `func` using a `candidate`, returning the value or capturing errors. - // - // We use a macro instead of a closure here since it is not trivial to move `func` into a - // closure. - macro_rules! 
try_func { - ($candidate: ident) => {{ - inc_counter_vec(&ENDPOINT_REQUESTS, &[$candidate.beacon_node.as_ref()]); + let run_on_candidate = |candidate: &'a CandidateBeaconNode| async { + inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.beacon_node.as_ref()]); - // There exists a race condition where `func` may be called when the candidate is - // actually not ready. We deem this an acceptable inefficiency. - match func(&$candidate.beacon_node).await { - Ok(val) => results.push(Ok(val)), - Err(e) => { - // If we have an error on this function, make the client as not-ready. - // - // There exists a race condition where the candidate may have been marked - // as ready between the `func` call and now. We deem this an acceptable - // inefficiency. - if matches!(offline_on_failure, OfflineOnFailure::Yes) { - $candidate.set_offline().await; - } - results.push(Err(( - $candidate.beacon_node.to_string(), - Error::RequestFailed(e), - ))); - inc_counter_vec(&ENDPOINT_ERRORS, &[$candidate.beacon_node.as_ref()]); + // There exists a race condition where `func` may be called when the candidate is + // actually not ready. We deem this an acceptable inefficiency. + match func(&candidate.beacon_node).await { + Ok(val) => Ok(val), + Err(e) => { + // If we have an error on this function, mark the client as not-ready. + // + // There exists a race condition where the candidate may have been marked + // as ready between the `func` call and now. We deem this an acceptable + // inefficiency. + if matches!(offline_on_failure, OfflineOnFailure::Yes) { + candidate.set_offline().await; } + inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.beacon_node.as_ref()]); + Err((candidate.beacon_node.to_string(), Error::RequestFailed(e))) } - }}; - } + } + }; // First pass: try `func` on all synced and ready candidates. // // This ensures that we always choose a synced node if it is available. 
+ let mut first_batch_futures = vec![]; for candidate in &self.candidates { match candidate.status(RequireSynced::Yes).await { + Ok(_) => { + first_batch_futures.push(run_on_candidate(candidate)); + } Err(CandidateError::NotSynced) if require_synced == false => { // This client is unsynced we will try it after trying all synced clients retry_unsynced.push(candidate); @@ -647,22 +648,24 @@ impl BeaconNodeFallback { // This client was not ready on the first pass, we might try it again later. to_retry.push(candidate); } - Ok(_) => try_func!(candidate), } } + let first_batch_results = futures::future::join_all(first_batch_futures).await; // Second pass: try `func` on ready unsynced candidates. This only runs if we permit // unsynced candidates. // // Due to async race-conditions, it is possible that we will send a request to a candidate // that has been set to an offline/unready status. This is acceptable. - if require_synced == false { - for candidate in retry_unsynced { - try_func!(candidate); - } - } + let second_batch_results = if require_synced == false { + futures::future::join_all(retry_unsynced.into_iter().map(run_on_candidate)).await + } else { + vec![] + }; // Third pass: try again, attempting to make non-ready clients become ready. + let mut third_batch_futures = vec![]; + let mut third_batch_results = vec![]; for candidate in to_retry { // If the candidate hasn't luckily transferred into the correct state in the meantime, // force an update of the state. 
@@ -676,16 +679,21 @@ impl BeaconNodeFallback { }; match new_status { - Ok(()) => try_func!(candidate), - Err(CandidateError::NotSynced) if require_synced == false => try_func!(candidate), - Err(e) => { - results.push(Err(( - candidate.beacon_node.to_string(), - Error::Unavailable(e), - ))); + Ok(()) => third_batch_futures.push(run_on_candidate(candidate)), + Err(CandidateError::NotSynced) if require_synced == false => { + third_batch_futures.push(run_on_candidate(candidate)) } + Err(e) => third_batch_results.push(Err(( + candidate.beacon_node.to_string(), + Error::Unavailable(e), + ))), } } + third_batch_results.extend(futures::future::join_all(third_batch_futures).await); + + let mut results = first_batch_results; + results.extend(second_batch_results); + results.extend(third_batch_results); let errors: Vec<_> = results.into_iter().filter_map(|res| res.err()).collect(); diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs index 9f93795e29f..2c8eca85601 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/src/doppelganger_service.rs @@ -687,8 +687,8 @@ impl DoppelgangerService { #[cfg(test)] mod test { use super::*; - use environment::null_logger; use futures::executor::block_on; + use logging::test_logger; use slot_clock::TestingSlotClock; use std::future; use std::time::Duration; @@ -732,7 +732,7 @@ mod test { fn build(self) -> TestScenario { let mut rng = XorShiftRng::from_seed([42; 16]); let slot_clock = TestingSlotClock::new(Slot::new(0), GENESIS_TIME, SLOT_DURATION); - let log = null_logger().unwrap(); + let log = test_logger(); TestScenario { validators: (0..self.validator_count) diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 880f0eaa488..faa157a8592 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -86,7 +86,8 @@ const _: () = assert!({ /// This number is based upon 
`MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD` value in the /// `beacon_node::network::attestation_service` crate. It is not imported directly to avoid /// bringing in the entire crate. -const _: () = assert!(ATTESTATION_SUBSCRIPTION_OFFSETS[0] > 2); +const MIN_ATTESTATION_SUBSCRIPTION_LOOKAHEAD: u64 = 2; +const _: () = assert!(ATTESTATION_SUBSCRIPTION_OFFSETS[0] > MIN_ATTESTATION_SUBSCRIPTION_LOOKAHEAD); // The info in the enum variants is displayed in logging, clippy thinks it's dead code. #[derive(Debug)] @@ -121,6 +122,8 @@ pub struct DutyAndProof { pub struct SubscriptionSlots { /// Pairs of `(slot, already_sent)` in slot-descending order. slots: Vec<(Slot, AtomicBool)>, + /// The slot of the duty itself. + duty_slot: Slot, } /// Create a selection proof for `duty`. @@ -172,18 +175,20 @@ impl SubscriptionSlots { .filter(|scheduled_slot| *scheduled_slot > current_slot) .map(|scheduled_slot| (scheduled_slot, AtomicBool::new(false))) .collect(); - Arc::new(Self { slots }) + Arc::new(Self { slots, duty_slot }) } /// Return `true` if we should send a subscription at `slot`. fn should_send_subscription_at(&self, slot: Slot) -> bool { // Iterate slots from smallest to largest looking for one that hasn't been completed yet. - self.slots - .iter() - .rev() - .any(|(scheduled_slot, already_sent)| { - slot >= *scheduled_slot && !already_sent.load(Ordering::Relaxed) - }) + slot + MIN_ATTESTATION_SUBSCRIPTION_LOOKAHEAD <= self.duty_slot + && self + .slots + .iter() + .rev() + .any(|(scheduled_slot, already_sent)| { + slot >= *scheduled_slot && !already_sent.load(Ordering::Relaxed) + }) } /// Update our record of subscribed slots to account for successful subscription at `slot`. 
@@ -737,7 +742,7 @@ async fn poll_beacon_attesters( // If there are any subscriptions, push them out to beacon nodes if !subscriptions.is_empty() { let subscriptions_ref = &subscriptions; - if let Err(e) = duties_service + let subscription_result = duties_service .beacon_nodes .request( RequireSynced::No, @@ -753,15 +758,8 @@ async fn poll_beacon_attesters( .await }, ) - .await - { - error!( - log, - "Failed to subscribe validators"; - "error" => %e - ) - } else { - // Record that subscriptions were successfully sent. + .await; + if subscription_result.as_ref().is_ok() { debug!( log, "Broadcast attestation subscriptions"; @@ -770,6 +768,25 @@ async fn poll_beacon_attesters( for subscription_slots in subscription_slots_to_confirm { subscription_slots.record_successful_subscription_at(current_slot); } + } else if let Err(e) = subscription_result { + if e.num_errors() < duties_service.beacon_nodes.num_total() { + warn!( + log, + "Some subscriptions failed"; + "error" => %e, + ); + // If subscriptions were sent to at least one node, regard that as a success. + // There is some redundancy built into the subscription schedule to handle failures. + for subscription_slots in subscription_slots_to_confirm { + subscription_slots.record_successful_subscription_at(current_slot); + } + } else { + error!( + log, + "All subscriptions failed"; + "error" => %e + ); + } } } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 729ff62ee30..dff50582dfe 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -75,6 +75,7 @@ const WAITING_FOR_GENESIS_POLL_TIME: Duration = Duration::from_secs(12); /// This can help ensure that proper endpoint fallback occurs. 
const HTTP_ATTESTATION_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_ATTESTATION_SUBSCRIPTIONS_TIMEOUT_QUOTIENT: u32 = 24; const HTTP_LIVENESS_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_PROPOSAL_TIMEOUT_QUOTIENT: u32 = 2; const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; @@ -323,6 +324,8 @@ impl ProductionValidatorClient { Timeouts { attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT, attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT, + attestation_subscriptions: slot_duration + / HTTP_ATTESTATION_SUBSCRIPTIONS_TIMEOUT_QUOTIENT, liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, diff --git a/validator_manager/src/common.rs b/validator_manager/src/common.rs index 871c5362030..4a35791b322 100644 --- a/validator_manager/src/common.rs +++ b/validator_manager/src/common.rs @@ -1,3 +1,4 @@ +pub use account_utils::STDIN_INPUTS_FLAG; use account_utils::{strip_off_newlines, ZeroizeString}; use eth2::lighthouse_vc::std_types::{InterchangeJsonStr, KeystoreJsonStr}; use eth2::{ @@ -15,7 +16,6 @@ use tree_hash::TreeHash; use types::*; pub const IGNORE_DUPLICATES_FLAG: &str = "ignore-duplicates"; -pub const STDIN_INPUTS_FLAG: &str = "stdin-inputs"; pub const COUNT_FLAG: &str = "count"; /// When the `ethereum/staking-deposit-cli` tool generates deposit data JSON, it adds a diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index d53e92deb30..d06fce1d094 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -105,15 +105,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0), ) - .arg( - Arg::new(STDIN_INPUTS_FLAG) - .action(ArgAction::SetTrue) - .hide(cfg!(windows)) - .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin 
instead of tty.") - .display_order(0) - .help_heading(FLAG_HEADER), - ) .arg( Arg::new(DISABLE_DEPOSITS_FLAG) .long(DISABLE_DEPOSITS_FLAG) @@ -725,16 +716,16 @@ pub mod tests { assert_eq!(deposit.pubkey, validator_pubkey.clone().into()); if let Some(address) = config.eth1_withdrawal_address { assert_eq!( - deposit.withdrawal_credentials.as_bytes()[0], + deposit.withdrawal_credentials.as_slice()[0], spec.eth1_address_withdrawal_prefix_byte ); assert_eq!( - &deposit.withdrawal_credentials.as_bytes()[12..], - address.as_bytes() + &deposit.withdrawal_credentials.as_slice()[12..], + address.as_slice() ); } else { assert_eq!( - deposit.withdrawal_credentials.as_bytes()[0], + deposit.withdrawal_credentials.as_slice()[0], spec.bls_withdrawal_prefix_byte ); } diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index d2149d742c1..91bc2b0ef85 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -184,14 +184,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0), ) - .arg( - Arg::new(STDIN_INPUTS_FLAG) - .action(ArgAction::SetTrue) - .hide(cfg!(windows)) - .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty.") - .display_order(0), - ) .arg( Arg::new(BUILDER_BOOST_FACTOR_FLAG) .long(BUILDER_BOOST_FACTOR_FLAG) diff --git a/watch/src/database/watch_types.rs b/watch/src/database/watch_types.rs index 0b3ba2c304d..c2b67084c94 100644 --- a/watch/src/database/watch_types.rs +++ b/watch/src/database/watch_types.rs @@ -72,7 +72,7 @@ impl WatchHash { } pub fn as_bytes(&self) -> &[u8] { - self.0.as_bytes() + self.0.as_slice() } pub fn from_bytes(src: &[u8]) -> Result {