diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 25d2cdab302..155d3182ef2 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -6,6 +6,7 @@ on: - unstable - stable - capella + - eip4844 tags: - v* @@ -40,6 +41,12 @@ jobs: run: | echo "VERSION=capella" >> $GITHUB_ENV echo "VERSION_SUFFIX=" >> $GITHUB_ENV + echo "CROSS_FEATURES=withdrawals-processing" >> $GITHUB_ENV + - name: Extract version (if eip4844) + if: github.event.ref == 'refs/heads/eip4844' + run: | + echo "VERSION=eip4844" >> $GITHUB_ENV + echo "VERSION_SUFFIX=" >> $GITHUB_ENV - name: Extract version (if tagged release) if: startsWith(github.event.ref, 'refs/tags') run: | @@ -48,6 +55,7 @@ jobs: outputs: VERSION: ${{ env.VERSION }} VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }} + CROSS_FEATURES: ${{ env.CROSS_FEATURES }} build-docker-single-arch: name: build-docker-${{ matrix.binary }} runs-on: ubuntu-22.04 @@ -66,7 +74,7 @@ jobs: DOCKER_CLI_EXPERIMENTAL: enabled VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} - CROSS_FEATURES: withdrawals,withdrawals-processing + CROSS_FEATURES: ${{ needs.extract-version.outputs.CROSS_FEATURES }} steps: - uses: actions/checkout@v3 - name: Update Rust diff --git a/Cargo.lock b/Cargo.lock index 4c9a52ba3ab..b808170999f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -721,7 +721,7 @@ dependencies = [ [[package]] name = "c-kzg" version = "0.1.0" -source = "git+https://github.com/pawanjay176/c-kzg-4844?rev=48c048b12a7d29ae3e7bf09000e07870da4cb701#48c048b12a7d29ae3e7bf09000e07870da4cb701" +source = "git+https://github.com/pawanjay176/c-kzg-4844?rev=69bde8f4e0bbf0da30d92601b7db138bdd7e6a04#69bde8f4e0bbf0da30d92601b7db138bdd7e6a04" dependencies = [ "hex", "libc", diff --git a/Dockerfile b/Dockerfile index 7a0602a2213..a4779447be5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM rust:1.65.0-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler COPY . lighthouse ARG FEATURES ENV FEATURES $FEATURES diff --git a/Makefile b/Makefile index 5aee24a22b0..15d09c5867f 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx CROSS_PROFILE ?= release # List of features to use when running EF tests. -EF_TEST_FEATURES ?= withdrawals,withdrawals-processing +EF_TEST_FEATURES ?= beacon_chain/withdrawals-processing # Cargo profile for regular builds. PROFILE ?= release diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 4cdd4c1df9e..e54acfffb48 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -13,7 +13,6 @@ node_test_rig = { path = "../testing/node_test_rig" } [features] write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing. -withdrawals = ["beacon_chain/withdrawals", "types/withdrawals", "store/withdrawals", "execution_layer/withdrawals"] withdrawals-processing = [ "beacon_chain/withdrawals-processing", "store/withdrawals-processing", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 87ed0e8e557..553bf992055 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -10,7 +10,6 @@ default = ["participation_metrics"] write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing. 
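# Illustrative note, not part of the patch: this diff deletes the standalone
# `withdrawals` feature from this crate and from `beacon_node`, `execution_layer`
# and the other crates below, leaving `withdrawals-processing` as the single
# opt-in flag. A downstream crate now enables the remaining paths with just:
#
#   [features]
#   withdrawals-processing = ["beacon_chain/withdrawals-processing"]
#
# while the `#[cfg(feature = "withdrawals")]` gates in the Rust sources are
# removed so those code paths compile unconditionally.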
participation_metrics = [] # Exposes validator participation metrics to Prometheus. fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable -withdrawals = ["state_processing/withdrawals", "types/withdrawals", "store/withdrawals", "execution_layer/withdrawals"] withdrawals-processing = [ "state_processing/withdrawals-processing", "store/withdrawals-processing", diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index 24963a125d2..8e2dfdab82c 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -42,6 +42,7 @@ pub enum Error { // Boxed to avoid an infinite-size recursion issue. BeaconChain(Box), MissingBeaconState(Hash256), + MissingBlobs, FailedToTransitionState(StateAdvanceError), CannotAttestToFutureState { state_slot: Slot, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4c73da695b1..37d6df979b1 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -62,7 +62,7 @@ use crate::{metrics, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, Ca use eth2::types::{EventKind, SseBlock, SyncDuty}; use execution_layer::{ BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, - PayloadAttributes, PayloadAttributesV2, PayloadStatus, + PayloadAttributes, PayloadStatus, }; pub use fork_choice::CountUnrealized; use fork_choice::{ @@ -80,14 +80,12 @@ use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; -#[cfg(feature = "withdrawals")] -use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::{ common::get_attesting_indices_from_state, per_block_processing, per_block_processing::{ - errors::AttestationValidationError, verify_attestation_for_block_inclusion, - VerifySignatures, + errors::AttestationValidationError, get_expected_withdrawals, + verify_attestation_for_block_inclusion, VerifySignatures, }, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, @@ -290,7 +288,6 @@ struct PartialBeaconBlock> { voluntary_exits: Vec, sync_aggregate: Option>, prepare_payload_handle: Option>, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: Vec, } @@ -2938,7 +2935,7 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 - let (signed_block, blobs) = signed_block.deconstruct(); + let (signed_block, blobs) = signed_block.deconstruct(Some(block_root)); let block = signed_block.message(); let mut ops: Vec<_> = confirmed_state_roots .into_iter() @@ -2947,7 +2944,9 @@ impl BeaconChain { ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); - if let Some(blobs) = blobs { + if let Some(blobs) = blobs? 
{ + //FIXME(sean) using this for debugging for now + info!(self.log, "Writing blobs to store"; "block_root" => ?block_root); ops.push(StoreOp::PutBlobs(block_root, blobs)); }; let txn_lock = self.store.hot_db.begin_rw_transaction(); @@ -4206,7 +4205,6 @@ impl BeaconChain { let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; - #[cfg(feature = "withdrawals")] let bls_to_execution_changes = self .op_pool .get_bls_to_execution_changes(&state, &self.spec); @@ -4369,7 +4367,6 @@ impl BeaconChain { voluntary_exits, sync_aggregate, prepare_payload_handle, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }) } @@ -4398,7 +4395,6 @@ impl BeaconChain { // this function. We can assume that the handle has already been consumed in order to // produce said `execution_payload`. prepare_payload_handle: _, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, } = partial_beacon_block; @@ -4499,7 +4495,6 @@ impl BeaconChain { execution_payload: payload .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: bls_to_execution_changes.into(), }, }), @@ -4531,7 +4526,6 @@ impl BeaconChain { execution_payload: payload .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: bls_to_execution_changes.into(), blob_kzg_commitments: kzg_commitments .ok_or(BlockProductionError::InvalidPayloadFork)?, @@ -4831,7 +4825,6 @@ impl BeaconChain { return Ok(()); } - #[cfg(feature = "withdrawals")] let withdrawals = match self.spec.fork_name_at_slot::(prepare_slot) { ForkName::Base | ForkName::Altair | ForkName::Merge => None, ForkName::Capella | ForkName::Eip4844 => { @@ -4866,10 +4859,7 @@ impl BeaconChain { execution_layer .get_suggested_fee_recipient(proposer as u64) .await, - #[cfg(feature = "withdrawals")] withdrawals, - #[cfg(not(feature = "withdrawals"))] - None, ); debug!( diff --git a/beacon_node/beacon_chain/src/blob_cache.rs b/beacon_node/beacon_chain/src/blob_cache.rs index 7f057ad9ed1..d03e62ab646 100644 --- a/beacon_node/beacon_chain/src/blob_cache.rs +++ b/beacon_node/beacon_chain/src/blob_cache.rs @@ -1,7 +1,6 @@ use lru::LruCache; use parking_lot::Mutex; -use tree_hash::TreeHash; -use types::{BlobsSidecar, EthSpec, ExecutionPayload, Hash256}; +use types::{BlobsSidecar, EthSpec, Hash256}; pub const DEFAULT_BLOB_CACHE_SIZE: usize = 10; diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index ea4ed6e14d9..06cebf9ea74 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -2,9 +2,8 @@ use slot_clock::SlotClock; use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use crate::{kzg_utils, BeaconChainError}; -use bls::PublicKey; use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions; -use types::consts::eip4844::BLS_MODULUS; +use types::signed_beacon_block::BlobReconstructionError; use types::{BeaconStateError, BlobsSidecar, Hash256, KzgCommitment, Slot, Transactions}; #[derive(Debug)] @@ -87,6 +86,14 @@ pub enum BlobError { /// We were unable to process this sync committee message due to an internal error. It's unclear if the /// sync committee message is valid. 
BeaconChainError(BeaconChainError), + /// No blobs for the specified block where we would expect blobs. + MissingBlobs, +} + +impl From for BlobError { + fn from(_: BlobReconstructionError) -> Self { + BlobError::MissingBlobs + } } impl From for BlobError { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 589c0656dbe..2b759e4ad96 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -87,13 +87,14 @@ use std::time::Duration; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; +use types::signed_beacon_block::BlobReconstructionError; use types::signed_block_and_blobs::BlockWrapper; +use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; -use types::{BlobsSidecar, ExecPayload}; pub const POS_PANDA_BANNER: &str = r#" ,,, ,,, ,,, ,,, @@ -479,6 +480,12 @@ impl From for BlockError { } } +impl From for BlockError { + fn from(e: BlobReconstructionError) -> Self { + BlockError::BlobValidation(BlobError::from(e)) + } +} + /// Stores information about verifying a payload against an execution engine. pub struct PayloadVerificationOutcome { pub payload_verification_status: PayloadVerificationStatus, @@ -579,10 +586,8 @@ pub fn signature_verify_chain_segment( let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len()); for (block_root, block) in &chain_segment { - let mut consensus_context = ConsensusContext::new(block.slot()) - .set_current_block_root(*block_root) - //FIXME(sean) Consider removing this is we pass the blob wrapper everywhere - .set_blobs_sidecar(block.blobs_sidecar()); + let mut consensus_context = + ConsensusContext::new(block.slot()).set_current_block_root(*block_root); signature_verifier.include_all_signatures(block.block(), &mut consensus_context)?; @@ -907,7 +912,7 @@ impl GossipVerifiedBlock { // Validate the block's execution_payload (if any). validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?; - if let Some(blobs_sidecar) = block.blobs() { + if let Some(blobs_sidecar) = block.blobs(Some(block_root))? { let kzg_commitments = block .message() .body() @@ -921,7 +926,7 @@ impl GossipVerifiedBlock { .map_err(|_| BlockError::BlobValidation(BlobError::TransactionsMissing))? 
.ok_or(BlockError::BlobValidation(BlobError::TransactionsMissing))?; validate_blob_for_gossip( - blobs_sidecar, + &blobs_sidecar, kzg_commitments, transactions, block.slot(), @@ -936,8 +941,7 @@ impl GossipVerifiedBlock { .set_current_block_root(block_root) .set_proposer_index(block.message().proposer_index()) .set_blobs_sidecar_validated(true) // Validated in `validate_blob_for_gossip` - .set_blobs_verified_vs_txs(true) // Validated in `validate_blob_for_gossip` - .set_blobs_sidecar(block.blobs_sidecar()); // TODO: potentially remove + .set_blobs_verified_vs_txs(true); Ok(Self { block, @@ -1009,9 +1013,8 @@ impl SignatureVerifiedBlock { let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - let mut consensus_context = ConsensusContext::new(block.slot()) - .set_current_block_root(block_root) - .set_blobs_sidecar(block.blobs_sidecar()); + let mut consensus_context = + ConsensusContext::new(block.slot()).set_current_block_root(block_root); signature_verifier.include_all_signatures(block.block(), &mut consensus_context)?; @@ -1138,12 +1141,8 @@ impl IntoExecutionPendingBlock for Arc &SignedBeaconBlock { @@ -1564,49 +1563,51 @@ impl ExecutionPendingBlock { * Verify kzg proofs and kzg commitments against transactions if required */ //FIXME(sean) should this be prior to applying attestions to fork choice above? done in parallel? - if let Some(ref sidecar) = consensus_context.blobs_sidecar() { - if let Some(data_availability_boundary) = chain.data_availability_boundary() { - if block_slot.epoch(T::EthSpec::slots_per_epoch()) > data_availability_boundary { - let kzg = chain.kzg.as_ref().ok_or(BlockError::BlobValidation( - BlobError::TrustedSetupNotInitialized, - ))?; - let transactions = block - .message() - .body() - .execution_payload_eip4844() - .map(|payload| payload.transactions()) - .map_err(|_| BlockError::BlobValidation(BlobError::TransactionsMissing))? - .ok_or(BlockError::BlobValidation(BlobError::TransactionsMissing))?; - let kzg_commitments = - block.message().body().blob_kzg_commitments().map_err(|_| { - BlockError::BlobValidation(BlobError::KzgCommitmentMissing) - })?; - if !consensus_context.blobs_sidecar_validated() { - if !kzg_utils::validate_blobs_sidecar( - &kzg, - block.slot(), - block_root, - kzg_commitments, - sidecar, - ) - .map_err(|e| BlockError::BlobValidation(BlobError::KzgError(e)))? - { - return Err(BlockError::BlobValidation(BlobError::InvalidKzgProof)); - } - } - if !consensus_context.blobs_verified_vs_txs() - && verify_kzg_commitments_against_transactions::( - transactions, - kzg_commitments, - ) - //FIXME(sean) we should maybe just map this error so we have more info about the mismatch - .is_err() + if let Some(data_availability_boundary) = chain.data_availability_boundary() { + if block_slot.epoch(T::EthSpec::slots_per_epoch()) >= data_availability_boundary { + let sidecar = block + .blobs(Some(block_root))? + .ok_or(BlockError::BlobValidation(BlobError::MissingBlobs))?; + let kzg = chain.kzg.as_ref().ok_or(BlockError::BlobValidation( + BlobError::TrustedSetupNotInitialized, + ))?; + let transactions = block + .message() + .body() + .execution_payload_eip4844() + .map(|payload| payload.transactions()) + .map_err(|_| BlockError::BlobValidation(BlobError::TransactionsMissing))? 
+ .ok_or(BlockError::BlobValidation(BlobError::TransactionsMissing))?; + let kzg_commitments = block + .message() + .body() + .blob_kzg_commitments() + .map_err(|_| BlockError::BlobValidation(BlobError::KzgCommitmentMissing))?; + if !consensus_context.blobs_sidecar_validated() { + if !kzg_utils::validate_blobs_sidecar( + &kzg, + block.slot(), + block_root, + kzg_commitments, + &sidecar, + ) + .map_err(|e| BlockError::BlobValidation(BlobError::KzgError(e)))? { - return Err(BlockError::BlobValidation( - BlobError::TransactionCommitmentMismatch, - )); + return Err(BlockError::BlobValidation(BlobError::InvalidKzgProof)); } } + if !consensus_context.blobs_verified_vs_txs() + && verify_kzg_commitments_against_transactions::( + transactions, + kzg_commitments, + ) + //FIXME(sean) we should maybe just map this error so we have more info about the mismatch + .is_err() + { + return Err(BlockError::BlobValidation( + BlobError::TransactionCommitmentMismatch, + )); + } } } diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index f7b69a0d783..1216d5d4d84 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -69,7 +69,7 @@ impl EarlyAttesterCache { }, }; - let (block, blobs) = block.deconstruct(); + let (block, blobs) = block.deconstruct(Some(beacon_block_root)); let item = CacheItem { epoch, committee_lengths, @@ -77,7 +77,7 @@ impl EarlyAttesterCache { source, target, block, - blobs, + blobs: blobs.map_err(|_| Error::MissingBlobs)?, proto_block, }; diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 1982bdbf022..d52df4853df 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -17,11 +17,9 @@ use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; use slot_clock::SlotClock; -#[cfg(feature = "withdrawals")] -use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_block_processing::{ - compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, - partially_verify_execution_payload, + compute_timestamp_at_slot, get_expected_withdrawals, is_execution_enabled, + is_merge_transition_complete, partially_verify_execution_payload, }; use std::sync::Arc; use tokio::task::JoinHandle; @@ -382,7 +380,6 @@ pub fn get_execution_payload< let random = *state.get_randao_mix(current_epoch)?; let latest_execution_payload_header_block_hash = state.latest_execution_payload_header()?.block_hash(); - #[cfg(feature = "withdrawals")] let withdrawals = match state { &BeaconState::Capella(_) | &BeaconState::Eip4844(_) => { Some(get_expected_withdrawals(state, spec)?.into()) @@ -407,7 +404,6 @@ pub fn get_execution_payload< proposer_index, latest_execution_payload_header_block_hash, builder_params, - #[cfg(feature = "withdrawals")] withdrawals, ) .await @@ -442,7 +438,7 @@ pub async fn prepare_execution_payload( proposer_index: u64, latest_execution_payload_header_block_hash: ExecutionBlockHash, builder_params: BuilderParams, - #[cfg(feature = "withdrawals")] withdrawals: Option>, + withdrawals: Option>, ) -> Result, BlockProductionError> where T: BeaconChainTypes, @@ -504,15 +500,8 @@ where let suggested_fee_recipient = execution_layer .get_suggested_fee_recipient(proposer_index) .await; - let 
payload_attributes = PayloadAttributes::new( - timestamp, - random, - suggested_fee_recipient, - #[cfg(feature = "withdrawals")] - withdrawals, - #[cfg(not(feature = "withdrawals"))] - None, - ); + let payload_attributes = + PayloadAttributes::new(timestamp, random, suggested_fee_recipient, withdrawals); // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index f73223fa540..3d21a14c830 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -186,7 +186,7 @@ async fn state_advance_timer( head_slot, }) => debug!( log, - "Refused to advance head state"; + "Refused to advance head state. Chain may be syncing or lagging too far behind"; "head_slot" => head_slot, "current_slot" => current_slot, ), diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index b3bdc54d02a..47c1e0341b6 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -5,7 +5,6 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -withdrawals = ["state_processing/withdrawals", "types/withdrawals", "eth2/withdrawals"] withdrawals-processing = ["state_processing/withdrawals-processing", "eth2/withdrawals-processing"] [dependencies] diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 424ca30d137..80cdeacb34f 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -165,7 +165,6 @@ pub struct ExecutionBlockWithTransactions { #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, pub transactions: Vec, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub withdrawals: Vec, } @@ -215,7 +214,6 @@ impl TryFrom> for ExecutionBlockWithTransactions .iter() .map(|tx| Transaction::decode(&Rlp::new(tx))) .collect::, _>>()?, - #[cfg(feature = "withdrawals")] withdrawals: Vec::from(block.withdrawals) .into_iter() .map(|withdrawal| withdrawal.into()) @@ -243,7 +241,6 @@ impl TryFrom> for ExecutionBlockWithTransactions .iter() .map(|tx| Transaction::decode(&Rlp::new(tx))) .collect::, _>>()?, - #[cfg(feature = "withdrawals")] withdrawals: Vec::from(block.withdrawals) .into_iter() .map(|withdrawal| withdrawal.into()) diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index d0741716b35..5cde2ef0b54 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -779,7 +779,7 @@ impl HttpJsonRpc { ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); - let payload_v2: JsonExecutionPayloadV2 = self + let response: JsonGetPayloadResponse = self .rpc_request( ENGINE_GET_PAYLOAD_V2, params, @@ -787,7 +787,7 @@ impl HttpJsonRpc { ) .await?; - JsonExecutionPayload::V2(payload_v2).try_into_execution_payload(fork_name) + JsonExecutionPayload::V2(response.execution_payload).try_into_execution_payload(fork_name) } pub async fn get_payload_v3( @@ -889,11 +889,11 @@ impl HttpJsonRpc { pub async fn supported_apis_v1(&self) -> Result { Ok(SupportedApis { new_payload_v1: true, - new_payload_v2: cfg!(all(feature = "withdrawals", not(test))), + new_payload_v2: cfg!(feature = 
"withdrawals-processing"), forkchoice_updated_v1: true, - forkchoice_updated_v2: cfg!(all(feature = "withdrawals", not(test))), + forkchoice_updated_v2: cfg!(feature = "withdrawals-processing"), get_payload_v1: true, - get_payload_v2: cfg!(all(feature = "withdrawals", not(test))), + get_payload_v2: cfg!(feature = "withdrawals-processing"), exchange_transition_configuration_v1: true, }) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index e83c48f6af6..38f51de4e0a 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -164,7 +164,6 @@ impl JsonExecutionPayload { base_fee_per_gas: v2.base_fee_per_gas, block_hash: v2.block_hash, transactions: v2.transactions, - #[cfg(feature = "withdrawals")] withdrawals: v2 .withdrawals .map(|v| { @@ -192,7 +191,6 @@ impl JsonExecutionPayload { excess_data_gas: v2.excess_data_gas.ok_or_else(|| Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?, block_hash: v2.block_hash, transactions: v2.transactions, - #[cfg(feature = "withdrawals")] withdrawals: v2 .withdrawals .map(|v| { @@ -280,7 +278,6 @@ impl TryFrom> for JsonExecutionPayloadV2 { excess_data_gas: None, block_hash: capella.block_hash, transactions: capella.transactions, - #[cfg(feature = "withdrawals")] withdrawals: Some( Vec::from(capella.withdrawals) .into_iter() @@ -288,8 +285,6 @@ impl TryFrom> for JsonExecutionPayloadV2 { .collect::>() .into(), ), - #[cfg(not(feature = "withdrawals"))] - withdrawals: None, }), ExecutionPayload::Eip4844(eip4844) => Ok(JsonExecutionPayloadV2 { parent_hash: eip4844.parent_hash, @@ -307,7 +302,6 @@ impl TryFrom> for JsonExecutionPayloadV2 { excess_data_gas: Some(eip4844.excess_data_gas), block_hash: eip4844.block_hash, transactions: eip4844.transactions, - #[cfg(feature = "withdrawals")] withdrawals: Some( Vec::from(eip4844.withdrawals) .into_iter() @@ -315,13 +309,20 @@ impl TryFrom> for JsonExecutionPayloadV2 { .collect::>() .into(), ), - #[cfg(not(feature = "withdrawals"))] - withdrawals: None, }), } } } +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec", rename_all = "camelCase")] +pub struct JsonGetPayloadResponse { + pub execution_payload: JsonExecutionPayloadV2, + // uncomment this when geth fixes its serialization + //#[serde(with = "eth2_serde_utils::u256_hex_be")] + //pub block_value: Uint256, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonWithdrawal { diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 432cc85cd4e..e0b7c1dc3f9 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::sync::{watch, Mutex, RwLock}; use tokio_stream::wrappers::WatchStream; -use types::{Address, ExecutionBlockHash, ForkName, Hash256}; +use types::{ExecutionBlockHash, ForkName}; /// The number of payload IDs that will be stored for each `Engine`. 
/// diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 90a41a9c85d..2361223da1c 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1690,7 +1690,6 @@ impl ExecutionLayer { }) } ExecutionBlockWithTransactions::Capella(capella_block) => { - #[cfg(feature = "withdrawals")] let withdrawals = VariableList::new( capella_block .withdrawals @@ -1699,7 +1698,6 @@ impl ExecutionLayer { .collect(), ) .map_err(ApiError::DeserializeWithdrawals)?; - ExecutionPayload::Capella(ExecutionPayloadCapella { parent_hash: capella_block.parent_hash, fee_recipient: capella_block.fee_recipient, @@ -1715,12 +1713,10 @@ impl ExecutionLayer { base_fee_per_gas: capella_block.base_fee_per_gas, block_hash: capella_block.block_hash, transactions, - #[cfg(feature = "withdrawals")] withdrawals, }) } ExecutionBlockWithTransactions::Eip4844(eip4844_block) => { - #[cfg(feature = "withdrawals")] let withdrawals = VariableList::new( eip4844_block .withdrawals @@ -1729,7 +1725,6 @@ impl ExecutionLayer { .collect(), ) .map_err(ApiError::DeserializeWithdrawals)?; - ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { parent_hash: eip4844_block.parent_hash, fee_recipient: eip4844_block.fee_recipient, @@ -1746,7 +1741,6 @@ impl ExecutionLayer { excess_data_gas: eip4844_block.excess_data_gas, block_hash: eip4844_block.block_hash, transactions, - #[cfg(feature = "withdrawals")] withdrawals, }) } diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index f0f84491258..e552b7ca7ab 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -103,10 +103,7 @@ impl MockExecutionLayer { prev_randao, Address::repeat_byte(42), // FIXME: think about how to handle different forks / withdrawals here.. - #[cfg(feature = "withdrawals")] Some(vec![]), - #[cfg(not(feature = "withdrawals"))] - None, ); // Insert a proposer to ensure the fork choice updated command works. diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 6764784a54e..e5336ef6ad4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1125,9 +1125,6 @@ pub fn serve( .map(|()| warp::reply()) }, ); - /* - * beacon/blinded_blocks - */ // POST beacon/blinded_blocks let post_beacon_blinded_blocks = eth_v1 diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index f1c9b5331fd..4ddef78d8c0 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -9,10 +9,10 @@ use slot_clock::SlotClock; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; +use types::signed_block_and_blobs::BlockWrapper; use types::{ - AbstractExecPayload, BlindedPayload, BlobsSidecar, EthSpec, ExecPayload, ExecutionBlockHash, - FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, - SignedBeaconBlockEip4844, + AbstractExecPayload, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, FullPayload, + Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, }; use warp::Rejection; @@ -32,31 +32,38 @@ pub async fn publish_block( // Send the block, regardless of whether or not it is valid. The API // specification is very clear that this is the desired behaviour. 
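// Condensed sketch of the new publishing flow introduced in the hunk below
// (kept as comments; the names are those used by the patch, with error
// handling abbreviated). An EIP-4844 block is published together with the
// sidecar cached at proposal time, and the same `BlockWrapper` is then passed
// to `process_block` so import sees the blobs as well:
//
//   let wrapped_block: BlockWrapper<T::EthSpec> =
//       if matches!(block.as_ref(), &SignedBeaconBlock::Eip4844(_)) {
//           let sidecar = chain.blob_cache.pop(&block_root).ok_or(/* no blob cached */)?;
//           let block_and_blobs = SignedBeaconBlockAndBlobsSidecar {
//               beacon_block: block,
//               blobs_sidecar: Arc::new(sidecar),
//           };
//           publish_pubsub_message(
//               network_tx,
//               PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blobs.clone()),
//           )?;
//           block_and_blobs.into()
//       } else {
//           publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?;
//           block.into()
//       };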
- let message = if matches!(block.as_ref(), &SignedBeaconBlock::Eip4844(_)) { - if let Some(sidecar) = chain.blob_cache.pop(&block_root) { - PubsubMessage::BeaconBlockAndBlobsSidecars(SignedBeaconBlockAndBlobsSidecar { - beacon_block: block.clone(), - blobs_sidecar: Arc::new(sidecar), - }) + let wrapped_block: BlockWrapper = + if matches!(block.as_ref(), &SignedBeaconBlock::Eip4844(_)) { + if let Some(sidecar) = chain.blob_cache.pop(&block_root) { + let block_and_blobs = SignedBeaconBlockAndBlobsSidecar { + beacon_block: block, + blobs_sidecar: Arc::new(sidecar), + }; + crate::publish_pubsub_message( + network_tx, + PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blobs.clone()), + )?; + block_and_blobs.into() + } else { + //FIXME(sean): This should probably return a specific no-blob-cached error code, beacon API coordination required + return Err(warp_utils::reject::broadcast_without_import(format!( + "no blob cached for block" + ))); + } } else { - //FIXME(sean): This should probably return a specific no-blob-cached error code, beacon API coordination required - return Err(warp_utils::reject::broadcast_without_import(format!( - "no blob cached for block" - ))); - } - } else { - PubsubMessage::BeaconBlock(block.clone()) - }; - crate::publish_pubsub_message(network_tx, message)?; + crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; + block.into() + }; // Determine the delay after the start of the slot, register it with metrics. + let block = wrapped_block.block(); let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); match chain .process_block( block_root, - block.clone(), + wrapped_block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes, ) diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 17a3624afed..04d527d531c 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -5,7 +5,7 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy}, }; use eth2::types::DepositContractData; -use execution_layer::{ForkChoiceState, PayloadAttributes}; +use execution_layer::{ForkchoiceState, PayloadAttributes}; use parking_lot::Mutex; use slot_clock::SlotClock; use state_processing::state_advance::complete_state_advance; @@ -55,7 +55,7 @@ struct ForkChoiceUpdates { #[derive(Debug, Clone)] struct ForkChoiceUpdateMetadata { received_at: Duration, - state: ForkChoiceState, + state: ForkchoiceState, payload_attributes: Option, } @@ -86,7 +86,7 @@ impl ForkChoiceUpdates { .payload_attributes .as_ref() .map_or(false, |payload_attributes| { - payload_attributes.timestamp == proposal_timestamp + payload_attributes.timestamp() == proposal_timestamp }) }) .cloned() @@ -342,7 +342,7 @@ pub async fn proposer_boost_re_org_test( .lock() .set_forkchoice_updated_hook(Box::new(move |state, payload_attributes| { let received_at = chain_inner.slot_clock.now_duration().unwrap(); - let state = ForkChoiceState::from(state); + let state = ForkchoiceState::from(state); let payload_attributes = payload_attributes.map(Into::into); let update = ForkChoiceUpdateMetadata { received_at, @@ -521,16 +521,20 @@ pub async fn proposer_boost_re_org_test( if !misprediction { assert_eq!( - lookahead, payload_lookahead, + lookahead, + payload_lookahead, "lookahead={lookahead:?}, timestamp={}, prev_randao={:?}", - payload_attribs.timestamp, 
payload_attribs.prev_randao, + payload_attribs.timestamp(), + payload_attribs.prev_randao(), ); } else { // On a misprediction we issue the first fcU 500ms before creating a block! assert_eq!( - lookahead, fork_choice_lookahead, + lookahead, + fork_choice_lookahead, "timestamp={}, prev_randao={:?}", - payload_attribs.timestamp, payload_attribs.prev_randao, + payload_attribs.timestamp(), + payload_attribs.prev_randao(), ); } } diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 8644dcbf1ad..86733cf63ad 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1372,9 +1372,9 @@ impl ApiTester { pub async fn test_get_config_spec(self) -> Self { let result = self .client - .get_config_spec::() + .get_config_spec::() .await - .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .map(|res| ConfigAndPreset::Capella(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&self.chain.spec, None); diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index f85ec980df0..64401308cd5 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -473,6 +473,11 @@ impl PeerManager { RPCError::ErrorResponse(code, _) => match code { RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError, RPCResponseErrorCode::ResourceUnavailable => { + // Don't ban on this because we want to retry with a block by root request. + if matches!(protocol, Protocol::BlobsByRoot) { + return; + } + // NOTE: This error only makes sense for the `BlocksByRange` and `BlocksByRoot` // protocols. // diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index a70aac34961..eb5cc7f27fa 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -73,7 +73,7 @@ impl Encoder> for SSZSnappyInboundCodec< RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), - RPCResponse::BlobsByRoot(res) => res.as_ssz_bytes(), + RPCResponse::BlockAndBlobsByRoot(res) => res.as_ssz_bytes(), RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::MetaData(res) => @@ -298,8 +298,8 @@ impl Decoder for SSZSnappyOutboundCodec { .rpc_response_limits::(&self.fork_context); if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { return Err(RPCError::InvalidData(format!( - "RPC response length is out of bounds, length {}", - length + "RPC response length is out of bounds, length {}, max {}, min {}", + length, ssz_limits.max, ssz_limits.min ))); } // Calculate worst case compression length for given uncompressed length @@ -439,6 +439,10 @@ fn context_bytes( SignedBeaconBlock::Base { .. 
} => Some(fork_context.genesis_context_bytes()), }; } + if let RPCResponse::BlobsByRange(_) | RPCResponse::BlockAndBlobsByRoot(_) = rpc_variant + { + return fork_context.to_context_bytes(ForkName::Eip4844); + } } } None @@ -531,9 +535,6 @@ fn handle_v2_request( Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, }))), - Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange( - BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, - ))), // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. Protocol::MetaData => { @@ -585,7 +586,7 @@ fn handle_v1_response( )))), _ => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, - "Invalid forkname for blobsbyrange".to_string(), + "Invalid fork name for blobs by range".to_string(), )), } } @@ -597,12 +598,12 @@ fn handle_v1_response( ) })?; match fork_name { - ForkName::Eip4844 => Ok(Some(RPCResponse::BlobsByRoot(Arc::new( + ForkName::Eip4844 => Ok(Some(RPCResponse::BlockAndBlobsByRoot( SignedBeaconBlockAndBlobsSidecar::from_ssz_bytes(decoded_buffer)?, - )))), + ))), _ => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, - "Invalid forkname for blobsbyroot".to_string(), + "Invalid fork name for block and blobs by root".to_string(), )), } } @@ -716,9 +717,13 @@ fn context_bytes_to_fork_name( .from_context_bytes(context_bytes) .cloned() .ok_or_else(|| { + let encoded = hex::encode(context_bytes); RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, - "Context bytes does not correspond to a valid fork".to_string(), + format!( + "Context bytes {} do not correspond to a valid fork", + encoded + ), ) }) } @@ -826,12 +831,25 @@ mod tests { } } + fn blbrange_request() -> BlobsByRangeRequest { + BlobsByRangeRequest { + start_slot: 0, + count: 10, + } + } + fn bbroot_request() -> BlocksByRootRequest { BlocksByRootRequest { block_roots: VariableList::from(vec![Hash256::zero()]), } } + fn blbroot_request() -> BlobsByRootRequest { + BlobsByRootRequest { + block_roots: VariableList::from(vec![Hash256::zero()]), + } + } + fn ping_message() -> Ping { Ping { data: 1 } } @@ -1454,6 +1472,8 @@ mod tests { OutboundRequest::Goodbye(GoodbyeReason::Fault), OutboundRequest::BlocksByRange(bbrange_request()), OutboundRequest::BlocksByRoot(bbroot_request()), + OutboundRequest::BlobsByRange(blbrange_request()), + OutboundRequest::BlobsByRoot(blbroot_request()), OutboundRequest::MetaData(PhantomData::), ]; for req in requests.iter() { diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 53e6b675990..02e24d8e1d1 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -281,7 +281,7 @@ pub enum RPCResponse { LightClientBootstrap(LightClientBootstrap), /// A response to a get BLOBS_BY_ROOT request. - BlobsByRoot(Arc>), + BlockAndBlobsByRoot(SignedBeaconBlockAndBlobsSidecar), /// A PONG response to a PING request. 
Pong(Ping), @@ -372,7 +372,7 @@ impl RPCCodedResponse { RPCResponse::BlocksByRange(_) => true, RPCResponse::BlocksByRoot(_) => true, RPCResponse::BlobsByRange(_) => true, - RPCResponse::BlobsByRoot(_) => true, + RPCResponse::BlockAndBlobsByRoot(_) => true, RPCResponse::Pong(_) => false, RPCResponse::MetaData(_) => false, RPCResponse::LightClientBootstrap(_) => false, @@ -409,7 +409,7 @@ impl RPCResponse { RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange, RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, - RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, + RPCResponse::BlockAndBlobsByRoot(_) => Protocol::BlobsByRoot, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -449,7 +449,7 @@ impl std::fmt::Display for RPCResponse { RPCResponse::BlobsByRange(blob) => { write!(f, "BlobsByRange: Blob slot: {}", blob.beacon_block_slot) } - RPCResponse::BlobsByRoot(blob) => { + RPCResponse::BlockAndBlobsByRoot(blob) => { write!( f, "BlobsByRoot: Blob slot: {}", diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 549a0e72073..1164688cda8 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -22,7 +22,7 @@ use tokio_util::{ }; use types::BlobsSidecar; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Blob, EmptyBlock, EthSpec, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, }; @@ -107,6 +107,12 @@ lazy_static! { .as_ssz_bytes() .len(); + pub static ref BLOBS_SIDECAR_MIN: usize = BlobsSidecar::::empty().as_ssz_bytes().len(); + pub static ref BLOBS_SIDECAR_MAX: usize = BlobsSidecar::::max_size(); + + //FIXME(sean) these are underestimates + pub static ref SIGNED_BLOCK_AND_BLOBS_MIN: usize = *BLOBS_SIDECAR_MIN + *SIGNED_BEACON_BLOCK_BASE_MIN; + pub static ref SIGNED_BLOCK_AND_BLOBS_MAX: usize =*BLOBS_SIDECAR_MAX + *SIGNED_BEACON_BLOCK_EIP4844_MAX; } /// The maximum bytes that can be sent across the RPC pre-merge. 
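// Illustrative note on the size bounds added above (context: this crate). The
// per-protocol limits are consumed by `rpc_response_limits` earlier in this
// diff: a response whose declared uncompressed SSZ length falls outside
// [min, max] is rejected before snappy decompression. For the blob protocols
// the bounds become:
//
//   Protocol::BlobsByRange => RpcLimits::new(*BLOBS_SIDECAR_MIN, *BLOBS_SIDECAR_MAX),
//   Protocol::BlobsByRoot  => RpcLimits::new(*SIGNED_BLOCK_AND_BLOBS_MIN, *SIGNED_BLOCK_AND_BLOBS_MAX),
//
// where the BlobsByRoot pair is a simple sum of the block and sidecar bounds,
// which the FIXME above flags as an underestimate.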
@@ -261,6 +267,8 @@ impl UpgradeInfo for RPCProtocol { ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlobsByRoot, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlobsByRange, Version::V1, Encoding::SSZSnappy), ]; if self.enable_light_client_server { supported_protocols.push(ProtocolId::new( @@ -356,11 +364,10 @@ impl ProtocolId { Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), - - //FIXME(sean) add blob sizes - Protocol::BlobsByRange => rpc_block_limits_by_fork(fork_context.current_fork()), - Protocol::BlobsByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), - + Protocol::BlobsByRange => RpcLimits::new(*BLOBS_SIDECAR_MIN, *BLOBS_SIDECAR_MAX), + Protocol::BlobsByRoot => { + RpcLimits::new(*SIGNED_BLOCK_AND_BLOBS_MIN, *SIGNED_BLOCK_AND_BLOBS_MAX) + } Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -379,13 +386,16 @@ impl ProtocolId { /// Returns `true` if the given `ProtocolId` should expect `context_bytes` in the /// beginning of the stream, else returns `false`. pub fn has_context_bytes(&self) -> bool { - if self.version == Version::V2 { - match self.message_name { + match self.version { + Version::V2 => match self.message_name { Protocol::BlocksByRange | Protocol::BlocksByRoot => return true, _ => return false, - } + }, + Version::V1 => match self.message_name { + Protocol::BlobsByRange | Protocol::BlobsByRoot => return true, + _ => return false, + }, } - false } } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index b6a03302008..c9c239d8cf4 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -83,7 +83,7 @@ pub enum Response { /// A response to a LightClientUpdate request. LightClientBootstrap(LightClientBootstrap), /// A response to a get BLOBS_BY_ROOT request. - BlobsByRoot(Option>>), + BlobsByRoot(Option>), } impl std::convert::From> for RPCCodedResponse { @@ -98,7 +98,7 @@ impl std::convert::From> for RPCCodedResponse RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), }, Response::BlobsByRoot(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRoot(b)), + Some(b) => RPCCodedResponse::Success(RPCResponse::BlockAndBlobsByRoot(b)), None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRoot), }, Response::BlobsByRange(r) => match r { diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 549fb220957..d59bc4bfd6c 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -75,6 +75,8 @@ pub enum NetworkEvent { id: AppReqId, /// The peer to which this request was sent. peer_id: PeerId, + /// The error of the failed request. + error: RPCError, }, RequestReceived { /// The peer that sent the request. 
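// Sketch of why `error` is now carried on the failure event (the helper below
// is illustrative, not in the patch; the types are this crate's). The RPCError
// is threaded from the network event through the router into sync, letting a
// parent lookup distinguish "peer cannot serve blobs for this root" from a
// generic failure and retry with a block-only request:
fn should_force_block_request(error: &RPCError) -> bool {
    matches!(
        error,
        RPCError::ErrorResponse(RPCResponseErrorCode::ResourceUnavailable, _)
    )
}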
@@ -1177,9 +1179,9 @@ impl Network { &error, ConnectionDirection::Outgoing, ); - // inform failures of requests comming outside the behaviour + // inform failures of requests coming outside the behaviour if let RequestId::Application(id) = id { - Some(NetworkEvent::RPCFailed { peer_id, id }) + Some(NetworkEvent::RPCFailed { peer_id, id, error }) } else { None } @@ -1313,7 +1315,7 @@ impl Network { RPCResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } - RPCResponse::BlobsByRoot(resp) => { + RPCResponse::BlockAndBlobsByRoot(resp) => { self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp))) } // Should never be reached diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 7b9e6a7b47f..7951a072438 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -3,20 +3,16 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::TopicHash; use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage}; -use serde_derive::{Deserialize, Serialize}; use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; use std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; -use tree_hash_derive::TreeHash; use types::{ - Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName, - LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockAndBlobsSidecar, SignedBeaconBlockBase, SignedBeaconBlockCapella, - SignedBeaconBlockEip4844, SignedBeaconBlockMerge, SignedBlsToExecutionChange, + Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, + LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockAndBlobsSidecar, SignedBeaconBlockBase, + SignedBeaconBlockCapella, SignedBeaconBlockMerge, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 887ecc49776..37d6edef82f 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -1699,7 +1699,7 @@ impl BeaconProcessor { message_id, peer_id, peer_client, - BlockWrapper::Block { block }, + block.into(), work_reprocessing_tx, duplicate_cache, seen_timestamp, @@ -1721,7 +1721,7 @@ impl BeaconProcessor { message_id, peer_id, peer_client, - BlockWrapper::BlockAndBlob { block_sidecar_pair }, + block_sidecar_pair.into(), work_reprocessing_tx, duplicate_cache, seen_timestamp, diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index e0c93474512..6f433005558 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -23,7 +23,6 @@ use slog::{crit, debug, error, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::pin::Pin; -use std::sync::Arc; use std::task::Context; use std::time::Duration; use task_executor::TaskExecutor; @@ -31,7 +30,7 @@ use tokio::sync::mpsc::{self, 
Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; use types::signed_block_and_blobs::BlockWrapper; -use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SubnetId}; +use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SubnetId}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; const GOSSIP_BLOCKS: &str = "gossip_blocks"; diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index eac8175d511..2c7c94079e9 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -15,15 +15,13 @@ use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerI use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; -use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::signed_block_and_blobs::BlockWrapper; use types::{ - Attestation, AttesterSlashing, BlobsSidecar, EthSpec, Hash256, IndexedAttestation, - LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, + Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, + LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 87e8a3fc4ea..69bd7da11c6 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -12,7 +12,6 @@ use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error}; use slot_clock::SlotClock; -use ssz_types::VariableList; use std::sync::Arc; use task_executor::TaskExecutor; use types::light_client_bootstrap::LightClientBootstrap; @@ -231,10 +230,10 @@ impl Worker { Ok((Some(block), Some(blobs))) => { self.send_response( peer_id, - Response::BlobsByRoot(Some(Arc::new(SignedBeaconBlockAndBlobsSidecar { + Response::BlobsByRoot(Some(SignedBeaconBlockAndBlobsSidecar { beacon_block: block, blobs_sidecar: blobs, - }))), + })), request_id, ); send_block_count += 1; @@ -254,6 +253,14 @@ impl Worker { "peer" => %peer_id, "request_root" => ?root ); + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "No blob for requested block".into(), + request_id, + ); + send_response = false; + break; } Ok((None, Some(_))) => { debug!( @@ -425,7 +432,17 @@ impl Worker { }; // Pick out the required blocks, ignoring skip-slots. 
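// Sketch of the change that follows (the helper name is illustrative, not in
// the patch; `BeaconChain`, `Slot` and `WhenSlotSkipped` are this crate's
// types): instead of starting from `None`, `last_block_root` is seeded with
// the canonical root just before `start_slot`, so a skip slot at the very
// start of the requested range maps to `None` rather than causing the block
// from before the range to be re-sent.
fn seed_last_block_root<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    start_slot: u64,
) -> Option<Hash256> {
    start_slot.checked_sub(1).and_then(|prev_slot| {
        chain
            .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev)
            .ok()
            .flatten()
    })
}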
- let mut last_block_root = None; + let mut last_block_root = req + .start_slot + .checked_sub(1) + .map(|prev_slot| { + self.chain + .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev) + }) + .transpose() + .ok() + .flatten() + .flatten(); let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) // map skip slots to None @@ -503,6 +520,15 @@ impl Worker { "block_root" => ?root, "error" => ?e ); + + // send the stream terminator + self.send_error_response( + peer_id, + RPCResponseErrorCode::ServerError, + "Failed fetching blocks".into(), + request_id, + ); + send_response = false; break; } } @@ -554,7 +580,7 @@ impl Worker { /// Handle a `BlobsByRange` request from the peer. pub fn handle_blobs_by_range_request( self, - executor: TaskExecutor, + _executor: TaskExecutor, send_on_drop: SendOnDrop, peer_id: PeerId, request_id: PeerRequestId, @@ -594,7 +620,17 @@ impl Worker { }; // Pick out the required blocks, ignoring skip-slots. - let mut last_block_root = None; + let mut last_block_root = req + .start_slot + .checked_sub(1) + .map(|prev_slot| { + self.chain + .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev) + }) + .transpose() + .ok() + .flatten() + .flatten(); let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) // map skip slots to None @@ -619,7 +655,7 @@ impl Worker { let block_roots = block_roots.into_iter().flatten().collect::>(); let mut blobs_sent = 0; - let mut send_response = true; + let send_response = true; for root in block_roots { match self.chain.store.get_blobs(&root) { @@ -661,7 +697,7 @@ impl Worker { self.log, "BlobsByRange Response processed"; "peer" => %peer_id, - "msg" => "Failed to return all requested blocks", + "msg" => "Failed to return all requested blobs", "start_slot" => req.start_slot, "current_slot" => current_slot, "requested" => req.count, diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index dafc00bddb9..284f96da7ca 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -17,10 +17,7 @@ use slog::{debug, error, info, warn}; use std::sync::Arc; use tokio::sync::mpsc; use types::signed_block_and_blobs::BlockWrapper; -use types::{ - Epoch, Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, - SignedBeaconBlockAndBlobsSidecarDecode, -}; +use types::{Epoch, Hash256, SignedBeaconBlock}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. 
#[derive(Clone, Debug, PartialEq)] @@ -193,13 +190,8 @@ impl Worker { let unwrapped = downloaded_blocks .into_iter() - .map(|block| match block { - BlockWrapper::Block { block } => block, - //FIXME(sean) handle blobs in backfill - BlockWrapper::BlockAndBlob { - block_sidecar_pair: _, - } => todo!(), - }) + //FIXME(sean) handle blobs in backfill + .map(|block| block.block_cloned()) .collect(); match self.process_backfill_blocks(unwrapped) { diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 5675cb0adf6..31f2092049f 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -11,6 +11,7 @@ use crate::error; use crate::service::{NetworkMessage, RequestId}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::prelude::*; +use lighthouse_network::rpc::RPCError; use lighthouse_network::{ MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, }; @@ -58,6 +59,7 @@ pub enum RouterMessage { RPCFailed { peer_id: PeerId, request_id: RequestId, + error: RPCError, }, /// A gossip message has been received. The fields are: message id, the peer that sent us this /// message, the message itself and a bool which indicates if the message should be processed @@ -140,8 +142,9 @@ impl Router { RouterMessage::RPCFailed { peer_id, request_id, + error, } => { - self.processor.on_rpc_error(peer_id, request_id); + self.processor.on_rpc_error(peer_id, request_id, error); } RouterMessage::PubsubMessage(id, peer_id, gossip, should_process) => { self.handle_gossip(id, peer_id, gossip, should_process); diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 97c2b226438..d0879babacb 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -103,12 +103,13 @@ impl Processor { /// An error occurred during an RPC request. The state is maintained by the sync manager, so /// this function notifies the sync manager of the error. - pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId) { + pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { // Check if the failed RPC belongs to sync if let RequestId::Sync(request_id) = request_id { self.send_to_sync(SyncMessage::RpcError { peer_id, request_id, + error, }); } } @@ -222,10 +223,10 @@ impl Processor { SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. } => { unreachable!("Block lookups do not request BBRange requests") } - id @ (SyncId::BackFillSync { .. } - | SyncId::RangeSync { .. } - | SyncId::BackFillSidecarPair { .. } - | SyncId::RangeSidecarPair { .. }) => id, + id @ (SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::BackFillBlobs { .. } + | SyncId::RangeBlobs { .. }) => id, }, RequestId::Router => unreachable!("All BBRange requests belong to sync"), }; @@ -257,7 +258,7 @@ impl Processor { ); if let RequestId::Sync(id) = request_id { - self.send_to_sync(SyncMessage::RpcGlob { + self.send_to_sync(SyncMessage::RpcBlobs { peer_id, request_id: id, blob_sidecar, @@ -281,10 +282,10 @@ impl Processor { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, - SyncId::BackFillSync { .. } - | SyncId::RangeSync { .. } - | SyncId::RangeSidecarPair { .. } - | SyncId::BackFillSidecarPair { .. } => { + SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::RangeBlobs { .. 
} + | SyncId::BackFillBlobs { .. } => { unreachable!("Batch syncing do not request BBRoot requests") } }, @@ -309,15 +310,15 @@ impl Processor { &mut self, peer_id: PeerId, request_id: RequestId, - block_and_blobs: Option>>, + block_and_blobs: Option>, ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, - SyncId::BackFillSync { .. } - | SyncId::RangeSync { .. } - | SyncId::RangeSidecarPair { .. } - | SyncId::BackFillSidecarPair { .. } => { + SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::RangeBlobs { .. } + | SyncId::BackFillBlobs { .. } => { unreachable!("Batch syncing does not request BBRoot requests") } }, @@ -329,7 +330,7 @@ impl Processor { "Received BlockAndBlobssByRoot Response"; "peer" => %peer_id, ); - self.send_to_sync(SyncMessage::RpcBlockAndGlob { + self.send_to_sync(SyncMessage::RpcBlockAndBlobs { peer_id, request_id, block_and_blobs, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 4568ed1a229..201494a3450 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -499,10 +499,11 @@ impl NetworkService { response, }); } - NetworkEvent::RPCFailed { id, peer_id } => { + NetworkEvent::RPCFailed { id, peer_id, error } => { self.send_to_router(RouterMessage::RPCFailed { peer_id, request_id: id, + error, }); } NetworkEvent::StatusPeer(peer_id) => { diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 76850a54546..ad1bfb1d42d 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -33,7 +33,7 @@ use types::{Epoch, EthSpec}; /// we will negatively report peers with poor bandwidth. This can be set arbitrarily high, in which /// case the responder will fill the response up to the max request size, assuming they have the /// bandwidth to do so. -pub const BACKFILL_EPOCHS_PER_BATCH: u64 = 2; +pub const BACKFILL_EPOCHS_PER_BATCH: u64 = 1; /// The maximum number of batches to queue before requesting more. 
const BACKFILL_BATCH_BUFFER_SIZE: u8 = 20; @@ -536,7 +536,7 @@ impl BackFillSync { let process_id = ChainSegmentProcessId::BackSyncBatchId(batch_id); self.current_processing_batch = Some(batch_id); - let work_event = BeaconWorkEvent::chain_segment(process_id, blocks.into_wrapped_blocks()); + let work_event = BeaconWorkEvent::chain_segment(process_id, blocks); if let Err(e) = network.processor_channel().try_send(work_event) { crit!(self.log, "Failed to send backfill segment to processor."; "msg" => "process_batch", "error" => %e, "batch" => self.processing_target); diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 3b0600775f8..84b49e25f11 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -4,14 +4,12 @@ use std::time::Duration; use beacon_chain::{BeaconChainTypes, BlockError}; use fnv::FnvHashMap; -use futures::StreamExt; -use itertools::{Either, Itertools}; +use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; use slog::{debug, error, trace, warn, Logger}; use smallvec::SmallVec; -use std::sync::Arc; -use store::{Hash256, SignedBeaconBlock}; +use store::Hash256; use types::signed_block_and_blobs::BlockWrapper; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; @@ -40,6 +38,13 @@ pub type RootBlockTuple = (Hash256, BlockWrapper); const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; +/// This is used to resolve the scenario where we request a parent from before the data availability +/// boundary and need to retry with a request for only the block. +pub enum ForceBlockRequest { + True, + False, +} + pub(crate) struct BlockLookups { /// Parent chain lookups being downloaded. parent_lookups: SmallVec<[ParentLookup; 3]>, @@ -165,7 +170,7 @@ impl BlockLookups { } let parent_lookup = ParentLookup::new(block_root, block, peer_id); - self.request_parent(parent_lookup, cx); + self.request_parent(parent_lookup, cx, ForceBlockRequest::False); } /* Lookup responses */ @@ -291,7 +296,7 @@ impl BlockLookups { cx.report_peer(peer_id, PeerAction::LowToleranceError, e); // We try again if possible. - self.request_parent(parent_lookup, cx); + self.request_parent(parent_lookup, cx, ForceBlockRequest::False); } VerifyError::PreviousFailure { parent_root } => { debug!( @@ -367,7 +372,7 @@ impl BlockLookups { { let parent_lookup = self.parent_lookups.remove(pos); trace!(self.log, "Parent lookup's peer disconnected"; &parent_lookup); - self.request_parent(parent_lookup, cx); + self.request_parent(parent_lookup, cx, ForceBlockRequest::False); } } @@ -377,6 +382,7 @@ impl BlockLookups { id: Id, peer_id: PeerId, cx: &mut SyncNetworkContext, + error: RPCError, ) { if let Some(pos) = self .parent_lookups @@ -386,7 +392,19 @@ impl BlockLookups { let mut parent_lookup = self.parent_lookups.remove(pos); parent_lookup.download_failed(); trace!(self.log, "Parent lookup request failed"; &parent_lookup); - self.request_parent(parent_lookup, cx); + + // `ResourceUnavailable` indicates we requested a parent block from prior to the 4844 fork epoch. + let force_block_request = if let RPCError::ErrorResponse( + RPCResponseErrorCode::ResourceUnavailable, + _, + ) = error + { + debug!(self.log, "RPC parent lookup for block and blobs failed. 
Retrying the request for just a block"; "peer_id" => %peer_id); + ForceBlockRequest::True + } else { + ForceBlockRequest::False + }; + self.request_parent(parent_lookup, cx, force_block_request); } else { return debug!(self.log, "RPC failure for a parent lookup request that was not found"; "peer_id" => %peer_id); }; @@ -542,7 +560,7 @@ impl BlockLookups { // need to keep looking for parents // add the block back to the queue and continue the search parent_lookup.add_block(block); - self.request_parent(parent_lookup, cx); + self.request_parent(parent_lookup, cx, ForceBlockRequest::False); } BlockProcessResult::Ok | BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => { @@ -604,7 +622,7 @@ impl BlockLookups { // Try again if possible parent_lookup.processing_failed(); - self.request_parent(parent_lookup, cx); + self.request_parent(parent_lookup, cx, ForceBlockRequest::False); } BlockProcessResult::Ignored => { // Beacon processor signalled to ignore the block processing result. @@ -697,8 +715,9 @@ impl BlockLookups { &mut self, mut parent_lookup: ParentLookup, cx: &mut SyncNetworkContext, + force_block_request: ForceBlockRequest, ) { - match parent_lookup.request_parent(cx) { + match parent_lookup.request_parent(cx, force_block_request) { Err(e) => { debug!(self.log, "Failed to request parent"; &parent_lookup, "error" => e.as_static()); match e { diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index 0e9036ceee9..2aabbb563d1 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,11 +1,11 @@ use super::RootBlockTuple; use beacon_chain::BeaconChainTypes; use lighthouse_network::PeerId; -use std::sync::Arc; -use store::{Hash256, SignedBeaconBlock}; +use store::Hash256; use strum::IntoStaticStr; use types::signed_block_and_blobs::BlockWrapper; +use crate::sync::block_lookups::ForceBlockRequest; use crate::sync::{ manager::{Id, SLOT_IMPORT_TOLERANCE}, network_context::SyncNetworkContext, @@ -72,14 +72,18 @@ impl ParentLookup { } /// Attempts to request the next unknown parent. If the request fails, it should be removed. 
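The retry wiring above hinges on one decision: a `ResourceUnavailable` error response means the peer cannot serve blobs for a parent from before the 4844 fork, so the lookup should be retried as a blocks-only request. A freestanding sketch of that decision, with stand-in enums rather than the crate's `RPCError`:

    #[derive(Debug, PartialEq)]
    enum RpcFailure {
        ResourceUnavailable,
        Other,
    }

    #[derive(Debug, PartialEq)]
    enum ForceBlockRequest {
        True,
        False,
    }

    // Downgrade to a blocks-only retry iff the peer reported the resource
    // (blobs for a pre-4844 block) as unavailable; any other failure retries
    // the original coupled request.
    fn retry_mode(failure: &RpcFailure) -> ForceBlockRequest {
        match failure {
            RpcFailure::ResourceUnavailable => ForceBlockRequest::True,
            RpcFailure::Other => ForceBlockRequest::False,
        }
    }

    fn main() {
        assert_eq!(retry_mode(&RpcFailure::ResourceUnavailable), ForceBlockRequest::True);
        assert_eq!(retry_mode(&RpcFailure::Other), ForceBlockRequest::False);
    }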
- pub fn request_parent(&mut self, cx: &mut SyncNetworkContext) -> Result<(), RequestError> { + pub fn request_parent( + &mut self, + cx: &mut SyncNetworkContext, + force_block_request: ForceBlockRequest, + ) -> Result<(), RequestError> { // check to make sure this request hasn't failed if self.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { return Err(RequestError::ChainTooLong); } let (peer_id, request) = self.current_parent_request.request_block()?; - match cx.parent_lookup_request(peer_id, request) { + match cx.parent_lookup_request(peer_id, request, force_block_request) { Ok(request_id) => { self.current_parent_request_id = Some(request_id); Ok(()) diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 0e84fb0bbc6..05df18a0dac 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -4,8 +4,7 @@ use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; use rand::seq::IteratorRandom; use ssz_types::VariableList; use std::collections::HashSet; -use std::sync::Arc; -use store::{EthSpec, Hash256, SignedBeaconBlock}; +use store::{EthSpec, Hash256}; use strum::IntoStaticStr; use types::signed_block_and_blobs::BlockWrapper; diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 21b6d6658d3..004d0479a41 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -10,11 +10,11 @@ use beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; use lighthouse_network::{NetworkGlobals, Request}; use slog::{Drain, Level}; -use slot_clock::{SlotClock, SystemTimeSlotClock}; +use slot_clock::SystemTimeSlotClock; use store::MemoryStore; use tokio::sync::mpsc; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::{EthSpec, MainnetEthSpec, MinimalEthSpec as E, Slot}; +use types::MinimalEthSpec as E; type T = Witness, E, MemoryStore, MemoryStore>; diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs new file mode 100644 index 00000000000..46ac5bd0fbd --- /dev/null +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -0,0 +1,69 @@ +use std::{collections::VecDeque, sync::Arc}; + +use types::{signed_block_and_blobs::BlockWrapper, BlobsSidecar, EthSpec, SignedBeaconBlock}; + +#[derive(Debug, Default)] +pub struct BlocksAndBlobsRequestInfo { + /// Blocks we have received awaiting their corresponding sidecar. + accumulated_blocks: VecDeque>>, + /// Sidecars we have received awaiting their corresponding block. + accumulated_sidecars: VecDeque>>, + /// Whether the individual RPC request for blocks is finished or not. + is_blocks_stream_terminated: bool, + /// Whether the individual RPC request for sidecars is finished or not. 
+ is_sidecars_stream_terminated: bool, +} + +impl BlocksAndBlobsRequestInfo { + pub fn add_block_response(&mut self, maybe_block: Option>>) { + match maybe_block { + Some(block) => self.accumulated_blocks.push_back(block), + None => self.is_blocks_stream_terminated = true, + } + } + + pub fn add_sidecar_response(&mut self, maybe_sidecar: Option>>) { + match maybe_sidecar { + Some(sidecar) => self.accumulated_sidecars.push_back(sidecar), + None => self.is_sidecars_stream_terminated = true, + } + } + + pub fn into_responses(self) -> Result>, &'static str> { + let BlocksAndBlobsRequestInfo { + accumulated_blocks, + mut accumulated_sidecars, + .. + } = self; + + // ASSUMPTION: There can't be more blobs than blocks. i.e. sending any blob (empty + // included) for a skipped slot is not permitted. + let pairs = accumulated_blocks + .into_iter() + .map(|beacon_block| { + if accumulated_sidecars + .front() + .map(|sidecar| sidecar.beacon_block_slot == beacon_block.slot()) + .unwrap_or(false) + { + let blobs_sidecar = + accumulated_sidecars.pop_front().ok_or("missing sidecar")?; + Ok(BlockWrapper::new_with_blobs(beacon_block, blobs_sidecar)) + } else { + Ok(beacon_block.into()) + } + }) + .collect::, _>>(); + + // If accumulated sidecars is not empty, return an error. + if !accumulated_sidecars.is_empty() { + return Err("Received more sidecars than blocks"); + } + + pairs + } + + pub fn is_finished(&self) -> bool { + self.is_blocks_stream_terminated && self.is_sidecars_stream_terminated + } +} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index c55e90cf46b..5da203e0e77 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -35,20 +35,21 @@ use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; use super::block_lookups::BlockLookups; -use super::network_context::SyncNetworkContext; +use super::network_context::{BlockOrBlobs, SyncNetworkContext}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; -use crate::sync::range_sync::ExpectedBatchTy; +use crate::sync::range_sync::ByRangeRequestType; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState}; use futures::StreamExt; use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; +use lighthouse_network::rpc::RPCError; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; -use slog::{crit, debug, error, info, trace, Logger}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use std::boxed::Box; use std::ops::Sub; use std::sync::Arc; @@ -78,13 +79,13 @@ pub enum RequestId { /// Request searching for a block's parent. The id is the chain ParentLookup { id: Id }, /// Request was from the backfill sync algorithm. - BackFillSync { id: Id }, - /// Backfill request for blocks and sidecars. - BackFillSidecarPair { id: Id }, + BackFillBlocks { id: Id }, + /// Backfill request for blob sidecars. + BackFillBlobs { id: Id }, /// The request was from a chain in the range sync algorithm. - RangeSync { id: Id }, - /// The request was from a chain in range, asking for ranges of blocks and sidecars. 
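The pairing rule in `into_responses` above can be exercised without the beacon types. A self-contained model under the same assumption (no more sidecars than blocks), where a plain integer stands in for both a block's slot and a sidecar's `beacon_block_slot`, and the `bool` marks whether a block came out paired with blobs:

    use std::collections::VecDeque;

    fn pair_up(blocks: Vec<u64>, sidecars: Vec<u64>) -> Result<Vec<(u64, bool)>, &'static str> {
        let mut sidecars: VecDeque<u64> = sidecars.into();
        let pairs = blocks
            .into_iter()
            .map(|slot| {
                // A sidecar only ever pairs with the front block of equal slot.
                if sidecars.front() == Some(&slot) {
                    sidecars.pop_front();
                    (slot, true) // block + blobs
                } else {
                    (slot, false) // block only
                }
            })
            .collect();
        // Leftover sidecars have no owning block: reject the whole response.
        if !sidecars.is_empty() {
            return Err("Received more sidecars than blocks");
        }
        Ok(pairs)
    }

    fn main() {
        // Blocks at slots 1..=3, blobs only for slot 2.
        assert_eq!(
            pair_up(vec![1, 2, 3], vec![2]),
            Ok(vec![(1, false), (2, true), (3, false)])
        );
        // A sidecar that matches no block is an error.
        assert!(pair_up(vec![1], vec![5]).is_err());
    }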
- RangeSidecarPair { id: Id }, + RangeBlocks { id: Id }, + /// The request was from a chain in range, asking for ranges blob sidecars. + RangeBlobs { id: Id }, } #[derive(Debug)] @@ -102,7 +103,7 @@ pub enum SyncMessage { }, /// A blob has been received from the RPC. - RpcGlob { + RpcBlobs { request_id: RequestId, peer_id: PeerId, blob_sidecar: Option>>, @@ -110,10 +111,10 @@ pub enum SyncMessage { }, /// A block and blobs have been received from the RPC. - RpcBlockAndGlob { + RpcBlockAndBlobs { request_id: RequestId, peer_id: PeerId, - block_and_blobs: Option>>, + block_and_blobs: Option>, seen_timestamp: Duration, }, @@ -131,6 +132,7 @@ pub enum SyncMessage { RpcError { peer_id: PeerId, request_id: RequestId, + error: RPCError, }, /// A batch has been processed by the block processor thread. @@ -282,7 +284,7 @@ impl SyncManager { } /// Handles RPC errors related to requests that were emitted from the sync manager. - fn inject_error(&mut self, peer_id: PeerId, request_id: RequestId) { + fn inject_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { trace!(self.log, "Sync manager received a failed RPC"); match request_id { RequestId::SingleBlock { id } => { @@ -291,12 +293,12 @@ impl SyncManager { } RequestId::ParentLookup { id } => { self.block_lookups - .parent_lookup_failed(id, peer_id, &mut self.network); + .parent_lookup_failed(id, peer_id, &mut self.network, error); } - RequestId::BackFillSync { id } => { + RequestId::BackFillBlocks { id } => { if let Some(batch_id) = self .network - .backfill_request_failed(id, ExpectedBatchTy::OnlyBlock) + .backfill_request_failed(id, ByRangeRequestType::Blocks) { match self .backfill_sync @@ -308,10 +310,10 @@ impl SyncManager { } } - RequestId::BackFillSidecarPair { id } => { + RequestId::BackFillBlobs { id } => { if let Some(batch_id) = self .network - .backfill_request_failed(id, ExpectedBatchTy::OnlyBlockBlobs) + .backfill_request_failed(id, ByRangeRequestType::BlocksAndBlobs) { match self .backfill_sync @@ -322,10 +324,10 @@ impl SyncManager { } } } - RequestId::RangeSync { id } => { + RequestId::RangeBlocks { id } => { if let Some((chain_id, batch_id)) = self .network - .range_sync_request_failed(id, ExpectedBatchTy::OnlyBlock) + .range_sync_request_failed(id, ByRangeRequestType::Blocks) { self.range_sync.inject_error( &mut self.network, @@ -337,10 +339,10 @@ impl SyncManager { self.update_sync_state() } } - RequestId::RangeSidecarPair { id } => { + RequestId::RangeBlobs { id } => { if let Some((chain_id, batch_id)) = self .network - .range_sync_request_failed(id, ExpectedBatchTy::OnlyBlockBlobs) + .range_sync_request_failed(id, ByRangeRequestType::BlocksAndBlobs) { self.range_sync.inject_error( &mut self.network, @@ -603,7 +605,8 @@ impl SyncManager { SyncMessage::RpcError { peer_id, request_id, - } => self.inject_error(peer_id, request_id), + error, + } => self.inject_error(peer_id, request_id, error), SyncMessage::BlockProcessed { process_type, result, @@ -645,18 +648,18 @@ impl SyncManager { .block_lookups .parent_chain_processed(chain_hash, result, &mut self.network), }, - SyncMessage::RpcGlob { + SyncMessage::RpcBlobs { request_id, peer_id, blob_sidecar, seen_timestamp, - } => self.rpc_sidecar_received(request_id, peer_id, blob_sidecar, seen_timestamp), - SyncMessage::RpcBlockAndGlob { + } => self.rpc_blobs_received(request_id, peer_id, blob_sidecar, seen_timestamp), + SyncMessage::RpcBlockAndBlobs { request_id, peer_id, block_and_blobs, seen_timestamp, - } => self.rpc_block_sidecar_pair_received( + } => 
self.rpc_block_block_and_blobs_received( request_id, peer_id, block_and_blobs, @@ -731,29 +734,29 @@ impl SyncManager { RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response( id, peer_id, - beacon_block.map(|block| BlockWrapper::Block { block }), + beacon_block.map(|block| block.into()), seen_timestamp, &mut self.network, ), RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response( id, peer_id, - beacon_block.map(|block| BlockWrapper::Block { block }), + beacon_block.map(|block| block.into()), seen_timestamp, &mut self.network, ), - RequestId::BackFillSync { id } => { - if let Some((batch_id, block)) = self.network.backfill_sync_block_response( - id, - beacon_block, - ExpectedBatchTy::OnlyBlock, - ) { + RequestId::BackFillBlocks { id } => { + let is_stream_terminator = beacon_block.is_none(); + if let Some(batch_id) = self + .network + .backfill_sync_only_blocks_response(id, is_stream_terminator) + { match self.backfill_sync.on_block_response( &mut self.network, batch_id, &peer_id, id, - block, + beacon_block.map(|block| block.into()), ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), Ok(ProcessResult::Successful) => {} @@ -765,156 +768,183 @@ impl SyncManager { } } } - RequestId::RangeSync { id } => { - if let Some((chain_id, batch_id, block)) = self.network.range_sync_block_response( - id, - beacon_block, - ExpectedBatchTy::OnlyBlock, - ) { + RequestId::RangeBlocks { id } => { + let is_stream_terminator = beacon_block.is_none(); + if let Some((chain_id, batch_id)) = self + .network + .range_sync_block_response(id, is_stream_terminator) + { self.range_sync.blocks_by_range_response( &mut self.network, peer_id, chain_id, batch_id, id, - block, + beacon_block.map(|block| block.into()), ); self.update_sync_state(); } } - RequestId::BackFillSidecarPair { id } => { - if let Some((batch_id, block)) = self.network.backfill_sync_block_response( - id, - beacon_block, - ExpectedBatchTy::OnlyBlockBlobs, - ) { - match self.backfill_sync.on_block_response( - &mut self.network, - batch_id, - &peer_id, - id, - block, - ) { - Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), - Ok(ProcessResult::Successful) => {} - Err(_error) => { - // The backfill sync has failed, errors are reported - // within. - self.update_sync_state(); - } + RequestId::BackFillBlobs { id } => { + self.blobs_backfill_response(id, peer_id, beacon_block.into()) + } + RequestId::RangeBlobs { id } => { + self.blobs_range_response(id, peer_id, beacon_block.into()) + } + } + } + + /// Handles receiving a response for a range sync request that should have both blocks and + /// blobs. 
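The handler that follows replays a completed coupled batch through the pre-existing one-at-a-time entry point, appending the stream terminator by hand. A minimal model of that replay pattern, where `deliver` is a stand-in for `blocks_by_range_response`:

    fn replay<T>(blocks: Vec<T>, mut deliver: impl FnMut(Option<T>)) {
        // Re-emit each block as Some(..), then one None acting as the terminator.
        for block in blocks.into_iter().map(Some).chain(std::iter::once(None)) {
            deliver(block);
        }
    }

    fn main() {
        let mut seen = Vec::new();
        replay(vec![1, 2], |b| seen.push(b));
        assert_eq!(seen, vec![Some(1), Some(2), None]);
    }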
+ fn blobs_range_response( + &mut self, + id: Id, + peer_id: PeerId, + block_or_blob: BlockOrBlobs, + ) { + if let Some((chain_id, batch_id, block_responses)) = self + .network + .range_sync_block_and_blob_response(id, block_or_blob) + { + match block_responses { + Ok(blocks) => { + for block in blocks + .into_iter() + .map(|block| Some(block)) + // chain the stream terminator + .chain(vec![None]) + { + self.range_sync.blocks_by_range_response( + &mut self.network, + peer_id, + chain_id, + batch_id, + id, + block, + ); + self.update_sync_state(); } } + Err(e) => { + // inform range that the request needs to be treated as failed + // With time we will want to downgrade this log + warn!( + self.log, "Blocks and blobs request for range received invalid data"; + "peer_id" => %peer_id, "batch_id" => batch_id, "error" => e + ); + // TODO: penalize the peer for being a bad boy + let id = RequestId::RangeBlobs { id }; + self.inject_error(peer_id, id, RPCError::InvalidData(e.into())) + } } - RequestId::RangeSidecarPair { id } => { - if let Some((chain_id, batch_id, block)) = self.network.range_sync_block_response( - id, - beacon_block, - ExpectedBatchTy::OnlyBlockBlobs, - ) { - self.range_sync.blocks_by_range_response( - &mut self.network, - peer_id, - chain_id, - batch_id, - id, - block, + } + } + + /// Handles receiving a response for a Backfill sync request that should have both blocks and + /// blobs. + fn blobs_backfill_response( + &mut self, + id: Id, + peer_id: PeerId, + block_or_blob: BlockOrBlobs, + ) { + if let Some((batch_id, block_responses)) = self + .network + .backfill_sync_block_and_blob_response(id, block_or_blob) + { + match block_responses { + Ok(blocks) => { + for block in blocks + .into_iter() + .map(|block| Some(block)) + // chain the stream terminator + .chain(vec![None]) + { + match self.backfill_sync.on_block_response( + &mut self.network, + batch_id, + &peer_id, + id, + block, + ) { + Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), + Ok(ProcessResult::Successful) => {} + Err(_error) => { + // The backfill sync has failed, errors are reported + // within. + self.update_sync_state(); + } + } + } + } + Err(e) => { + // inform backfill that the request needs to be treated as failed + // With time we will want to downgrade this log + warn!( + self.log, "Blocks and blobs request for backfill received invalid data"; + "peer_id" => %peer_id, "batch_id" => batch_id, "error" => e ); - self.update_sync_state(); + // TODO: penalize the peer for being a bad boy + let id = RequestId::BackFillBlobs { id }; + self.inject_error(peer_id, id, RPCError::InvalidData(e.into())) } } } } - fn rpc_sidecar_received( + fn rpc_blobs_received( &mut self, request_id: RequestId, peer_id: PeerId, maybe_sidecar: Option::EthSpec>>>, - seen_timestamp: Duration, + _seen_timestamp: Duration, ) { match request_id { - RequestId::SingleBlock { id } | RequestId::ParentLookup { id } => { + RequestId::SingleBlock { .. } | RequestId::ParentLookup { .. } => { unreachable!("There is no such thing as a singular 'by root' glob request that is not accompanied by the block") } - RequestId::BackFillSync { .. } => { + RequestId::BackFillBlocks { .. 
} => { unreachable!("An only blocks request does not receive sidecars") } - RequestId::BackFillSidecarPair { id } => { - if let Some((batch_id, block)) = self - .network - .backfill_sync_sidecar_response(id, maybe_sidecar) - { - match self.backfill_sync.on_block_response( - &mut self.network, - batch_id, - &peer_id, - id, - block, - ) { - Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), - Ok(ProcessResult::Successful) => {} - Err(_error) => { - // The backfill sync has failed, errors are reported - // within. - self.update_sync_state(); - } - } - } + RequestId::BackFillBlobs { id } => { + self.blobs_backfill_response(id, peer_id, maybe_sidecar.into()) } - RequestId::RangeSync { .. } => { - unreachable!("And only blocks range request does not receive sidecars") + RequestId::RangeBlocks { .. } => { + unreachable!("Only-blocks range requests don't receive sidecars") } - RequestId::RangeSidecarPair { id } => { - if let Some((chain_id, batch_id, block)) = - self.network.range_sync_sidecar_response(id, maybe_sidecar) - { - self.range_sync.blocks_by_range_response( - &mut self.network, - peer_id, - chain_id, - batch_id, - id, - block, - ); - self.update_sync_state(); - } + RequestId::RangeBlobs { id } => { + self.blobs_range_response(id, peer_id, maybe_sidecar.into()) } } } - fn rpc_block_sidecar_pair_received( + fn rpc_block_block_and_blobs_received( &mut self, request_id: RequestId, peer_id: PeerId, - block_sidecar_pair: Option>>, + block_sidecar_pair: Option>, seen_timestamp: Duration, ) { match request_id { RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response( id, peer_id, - block_sidecar_pair.map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { - // TODO: why is this in an arc - block_sidecar_pair: (*block_sidecar_pair).clone(), - }), + block_sidecar_pair.map(|block_sidecar_pair| block_sidecar_pair.into()), seen_timestamp, &mut self.network, ), RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response( id, peer_id, - block_sidecar_pair.map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { - // TODO: why is this in an arc - block_sidecar_pair: (*block_sidecar_pair).clone(), - }), + block_sidecar_pair.map(|block_sidecar_pair| block_sidecar_pair.into()), seen_timestamp, &mut self.network, ), - RequestId::BackFillSync { .. } - | RequestId::BackFillSidecarPair { .. } - | RequestId::RangeSync { .. } - | RequestId::RangeSidecarPair { .. } => unreachable!( + RequestId::BackFillBlocks { .. } + | RequestId::BackFillBlobs { .. } + | RequestId::RangeBlocks { .. } + | RequestId::RangeBlobs { .. } => unreachable!( "since range requests are not block-glob coupled, this should never be reachable" ), } diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index dc18a5c981e..7b244bceceb 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -3,6 +3,7 @@ //! Stores the various syncing methods for the beacon chain. mod backfill_sync; mod block_lookups; +mod block_sidecar_coupling; pub mod manager; mod network_context; mod peer_sync_info; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 94801aa8711..c54b3b1a983 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1,70 +1,24 @@ //! Provides network functionality for the Syncing thread. This fundamentally wraps a network //! channel and stores a global RPC ID to perform requests. 
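A tiny model of the shared-id scheme these module docs describe, as used by the coupled requests below: one global counter hands out ids, and a coupled request files both of its RPCs under the same id so that either response stream can find the shared state. `Context` and `start_coupled` are stand-in names for this sketch:

    use std::collections::HashMap;

    struct Context {
        next_id: u32,
        // Shared per-request state, keyed by the one id both RPCs carry.
        coupled: HashMap<u32, Vec<&'static str>>,
    }

    impl Context {
        fn fresh_id(&mut self) -> u32 {
            let id = self.next_id;
            self.next_id += 1;
            id
        }

        fn start_coupled(&mut self) -> u32 {
            let id = self.fresh_id();
            // Both the BlocksByRange and the BlobsByRange RPC would be sent
            // tagged with this same id.
            self.coupled.insert(id, Vec::new());
            id
        }
    }

    fn main() {
        let mut cx = Context { next_id: 1, coupled: HashMap::new() };
        let id = cx.start_coupled();
        assert_eq!(id, 1);
        assert!(cx.coupled.contains_key(&id));
    }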
+use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo; use super::manager::{Id, RequestId as SyncRequestId}; -use super::range_sync::{BatchId, ChainId, ExpectedBatchTy}; +use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; use crate::beacon_processor::WorkEvent; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; +use crate::sync::block_lookups::ForceBlockRequest; use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState}; use fnv::FnvHashMap; use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason}; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; use slog::{debug, trace, warn}; -use slot_clock::SlotClock; use std::collections::hash_map::Entry; -use std::collections::VecDeque; use std::sync::Arc; use tokio::sync::mpsc; use types::signed_block_and_blobs::BlockWrapper; -use types::{ - BlobsSidecar, ChainSpec, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, -}; - -#[derive(Debug, Default)] -struct BlockBlobRequestInfo { - /// Blocks we have received awaiting for their corresponding sidecar. - accumulated_blocks: VecDeque>>, - /// Sidecars we have received awaiting for their corresponding block. - accumulated_sidecars: VecDeque>>, - /// Whether the individual RPC request for blocks is finished or not. - is_blocks_rpc_finished: bool, - /// Whether the individual RPC request for sidecars is finished or not. - is_sidecar_rpc_finished: bool, -} - -impl BlockBlobRequestInfo { - pub fn add_block_response(&mut self, maybe_block: Option>>) { - match maybe_block { - Some(block) => self.accumulated_blocks.push_back(block), - None => self.is_blocks_rpc_finished = true, - } - } - - pub fn add_sidecar_response(&mut self, maybe_sidecar: Option>>) { - match maybe_sidecar { - Some(sidecar) => self.accumulated_sidecars.push_back(sidecar), - None => self.is_sidecar_rpc_finished = true, - } - } - - pub fn pop_response(&mut self) -> Option> { - if !self.accumulated_blocks.is_empty() && !self.accumulated_blocks.is_empty() { - let beacon_block = self.accumulated_blocks.pop_front().expect("non empty"); - let blobs_sidecar = self.accumulated_sidecars.pop_front().expect("non empty"); - return Some(SignedBeaconBlockAndBlobsSidecar { - beacon_block, - blobs_sidecar, - }); - } - None - } - - pub fn is_finished(&self) -> bool { - self.is_blocks_rpc_finished && self.is_sidecar_rpc_finished - } -} +use types::{BlobsSidecar, EthSpec, SignedBeaconBlock}; /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id. pub struct SyncNetworkContext { @@ -84,11 +38,12 @@ pub struct SyncNetworkContext { backfill_requests: FnvHashMap, /// BlocksByRange requests paired with BlobsByRange requests made by the range. - range_sidecar_pair_requests: - FnvHashMap)>, + range_blocks_and_blobs_requests: + FnvHashMap)>, /// BlocksByRange requests paired with BlobsByRange requests made by the backfill sync. - backfill_sidecar_pair_requests: FnvHashMap)>, + backfill_blocks_and_blobs_requests: + FnvHashMap)>, /// Whether the ee is online. If it's not, we don't allow access to the /// `beacon_processor_send`. @@ -103,6 +58,24 @@ pub struct SyncNetworkContext { log: slog::Logger, } +/// Small enumeration to make dealing with block and blob requests easier. 
+pub enum BlockOrBlobs { + Block(Option>>), + Blobs(Option>>), +} + +impl From>>> for BlockOrBlobs { + fn from(block: Option>>) -> Self { + BlockOrBlobs::Block(block) + } +} + +impl From>>> for BlockOrBlobs { + fn from(blob: Option>>) -> Self { + BlockOrBlobs::Blobs(blob) + } +} + impl SyncNetworkContext { pub fn new( network_send: mpsc::UnboundedSender>, @@ -117,8 +90,8 @@ impl SyncNetworkContext { request_id: 1, range_requests: Default::default(), backfill_requests: Default::default(), - range_sidecar_pair_requests: Default::default(), - backfill_sidecar_pair_requests: Default::default(), + range_blocks_and_blobs_requests: Default::default(), + backfill_blocks_and_blobs_requests: Default::default(), execution_engine_state: EngineState::Online, // always assume `Online` at the start beacon_processor_send, chain, @@ -168,23 +141,23 @@ impl SyncNetworkContext { pub fn blocks_by_range_request( &mut self, peer_id: PeerId, - batch_type: ExpectedBatchTy, + batch_type: ByRangeRequestType, request: BlocksByRangeRequest, chain_id: ChainId, batch_id: BatchId, ) -> Result { match batch_type { - ExpectedBatchTy::OnlyBlock => { + ByRangeRequestType::Blocks => { trace!( self.log, - "Sending BlocksByRange Request"; + "Sending BlocksByRange request"; "method" => "BlocksByRange", "count" => request.count, "peer" => %peer_id, ); let request = Request::BlocksByRange(request); let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::RangeSync { id }); + let request_id = RequestId::Sync(SyncRequestId::RangeBlocks { id }); self.send_network_msg(NetworkMessage::SendRequest { peer_id, request, @@ -193,10 +166,10 @@ impl SyncNetworkContext { self.range_requests.insert(id, (chain_id, batch_id)); Ok(id) } - ExpectedBatchTy::OnlyBlockBlobs => { + ByRangeRequestType::BlocksAndBlobs => { debug!( self.log, - "Sending BlockBlock by range request"; + "Sending BlocksByRange and BlobsByRange requests"; "method" => "Mixed by range request", "count" => request.count, "peer" => %peer_id, @@ -204,7 +177,7 @@ impl SyncNetworkContext { // create the shared request id. This is fine since the rpc handles substream ids. let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::RangeSidecarPair { id }); + let request_id = RequestId::Sync(SyncRequestId::RangeBlobs { id }); // Create the blob request based on the blob request. 
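The two `From` impls above let the manager pass either half of a coupled response through a single `.into()` call. A compilable sketch of the same dispatch with stand-in `Block` and `Sidecar` types:

    struct Block(u64);
    struct Sidecar(u64);

    enum BlockOrBlobs {
        Block(Option<Block>),
        Blobs(Option<Sidecar>),
    }

    impl From<Option<Block>> for BlockOrBlobs {
        fn from(block: Option<Block>) -> Self {
            BlockOrBlobs::Block(block)
        }
    }

    impl From<Option<Sidecar>> for BlockOrBlobs {
        fn from(sidecar: Option<Sidecar>) -> Self {
            BlockOrBlobs::Blobs(sidecar)
        }
    }

    // One entry point accepts both response kinds; None marks a finished stream.
    fn handle(id: u32, response: impl Into<BlockOrBlobs>) {
        match response.into() {
            BlockOrBlobs::Block(Some(b)) => println!("request {id}: block at slot {}", b.0),
            BlockOrBlobs::Block(None) => println!("request {id}: blocks stream finished"),
            BlockOrBlobs::Blobs(Some(s)) => println!("request {id}: sidecar at slot {}", s.0),
            BlockOrBlobs::Blobs(None) => println!("request {id}: blobs stream finished"),
        }
    }

    fn main() {
        handle(7, Some(Block(1)));
        handle(7, None::<Sidecar>);
    }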
let blobs_request = Request::BlobsByRange(BlobsByRangeRequest { @@ -224,8 +197,8 @@ impl SyncNetworkContext { request: blobs_request, request_id, })?; - let block_blob_info = BlockBlobRequestInfo::default(); - self.range_sidecar_pair_requests + let block_blob_info = BlocksAndBlobsRequestInfo::default(); + self.range_blocks_and_blobs_requests .insert(id, (chain_id, batch_id, block_blob_info)); Ok(id) } @@ -236,22 +209,22 @@ impl SyncNetworkContext { pub fn backfill_blocks_by_range_request( &mut self, peer_id: PeerId, - batch_type: ExpectedBatchTy, + batch_type: ByRangeRequestType, request: BlocksByRangeRequest, batch_id: BatchId, ) -> Result { match batch_type { - ExpectedBatchTy::OnlyBlock => { + ByRangeRequestType::Blocks => { trace!( self.log, - "Sending backfill BlocksByRange Request"; + "Sending backfill BlocksByRange request"; "method" => "BlocksByRange", "count" => request.count, "peer" => %peer_id, ); let request = Request::BlocksByRange(request); let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::BackFillSync { id }); + let request_id = RequestId::Sync(SyncRequestId::BackFillBlocks { id }); self.send_network_msg(NetworkMessage::SendRequest { peer_id, request, @@ -260,10 +233,10 @@ impl SyncNetworkContext { self.backfill_requests.insert(id, batch_id); Ok(id) } - ExpectedBatchTy::OnlyBlockBlobs => { + ByRangeRequestType::BlocksAndBlobs => { debug!( self.log, - "Sending BlockBlock by range request"; + "Sending backfill BlocksByRange and BlobsByRange requests"; "method" => "Mixed by range request", "count" => request.count, "peer" => %peer_id, @@ -271,7 +244,7 @@ impl SyncNetworkContext { // create the shared request id. This is fine since the rpc handles substream ids. let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::RangeSidecarPair { id }); + let request_id = RequestId::Sync(SyncRequestId::BackFillBlobs { id }); // Create the blob request based on the blob request. let blobs_request = Request::BlobsByRange(BlobsByRangeRequest { @@ -291,78 +264,51 @@ impl SyncNetworkContext { request: blobs_request, request_id, })?; - let block_blob_info = BlockBlobRequestInfo::default(); - self.backfill_sidecar_pair_requests + let block_blob_info = BlocksAndBlobsRequestInfo::default(); + self.backfill_blocks_and_blobs_requests .insert(id, (batch_id, block_blob_info)); Ok(id) } } } - /// Received a blocks by range response. + /// Response for a request that is only for blocks. 
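The blocks-only response paths that follow share one bookkeeping rule: a normal response only peeks at the pending-request entry, while the stream terminator removes it, so stray messages arriving afterwards find nothing. A minimal model over a plain map (`on_response` is a stand-in name):

    use std::collections::HashMap;

    fn on_response(
        pending: &mut HashMap<u32, &'static str>,
        request_id: u32,
        is_stream_terminator: bool,
    ) -> Option<&'static str> {
        if is_stream_terminator {
            pending.remove(&request_id) // last chance to resolve this id
        } else {
            pending.get(&request_id).copied() // keep the entry for later blocks
        }
    }

    fn main() {
        let mut pending = HashMap::from([(1u32, "batch-a")]);
        assert_eq!(on_response(&mut pending, 1, false), Some("batch-a")); // a block
        assert_eq!(on_response(&mut pending, 1, true), Some("batch-a")); // terminator
        assert_eq!(on_response(&mut pending, 1, false), None); // entry is gone
    }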
pub fn range_sync_block_response( &mut self, request_id: Id, - maybe_block: Option>>, - batch_type: ExpectedBatchTy, - ) -> Option<(ChainId, BatchId, Option>)> { - match batch_type { - ExpectedBatchTy::OnlyBlockBlobs => { - match self.range_sidecar_pair_requests.entry(request_id) { - Entry::Occupied(mut entry) => { - let (chain_id, batch_id, info) = entry.get_mut(); - let chain_id = chain_id.clone(); - let batch_id = batch_id.clone(); - info.add_block_response(maybe_block); - let maybe_block = info.pop_response().map(|block_sidecar_pair| { - BlockWrapper::BlockAndBlob { block_sidecar_pair } - }); - if info.is_finished() { - entry.remove(); - } - Some((chain_id, batch_id, maybe_block)) - } - Entry::Vacant(_) => None, - } - } - ExpectedBatchTy::OnlyBlock => { - // if the request is just for blocks then it can be removed on a stream termination - match maybe_block { - Some(block) => { - self.range_requests - .get(&request_id) - .cloned() - .map(|(chain_id, batch_id)| { - (chain_id, batch_id, Some(BlockWrapper::Block { block })) - }) - } - None => self - .range_requests - .remove(&request_id) - .map(|(chain_id, batch_id)| (chain_id, batch_id, None)), - } - } + is_stream_terminator: bool, + ) -> Option<(ChainId, BatchId)> { + if is_stream_terminator { + self.range_requests.remove(&request_id) + } else { + self.range_requests.get(&request_id).copied() } } - pub fn range_sync_sidecar_response( + /// Received a blocks by range response for a request that couples blocks and blobs. + pub fn range_sync_block_and_blob_response( &mut self, request_id: Id, - maybe_sidecar: Option>>, - ) -> Option<(ChainId, BatchId, Option>)> { - match self.range_sidecar_pair_requests.entry(request_id) { + block_or_blob: BlockOrBlobs, + ) -> Option<( + ChainId, + BatchId, + Result>, &'static str>, + )> { + match self.range_blocks_and_blobs_requests.entry(request_id) { Entry::Occupied(mut entry) => { - let (chain_id, batch_id, info) = entry.get_mut(); - let chain_id = chain_id.clone(); - let batch_id = batch_id.clone(); - info.add_sidecar_response(maybe_sidecar); - let maybe_block = info - .pop_response() - .map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { block_sidecar_pair }); + let (_, _, info) = entry.get_mut(); + match block_or_blob { + BlockOrBlobs::Block(maybe_block) => info.add_block_response(maybe_block), + BlockOrBlobs::Blobs(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), + } if info.is_finished() { - entry.remove(); + // If the request is finished, dequeue everything + let (chain_id, batch_id, info) = entry.remove(); + Some((chain_id, batch_id, info.into_responses())) + } else { + None } - Some((chain_id, batch_id, maybe_block)) } Entry::Vacant(_) => None, } @@ -371,90 +317,66 @@ impl SyncNetworkContext { pub fn range_sync_request_failed( &mut self, request_id: Id, - batch_type: ExpectedBatchTy, + batch_type: ByRangeRequestType, ) -> Option<(ChainId, BatchId)> { match batch_type { - ExpectedBatchTy::OnlyBlockBlobs => self - .range_sidecar_pair_requests + ByRangeRequestType::BlocksAndBlobs => self + .range_blocks_and_blobs_requests .remove(&request_id) .map(|(chain_id, batch_id, _info)| (chain_id, batch_id)), - ExpectedBatchTy::OnlyBlock => self.range_requests.remove(&request_id), + ByRangeRequestType::Blocks => self.range_requests.remove(&request_id), } } pub fn backfill_request_failed( &mut self, request_id: Id, - batch_type: ExpectedBatchTy, + batch_type: ByRangeRequestType, ) -> Option { match batch_type { - ExpectedBatchTy::OnlyBlockBlobs => self - .backfill_sidecar_pair_requests + 
ByRangeRequestType::BlocksAndBlobs => self + .backfill_blocks_and_blobs_requests .remove(&request_id) .map(|(batch_id, _info)| batch_id), - ExpectedBatchTy::OnlyBlock => self.backfill_requests.remove(&request_id), + ByRangeRequestType::Blocks => self.backfill_requests.remove(&request_id), } } - /// Received a blocks by range response. - pub fn backfill_sync_block_response( + /// Response for a request that is only for blocks. + pub fn backfill_sync_only_blocks_response( &mut self, request_id: Id, - maybe_block: Option>>, - batch_type: ExpectedBatchTy, - ) -> Option<(BatchId, Option>)> { - match batch_type { - ExpectedBatchTy::OnlyBlockBlobs => { - match self.backfill_sidecar_pair_requests.entry(request_id) { - Entry::Occupied(mut entry) => { - let (batch_id, info) = entry.get_mut(); - let batch_id = batch_id.clone(); - info.add_block_response(maybe_block); - let maybe_block = info.pop_response().map(|block_sidecar_pair| { - BlockWrapper::BlockAndBlob { block_sidecar_pair } - }); - if info.is_finished() { - entry.remove(); - } - Some((batch_id, maybe_block)) - } - Entry::Vacant(_) => None, - } - } - ExpectedBatchTy::OnlyBlock => { - // if the request is just for blocks then it can be removed on a stream termination - match maybe_block { - Some(block) => self - .backfill_requests - .get(&request_id) - .cloned() - .map(|batch_id| (batch_id, Some(BlockWrapper::Block { block }))), - None => self - .backfill_requests - .remove(&request_id) - .map(|batch_id| (batch_id, None)), - } - } + is_stream_terminator: bool, + ) -> Option { + if is_stream_terminator { + self.backfill_requests + .remove(&request_id) + .map(|batch_id| batch_id) + } else { + self.backfill_requests.get(&request_id).copied() } } - pub fn backfill_sync_sidecar_response( + /// Received a blocks by range response for a request that couples blocks and blobs. + pub fn backfill_sync_block_and_blob_response( &mut self, request_id: Id, - maybe_sidecar: Option>>, - ) -> Option<(BatchId, Option>)> { - match self.backfill_sidecar_pair_requests.entry(request_id) { + block_or_blob: BlockOrBlobs, + ) -> Option<(BatchId, Result>, &'static str>)> { + match self.backfill_blocks_and_blobs_requests.entry(request_id) { Entry::Occupied(mut entry) => { - let (batch_id, info) = entry.get_mut(); - let batch_id = batch_id.clone(); - info.add_sidecar_response(maybe_sidecar); - let maybe_block = info - .pop_response() - .map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { block_sidecar_pair }); + let (_, info) = entry.get_mut(); + match block_or_blob { + BlockOrBlobs::Block(maybe_block) => info.add_block_response(maybe_block), + BlockOrBlobs::Blobs(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), + } if info.is_finished() { - entry.remove(); + // If the request is finished, dequeue everything + let (batch_id, info) = entry.remove(); + Some((batch_id, info.into_responses())) + } else { + None } - Some((batch_id, maybe_block)) } Entry::Vacant(_) => None, } @@ -504,11 +426,13 @@ impl SyncNetworkContext { &mut self, peer_id: PeerId, request: BlocksByRootRequest, + force_block_request: ForceBlockRequest, ) -> Result { let request = if self .chain .is_data_availability_check_required() .map_err(|_| "Unable to read slot clock")? + && matches!(force_block_request, ForceBlockRequest::False) { trace!( self.log, @@ -608,33 +532,29 @@ impl SyncNetworkContext { id } - pub fn batch_type(&self, epoch: types::Epoch) -> ExpectedBatchTy { - // Keep tests only for blocks. 
+ /// Check whether a batch for this epoch (and only this epoch) should request just blocks or + /// blocks and blobs. + pub fn batch_type(&self, epoch: types::Epoch) -> ByRangeRequestType { + const _: () = assert!( + super::backfill_sync::BACKFILL_EPOCHS_PER_BATCH == 1 + && super::range_sync::EPOCHS_PER_BATCH == 1, + "To deal with alignment with 4844 boundaries, batches need to be of just one epoch" + ); #[cfg(test)] { - return ExpectedBatchTy::OnlyBlock; + // Keep tests only for blocks. + return ByRangeRequestType::Blocks; } #[cfg(not(test))] { - use super::range_sync::EPOCHS_PER_BATCH; - assert_eq!( - EPOCHS_PER_BATCH, 1, - "If this is not one, everything will fail horribly" - ); - - // Here we need access to the beacon chain, check the fork boundary, the current epoch, the - // blob period to serve and check with that if the batch is a blob batch or not. - // NOTE: This would carelessly assume batch sizes are always 1 epoch, to avoid needing to - // align with the batch boundary. - if let Some(data_availability_boundary) = self.chain.data_availability_boundary() { if epoch >= data_availability_boundary { - ExpectedBatchTy::OnlyBlockBlobs + ByRangeRequestType::BlocksAndBlobs } else { - ExpectedBatchTy::OnlyBlock + ByRangeRequestType::Blocks } } else { - ExpectedBatchTy::OnlyBlock + ByRangeRequestType::Blocks } } } diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index b0d266e0740..184dcffc47d 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -4,9 +4,9 @@ use lighthouse_network::PeerId; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; -use std::sync::Arc; +use strum::Display; use types::signed_block_and_blobs::BlockWrapper; -use types::{Epoch, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, Slot}; +use types::{Epoch, EthSpec, Slot}; /// The number of times to retry a batch before it is considered failed. const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5; @@ -15,35 +15,12 @@ const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5; /// after `MAX_BATCH_PROCESSING_ATTEMPTS` times, it is considered faulty. const MAX_BATCH_PROCESSING_ATTEMPTS: u8 = 3; -pub enum BatchTy { - Blocks(Vec>>), - BlocksAndBlobs(Vec>), -} - -impl BatchTy { - pub fn into_wrapped_blocks(self) -> Vec> { - match self { - BatchTy::Blocks(blocks) => blocks - .into_iter() - .map(|block| BlockWrapper::Block { block }) - .collect(), - BatchTy::BlocksAndBlobs(block_sidecar_pair) => block_sidecar_pair - .into_iter() - .map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { block_sidecar_pair }) - .collect(), - } - } -} - -/// Error representing a batch with mixed block types. -#[derive(Debug)] -pub struct MixedBlockTyErr; - /// Type of expected batch. -#[derive(Debug, Clone)] -pub enum ExpectedBatchTy { - OnlyBlockBlobs, - OnlyBlock, +#[derive(Debug, Copy, Clone, Display)] +#[strum(serialize_all = "snake_case")] +pub enum ByRangeRequestType { + BlocksAndBlobs, + Blocks, } /// Allows customisation of the above constants used in other sync methods such as BackFillSync. @@ -129,7 +106,7 @@ pub struct BatchInfo { /// State of the batch. state: BatchState, /// Whether this batch contains all blocks or all blocks and blobs. 
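Given the one-epoch batch invariant asserted above, a whole batch sits on one side of the data availability boundary and can be classified by its epoch alone. A sketch of that decision, mirroring (not reproducing) `batch_type`, with plain integers for epochs:

    #[derive(Debug, PartialEq)]
    enum ByRangeRequestType {
        Blocks,
        BlocksAndBlobs,
    }

    fn batch_type(epoch: u64, data_availability_boundary: Option<u64>) -> ByRangeRequestType {
        match data_availability_boundary {
            // At or after the boundary, blobs are requested alongside blocks.
            Some(boundary) if epoch >= boundary => ByRangeRequestType::BlocksAndBlobs,
            // Before the boundary, or with no 4844 fork scheduled: blocks only.
            _ => ByRangeRequestType::Blocks,
        }
    }

    fn main() {
        assert_eq!(batch_type(10, None), ByRangeRequestType::Blocks);
        assert_eq!(batch_type(9, Some(10)), ByRangeRequestType::Blocks);
        assert_eq!(batch_type(10, Some(10)), ByRangeRequestType::BlocksAndBlobs);
    }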
- batch_type: ExpectedBatchTy, + batch_type: ByRangeRequestType, /// Pin the generic marker: std::marker::PhantomData, } @@ -178,7 +155,7 @@ impl BatchInfo { /// fork boundary will be of mixed type (all blocks and one last blockblob), and I don't want to /// deal with this for now. /// This means finalization might be slower in eip4844 - pub fn new(start_epoch: &Epoch, num_of_epochs: u64, batch_type: ExpectedBatchTy) -> Self { + pub fn new(start_epoch: &Epoch, num_of_epochs: u64, batch_type: ByRangeRequestType) -> Self { let start_slot = start_epoch.start_slot(T::slots_per_epoch()); let end_slot = start_slot + num_of_epochs * T::slots_per_epoch(); BatchInfo { @@ -241,13 +218,13 @@ impl BatchInfo { } /// Returns a BlocksByRange request associated with the batch. - pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ExpectedBatchTy) { + pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) { ( BlocksByRangeRequest { start_slot: self.start_slot.into(), count: self.end_slot.sub(self.start_slot).into(), }, - self.batch_type.clone(), + self.batch_type, ) } @@ -406,30 +383,11 @@ impl BatchInfo { } } - pub fn start_processing(&mut self) -> Result, WrongState> { + pub fn start_processing(&mut self) -> Result>, WrongState> { match self.state.poison() { BatchState::AwaitingProcessing(peer, blocks) => { self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); - match self.batch_type { - ExpectedBatchTy::OnlyBlockBlobs => { - let blocks = blocks.into_iter().map(|block| { - let BlockWrapper::BlockAndBlob { block_sidecar_pair: block_and_blob } = block else { - panic!("Batches should never have a mixed type. This is a bug. Contact D") - }; - block_and_blob - }).collect(); - Ok(BatchTy::BlocksAndBlobs(blocks)) - } - ExpectedBatchTy::OnlyBlock => { - let blocks = blocks.into_iter().map(|block| { - let BlockWrapper::Block { block } = block else { - panic!("Batches should never have a mixed type. This is a bug. 
Contact D") - }; - block - }).collect(); - Ok(BatchTy::Blocks(blocks)) - } - } + Ok(blocks) } BatchState::Poisoned => unreachable!("Poisoned batch"), other => { @@ -557,6 +515,7 @@ impl slog::KV for BatchInfo { serializer.emit_usize("processed", self.failed_processing_attempts.len())?; serializer.emit_u8("processed_no_penalty", self.non_faulty_processing_attempts)?; serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; + serializer.emit_arguments("batch_ty", &format_args!("{}", self.batch_type))?; slog::Result::Ok(()) } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 199be788e70..d60de322467 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,5 +1,4 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; -use super::BatchTy; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::sync::{ manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult, @@ -137,10 +136,16 @@ impl SyncingChain { let id = SyncingChain::::id(&target_head_root, &target_head_slot); + let target_slot = if is_finalized_segment { + target_head_slot + (2 * T::EthSpec::slots_per_epoch()) + 1 + } else { + target_head_slot + }; + SyncingChain { id, start_epoch, - target_head_slot, + target_head_slot: target_slot, target_head_root, batches: BTreeMap::new(), peers, @@ -327,7 +332,7 @@ impl SyncingChain { let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized); self.current_processing_batch = Some(batch_id); - let work_event = BeaconWorkEvent::chain_segment(process_id, blocks.into_wrapped_blocks()); + let work_event = BeaconWorkEvent::chain_segment(process_id, blocks); if let Err(e) = beacon_processor_send.try_send(work_event) { crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch", diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index 28426032191..d0f2f9217eb 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -9,8 +9,8 @@ mod range; mod sync_type; pub use batch::{ - BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, BatchTy, - ExpectedBatchTy, + BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, + ByRangeRequestType, }; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; pub use range::RangeSync; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 73e6f49eb0e..09d93b0e8f3 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -373,7 +373,7 @@ where #[cfg(test)] mod tests { use crate::service::RequestId; - use crate::sync::range_sync::ExpectedBatchTy; + use crate::sync::range_sync::ByRangeRequestType; use crate::NetworkMessage; use super::*; @@ -388,12 +388,11 @@ mod tests { use slog::{o, Drain}; use tokio::sync::mpsc; - use slot_clock::{SlotClock, SystemTimeSlotClock}; + use slot_clock::SystemTimeSlotClock; use std::collections::HashSet; use std::sync::Arc; - use std::time::Duration; use store::MemoryStore; - use types::{Hash256, MainnetEthSpec, MinimalEthSpec as E}; + use types::{Hash256, MinimalEthSpec as E}; #[derive(Debug)] struct FakeStorage { @@ -686,13 +685,10 @@ mod tests { // add some peers let 
(peer1, local_info, head_info) = rig.head_peer(); range.add_peer(&mut rig.cx, local_info, peer1, head_info); - let ((chain1, batch1, _), id1) = match rig.grab_request(&peer1).0 { - RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => ( - rig.cx - .range_sync_block_response(id, None, ExpectedBatchTy::OnlyBlock) - .unwrap(), - id, - ), + let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => { + (rig.cx.range_sync_response(id, true).unwrap(), id) + } other => panic!("unexpected request {:?}", other), }; @@ -708,13 +704,10 @@ mod tests { // while the ee is offline, more peers might arrive. Add a new finalized peer. let (peer2, local_info, finalized_info) = rig.finalized_peer(); range.add_peer(&mut rig.cx, local_info, peer2, finalized_info); - let ((chain2, batch2, _), id2) = match rig.grab_request(&peer2).0 { - RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => ( - rig.cx - .range_sync_block_response(id, None, ExpectedBatchTy::OnlyBlock) - .unwrap(), - id, - ), + let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => { + (rig.cx.range_sync_response(id, true).unwrap(), id) + } other => panic!("unexpected request {:?}", other), }; diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index b3e8e1fc6b5..897f6b020c7 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -28,5 +28,4 @@ directory = { path = "../../common/directory" } strum = { version = "0.24.0", features = ["derive"] } [features] -withdrawals = ["state_processing/withdrawals", "types/withdrawals"] withdrawals-processing = ["state_processing/withdrawals-processing"] \ No newline at end of file diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 732795fce2d..00aa0b2af13 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -503,9 +503,9 @@ impl, Cold: ItemStore> HotColdDB } pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { - if let Some(blobs) = self.blob_cache.lock().get(block_root) { - Ok(Some(blobs.clone())) - } else if let Some(bytes) = self + // FIXME(sean) I was attempting to use a blob cache here but was getting deadlocks, + // may want to attempt to use one again + if let Some(bytes) = self .hot_db .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? { diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index d9041dd6361..e940c0f25ec 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -7,6 +7,7 @@ //! //! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See //! tests for implementation examples. 
+#![allow(dead_code)] #[macro_use] extern crate lazy_static; diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 12c56284966..ca35bc0b222 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -105,10 +105,8 @@ where pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, // Withdrawals - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_index: u64, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_validator_index: u64, } @@ -199,7 +197,6 @@ impl PartialBeaconState { latest_execution_payload_header ] ), - #[cfg(feature = "withdrawals")] BeaconState::Capella(s) => impl_from_state_forgetful!( s, outer, @@ -216,22 +213,6 @@ impl PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = "withdrawals"))] - BeaconState::Capella(s) => impl_from_state_forgetful!( - s, - outer, - Capella, - PartialBeaconStateCapella, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), - #[cfg(feature = "withdrawals")] BeaconState::Eip4844(s) => impl_from_state_forgetful!( s, outer, @@ -248,21 +229,6 @@ impl PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = "withdrawals"))] - BeaconState::Eip4844(s) => impl_from_state_forgetful!( - s, - outer, - Eip4844, - PartialBeaconStateEip4844, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), } } @@ -450,7 +416,6 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header ] ), - #[cfg(feature = "withdrawals")] PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( inner, Capella, @@ -466,21 +431,6 @@ impl TryInto> for PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = "withdrawals"))] - PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( - inner, - Capella, - BeaconStateCapella, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), - #[cfg(feature = "withdrawals")] PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( inner, Eip4844, @@ -496,20 +446,6 @@ impl TryInto> for PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = "withdrawals"))] - PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( - inner, - Eip4844, - BeaconStateEip4844, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), }; Ok(state) } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index de2d60faec2..fc5eba98e29 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -35,5 +35,4 @@ procinfo = { version = "0.4.2", optional = true } [features] default = ["lighthouse"] lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"] -withdrawals = ["store/withdrawals", "types/withdrawals"] withdrawals-processing = ["store/withdrawals-processing"] \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml 
b/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml index 4d52cc59752..89979814756 100644 --- a/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml @@ -1,3 +1,4 @@ -- enr:-MK4QLij8YaVQ6fIi09rDuD9fufxBlCZRXwfM1q6SbNJfy5ZZdAvtlnsfqhIeI0IqeOZdaPExVCfZfR4JJTIuKXFR76GAYJGrqHnh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBCynldgwAP_QMAAAAAAAAAgmlkgnY0gmlwhCJ7uEyJc2VjcDI1NmsxoQJpeftU6RbmIhcFllICznlAMJXL3EwHEGhn73_Gk0wrCYhzeW5jbmV0cwCDdGNwgjLIg3VkcIIu4A -- enr:-JG4QK27MZvV3QbwdLt055Yhei27SjAsDXMFGCdl-Q7SDiCgR_qbiW3BmcOClehFVJgMa6IfjHeJBdbC0jvrr2NycOqGAYJLWb5kgmlkgnY0gmlwhCJE_eeJc2VjcDI1NmsxoQIecO7Y9C7J2Bs7RNxXaUkU6BfmPKIhEsDScKAoxENaRYN0Y3CCdl-DdWRwgnZf -- enr:-JG4QExcHW3vzBcE0f_r-93nSA4iBy4qNLthSyTw7p0tlPwjMl1JVTAgLSNHLLZJzOGtelJO4sw37LliuHyJ55zN5J6GAYJLWTvzgmlkgnY0gmlwhCKq1cmJc2VjcDI1NmsxoQJT2d4jtKQbHNw3tZPLhoMlR73o5LNdi-bk_bYq6siwuIN0Y3CCdl-DdWRwgnZf \ No newline at end of file +- enr:-MK4QFnkGjrt5qw5Ct5XXT9QYvfqmH8Cf6xqKuczTHWixDUZQmngyLrlftv-tMRbXDAxZZQDxNciNTjx7XpW0G4yQguGAYU2Pmnxh2F0dG5ldHOIAAAAAAAAAACEZXRoMpA3FbaXIAAAk___________gmlkgnY0gmlwhCJ5ITWJc2VjcDI1NmsxoQIlwaxycUgJ_Ht4lYdDlInbIuRxu0HcHcFbu0D7As2SLYhzeW5jbmV0cwCDdGNwgjLIg3VkcIIu4A +- enr:-MK4QBqN5McD5YYgfEnfQjWUvrSaCITjBvQevlP5q0nCVbKuZRoa2XvGWOBwTDQyLNYiqgeettVwXs0PrSc1rOY2rjKGAYU2M7A8h2F0dG5ldHOIAAAAAAAAAgCEZXRoMpA3FbaXIAAAk___________gmlkgnY0gmlwhCJ6vpeJc2VjcDI1NmsxoQNJzjxNKr7-a-iEDs0KvaL_vo1UH91kefEiWzgAdwSntYhzeW5jbmV0cw-DdGNwgjLIg3VkcIIu4A +- enr:-MK4QEzb6r0hpSyiko9u-mbEX1TzdTD9RcSiU4jhey5jJbTAHLdXNFR32CfjingVa6QMyCPtZRUfzSGbJ0ur3-Pdxe-GAYU2OwM5h2F0dG5ldHOIAAAAAAAAAACEZXRoMpA3FbaXIAAAk___________gmlkgnY0gmlwhCPi7faJc2VjcDI1NmsxoQIKBTGU_riCSYrscdRCLuocbNzhF9RTNItPfD4_PmYngIhzeW5jbmV0cwCDdGNwgjLIg3VkcIIu4A +- enr:-MK4QPnKUgY_13LlQBxPZsM49owul_gFUb0CxvGDZGkMnZixO1P_imfgFUCPJXOp-k4f3-t2reL8yr53h-ZwlCetaXKGAYU64CTsh2F0dG5ldHOIAAAAAAAAAACEZXRoMpA3FbaXIAAAk___________gmlkgnY0gmlwhCJ5ITWJc2VjcDI1NmsxoQIlwaxycUgJ_Ht4lYdDlInbIuRxu0HcHcFbu0D7As2SLYhzeW5jbmV0cwCDdGNwgjLIg3VkcIIu4A diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml b/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml index d6e6aef57a5..b2a5b98111d 100644 --- a/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml @@ -1,28 +1,21 @@ -# Prater config - -# Extends the mainnet preset -CONFIG_NAME: 'eip4844' -PRESET_BASE: 'mainnet' +CONFIG_NAME: testnet +PRESET_BASE: mainnet # Transition # --------------------------------------------------------------- -TERMINAL_TOTAL_DIFFICULTY: 40 +TERMINAL_TOTAL_DIFFICULTY: 2 # By default, don't use these params TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 # Genesis # --------------------------------------------------------------- -# `2**14` (= 16,384) +# The genesis fork version looks weird for legacy reasons MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 2 -# Mar-01-2021 08:53:32 AM +UTC -MIN_GENESIS_TIME: 1653318000 -# Prater area code (Vienna) +MIN_GENESIS_TIME: 1671528489 GENESIS_FORK_VERSION: 0x00000ffd -# Customized for Prater: 1919188 seconds (Mar-23-2021 02:00:00 PM +UTC) GENESIS_DELAY: 0 - # Forking # --------------------------------------------------------------- # Some forks are disabled for now: @@ -30,18 +23,21 @@ GENESIS_DELAY: 0 # - Temporarily set to max uint64 value: 2**64 - 1 # 
Altair -ALTAIR_FORK_VERSION: 0x01000ffd -ALTAIR_FORK_EPOCH: 1 +ALTAIR_FORK_EPOCH: 2 +ALTAIR_FORK_VERSION: 0x20000090 + # Merge -BELLATRIX_FORK_VERSION: 0x02000ffd -BELLATRIX_FORK_EPOCH: 2 -# Sharding -EIP4844_FORK_VERSION: 0x83000ffd -EIP4844_FORK_EPOCH: 3 +BELLATRIX_FORK_EPOCH: 3 +BELLATRIX_FORK_VERSION: 0x20000091 -# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. -TRANSITION_TOTAL_DIFFICULTY: 40 +# Capella +CAPELLA_FORK_EPOCH: 4 +CAPELLA_FORK_VERSION: 0x20000092 +MAX_WITHDRAWALS_PER_PAYLOAD: 4 +# EIP4844 +EIP4844_FORK_EPOCH: 5 +EIP4844_FORK_VERSION: 0x20000093 # Time parameters # --------------------------------------------------------------- @@ -53,9 +49,7 @@ SECONDS_PER_ETH1_BLOCK: 14 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**8 (= 256) epochs ~27 hours SHARD_COMMITTEE_PERIOD: 256 -# 2**11 (= 2,048) Eth1 blocks ~8 hours -ETH1_FOLLOW_DISTANCE: 15 - +ETH1_FOLLOW_DISTANCE: 1 # Validator cycle # --------------------------------------------------------------- @@ -70,7 +64,6 @@ MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 - # Fork choice # --------------------------------------------------------------- # 40% @@ -78,8 +71,7 @@ PROPOSER_SCORE_BOOST: 40 # Deposit contract # --------------------------------------------------------------- -# Ethereum Goerli testnet +DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 DEPOSIT_CHAIN_ID: 1331 -DEPOSIT_NETWORK_ID: 69 -# Prater test deposit contract on Goerli Testnet -DEPOSIT_CONTRACT_ADDRESS: 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 +DEPOSIT_NETWORK_ID: 1331 + diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip index 88b40507105..35e5f8b59cd 100644 Binary files a/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip and b/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip differ diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml index 4b232d8b324..130c1fa1c32 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml @@ -1,5 +1,5 @@ # Gnosis Chain Team -- enr:-IS4QGmLwm7gFd0L0CEisllrb1op3v-wAGSc7_pwSMGgN3bOS9Fz7m1dWbwuuPHKqeETz9MbhjVuoWk0ohkyRv98kVoBgmlkgnY0gmlwhGjtlgaJc2VjcDI1NmsxoQLMdh0It9fJbuiLydZ9fpF6MRzgNle0vODaDiMqhbC7WIN1ZHCCIyg -- enr:-IS4QFUVG3dvLPCUEI7ycRvFm0Ieg_ITa5tALmJ9LI7dJ6ieT3J4fF9xLRjOoB4ApV-Rjp7HeLKzyTWG1xRdbFBNZPQBgmlkgnY0gmlwhErP5weJc2VjcDI1NmsxoQOBbaJBvx0-w_pyZUhQl9A510Ho2T0grE0K8JevzES99IN1ZHCCIyg -- enr:-Ku4QOQk8V-Hu2gxFzRXmLYIO4AvWDZhoMFwTf3n3DYm_mbsWv0ZitoqiN6JZUUj6Li6e1Jk1w2zFSVHKPMUP1g5tsgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD5Jd3FAAAAZP__________gmlkgnY0gmlwhC1PTpmJc2VjcDI1NmsxoQL1Ynt5PoA0UOcHa1Rfn98rmnRlLzNuWTePPP4m4qHVroN1ZHCCKvg -- enr:-Ku4QFaTwgoms-EiiRIfHUH3FXprWUFgjHg4UuWvilqoUQtDbmTszVIxUEOwQUmA2qkiP-T9wXjc_rVUuh9cU7WgwbgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD5Jd3FAAAAZP__________gmlkgnY0gmlwhC0hBmCJc2VjcDI1NmsxoQOpsg1XCrXmCwZKcSTcycLwldoKUMHPUpMEVGeg_EEhuYN1ZHCCKvg +- enr:-Ly4QMU1y81COwm1VZgxGF4_eZ21ub9-GHF6dXZ29aEJ0oZpcV2Rysw-viaEKfpcpu9ZarILJLxFZjcKOjE0Sybs3MQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhANLnx-Jc2VjcDI1NmsxoQKoaYT8I-wf2I_f_ii6EgoSSXj5T3bhiDyW-7ZLsY3T64hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- 
enr:-Ly4QBf76jLiCA_pDXoZjhyRbuwzFOscFY-MIKkPnmHPQbvaKhIDZutfe38G9ibzgQP0RKrTo3vcWOy4hf_8wOZ-U5MBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhBLGgjaJc2VjcDI1NmsxoQLGeo0Q4lDvxIjHjnkAqEuETTaFIjsNrEcSpdDhcHXWFYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QLjZUWdqUO_RwyDqCAccIK5-MbLRD6A2c7oBuVbBgBnWDkEf0UKJVAaJqi2pO101WVQQLYSnYgz1Q3pRhYdrlFoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhANA8sSJc2VjcDI1NmsxoQK4TC_EK1jSs0VVPUpOjIo1rhJmff2SLBPFOWSXMwdLVYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QKwX2rTFtKWKQHSGQFhquxsxL1jewO8JB1MG-jgHqAZVFWxnb3yMoQqnYSV1bk25-_jiLuhIulxar3RBWXEDm6EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhAN-qZeJc2VjcDI1NmsxoQI7EPGMpecl0QofLp4Wy_lYNCCChUFEH6kY7k-oBGkPFIhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 39a0be3d9fd..0b79539877a 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -43,5 +43,4 @@ arbitrary-fuzz = [ "eth2_ssz_types/arbitrary", "tree_hash/arbitrary", ] -withdrawals = ["types/withdrawals"] withdrawals-processing = [] diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index d9453d36446..23dd989f98b 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,13 +1,10 @@ use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; use std::collections::{hash_map::Entry, HashMap}; -use std::marker::PhantomData; -use std::sync::Arc; use tree_hash::TreeHash; use types::{ AbstractExecPayload, Attestation, AttestationData, BeaconState, BeaconStateError, BitList, - BlobsSidecar, ChainSpec, Epoch, EthSpec, ExecPayload, Hash256, IndexedAttestation, - SignedBeaconBlock, Slot, + ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -21,8 +18,6 @@ pub struct ConsensusContext { /// Cache of indexed attestations constructed during block processing. indexed_attestations: HashMap<(AttestationData, BitList), IndexedAttestation>, - /// Should only be populated if the sidecar has not been validated. - blobs_sidecar: Option>>, /// Whether `validate_blobs_sidecar` has successfully passed. blobs_sidecar_validated: bool, /// Whether `verify_kzg_commitments_against_transactions` has successfully passed. 
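With the owned sidecar removed, `ConsensusContext` carries only the outcomes of blob validation as booleans. A minimal standalone sketch of that builder-style flag pattern (field names mirror the diff; the setter names and struct are illustrative, not the crate's actual type):

#[derive(Debug, Default)]
struct BlobChecks {
    blobs_sidecar_validated: bool,
    blobs_verified_vs_txs: bool,
}

impl BlobChecks {
    fn set_blobs_sidecar_validated(mut self, v: bool) -> Self {
        self.blobs_sidecar_validated = v;
        self
    }
    fn set_blobs_verified_vs_txs(mut self, v: bool) -> Self {
        self.blobs_verified_vs_txs = v;
        self
    }
}

fn main() {
    let ctx = BlobChecks::default()
        .set_blobs_sidecar_validated(true)
        .set_blobs_verified_vs_txs(true);
    assert!(ctx.blobs_sidecar_validated && ctx.blobs_verified_vs_txs);
}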
@@ -49,7 +44,6 @@ impl ConsensusContext { proposer_index: None, current_block_root: None, indexed_attestations: HashMap::new(), - blobs_sidecar: None, blobs_sidecar_validated: false, blobs_verified_vs_txs: false, } @@ -185,13 +179,4 @@ impl ConsensusContext { pub fn blobs_verified_vs_txs(&self) -> bool { self.blobs_verified_vs_txs } - - pub fn set_blobs_sidecar(mut self, blobs_sidecar: Option>>) -> Self { - self.blobs_sidecar = blobs_sidecar; - self - } - - pub fn blobs_sidecar(&self) -> Option>> { - self.blobs_sidecar.clone() - } } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 02843727a71..4b5e77e0a46 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -19,6 +19,7 @@ pub use process_operations::process_operations; pub use verify_attestation::{ verify_attestation_for_block_inclusion, verify_attestation_for_state, }; +#[cfg(feature = "withdrawals-processing")] pub use verify_bls_to_execution_change::verify_bls_to_execution_change; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, @@ -35,13 +36,12 @@ pub mod signature_sets; pub mod tests; mod verify_attestation; mod verify_attester_slashing; +#[cfg(feature = "withdrawals-processing")] mod verify_bls_to_execution_change; mod verify_deposit; mod verify_exit; mod verify_proposer_slashing; -use crate::common::decrease_balance; - #[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; @@ -162,7 +162,7 @@ pub fn per_block_processing>( // previous block. if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; - #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] + #[cfg(feature = "withdrawals-processing")] process_withdrawals::(state, payload, spec)?; process_execution_payload::(state, payload, spec)?; } @@ -466,8 +466,9 @@ pub fn compute_timestamp_at_slot( .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } -/// FIXME: add link to this function once the spec is stable -#[cfg(feature = "withdrawals")] +/// Compute the next batch of withdrawals which should be included in a block. +/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-get_expected_withdrawals pub fn get_expected_withdrawals( state: &BeaconState, spec: &ChainSpec, @@ -481,7 +482,11 @@ pub fn get_expected_withdrawals( return Ok(withdrawals.into()); } - for _ in 0..state.validators().len() { + let bound = std::cmp::min( + state.validators().len() as u64, + spec.max_validators_per_withdrawals_sweep, + ); + for _ in 0..bound { let validator = state.get_validator(validator_index as usize)?; let balance = *state.balances().get(validator_index as usize).ok_or( BeaconStateError::BalancesOutOfBounds(validator_index as usize), @@ -518,8 +523,8 @@ pub fn get_expected_withdrawals( Ok(withdrawals.into()) } -/// FIXME: add link to this function once the spec is stable -#[cfg(feature = "withdrawals")] +/// Apply withdrawals to the state. 
+#[cfg(feature = "withdrawals-processing")] pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload>( state: &mut BeaconState, payload: Payload::Ref<'payload>, @@ -547,11 +552,26 @@ pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload )?; } + // Update the next withdrawal index if this block contained withdrawals if let Some(latest_withdrawal) = expected_withdrawals.last() { *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?; - let next_validator_index = latest_withdrawal - .validator_index - .safe_add(1)? + + // Update the next validator index to start the next withdrawal sweep + if expected_withdrawals.len() == T::max_withdrawals_per_payload() { + // Next sweep starts after the latest withdrawal's validator index + let next_validator_index = latest_withdrawal + .validator_index + .safe_add(1)? + .safe_rem(state.validators().len() as u64)?; + *state.next_withdrawal_validator_index_mut()? = next_validator_index; + } + } + + // Advance sweep by the max length of the sweep if there was not a full set of withdrawals + if expected_withdrawals.len() != T::max_withdrawals_per_payload() { + let next_validator_index = state + .next_withdrawal_validator_index()? + .safe_add(spec.max_validators_per_withdrawals_sweep)? .safe_rem(state.validators().len() as u64)?; *state.next_withdrawal_validator_index_mut()? = next_validator_index; } diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 50bfbfdc454..bbf2c1caa51 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -170,7 +170,6 @@ where // Deposits are not included because they can legally have invalid signatures. self.include_exits(block)?; self.include_sync_aggregate(block)?; - #[cfg(feature = "withdrawals")] self.include_bls_to_execution_changes(block)?; Ok(()) @@ -345,7 +344,6 @@ where } /// Include the signature of the block's BLS to execution changes for verification. - #[cfg(feature = "withdrawals")] pub fn include_bls_to_execution_changes>( &mut self, block: &'a SignedBeaconBlock, diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index cbfc2bd465e..f27fd48b4f5 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -34,7 +34,7 @@ pub fn process_operations<'a, T: EthSpec, Payload: AbstractExecPayload>( process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; - #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] + #[cfg(feature = "withdrawals-processing")] if let Ok(bls_to_execution_changes) = block_body.bls_to_execution_changes() { process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?; } @@ -295,6 +295,7 @@ pub fn process_exits( /// /// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns /// an `Err` describing the invalid object or cause of failure. 
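Two pieces of arithmetic above are worth pinning down: `get_expected_withdrawals` now scans at most `min(validators.len(), max_validators_per_withdrawals_sweep)` validators, and when a block contains fewer than the maximum withdrawals per payload, `process_withdrawals` advances the sweep cursor by the full sweep length modulo the validator count. A self-contained check with hypothetical sizes:

fn main() {
    let validators: u64 = 100; // hypothetical validator-set size
    let max_sweep: u64 = 16_384; // mainnet MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP

    // The per-block scan bound never exceeds the validator set:
    assert_eq!(std::cmp::min(validators, max_sweep), 100);

    // A partial withdrawal set advances the cursor by the sweep length, wrapping:
    let cursor: u64 = 90;
    let next = (cursor + max_sweep) % validators; // (90 + 16384) % 100
    assert_eq!(next, 74);
}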
+#[cfg(feature = "withdrawals-processing")] pub fn process_bls_to_execution_changes( state: &mut BeaconState, bls_to_execution_changes: &[SignedBlsToExecutionChange], diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index 9a883698830..dc759b384d8 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -56,9 +56,7 @@ pub fn upgrade_to_capella( // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(), // Withdrawals - #[cfg(feature = "withdrawals")] next_withdrawal_index: 0, - #[cfg(feature = "withdrawals")] next_withdrawal_validator_index: 0, // Caches total_active_balance: pre.total_active_balance, diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 6d66fd8412c..e829c01e7e2 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -9,13 +9,7 @@ pub fn upgrade_to_eip4844( let epoch = pre_state.current_epoch(); let pre = pre_state.as_capella_mut()?; - // FIXME(sean) This is a hack to let us participate in testnets where capella doesn't exist. - // if we are disabling withdrawals, assume we should fork off of bellatrix. - let previous_fork_version = if cfg!(feature = "withdrawals") { - pre.fork.current_version - } else { - spec.bellatrix_fork_version - }; + let previous_fork_version = pre.fork.current_version; // Where possible, use something like `mem::take` to move fields from behind the &mut // reference. For other fields that don't have a good default value, use `clone`. @@ -64,9 +58,7 @@ pub fn upgrade_to_eip4844( // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(), // Withdrawals - #[cfg(feature = "withdrawals")] next_withdrawal_index: pre.next_withdrawal_index, - #[cfg(feature = "withdrawals")] next_withdrawal_validator_index: pre.next_withdrawal_validator_index, // Caches total_active_balance: pre.total_active_balance, diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index be1c3191f09..44bf888a71f 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -74,4 +74,3 @@ arbitrary-fuzz = [ "swap_or_not_shuffle/arbitrary", "tree_hash/arbitrary", ] -withdrawals = [] diff --git a/consensus/types/presets/gnosis/capella.yaml b/consensus/types/presets/gnosis/capella.yaml new file mode 100644 index 00000000000..913c2956ba7 --- /dev/null +++ b/consensus/types/presets/gnosis/capella.yaml @@ -0,0 +1,17 @@ +# Mainnet preset - Capella + +# Misc +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + +# Execution +# --------------------------------------------------------------- +# 2**4 (= 16) withdrawals +MAX_WITHDRAWALS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**14 (= 16384) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 diff --git a/consensus/types/presets/mainnet/capella.yaml b/consensus/types/presets/mainnet/capella.yaml index 0c087255bfb..913c2956ba7 100644 --- a/consensus/types/presets/mainnet/capella.yaml +++ b/consensus/types/presets/mainnet/capella.yaml @@ -9,4 +9,9 @@ MAX_BLS_TO_EXECUTION_CHANGES: 16 # Execution # --------------------------------------------------------------- # 2**4 (= 16) withdrawals 
-MAX_WITHDRAWALS_PER_PAYLOAD: 16 \ No newline at end of file +MAX_WITHDRAWALS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**14 (= 16384) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 diff --git a/consensus/types/presets/minimal/capella.yaml b/consensus/types/presets/minimal/capella.yaml index eacd6c7cbca..d27253de871 100644 --- a/consensus/types/presets/minimal/capella.yaml +++ b/consensus/types/presets/minimal/capella.yaml @@ -9,4 +9,9 @@ MAX_BLS_TO_EXECUTION_CHANGES: 16 # Execution # --------------------------------------------------------------- # [customized] 2**2 (= 4) -MAX_WITHDRAWALS_PER_PAYLOAD: 4 \ No newline at end of file +MAX_WITHDRAWALS_PER_PAYLOAD: 4 + +# Withdrawals processing +# --------------------------------------------------------------- +# [customized] 2**4 (= 16) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16 diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 124cb08bcc0..fd38e9faf26 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -502,7 +502,6 @@ impl> EmptyBlock for BeaconBlockCape voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), execution_payload: Payload::Capella::default(), - #[cfg(feature = "withdrawals")] bls_to_execution_changes: VariableList::empty(), }, } @@ -532,7 +531,6 @@ impl> EmptyBlock for BeaconBlockEip4 voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), execution_payload: Payload::Eip4844::default(), - #[cfg(feature = "withdrawals")] bls_to_execution_changes: VariableList::empty(), blob_kzg_commitments: VariableList::empty(), }, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 718950c230f..8410e4eece4 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -61,7 +61,6 @@ pub struct BeaconBlockBody = FullPay #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] #[serde(flatten)] pub execution_payload: Payload::Eip4844, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub bls_to_execution_changes: VariableList, @@ -300,7 +299,6 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, } = body; @@ -318,7 +316,6 @@ impl From>> execution_payload: BlindedPayloadCapella { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }, Some(execution_payload), @@ -344,7 +341,6 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, } = body; @@ -363,7 +359,6 @@ impl From>> execution_payload: BlindedPayloadEip4844 { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, }, @@ -432,7 +427,6 @@ impl BeaconBlockBodyCapella> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, } = self; @@ -449,7 +443,6 @@ impl BeaconBlockBodyCapella> { execution_payload: BlindedPayloadCapella { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = 
"withdrawals")] bls_to_execution_changes: bls_to_execution_changes.clone(), } } @@ -468,7 +461,6 @@ impl BeaconBlockBodyEip4844> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, } = self; @@ -486,7 +478,6 @@ impl BeaconBlockBodyEip4844> { execution_payload: BlindedPayloadEip4844 { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 48a83f94f45..b3eff7374b2 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -297,10 +297,8 @@ where pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, // Withdrawals - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_index: u64, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_validator_index: u64, diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 30dd9f8d6bc..4cfc684f4d3 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -336,11 +336,9 @@ impl BeaconTreeHashCacheInner { } // Withdrawal indices (Capella and later). - #[cfg(feature = "withdrawals")] if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { hasher.write(next_withdrawal_index.tree_hash_root().as_bytes())?; } - #[cfg(feature = "withdrawals")] if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { hasher.write(next_withdrawal_validator_index.tree_hash_root().as_bytes())?; } diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs index 430936cc275..f43a2e65085 100644 --- a/consensus/types/src/blobs_sidecar.rs +++ b/consensus/types/src/blobs_sidecar.rs @@ -1,14 +1,20 @@ +use crate::test_utils::TestRandom; use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot}; +use derivative::Derivative; use kzg::KzgProof; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; +use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Default)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, Default, TestRandom, Derivative, +)] #[serde(bound = "T: EthSpec")] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] pub struct BlobsSidecar { pub beacon_block_root: Hash256, pub beacon_block_slot: Slot, @@ -23,6 +29,16 @@ impl BlobsSidecar { pub fn empty() -> Self { Self::default() } + + pub fn empty_from_parts(beacon_block_root: Hash256, beacon_block_slot: Slot) -> Self { + Self { + beacon_block_root, + beacon_block_slot, + blobs: VariableList::empty(), + kzg_aggregated_proof: KzgProof::empty(), + } + } + #[allow(clippy::integer_arithmetic)] pub fn max_size() -> usize { // Fixed part diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index d16c9b8091c..bf9a7ed34db 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs 
@@ -158,8 +158,9 @@ pub struct ChainSpec { * Capella hard fork params */ pub capella_fork_version: [u8; 4], - /// The Capella fork epoch is optional, with `None` representing "Merge never happens". + /// The Capella fork epoch is optional, with `None` representing "Capella never happens". pub capella_fork_epoch: Option, + pub max_validators_per_withdrawals_sweep: u64, /* * Eip4844 hard fork params @@ -634,6 +635,7 @@ impl ChainSpec { */ capella_fork_version: [0x03, 00, 00, 00], capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16384, /* * Eip4844 hard fork params @@ -707,6 +709,7 @@ impl ChainSpec { // Capella capella_fork_version: [0x03, 0x00, 0x00, 0x01], capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16, // Eip4844 eip4844_fork_version: [0x04, 0x00, 0x00, 0x01], eip4844_fork_epoch: None, @@ -869,6 +872,7 @@ impl ChainSpec { */ capella_fork_version: [0x03, 0x00, 0x00, 0x64], capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16384, /* * Eip4844 hard fork params diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index f72b1710de3..ac93818b9c3 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,5 +1,6 @@ use crate::{ - consts::altair, AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec, ForkName, + consts::altair, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, + EthSpec, ForkName, }; use maplit::hashmap; use serde_derive::{Deserialize, Serialize}; @@ -11,7 +12,7 @@ use superstruct::superstruct; /// /// Mostly useful for the API. #[superstruct( - variants(Altair, Bellatrix), + variants(Bellatrix, Capella), variant_attributes(derive(Serialize, Deserialize, Debug, PartialEq, Clone)) )] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] @@ -24,9 +25,11 @@ pub struct ConfigAndPreset { pub base_preset: BasePreset, #[serde(flatten)] pub altair_preset: AltairPreset, - #[superstruct(only(Bellatrix))] #[serde(flatten)] pub bellatrix_preset: BellatrixPreset, + #[superstruct(only(Capella))] + #[serde(flatten)] + pub capella_preset: CapellaPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. 
#[serde(flatten)] pub extra_fields: HashMap, @@ -37,26 +40,29 @@ impl ConfigAndPreset { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); let altair_preset = AltairPreset::from_chain_spec::(spec); + let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); let extra_fields = get_extra_fields(spec); - if spec.bellatrix_fork_epoch.is_some() + if spec.capella_fork_epoch.is_some() || fork_name.is_none() - || fork_name == Some(ForkName::Merge) + || fork_name == Some(ForkName::Capella) { - let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); + let capella_preset = CapellaPreset::from_chain_spec::(spec); - ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { + ConfigAndPreset::Capella(ConfigAndPresetCapella { config, base_preset, altair_preset, bellatrix_preset, + capella_preset, extra_fields, }) } else { - ConfigAndPreset::Altair(ConfigAndPresetAltair { + ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { config, base_preset, altair_preset, + bellatrix_preset, extra_fields, }) } @@ -131,8 +137,8 @@ mod test { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: ConfigAndPresetBellatrix = + let from: ConfigAndPresetCapella = serde_yaml::from_reader(reader).expect("error while deserializing"); - assert_eq!(ConfigAndPreset::Bellatrix(from), yamlconfig); + assert_eq!(ConfigAndPreset::Capella(from), yamlconfig); } } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 18005094e4b..45f52fb65a7 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -80,7 +80,6 @@ pub struct ExecutionPayload { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub withdrawals: Withdrawals, } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index a98a68e3e55..e2c23389a1f 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -75,7 +75,6 @@ pub struct ExecutionPayloadHeader { pub block_hash: ExecutionBlockHash, #[superstruct(getter(copy))] pub transactions_root: Hash256, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] #[superstruct(getter(copy))] pub withdrawals_root: Hash256, @@ -128,7 +127,6 @@ impl ExecutionPayloadHeaderMerge { base_fee_per_gas: self.base_fee_per_gas, block_hash: self.block_hash, transactions_root: self.transactions_root, - #[cfg(feature = "withdrawals")] withdrawals_root: Hash256::zero(), } } @@ -153,7 +151,6 @@ impl ExecutionPayloadHeaderCapella { excess_data_gas: Uint256::zero(), block_hash: self.block_hash, transactions_root: self.transactions_root, - #[cfg(feature = "withdrawals")] withdrawals_root: self.withdrawals_root, } } @@ -196,7 +193,6 @@ impl From> for ExecutionPayloadHeaderCape base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), - #[cfg(feature = "withdrawals")] withdrawals_root: payload.withdrawals.tree_hash_root(), } } @@ -219,7 +215,6 @@ impl From> for ExecutionPayloadHeaderEip4 excess_data_gas: payload.excess_data_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), - #[cfg(feature = "withdrawals")] withdrawals_root: payload.withdrawals.tree_hash_root(), } 
} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 787e1af6a87..44193b3549a 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -123,7 +123,7 @@ pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetAltair, ConfigAndPresetBellatrix, + ConfigAndPreset, ConfigAndPresetBellatrix, ConfigAndPresetCapella, }; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; @@ -160,7 +160,7 @@ pub use crate::payload::{ FullPayloadCapella, FullPayloadEip4844, FullPayloadMerge, FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; -pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset}; +pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset, CapellaPreset}; pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 2d9e37b81ab..8bba00b46df 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -37,7 +37,6 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn gas_limit(&self) -> u64; fn transactions(&self) -> Option<&Transactions>; /// fork-specific fields - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result; /// Is this a default payload with 0x0 roots for transactions and withdrawals? @@ -241,7 +240,6 @@ impl ExecPayload for FullPayload { }) } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { FullPayload::Merge(_) => Err(Error::IncorrectStateVariant), @@ -343,7 +341,6 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { }) } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { FullPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), @@ -523,7 +520,6 @@ impl ExecPayload for BlindedPayload { None } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { BlindedPayload::Merge(_) => Err(Error::IncorrectStateVariant), @@ -614,7 +610,6 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { None } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { BlindedPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), @@ -712,7 +707,6 @@ macro_rules! 
impl_exec_payload_common { f(self) } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { let g = $g; g(self) diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 8ee38e46a6d..7d7db228cef 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -184,6 +184,27 @@ impl BellatrixPreset { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct CapellaPreset { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_bls_to_execution_changes: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_withdrawals_per_payload: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_validators_per_withdrawals_sweep: u64, +} + +impl CapellaPreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + Self { + max_bls_to_execution_changes: T::max_bls_to_execution_changes() as u64, + max_withdrawals_per_payload: T::max_withdrawals_per_payload() as u64, + max_validators_per_withdrawals_sweep: spec.max_validators_per_withdrawals_sweep, + } + } +} + #[cfg(test)] mod test { use super::*; @@ -219,6 +240,9 @@ mod test { let bellatrix: BellatrixPreset = preset_from_file(&preset_name, "bellatrix.yaml"); assert_eq!(bellatrix, BellatrixPreset::from_chain_spec::(&spec)); + + let capella: CapellaPreset = preset_from_file(&preset_name, "capella.yaml"); + assert_eq!(capella, CapellaPreset::from_chain_spec::(&spec)); } #[test] diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 2a8398f83f3..89ccb95a164 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -36,6 +36,10 @@ impl From for Hash256 { } } +pub enum BlobReconstructionError { + BlobsMissing, +} + /// A `BeaconBlock` and a signature from its proposer. #[superstruct( variants(Base, Altair, Merge, Capella, Eip4844), @@ -235,6 +239,29 @@ impl> SignedBeaconBlock pub fn canonical_root(&self) -> Hash256 { self.message().tree_hash_root() } + + /// Reconstructs an empty `BlobsSidecar`, using the given block root if provided, else calculates it. + /// If this block has kzg commitments, an error will be returned. If this block is from prior to the + /// Eip4844 fork, `None` will be returned. + pub fn reconstruct_empty_blobs( + &self, + block_root_opt: Option, + ) -> Result>, BlobReconstructionError> { + self.message() + .body() + .blob_kzg_commitments() + .map(|kzg_commitments| { + if kzg_commitments.len() > 0 { + Err(BlobReconstructionError::BlobsMissing) + } else { + Ok(Some(BlobsSidecar::empty_from_parts( + block_root_opt.unwrap_or(self.canonical_root()), + self.slot(), + ))) + } + }) + .unwrap_or(Ok(None)) + } } // We can convert pre-Bellatrix blocks without payloads into blocks with payloads. @@ -341,7 +368,6 @@ impl SignedBeaconBlockCapella> { voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadCapella { .. }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }, }, @@ -364,7 +390,6 @@ impl SignedBeaconBlockCapella> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }, }, @@ -397,7 +422,6 @@ impl SignedBeaconBlockEip4844> { voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadEip4844 { .. 
}, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, }, @@ -421,7 +445,6 @@ impl SignedBeaconBlockEip4844> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, }, diff --git a/consensus/types/src/signed_block_and_blobs.rs b/consensus/types/src/signed_block_and_blobs.rs index 9b4517eb47a..c589fbcfeb9 100644 --- a/consensus/types/src/signed_block_and_blobs.rs +++ b/consensus/types/src/signed_block_and_blobs.rs @@ -1,4 +1,6 @@ +use crate::signed_beacon_block::BlobReconstructionError; use crate::{BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockEip4844, Slot}; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; @@ -12,8 +14,8 @@ pub struct SignedBeaconBlockAndBlobsSidecarDecode { pub blobs_sidecar: BlobsSidecar, } -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, PartialEq)] -#[serde(bound = "T: EthSpec")] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] pub struct SignedBeaconBlockAndBlobsSidecar { pub beacon_block: Arc>, pub blobs_sidecar: Arc>, @@ -32,62 +34,79 @@ impl SignedBeaconBlockAndBlobsSidecar { } } +/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`]. This newtype +/// wraps the `BlockWrapperInner` to ensure blobs cannot be accessed via an enum match. This would +/// circumvent empty blob reconstruction when accessing blobs. +#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +pub struct BlockWrapper(BlockWrapperInner); + /// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`]. 
-#[derive(Clone, Debug)] -pub enum BlockWrapper { - Block { - block: Arc>, - }, - BlockAndBlob { - block_sidecar_pair: SignedBeaconBlockAndBlobsSidecar, - }, +#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +pub enum BlockWrapperInner { + Block(Arc>), + BlockAndBlob(SignedBeaconBlockAndBlobsSidecar), } impl BlockWrapper { + pub fn new(block: Arc>) -> Self { + Self(BlockWrapperInner::Block(block)) + } + + pub fn new_with_blobs( + beacon_block: Arc>, + blobs_sidecar: Arc>, + ) -> Self { + Self(BlockWrapperInner::BlockAndBlob( + SignedBeaconBlockAndBlobsSidecar { + beacon_block, + blobs_sidecar, + }, + )) + } + pub fn slot(&self) -> Slot { - match self { - BlockWrapper::Block { block } => block.slot(), - BlockWrapper::BlockAndBlob { block_sidecar_pair } => { + match &self.0 { + BlockWrapperInner::Block(block) => block.slot(), + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => { block_sidecar_pair.beacon_block.slot() } } } pub fn block(&self) -> &SignedBeaconBlock { - match self { - BlockWrapper::Block { block } => &block, - BlockWrapper::BlockAndBlob { block_sidecar_pair } => &block_sidecar_pair.beacon_block, + match &self.0 { + BlockWrapperInner::Block(block) => &block, + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => &block_sidecar_pair.beacon_block, } } pub fn block_cloned(&self) -> Arc> { - match self { - BlockWrapper::Block { block } => block.clone(), - BlockWrapper::BlockAndBlob { block_sidecar_pair } => { + match &self.0 { + BlockWrapperInner::Block(block) => block.clone(), + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => { block_sidecar_pair.beacon_block.clone() } } } - pub fn blobs_sidecar(&self) -> Option>> { - match self { - BlockWrapper::Block { block: _ } => None, - BlockWrapper::BlockAndBlob { block_sidecar_pair } => { - Some(block_sidecar_pair.blobs_sidecar.clone()) - } - } - } - pub fn blobs(&self) -> Option<&BlobsSidecar> { - match self { - BlockWrapper::Block { .. 
} => None, - BlockWrapper::BlockAndBlob { block_sidecar_pair } => { - Some(&block_sidecar_pair.blobs_sidecar) + pub fn blobs( + &self, + block_root: Option, + ) -> Result>>, BlobReconstructionError> { + match &self.0 { + BlockWrapperInner::Block(block) => block + .reconstruct_empty_blobs(block_root) + .map(|blob_opt| blob_opt.map(Arc::new)), + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => { + Ok(Some(block_sidecar_pair.blobs_sidecar.clone())) } } } pub fn message(&self) -> crate::BeaconBlockRef { - match self { - BlockWrapper::Block { block } => block.message(), - BlockWrapper::BlockAndBlob { block_sidecar_pair } => { + match &self.0 { + BlockWrapperInner::Block(block) => block.message(), + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => { block_sidecar_pair.beacon_block.message() } } @@ -97,43 +116,45 @@ impl BlockWrapper { self.block().parent_root() } - pub fn deconstruct(self) -> (Arc>, Option>>) { - match self { - BlockWrapper::Block { block } => (block, None), - BlockWrapper::BlockAndBlob { block_sidecar_pair } => { + pub fn deconstruct( + self, + block_root: Option, + ) -> ( + Arc>, + Result>>, BlobReconstructionError>, + ) { + match self.0 { + BlockWrapperInner::Block(block) => { + let blobs = block + .reconstruct_empty_blobs(block_root) + .map(|blob_opt| blob_opt.map(Arc::new)); + (block, blobs) + } + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => { let SignedBeaconBlockAndBlobsSidecar { beacon_block, blobs_sidecar, } = block_sidecar_pair; - (beacon_block, Some(blobs_sidecar)) + (beacon_block, Ok(Some(blobs_sidecar))) } } } } -// TODO: probably needes to be changed. This is needed because SignedBeaconBlockAndBlobsSidecar -// does not implement Hash -impl std::hash::Hash for BlockWrapper { - fn hash(&self, state: &mut H) { - match self { - BlockWrapper::Block { block } => block.hash(state), - BlockWrapper::BlockAndBlob { - block_sidecar_pair: block_and_blob, - } => block_and_blob.beacon_block.hash(state), - } - } -} - impl From> for BlockWrapper { fn from(block: SignedBeaconBlock) -> Self { - BlockWrapper::Block { - block: Arc::new(block), - } + BlockWrapper(BlockWrapperInner::Block(Arc::new(block))) } } impl From>> for BlockWrapper { fn from(block: Arc>) -> Self { - BlockWrapper::Block { block } + BlockWrapper(BlockWrapperInner::Block(block)) + } +} + +impl From> for BlockWrapper { + fn from(block: SignedBeaconBlockAndBlobsSidecar) -> Self { + BlockWrapper(BlockWrapperInner::BlockAndBlob(block)) } } diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index aff195310d4..69b91571db5 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -18,7 +18,7 @@ eth2_serde_utils = "0.1.1" hex = "0.4.2" eth2_hashing = "0.3.0" ethereum-types = "0.12.1" -c-kzg = {git = "https://github.com/pawanjay176/c-kzg-4844", rev = "48c048b12a7d29ae3e7bf09000e07870da4cb701" } +c-kzg = {git = "https://github.com/pawanjay176/c-kzg-4844", rev = "69bde8f4e0bbf0da30d92601b7db138bdd7e6a04" } [features] default = ["mainnet-spec"] diff --git a/crypto/kzg/src/kzg_proof.rs b/crypto/kzg/src/kzg_proof.rs index cb6e14df4ad..be85088f760 100644 --- a/crypto/kzg/src/kzg_proof.rs +++ b/crypto/kzg/src/kzg_proof.rs @@ -12,6 +12,14 @@ const KZG_PROOF_BYTES_LEN: usize = 48; #[ssz(struct_behaviour = "transparent")] pub struct KzgProof(pub [u8; KZG_PROOF_BYTES_LEN]); +impl KzgProof { + pub fn empty() -> Self { + let mut bytes = [0; KZG_PROOF_BYTES_LEN]; + bytes[0] = 192; + Self(bytes) + } +} + impl fmt::Display for KzgProof { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 
write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 4130c0f6afc..0da6b6f0902 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -8,7 +8,6 @@ edition = "2021" [features] portable = ["bls/supranational-portable"] fake_crypto = ['bls/fake_crypto'] -withdrawals = ["types/withdrawals", "beacon_chain/withdrawals", "store/withdrawals", "state_processing/withdrawals"] withdrawals-processing = ["beacon_chain/withdrawals-processing", "store/withdrawals-processing", "state_processing/withdrawals-processing"] [dependencies] diff --git a/lcli/Dockerfile b/lcli/Dockerfile index feda81d0302..5a83a9dc85a 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -2,7 +2,7 @@ # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` FROM rust:1.65.0-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler COPY . lighthouse ARG PORTABLE ENV PORTABLE $PORTABLE diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index d6e093c1733..d8973980feb 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -2,24 +2,21 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_required, parse_ssz_optional}; use eth2_hashing::hash; use eth2_network_config::Eth2NetworkConfig; -use genesis::interop_genesis_state; use ssz::Decode; use ssz::Encode; use state_processing::process_activations; -use state_processing::upgrade::{ - upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_eip4844, -}; +use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; use std::fs::File; use std::io::Read; use std::path::PathBuf; use std::str::FromStr; use std::time::{SystemTime, UNIX_EPOCH}; +use types::ExecutionBlockHash; use types::{ test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge, Hash256, Keypair, PublicKey, Validator, }; -use types::{BeaconStateMerge, ExecutionBlockHash}; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?; diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 8af376d55e1..c1d2f72d38c 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -24,8 +24,6 @@ gnosis = [] slasher-mdbx = ["slasher/mdbx"] # Support slasher LMDB backend. slasher-lmdb = ["slasher/lmdb"] -# Support for inclusion of withdrawals fields in all capella consensus types in all APIs. -withdrawals = ["types/withdrawals", "beacon_node/withdrawals"] # Support for withdrawals consensus processing logic. 
withdrawals-processing = ["beacon_node/withdrawals-processing"] diff --git a/scripts/cross/Dockerfile b/scripts/cross/Dockerfile index 5472b980bad..cee4b74d9df 100644 --- a/scripts/cross/Dockerfile +++ b/scripts/cross/Dockerfile @@ -9,6 +9,6 @@ RUN apt-get install -y unzip && \ unzip protoc.zip -d /usr && \ chmod +x /usr/bin/protoc -RUN apt-get install -y cmake clang-3.9 +RUN apt-get install -y cmake clang-3.9 && ln -s ../lib/llvm-3.9/bin/clang /usr/bin/clang ENV PROTOC=/usr/bin/protoc diff --git a/scripts/local_testnet/el_bootnode.sh b/scripts/local_testnet/el_bootnode.sh index 1f96bce2c2b..1b8834b8904 100755 --- a/scripts/local_testnet/el_bootnode.sh +++ b/scripts/local_testnet/el_bootnode.sh @@ -1,4 +1,5 @@ priv_key="02fd74636e96a8ffac8e7b01b0de8dea94d6bcf4989513b38cf59eb32163ff91" +source ./vars.env -/home/sean/CLionProjects/eip4844-interop/geth/go-ethereum/build/bin/bootnode --nodekeyhex $priv_key \ No newline at end of file +$BOOTNODE_BINARY --nodekeyhex $priv_key \ No newline at end of file diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index dbfc41e91f5..9a7c22ea586 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -1,4 +1,5 @@ GETH_BINARY=geth +BOOTNODE_BINARY=bootnode # Base directories for the validator keys and secrets DATADIR=~/.lighthouse/local-testnet diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index 32e2d5648df..a92bf6dbd6b 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,5 +1,5 @@ FROM rust:1.62.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev COPY . lighthouse # Build lighthouse directly with a cargo build command, bypassing the Makefile. 
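The `KzgProof::empty()` constructor added in the kzg_proof.rs hunk above writes 192 into the first byte: that is 0xC0, the BLS12-381 compressed-point encoding with both the compression flag (0x80) and the point-at-infinity flag (0x40) set, i.e. the identity element serving as a trivial aggregated proof. A self-contained check of that flag arithmetic:

fn main() {
    const COMPRESSED: u8 = 0b1000_0000; // "this is a compressed encoding" flag
    const INFINITY: u8 = 0b0100_0000; // "this is the point at infinity" flag
    let mut bytes = [0u8; 48];
    bytes[0] = COMPRESSED | INFINITY;
    assert_eq!(bytes[0], 192); // matches KzgProof::empty()
}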
diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 1b42b42ff10..23bb29364d8 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -9,7 +9,6 @@ edition = "2021" ef_tests = [] milagro = ["bls/milagro"] fake_crypto = ["bls/fake_crypto"] -withdrawals = ["state_processing/withdrawals", "store/withdrawals", "beacon_chain/withdrawals", "types/withdrawals", "execution_layer/withdrawals"] withdrawals-processing = ["state_processing/withdrawals-processing", "store/withdrawals-processing", "beacon_chain/withdrawals-processing", "execution_layer/withdrawals-processing"] [dependencies] diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 10230ccf9da..d52f546dc26 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.3.0-alpha.1-hotfix +TESTS_TAG := v1.3.0-alpha.2 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index bb5959ebebe..a2356519ad5 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -4,8 +4,9 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; use serde_derive::Deserialize; +#[cfg(feature = "withdrawals-processing")] use state_processing::per_block_processing::process_operations::process_bls_to_execution_changes; -#[cfg(feature = "withdrawals")] +#[cfg(feature = "withdrawals-processing")] use state_processing::per_block_processing::process_withdrawals; use state_processing::{ per_block_processing::{ @@ -21,7 +22,7 @@ use state_processing::{ }; use std::fmt::Debug; use std::path::Path; -#[cfg(feature = "withdrawals")] +#[cfg(feature = "withdrawals-processing")] use types::SignedBlsToExecutionChange; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, @@ -41,7 +42,7 @@ struct ExecutionMetadata { } /// Newtype for testing withdrawals. 
-#[cfg(feature = "withdrawals")] +#[cfg(feature = "withdrawals-processing")] #[derive(Debug, Clone, Deserialize)] pub struct WithdrawalsPayload { payload: FullPayload, @@ -340,6 +341,7 @@ impl Operation for BlindedPayload { } } +#[cfg(feature = "withdrawals-processing")] impl Operation for WithdrawalsPayload { fn handler_name() -> String { "withdrawals".into() @@ -354,10 +356,6 @@ impl Operation for WithdrawalsPayload { return false; } - if !cfg!(feature = "withdrawals") { - return false; - } - fork_name != ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge } @@ -370,7 +368,7 @@ impl Operation for WithdrawalsPayload { }) } - #[cfg(feature = "withdrawals")] + #[cfg(feature = "withdrawals-processing")] fn apply_to( &self, state: &mut BeaconState, @@ -384,19 +382,9 @@ impl Operation for WithdrawalsPayload { process_withdrawals::<_, FullPayload<_>>(state, self.payload.to_ref(), spec) } } - - #[cfg(not(feature = "withdrawals"))] - fn apply_to( - &self, - state: &mut BeaconState, - spec: &ChainSpec, - _: &Operations, - ) -> Result<(), BlockProcessingError> { - Ok(()) - } } -#[cfg(feature = "withdrawals")] +#[cfg(feature = "withdrawals-processing")] impl Operation for SignedBlsToExecutionChange { fn handler_name() -> String { "bls_to_execution_change".into() diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index fd3bf2bd1b5..a4d4f2d52d4 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,5 +1,5 @@ pub use case_result::CaseResult; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] pub use cases::WithdrawalsPayload; pub use cases::{ Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 396a12af5bc..86208a391bb 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -82,14 +82,14 @@ fn operations_execution_payload_blinded() { OperationsHandler::>::default().run(); } -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] #[test] fn operations_withdrawals() { OperationsHandler::>::default().run(); OperationsHandler::>::default().run(); } -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] #[test] fn operations_bls_to_execution_change() { OperationsHandler::::default().run(); diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index b5923aafe5d..e058d58afb6 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -23,5 +23,4 @@ hex = "0.4.2" fork_choice = { path = "../../consensus/fork_choice" } [features] -default = [] -withdrawals = [] \ No newline at end of file +default = [] \ No newline at end of file diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 5aa24a2b022..d453d7038ad 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -212,9 +212,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::() + .get_lighthouse_spec::() .await - .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .map(|res| ConfigAndPreset::Capella(res.data)) .unwrap(); let expected = 
ConfigAndPreset::from_chain_spec::<E>(&E::default_spec(), None);
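Taken together, the `BlockWrapper` hunks above mean callers can no longer match blobs out of an enum; they go through `deconstruct`, which reconstructs an empty sidecar for a commitment-free Eip4844 block, returns `Ok(None)` for pre-Eip4844 blocks, and surfaces `BlobsMissing` when commitments exist but no sidecar was received. A sketch of a caller under those semantics (import paths are approximate):

use std::sync::Arc;
use types::signed_beacon_block::BlobReconstructionError;
use types::{BlockWrapper, EthSpec, Hash256, SignedBeaconBlock};

fn split<E: EthSpec>(
    wrapper: BlockWrapper<E>,
    root: Hash256,
) -> Result<Arc<SignedBeaconBlock<E>>, BlobReconstructionError> {
    let (block, blobs) = wrapper.deconstruct(Some(root));
    match blobs? {
        Some(_sidecar) => { /* store the sidecar alongside the block */ }
        None => { /* pre-Eip4844 block: nothing extra to persist */ }
    }
    Ok(block)
}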