From 0a2c6f76c790e2117ed4a225ef312795eb26f240 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 18 Oct 2024 14:27:48 +1100 Subject: [PATCH] Publish block without waiting for blob and column proof computation. --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 +- .../beacon_chain/src/blob_verification.rs | 6 - .../src/data_column_verification.rs | 2 - .../beacon_chain/tests/block_verification.rs | 4 +- beacon_node/beacon_chain/tests/events.rs | 2 +- beacon_node/http_api/src/publish_blocks.rs | 485 ++++++++++-------- .../tests/broadcast_validation_tests.rs | 8 +- .../gossip_methods.rs | 5 +- testing/ef_tests/src/cases/fork_choice.rs | 4 +- 9 files changed, 275 insertions(+), 249 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index dd70ba8c9a1..0d1609bd6f4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2977,7 +2977,6 @@ impl BeaconChain { pub async fn process_gossip_blob( self: &Arc, blob: GossipVerifiedBlob, - publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { let block_root = blob.block_root(); @@ -2998,9 +2997,7 @@ impl BeaconChain { self.emit_sse_blob_sidecar_events(&block_root, std::iter::once(blob.as_blob())); - let r = self - .check_gossip_blob_availability_and_import(blob, publish_fn) - .await; + let r = self.check_gossip_blob_availability_and_import(blob).await; self.remove_notified(&block_root, r) } @@ -3488,7 +3485,6 @@ impl BeaconChain { async fn check_gossip_blob_availability_and_import( self: &Arc, blob: GossipVerifiedBlob, - publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { let slot = blob.slot(); if let Some(slasher) = self.slasher.as_ref() { @@ -3496,7 +3492,7 @@ impl BeaconChain { } let availability = self.data_availability_checker.put_gossip_blob(blob)?; - self.process_availability(slot, availability, None, publish_fn) + self.process_availability(slot, availability, None, || Ok(())) .await } diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index c9c4f6dceb2..f7de26a6ca7 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -12,7 +12,6 @@ use crate::{metrics, BeaconChainError}; use kzg::{Error as KzgError, Kzg, KzgCommitment}; use slog::debug; use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; use std::time::Duration; use tree_hash::TreeHash; use types::blob_sidecar::BlobIdentifier; @@ -156,11 +155,6 @@ impl From for GossipBlobError { } } -pub type GossipVerifiedBlobList = VariableList< - GossipVerifiedBlob, - <::EthSpec as EthSpec>::MaxBlobsPerBlock, ->; - /// A wrapper around a `BlobSidecar` that indicates it has been approved for re-gossiping on /// the p2p network. #[derive(Debug)] diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index a4e83b27514..62536e67c68 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -160,8 +160,6 @@ impl From for GossipDataColumnError { } } -pub type GossipVerifiedDataColumnList = RuntimeVariableList>; - /// A wrapper around a `DataColumnSidecar` that indicates it has been approved for re-gossiping on /// the p2p network. 
#[derive(Debug)] diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index ceb1b51fe3c..f094a173eec 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -976,7 +976,7 @@ async fn block_gossip_verification() { harness .chain - .process_gossip_blob(gossip_verified, || Ok(())) + .process_gossip_blob(gossip_verified) .await .expect("should import valid gossip verified blob"); } @@ -1247,7 +1247,7 @@ async fn verify_block_for_gossip_slashing_detection() { .unwrap(); harness .chain - .process_gossip_blob(verified_blob, || Ok(())) + .process_gossip_blob(verified_blob) .await .unwrap(); } diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index 31e69f0524e..ab784d3be45 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -35,7 +35,7 @@ async fn blob_sidecar_event_on_process_gossip_blob() { let _ = harness .chain - .process_gossip_blob(gossip_verified_blob, || Ok(())) + .process_gossip_blob(gossip_verified_blob) .await .unwrap(); diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index fceeb2dd231..11effe2845b 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,4 +1,5 @@ use crate::metrics; +use std::future::Future; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; @@ -13,9 +14,10 @@ use eth2::types::{ PublishBlockRequest, SignedBlockContents, }; use execution_layer::ProvenancedPayload; +use futures::TryFutureExt; use lighthouse_network::{NetworkGlobals, PubsubMessage}; use network::NetworkMessage; -use rand::seq::SliceRandom; +use rand::prelude::SliceRandom; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::marker::PhantomData; @@ -26,9 +28,8 @@ use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ AbstractExecPayload, BeaconBlockRef, BlobSidecar, BlobsList, BlockImportSource, - DataColumnSidecarList, DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, - FullPayload, FullPayloadBellatrix, Hash256, KzgProofs, SignedBeaconBlock, - SignedBlindedBeaconBlock, + DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, + FullPayloadBellatrix, Hash256, KzgProofs, SignedBeaconBlock, SignedBlindedBeaconBlock, }; use warp::http::StatusCode; use warp::{reply::Response, Rejection, Reply}; @@ -97,14 +98,9 @@ pub async fn publish_block>( }; let block = unverified_block.inner_block(); debug!(log, "Signed block received in HTTP API"; "slot" => block.slot()); - let malicious_withhold_count = chain.config.malicious_withhold_count; - let chain_cloned = chain.clone(); /* actually publish a block */ let publish_block_p2p = move |block: Arc>, - should_publish_block: bool, - blob_sidecars: Vec>>, - mut data_column_sidecars: DataColumnSidecarList, sender, log, seen_timestamp| @@ -120,53 +116,16 @@ pub async fn publish_block>( publish_delay, ); - let mut pubsub_messages = if should_publish_block { - info!( - log, - "Signed block published to network via HTTP API"; - "slot" => block.slot(), - "blobs_published" => blob_sidecars.len(), - "publish_delay_ms" => publish_delay.as_millis(), - ); - vec![PubsubMessage::BeaconBlock(block.clone())] - } else { - vec![] - }; + info!( + log, + "Signed block 
published to network via HTTP API"; + "slot" => block.slot(), + "publish_delay_ms" => publish_delay.as_millis(), + ); - match block.as_ref() { - SignedBeaconBlock::Base(_) - | SignedBeaconBlock::Altair(_) - | SignedBeaconBlock::Bellatrix(_) - | SignedBeaconBlock::Capella(_) => { - crate::publish_pubsub_messages(&sender, pubsub_messages) - .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; - } - SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => { - for blob in blob_sidecars.into_iter() { - pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new((blob.index, blob)))); - } - if malicious_withhold_count > 0 { - let columns_to_keep = data_column_sidecars - .len() - .saturating_sub(malicious_withhold_count); - // Randomize columns before dropping the last malicious_withhold_count items - data_column_sidecars.shuffle(&mut rand::thread_rng()); - drop(data_column_sidecars.drain(columns_to_keep..)); - } + crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())) + .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; - for data_col in data_column_sidecars { - let subnet = DataColumnSubnetId::from_column_index::( - data_col.index as usize, - &chain_cloned.spec, - ); - pubsub_messages.push(PubsubMessage::DataColumnSidecar(Box::new(( - subnet, data_col, - )))); - } - crate::publish_pubsub_messages(&sender, pubsub_messages) - .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; - } - }; Ok(()) }; @@ -174,145 +133,11 @@ pub async fn publish_block>( let slot = block.message().slot(); let sender_clone = network_tx.clone(); - // Convert blobs to either: - // - // 1. Blob sidecars if prior to peer DAS, or - // 2. Data column sidecars if post peer DAS. - let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); - - let (blob_sidecars, data_column_sidecars) = match unverified_blobs { - // Pre-PeerDAS: construct blob sidecars for the network. - Some((kzg_proofs, blobs)) if !peer_das_enabled => { - let blob_sidecars = kzg_proofs - .into_iter() - .zip(blobs) - .enumerate() - .map(|(i, (proof, unverified_blob))| { - let _timer = metrics::start_timer( - &beacon_chain::metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION, - ); - let blob_sidecar = - BlobSidecar::new(i, unverified_blob, &block, proof).map(Arc::new); - blob_sidecar.map_err(|e| { - error!( - log, - "Invalid blob - not publishing block"; - "error" => ?e, - "blob_index" => i, - "slot" => slot, - ); - warp_utils::reject::custom_bad_request(format!("{e:?}")) - }) - }) - .collect::, Rejection>>()?; - (blob_sidecars, vec![]) - } - // Post PeerDAS: construct data columns. - Some((_, blobs)) => { - // TODO(das): this is sub-optimal and should likely not be happening prior to gossip - // block publishing. - let data_column_sidecars = build_blob_data_column_sidecars(&chain, &block, blobs) - .map_err(|e| { - error!( - log, - "Invalid data column - not publishing block"; - "error" => ?e, - "slot" => slot - ); - warp_utils::reject::custom_bad_request(format!("{e:?}")) - })?; - (vec![], data_column_sidecars) - } - None => (vec![], vec![]), - }; + let build_sidecar_task_handle = + spawn_build_data_sidecar_task(chain.clone(), block.clone(), unverified_blobs, log.clone())?; // Gossip verify the block and blobs/data columns separately. 
let gossip_verified_block_result = unverified_block.into_gossip_verified_block(&chain); - let gossip_verified_blobs = blob_sidecars - .into_iter() - .map(|blob_sidecar| { - let gossip_verified_blob = - GossipVerifiedBlob::new(blob_sidecar.clone(), blob_sidecar.index, &chain); - - match gossip_verified_blob { - Ok(blob) => Ok(Some(blob)), - Err(GossipBlobError::RepeatBlob { proposer, .. }) => { - // Log the error but do not abort publication, we may need to publish the block - // or some of the other blobs if the block & blobs are only partially published - // by the other publisher. - debug!( - log, - "Blob for publication already known"; - "blob_index" => blob_sidecar.index, - "slot" => slot, - "proposer" => proposer, - ); - Ok(None) - } - Err(e) => { - error!( - log, - "Blob for publication is gossip-invalid"; - "blob_index" => blob_sidecar.index, - "slot" => slot, - "error" => ?e, - ); - Err(warp_utils::reject::custom_bad_request(e.to_string())) - } - } - }) - .collect::, Rejection>>()?; - - let gossip_verified_data_columns = data_column_sidecars - .into_iter() - .map(|data_column_sidecar| { - let column_index = data_column_sidecar.index as usize; - let subnet = - DataColumnSubnetId::from_column_index::(column_index, &chain.spec); - let gossip_verified_column = - GossipVerifiedDataColumn::new(data_column_sidecar, subnet.into(), &chain); - - match gossip_verified_column { - Ok(blob) => Ok(Some(blob)), - Err(GossipDataColumnError::PriorKnown { proposer, .. }) => { - // Log the error but do not abort publication, we may need to publish the block - // or some of the other data columns if the block & data columns are only - // partially published by the other publisher. - debug!( - log, - "Data column for publication already known"; - "column_index" => column_index, - "slot" => slot, - "proposer" => proposer, - ); - Ok(None) - } - Err(e) => { - error!( - log, - "Data column for publication is gossip-invalid"; - "column_index" => column_index, - "slot" => slot, - "error" => ?e, - ); - Err(warp_utils::reject::custom_bad_request(format!("{e:?}"))) - } - } - }) - .collect::, Rejection>>()?; - - let publishable_blobs = gossip_verified_blobs - .iter() - .flatten() - .map(|b| b.clone_blob()) - .collect::>(); - - let publishable_data_columns = gossip_verified_data_columns - .iter() - .flatten() - .map(|b| b.clone_data_column()) - .collect::>(); - let block_root = block_root.unwrap_or_else(|| { gossip_verified_block_result.as_ref().map_or_else( |_| block.canonical_root(), @@ -321,12 +146,9 @@ pub async fn publish_block>( }); let should_publish_block = gossip_verified_block_result.is_ok(); - if let BroadcastValidation::Gossip = validation_level { + if BroadcastValidation::Gossip == validation_level && should_publish_block { publish_block_p2p( block.clone(), - should_publish_block, - publishable_blobs.clone(), - publishable_data_columns.clone(), sender_clone.clone(), log.clone(), seen_timestamp, @@ -337,38 +159,41 @@ pub async fn publish_block>( let publish_fn_completed = Arc::new(AtomicBool::new(false)); let block_to_publish = block.clone(); let publish_fn = || { - match validation_level { - BroadcastValidation::Gossip => (), - BroadcastValidation::Consensus => publish_block_p2p( - block_to_publish.clone(), - should_publish_block, - publishable_blobs.clone(), - publishable_data_columns.clone(), - sender_clone.clone(), - log.clone(), - seen_timestamp, - )?, - BroadcastValidation::ConsensusAndEquivocation => { - check_slashable(&chain, block_root, &block_to_publish, &log)?; - publish_block_p2p( + if 
should_publish_block { + match validation_level { + BroadcastValidation::Gossip => (), + BroadcastValidation::Consensus => publish_block_p2p( block_to_publish.clone(), - should_publish_block, - publishable_blobs.clone(), - publishable_data_columns.clone(), sender_clone.clone(), log.clone(), seen_timestamp, - )?; - } - }; + )?, + BroadcastValidation::ConsensusAndEquivocation => { + check_slashable(&chain, block_root, &block_to_publish, &log)?; + publish_block_p2p( + block_to_publish.clone(), + sender_clone.clone(), + log.clone(), + seen_timestamp, + )?; + } + }; + } + publish_fn_completed.store(true, Ordering::SeqCst); Ok(()) }; + // Wait for blobs/columns to get gossip verified before proceeding further as we need them for import. + let (gossip_verified_blobs, gossip_verified_columns) = build_sidecar_task_handle.await?; + for blob in gossip_verified_blobs.into_iter().flatten() { + publish_blob_sidecars(network_tx, &blob).map_err(|_| { + warp_utils::reject::custom_server_error("unable to publish blob sidecars".into()) + })?; // Importing the blobs could trigger block import and network publication in the case // where the block was already seen on gossip. - if let Err(e) = Box::pin(chain.process_gossip_blob(blob, &publish_fn)).await { + if let Err(e) = Box::pin(chain.process_gossip_blob(blob)).await { let msg = format!("Invalid blob: {e}"); return if let BroadcastValidation::Gossip = validation_level { Err(warp_utils::reject::broadcast_without_import(msg)) @@ -383,14 +208,12 @@ pub async fn publish_block>( } } - if gossip_verified_data_columns - .iter() - .map(Option::is_some) - .count() - > 0 - { + if gossip_verified_columns.iter().map(Option::is_some).count() > 0 { + publish_column_sidecars(network_tx, &gossip_verified_columns, &chain).map_err(|_| { + warp_utils::reject::custom_server_error("unable to publish data column sidecars".into()) + })?; let sampling_columns_indices = &network_globals.sampling_columns; - let sampling_columns = gossip_verified_data_columns + let sampling_columns = gossip_verified_columns .into_iter() .flatten() .filter(|data_column| sampling_columns_indices.contains(&data_column.index())) @@ -501,6 +324,224 @@ pub async fn publish_block>( } } +type BuildDataSidecarTaskResult = Result< + ( + Vec>>, + Vec>>, + ), + Rejection, +>; + +/// Convert blobs to either: +/// +/// 1. Blob sidecars if prior to peer DAS, or +/// 2. Data column sidecars if post peer DAS. +fn spawn_build_data_sidecar_task( + chain: Arc>, + block: Arc>>, + proofs_and_blobs: UnverifiedBlobs, + log: Logger, +) -> Result>, Rejection> { + chain + .clone() + .task_executor + .spawn_blocking_handle( + move || { + let Some((kzg_proofs, blobs)) = proofs_and_blobs else { + return Ok((vec![], vec![])); + }; + + let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); + if !peer_das_enabled { + // Pre-PeerDAS: construct blob sidecars for the network. + let gossip_verified_blobs = + build_gossip_verified_blobs(&chain, &block, blobs, kzg_proofs, &log)?; + Ok((gossip_verified_blobs, vec![])) + } else { + // Post PeerDAS: construct data columns. 
+ let gossip_verified_data_columns = + build_gossip_verified_data_columns(&chain, &block, blobs, &log)?; + Ok((vec![], gossip_verified_data_columns)) + } + }, + "build_data_sidecars", + ) + .ok_or(warp_utils::reject::custom_server_error( + "runtime shutdown".to_string(), + )) + .map(|r| { + r.map_err(|_| warp_utils::reject::custom_server_error("join error".to_string())) + .and_then(|output| async move { output }) + }) +} + +fn build_gossip_verified_data_columns( + chain: &BeaconChain, + block: &SignedBeaconBlock>, + blobs: BlobsList, + log: &Logger, +) -> Result>>, Rejection> { + let slot = block.slot(); + let data_column_sidecars = + build_blob_data_column_sidecars(chain, block, blobs).map_err(|e| { + error!( + log, + "Invalid data column - not publishing block"; + "error" => ?e, + "slot" => slot + ); + warp_utils::reject::custom_bad_request(format!("{e:?}")) + })?; + + let slot = block.slot(); + let gossip_verified_data_columns = data_column_sidecars + .into_iter() + .map(|data_column_sidecar| { + let column_index = data_column_sidecar.index as usize; + let subnet = + DataColumnSubnetId::from_column_index::(column_index, &chain.spec); + let gossip_verified_column = + GossipVerifiedDataColumn::new(data_column_sidecar, subnet.into(), chain); + + match gossip_verified_column { + Ok(blob) => Ok(Some(blob)), + Err(GossipDataColumnError::PriorKnown { proposer, .. }) => { + // Log the error but do not abort publication, we may need to publish the block + // or some of the other data columns if the block & data columns are only + // partially published by the other publisher. + debug!( + log, + "Data column for publication already known"; + "column_index" => column_index, + "slot" => slot, + "proposer" => proposer, + ); + Ok(None) + } + Err(e) => { + error!( + log, + "Data column for publication is gossip-invalid"; + "column_index" => column_index, + "slot" => slot, + "error" => ?e, + ); + Err(warp_utils::reject::custom_bad_request(format!("{e:?}"))) + } + } + }) + .collect::, Rejection>>()?; + + Ok(gossip_verified_data_columns) +} + +fn build_gossip_verified_blobs( + chain: &BeaconChain, + block: &SignedBeaconBlock>, + blobs: BlobsList, + kzg_proofs: KzgProofs, + log: &Logger, +) -> Result>>, Rejection> { + let slot = block.slot(); + let gossip_verified_blobs = kzg_proofs + .into_iter() + .zip(blobs) + .enumerate() + .map(|(i, (proof, unverified_blob))| { + let timer = metrics::start_timer( + &beacon_chain::metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION, + ); + let blob_sidecar = BlobSidecar::new(i, unverified_blob, block, proof) + .map(Arc::new) + .map_err(|e| { + error!( + log, + "Invalid blob - not publishing block"; + "error" => ?e, + "blob_index" => i, + "slot" => slot, + ); + warp_utils::reject::custom_bad_request(format!("{e:?}")) + })?; + drop(timer); + + let gossip_verified_blob = + GossipVerifiedBlob::new(blob_sidecar.clone(), blob_sidecar.index, chain); + + match gossip_verified_blob { + Ok(blob) => Ok(Some(blob)), + Err(GossipBlobError::RepeatBlob { proposer, .. }) => { + // Log the error but do not abort publication, we may need to publish the block + // or some of the other blobs if the block & blobs are only partially published + // by the other publisher. 
+ debug!( + log, + "Blob for publication already known"; + "blob_index" => blob_sidecar.index, + "slot" => slot, + "proposer" => proposer, + ); + Ok(None) + } + Err(e) => { + error!( + log, + "Blob for publication is gossip-invalid"; + "blob_index" => blob_sidecar.index, + "slot" => slot, + "error" => ?e, + ); + Err(warp_utils::reject::custom_bad_request(e.to_string())) + } + } + }) + .collect::, Rejection>>()?; + + Ok(gossip_verified_blobs) +} + +fn publish_column_sidecars( + sender_clone: &UnboundedSender>, + data_column_sidecars: &[Option>], + chain: &BeaconChain, +) -> Result<(), BlockError> { + let malicious_withhold_count = chain.config.malicious_withhold_count; + let mut data_column_sidecars = data_column_sidecars + .iter() + .flatten() + .map(|d| d.clone_data_column()) + .collect::>(); + if malicious_withhold_count > 0 { + let columns_to_keep = data_column_sidecars + .len() + .saturating_sub(malicious_withhold_count); + // Randomize columns before dropping the last malicious_withhold_count items + data_column_sidecars.shuffle(&mut rand::thread_rng()); + data_column_sidecars.truncate(columns_to_keep); + } + let pubsub_messages = data_column_sidecars + .into_iter() + .map(|data_col| { + let subnet = DataColumnSubnetId::from_column_index::( + data_col.index as usize, + &chain.spec, + ); + PubsubMessage::DataColumnSidecar(Box::new((subnet, data_col))) + }) + .collect::>(); + crate::publish_pubsub_messages(sender_clone, pubsub_messages) + .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) +} + +fn publish_blob_sidecars( + sender_clone: &UnboundedSender>, + blob: &GossipVerifiedBlob, +) -> Result<(), BlockError> { + let pubsub_message = PubsubMessage::BlobSidecar(Box::new((blob.index(), blob.clone_blob()))); + crate::publish_pubsub_message(sender_clone, pubsub_message) + .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) +} + async fn post_block_import_logging_and_response( result: Result, validation_level: BroadcastValidation, diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index f55983ec66a..1338f4f1802 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1486,7 +1486,7 @@ pub async fn block_seen_on_gossip_with_some_blobs() { tester .harness .chain - .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .process_gossip_blob(gossip_blob) .await .unwrap(); } @@ -1559,7 +1559,7 @@ pub async fn blobs_seen_on_gossip_without_block() { tester .harness .chain - .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .process_gossip_blob(gossip_blob) .await .unwrap(); } @@ -1633,7 +1633,7 @@ pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { tester .harness .chain - .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .process_gossip_blob(gossip_blob) .await .unwrap(); } @@ -1705,7 +1705,7 @@ pub async fn slashable_blobs_seen_on_gossip_cause_failure() { tester .harness .chain - .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .process_gossip_blob(gossip_blob) .await .unwrap(); } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index b5da8e82a77..aa7194bb056 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ 
b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -914,10 +914,7 @@ impl NetworkBeaconProcessor { let blob_slot = verified_blob.slot(); let blob_index = verified_blob.id().index; - let result = self - .chain - .process_gossip_blob(verified_blob, || Ok(())) - .await; + let result = self.chain.process_gossip_blob(verified_blob).await; match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 8d933a6fcd5..33ae132e8a2 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -505,8 +505,8 @@ impl Tester { } Err(_) => GossipVerifiedBlob::__assumed_valid(blob_sidecar), }; - let result = self - .block_on_dangerous(self.harness.chain.process_gossip_blob(blob, || Ok(())))?; + let result = + self.block_on_dangerous(self.harness.chain.process_gossip_blob(blob))?; if valid { assert!(result.is_ok()); }
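
For context, the core ordering change made in publish_blocks.rs is easier to see stripped of Lighthouse's types. The sketch below is not Lighthouse code: all names (Block, Sidecar, build_sidecars, publish) are placeholders and it assumes a plain tokio dependency with the "macros" and "rt-multi-thread" features. It only illustrates the pattern the patch introduces: spawn the expensive blob/column proof computation on a blocking task, publish the block to gossip immediately, and only afterwards await, publish, and import the sidecars.

// Minimal sketch of the new publish ordering, under the assumptions stated above.
use std::time::Duration;

#[derive(Clone, Debug)]
struct Block {
    slot: u64,
}

#[derive(Debug)]
struct Sidecar {
    slot: u64,
    index: usize,
}

/// Stand-in for the CPU-heavy KZG inclusion-proof / cell-proof computation.
fn build_sidecars(block: &Block, blob_count: usize) -> Vec<Sidecar> {
    std::thread::sleep(Duration::from_millis(50)); // pretend this is expensive work
    (0..blob_count)
        .map(|index| Sidecar { slot: block.slot, index })
        .collect()
}

/// Stand-in for pushing a message onto the gossip network.
fn publish(msg: &str) {
    println!("published: {msg}");
}

#[tokio::main]
async fn main() {
    let block = Block { slot: 42 };
    let blob_count = 3;

    // 1. Kick off sidecar construction on a blocking thread, analogous to
    //    `spawn_build_data_sidecar_task` in the patch.
    let block_for_task = block.clone();
    let sidecar_handle =
        tokio::task::spawn_blocking(move || build_sidecars(&block_for_task, blob_count));

    // 2. Publish the block right away, without waiting on proof computation.
    publish(&format!("block at slot {}", block.slot));

    // 3. Only now wait for the sidecars, then publish and import them.
    let sidecars = sidecar_handle.await.expect("sidecar task panicked");
    for sidecar in &sidecars {
        publish(&format!("sidecar {} for slot {}", sidecar.index, sidecar.slot));
        // ...hand the sidecar to the availability checker / import path here...
    }
}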