diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9c7ded313b6..b760e9697bb 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -11,8 +11,8 @@ use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::POS_PANDA_BANNER; use crate::block_verification::{ - check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, - signature_verify_chain_segment, verify_header_signature, BlockError, ExecutionPendingBlock, + check_block_relevancy, signature_verify_chain_segment, verify_header_signature, + BlobProcessError, BlockError, BlockImportError, BlockProcessError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, }; use crate::block_verification_types::{ @@ -2778,18 +2778,23 @@ impl BeaconChain { } } } - Err(BlockError::BlockIsAlreadyKnown(block_root)) => { + Err(BlockProcessError::BlockError(BlockError::BlockIsAlreadyKnown( + block_root, + ))) => { debug!(self.log, "Ignoring already known blocks while processing chain segment"; "block_root" => ?block_root); continue; } - Err(error) => { + Err(BlockProcessError::BlockError(error)) => { return ChainSegmentResult::Failed { imported_blocks, error, }; } + Err(other) => { + todo!(); + } } } } @@ -2858,7 +2863,7 @@ impl BeaconChain { pub async fn process_gossip_blob( self: &Arc, blob: GossipVerifiedBlob, - ) -> Result> { + ) -> Result { let block_root = blob.block_root(); // If this block has already been imported to forkchoice it must have been available, so @@ -2868,7 +2873,7 @@ impl BeaconChain { .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(blob.block_root())); + return Err(BlobProcessError::AlreadyImported(blob.block_root())); } if let Some(event_handler) = self.event_handler.as_ref() { @@ -2890,7 +2895,7 @@ impl BeaconChain { slot: Slot, block_root: Hash256, blobs: FixedBlobSidecarList, - ) -> Result> { + ) -> Result { // If this block has already been imported to forkchoice it must have been available, so // we don't need to process its blobs again. if self @@ -2898,7 +2903,7 @@ impl BeaconChain { .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(block_root)); + return Err(BlobProcessError::AlreadyImported(block_root)); } if let Some(event_handler) = self.event_handler.as_ref() { @@ -2919,11 +2924,11 @@ impl BeaconChain { /// Remove any block components from the *processing cache* if we no longer require them. If the /// block was imported full or erred, we no longer require them. - fn remove_notified( + fn remove_notified( &self, block_root: &Hash256, - r: Result>, - ) -> Result> { + r: Result, + ) -> Result { let has_missing_components = matches!(r, Ok(AvailabilityProcessingStatus::MissingComponents(_, _))); if !has_missing_components { @@ -2939,7 +2944,7 @@ impl BeaconChain { block_root: Hash256, unverified_block: B, notify_execution_layer: NotifyExecutionLayer, - ) -> Result> { + ) -> Result> { self.reqresp_pre_import_cache .write() .insert(block_root, unverified_block.block_cloned()); @@ -2971,7 +2976,7 @@ impl BeaconChain { unverified_block: B, notify_execution_layer: NotifyExecutionLayer, publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, - ) -> Result> { + ) -> Result> { // Start the Prometheus timer. 
let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -3012,9 +3017,10 @@ impl BeaconChain { } match executed_block { - ExecutedBlock::Available(block) => { - self.import_available_block(Box::new(block)).await - } + ExecutedBlock::Available(block) => self + .import_available_block(Box::new(block)) + .await + .map_err(|e| e.into()), ExecutedBlock::AvailabilityPending(block) => { self.check_block_availability_and_import(block).await } @@ -3047,7 +3053,7 @@ impl BeaconChain { Ok(status) } - Err(e @ BlockError::BeaconChainError(BeaconChainError::TokioJoin(_))) => { + Err(e @ BlockProcessError::BeaconChainError(BeaconChainError::TokioJoin(_))) => { debug!( self.log, "Beacon block processing cancelled"; @@ -3057,20 +3063,20 @@ impl BeaconChain { } // There was an error whilst attempting to verify and import the block. The block might // be partially verified or partially imported. - Err(BlockError::BeaconChainError(e)) => { + Err(BlockProcessError::BeaconChainError(e)) => { crit!( self.log, "Beacon block processing error"; "error" => ?e, ); - Err(BlockError::BeaconChainError(e)) + Err(BlockProcessError::BeaconChainError(e)) } // The block failed verification. Err(other) => { debug!( self.log, "Beacon block rejected"; - "reason" => other.to_string(), + "reason" => ?other, // TODO ); Err(other) } @@ -3139,12 +3145,14 @@ impl BeaconChain { async fn check_block_availability_and_import( self: &Arc, block: AvailabilityPendingExecutedBlock, - ) -> Result> { + ) -> Result> { let slot = block.block.slot(); let availability = self .data_availability_checker .put_pending_executed_block(block)?; - self.process_availability(slot, availability).await + self.process_availability(slot, availability) + .await + .map_err(|e| e.into()) } /// Checks if the provided blob can make any cached blocks available, and imports immediately @@ -3152,14 +3160,16 @@ impl BeaconChain { async fn check_gossip_blob_availability_and_import( self: &Arc, blob: GossipVerifiedBlob, - ) -> Result> { + ) -> Result { let slot = blob.slot(); if let Some(slasher) = self.slasher.as_ref() { slasher.accept_block_header(blob.signed_block_header()); } let availability = self.data_availability_checker.put_gossip_blob(blob)?; - self.process_availability(slot, availability).await + self.process_availability(slot, availability) + .await + .map_err(|e| e.into()) } /// Checks if the provided blobs can make any cached blocks available, and imports immediately @@ -3169,7 +3179,7 @@ impl BeaconChain { slot: Slot, block_root: Hash256, blobs: FixedBlobSidecarList, - ) -> Result> { + ) -> Result { // Need to scope this to ensure the lock is dropped before calling `process_availability` // Even an explicit drop is not enough to convince the borrow checker. { @@ -3186,7 +3196,7 @@ impl BeaconChain { header.message.proposer_index, block_root, ) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; + .map_err(|e| BlobProcessError::BeaconChainError(e.into()))?; if let Some(slasher) = self.slasher.as_ref() { slasher.accept_block_header(header); } @@ -3197,7 +3207,9 @@ impl BeaconChain { .data_availability_checker .put_rpc_blobs(block_root, blobs)?; - self.process_availability(slot, availability).await + self.process_availability(slot, availability) + .await + .map_err(|e| e.into()) } /// Imports a fully available block. 
Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` @@ -3208,7 +3220,7 @@ impl BeaconChain { self: &Arc, slot: Slot, availability: Availability, - ) -> Result> { + ) -> Result { match availability { Availability::Available(block) => { // Block is fully available, import into fork choice @@ -3223,7 +3235,7 @@ impl BeaconChain { pub async fn import_available_block( self: &Arc, block: Box>, - ) -> Result> { + ) -> Result { let AvailableExecutedBlock { block, import_data, @@ -3286,7 +3298,7 @@ impl BeaconChain { parent_block: SignedBlindedBeaconBlock, parent_eth1_finalization_data: Eth1FinalizationData, mut consensus_context: ConsensusContext, - ) -> Result> { + ) -> Result { // ----------------------------- BLOCK NOT YET ATTESTABLE ---------------------------------- // Everything in this initial section is on the hot path between processing the block and // being able to attest to it. DO NOT add any extra processing in this initial section @@ -3331,8 +3343,11 @@ impl BeaconChain { let mut fork_choice = self.canonical_head.fork_choice_write_lock(); // Do not import a block that doesn't descend from the finalized root. - let signed_block = - check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, signed_block)?; + // Note: fork_choice.on_block() below checks that parent_root is known and is descendant of + // finalized checkpoint root. Block validation already checks that parent is known before + // importing the block. There maybe a race condition if we advance our finalized checkpoint + // between validating the block and importing it. In that case this block is useless and we + // can consider the failure internal i.e. a `BeaconChainError`. let block = signed_block.message(); // Register the new block with the fork choice service. @@ -3354,7 +3369,7 @@ impl BeaconChain { payload_verification_status, &self.spec, ) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; + .map_err(|e| BlockImportError::BeaconChainError(e.into()))?; } // If the block is recent enough and it was not optimistically imported, check to see if it @@ -3503,10 +3518,10 @@ impl BeaconChain { "error" => ?e, "warning" => "The database is likely corrupt now, consider --purge-db" ); - return Err(BlockError::BeaconChainError(e)); + return Err(BlockImportError::BeaconChainError(e)); } - return Err(e.into()); + return Err(BlockImportError::BeaconChainError(e.into())); } drop(txn_lock); @@ -3580,7 +3595,7 @@ impl BeaconChain { block: BeaconBlockRef, block_root: Hash256, state: &BeaconState, - ) -> Result<(), BlockError> { + ) -> Result<(), BlockImportError> { // Only perform the weak subjectivity check if it was configured. let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint else { return Ok(()); @@ -3625,11 +3640,11 @@ impl BeaconChain { Provided block root is not a checkpoint.", )) .map_err(|err| { - BlockError::BeaconChainError( + BlockImportError::BeaconChainError( BeaconChainError::WeakSubjectivtyShutdownError(err), ) })?; - return Err(BlockError::WeakSubjectivityConflict); + return Err(BlockImportError::WeakSubjectivityConflict); } } Ok(()) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 866dde5a763..b51bbc5764d 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -134,6 +134,92 @@ const MAXIMUM_BLOCK_SLOT_NUMBER: u64 = 4_294_967_296; // 2^32 /// Only useful for testing. 
 const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files");
 
+#[derive(Debug)]
+pub enum BlobProcessError {
+    BeaconChainError(BeaconChainError),
+    ImportError(BlockImportError),
+    AvailabilityCheckError(AvailabilityCheckError),
+    AlreadyImported(Hash256),
+}
+
+impl From<BeaconChainError> for BlobProcessError {
+    fn from(e: BeaconChainError) -> Self {
+        Self::BeaconChainError(e)
+    }
+}
+
+impl From<BlockImportError> for BlobProcessError {
+    fn from(e: BlockImportError) -> Self {
+        Self::ImportError(e)
+    }
+}
+
+impl From<AvailabilityCheckError> for BlobProcessError {
+    fn from(e: AvailabilityCheckError) -> Self {
+        Self::AvailabilityCheckError(e)
+    }
+}
+
+#[derive(Debug)]
+pub enum BlockProcessError<E: EthSpec> {
+    BlockError(BlockError<E>),
+    BeaconChainError(BeaconChainError),
+    ImportError(BlockImportError),
+    AvailabilityCheckError(AvailabilityCheckError),
+}
+
+impl<E: EthSpec> From<BlockError<E>> for BlockProcessError<E> {
+    fn from(e: BlockError<E>) -> Self {
+        match e {
+            BlockError::BeaconChainError(e) => Self::BeaconChainError(e),
+            e => Self::BlockError(e),
+        }
+    }
+}
+
+impl<E: EthSpec> From<BeaconChainError> for BlockProcessError<E> {
+    fn from(e: BeaconChainError) -> Self {
+        Self::BeaconChainError(e)
+    }
+}
+
+impl<E: EthSpec> From<BlockImportError> for BlockProcessError<E> {
+    fn from(e: BlockImportError) -> Self {
+        Self::ImportError(e)
+    }
+}
+
+impl<E: EthSpec> From<AvailabilityCheckError> for BlockProcessError<E> {
+    fn from(e: AvailabilityCheckError) -> Self {
+        Self::AvailabilityCheckError(e)
+    }
+}
+
+#[derive(Debug)]
+pub enum BlockImportError {
+    /// There was an error whilst processing the block. It is not necessarily invalid.
+    ///
+    /// ## Peer scoring
+    ///
+    /// We were unable to process this block due to an internal error. It's unclear if the block is
+    /// valid.
+    BeaconChainError(BeaconChainError),
+    /// There was an error whilst verifying weak subjectivity. This block conflicts with the
+    /// configured weak subjectivity checkpoint and was not imported.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The block is invalid and the peer is faulty.
+    WeakSubjectivityConflict,
+    AvailabilityCheckError(AvailabilityCheckError),
+}
+
+impl From<BeaconChainError> for BlockImportError {
+    fn from(e: BeaconChainError) -> Self {
+        BlockImportError::BeaconChainError(e)
+    }
+}
+
 /// Returned when a block was not verified. A block is not verified for two reasons:
 ///
 /// - The block is malformed/invalid (indicated by all results other than `BeaconChainError`.
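The three enums above split the old flat `BlockError` into layers: consensus-level rejections stay in `BlockError`, post-verification failures move to `BlockImportError`, and `BlockProcessError` / `BlobProcessError` become the top-level results of the processing entry points, with the `From` impls letting `?` widen the narrower errors. A minimal sketch of how a caller could turn the layered type into a peer-scoring decision follows; the helper name, the import paths, the `EthSpec` parameter (reconstructed above) and the exact treatment of import/availability failures are assumptions for illustration, not something this diff fixes.

// Sketch only: import paths and the `EthSpec` parameter follow the
// reconstruction above and may differ from the final PR.
use beacon_chain::block_verification::BlockImportError;
use beacon_chain::data_availability_checker::AvailabilityCheckErrorCategory;
use beacon_chain::{BlockError, BlockProcessError};
use types::EthSpec;

/// Illustrative mapping from the layered error type to "should this peer be
/// penalised?". The real gossip/sync handlers below make finer-grained
/// decisions; this only shows which layer answers which question.
fn peer_is_at_fault<E: EthSpec>(error: &BlockProcessError<E>) -> bool {
    match error {
        // Seeing a block twice is never the peer's fault.
        BlockProcessError::BlockError(BlockError::BlockIsAlreadyKnown(_)) => false,
        // An offline/unsynced execution layer is our problem, not the peer's.
        BlockProcessError::BlockError(BlockError::ExecutionPayloadError(epe)) => {
            epe.penalize_peer()
        }
        // Any other consensus-level rejection means the peer sent a bad block.
        BlockProcessError::BlockError(_) => true,
        // A weak subjectivity conflict is documented above as a faulty peer.
        BlockProcessError::ImportError(BlockImportError::WeakSubjectivityConflict) => true,
        // Remaining import failures and internal errors say nothing about the peer.
        BlockProcessError::ImportError(_) | BlockProcessError::BeaconChainError(_) => false,
        // Availability failures carry their own internal/malicious split.
        BlockProcessError::AvailabilityCheckError(e) => {
            matches!(e.category(), AvailabilityCheckErrorCategory::Malicious)
        }
    }
}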
diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 221bb8b2922..1ae1f7662a1 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -74,9 +74,9 @@ pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{ - get_block_root, BlockError, ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock, - IntoExecutionPendingBlock, IntoGossipVerifiedBlockContents, PayloadVerificationOutcome, - PayloadVerificationStatus, + get_block_root, BlobProcessError, BlockError, BlockProcessError, ExecutionPayloadError, + ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, + IntoGossipVerifiedBlockContents, PayloadVerificationOutcome, PayloadVerificationStatus, }; pub use block_verification_types::AvailabilityPendingExecutedBlock; pub use block_verification_types::ExecutedBlock; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8fbd5d575f9..d318f41b32e 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1883,7 +1883,8 @@ where NotifyExecutionLayer::Yes, || Ok(()), ) - .await? + .await + .map_err(|_| BlockError::BeaconChainError(todo!("placeholder")))? .try_into() .unwrap(); self.chain.recompute_head_at_current_slot().await; @@ -1909,7 +1910,8 @@ where NotifyExecutionLayer::Yes, || Ok(()), ) - .await? + .await + .map_err(|_| BlockError::BeaconChainError(todo!("placeholder")))? .try_into() .expect("block blobs are available"); self.chain.recompute_head_at_current_slot().await; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index af7f3a53e56..32aef700f79 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,7 +4,6 @@ use crate::{ service::NetworkMessage, sync::SyncMessage, }; -use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::store::Error; use beacon_chain::{ @@ -18,6 +17,10 @@ use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, NotifyExecutionLayer, }; +use beacon_chain::{ + blob_verification::{GossipBlobError, GossipVerifiedBlob}, + BlockProcessError, +}; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use operation_pool::ReceivedPreCapella; use slog::{crit, debug, error, info, trace, warn, Logger}; @@ -778,14 +781,7 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); } - Err(BlockError::BlockIsAlreadyKnown(_)) => { - debug!( - self.log, - "Ignoring gossip blob already imported"; - "block_root" => ?block_root, - "blob_index" => blob_index, - ); - } + // No case for BlockIsAlreadyKnown Err(err) => { debug!( self.log, @@ -1187,7 +1183,7 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); } - Err(BlockError::ParentUnknown(block)) => { + Err(BlockProcessError::BlockError(BlockError::ParentUnknown(block))) => { // Inform the sync manager to find parents for this block // This should not occur. 
It should be checked by `should_forward_block` error!( @@ -1201,14 +1197,16 @@ impl NetworkBeaconProcessor { block_root, )); } - Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { + Err(BlockProcessError::BlockError( + ref e @ BlockError::ExecutionPayloadError(ref epe), + )) if !epe.penalize_peer() => { debug!( self.log, "Failed to verify execution payload"; - "error" => %e + "error" => ?e ); } - Err(BlockError::AvailabilityCheck(err)) => { + Err(BlockProcessError::BlockError(BlockError::AvailabilityCheck(err))) => { match err.category() { AvailabilityCheckErrorCategory::Internal => { warn!( @@ -2764,7 +2762,7 @@ impl NetworkBeaconProcessor { invalid_block_storage: &InvalidBlockStorage, block_root: Hash256, block: &SignedBeaconBlock, - error: &BlockError, + error: &BlockProcessError, log: &Logger, ) { if let InvalidBlockStorage::Enabled(base_dir) = invalid_block_storage { @@ -2812,7 +2810,7 @@ impl NetworkBeaconProcessor { }; write_file(block_path, &block.as_ssz_bytes()); - write_file(error_path, error.to_string().as_bytes()); + write_file(error_path, todo!()); // impl to_string } } } diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 887974c6e0b..e7095321b04 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -8,6 +8,7 @@ use crate::sync::{ use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::data_availability_checker::MaybeAvailableBlock; +use beacon_chain::BlobProcessError; use beacon_chain::{ validator_monitor::get_slot_delay_ms, AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, @@ -279,7 +280,7 @@ impl NetworkBeaconProcessor { "slot" => %slot, ); } - Err(BlockError::BlockIsAlreadyKnown(_)) => { + Err(BlobProcessError::AlreadyImported(_)) => { debug!( self.log, "Blobs have already been imported"; diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index a2909b49dd1..247fa6eb063 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -8,7 +8,7 @@ use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::block_lookups::common::LookupType; use crate::sync::block_lookups::parent_lookup::{ParentLookup, RequestError}; use crate::sync::block_lookups::single_block_lookup::{CachedChild, LookupRequestError}; -use crate::sync::manager::{Id, SingleLookupReqId}; +use crate::sync::manager::{BlockComponentProcessError, Id, SingleLookupReqId}; use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; pub use beacon_chain::data_availability_checker::ChildComponents; use beacon_chain::data_availability_checker::{ @@ -718,7 +718,7 @@ impl BlockLookups { pub fn single_block_component_processed>( &mut self, target_id: Id, - result: BlockProcessingResult, + result: BlockProcessingResult, cx: &mut SyncNetworkContext, ) { let Some(mut lookup) = self.single_block_lookups.remove(&target_id) else { @@ -747,7 +747,7 @@ impl BlockLookups { let action = match result { BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) - | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown { .. 
}) => { + | BlockProcessingResult::Err(BlockComponentProcessError::AlreadyImported { .. }) => { // Successfully imported trace!(self.log, "Single block processing succeeded"; "block" => %block_root); Action::Drop @@ -795,45 +795,22 @@ impl BlockLookups { } BlockProcessingResult::Err(e) => { let root = lookup.block_root(); - trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); + trace!(self.log, "Single block processing failed"; "block" => %root, "error" => ?e); match e { - BlockError::BeaconChainError(e) => { + BlockComponentProcessError::Internal(e) => { // Internal error - error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); + error!(self.log, "Lookup processing internal error"; "block_root" => %root, "error" => e); Action::Drop } - BlockError::ParentUnknown(block) => { - let slot = block.slot(); - let parent_root = block.parent_root(); - lookup.add_child_components(block.into()); - Action::ParentUnknown { parent_root, slot } - } - ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { - // These errors indicate that the execution layer is offline - // and failed to validate the execution payload. Do not downscore peer. - debug!( - self.log, - "Single block lookup failed. Execution layer is offline / unsynced / misconfigured"; - "root" => %root, - "error" => ?e - ); - Action::Drop - } - BlockError::AvailabilityCheck(e) => match e.category() { - AvailabilityCheckErrorCategory::Internal => { - warn!(self.log, "Internal availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); - lookup.block_request_state.state.on_download_failure(); - lookup.blob_request_state.state.on_download_failure(); - Action::Retry + BlockComponentProcessError::ParentUnknown(parent_root) => { + Action::ParentUnknown { + parent_root, + // Wait for https://github.com/sigp/lighthouse/pull/5655 + slot: todo!(), } - AvailabilityCheckErrorCategory::Malicious => { - warn!(self.log, "Availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); - lookup.handle_availability_check_failure(cx); - Action::Retry - } - }, - other => { - warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); + } + BlockComponentProcessError::Invalid(e) => { + warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?e, "peer_id" => %peer_id); if let Ok(block_peer) = lookup.block_request_state.state.processing_peer() { cx.report_peer( block_peer, @@ -878,7 +855,7 @@ impl BlockLookups { pub fn parent_block_processed( &mut self, chain_hash: Hash256, - result: BlockProcessingResult, + result: BlockProcessingResult, cx: &mut SyncNetworkContext, ) { let index = self diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 8e3b35ee5d3..cab5f868fd0 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -249,7 +249,7 @@ impl TestRig { ) } - fn parent_block_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { + fn parent_block_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type: BlockProcessType::ParentLookup { chain_hash }, result, @@ -266,7 +266,7 @@ impl TestRig { fn single_block_component_processed( &mut self, id: SingleLookupReqId, - 
result: BlockProcessingResult, + result: BlockProcessingResult, ) { self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type: BlockProcessType::SingleBlock { id: id.id }, @@ -288,7 +288,7 @@ impl TestRig { fn single_blob_component_processed( &mut self, id: SingleLookupReqId, - result: BlockProcessingResult, + result: BlockProcessingResult, ) { self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type: BlockProcessType::SingleBlob { id: id.id }, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 15b96c52b10..3382f616570 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -48,7 +48,8 @@ use beacon_chain::block_verification_types::AsBlock; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::data_availability_checker::ChildComponents; use beacon_chain::{ - AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState, + AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlobProcessError, BlockError, + BlockProcessError, EngineState, }; use futures::StreamExt; use lighthouse_network::rpc::RPCError; @@ -144,7 +145,7 @@ pub enum SyncMessage { /// Block processed BlockComponentProcessed { process_type: BlockProcessType, - result: BlockProcessingResult, + result: BlockProcessingResult, }, } @@ -157,12 +158,21 @@ pub enum BlockProcessType { } #[derive(Debug)] -pub enum BlockProcessingResult { +pub enum BlockProcessingResult { Ok(AvailabilityProcessingStatus), - Err(BlockError), + Err(BlockComponentProcessError), Ignored, } +#[derive(Debug)] +pub enum BlockComponentProcessError { + ParentUnknown(Hash256), + AlreadyImported(Hash256), + // TODO: Not okay, errors should only be serialized if debug logs are enabled + Internal(String), + Invalid(String), +} + /// The result of processing multiple blocks (a chain segment). #[derive(Debug)] pub enum BatchProcessResult { @@ -1029,19 +1039,35 @@ impl SyncManager { } } -impl From>> - for BlockProcessingResult +impl> From> + for BlockProcessingResult { - fn from(result: Result>) -> Self { + fn from(result: Result) -> Self { match result { Ok(status) => BlockProcessingResult::Ok(status), - Err(e) => BlockProcessingResult::Err(e), + Err(e) => BlockProcessingResult::Err(e.into()), + } + } +} + +impl From> for BlockComponentProcessError { + fn from(value: BlockProcessError) -> Self { + match value { + BlockProcessError::BlockError(_) => todo!(), + BlockProcessError::BeaconChainError(_) => todo!(), + BlockProcessError::ImportError(_) => todo!(), + BlockProcessError::AvailabilityCheckError(_) => todo!(), } } } -impl From> for BlockProcessingResult { - fn from(e: BlockError) -> Self { - BlockProcessingResult::Err(e) +impl From for BlockComponentProcessError { + fn from(value: BlobProcessError) -> Self { + match value { + BlobProcessError::BeaconChainError(_) => todo!(), + BlobProcessError::ImportError(_) => todo!(), + BlobProcessError::AvailabilityCheckError(_) => todo!(), + BlobProcessError::AlreadyImported(_) => todo!(), + } } }
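The two `From` conversions directly above are still `todo!()`. A possible completion, using only variants that already appear in this diff, is sketched below; the extra import paths, the `EthSpec` parameter, the decision to carry `Debug`-formatted strings in `Internal`/`Invalid` (still subject to the existing TODO about only serializing when debug logs are enabled), and the classification of import and availability failures are assumptions rather than choices this PR has made.

// Sketch of how the `todo!()` arms above could be completed. Additional
// imports `manager.rs` would need (paths assumed):
use beacon_chain::block_verification::BlockImportError;
use beacon_chain::data_availability_checker::AvailabilityCheckErrorCategory;
use types::EthSpec;

impl<E: EthSpec> From<BlockProcessError<E>> for BlockComponentProcessError {
    fn from(value: BlockProcessError<E>) -> Self {
        match value {
            // Duplicate blocks map onto the dedicated variant instead of an error string.
            BlockProcessError::BlockError(BlockError::BlockIsAlreadyKnown(root)) => {
                Self::AlreadyImported(root)
            }
            // Carry only the parent root; whether the full block should be kept is a
            // design decision this sketch does not settle.
            BlockProcessError::BlockError(BlockError::ParentUnknown(block)) => {
                Self::ParentUnknown(block.parent_root())
            }
            // Every other consensus-level rejection marks the block as invalid.
            BlockProcessError::BlockError(e) => Self::Invalid(format!("{e:?}")),
            BlockProcessError::BeaconChainError(e) => Self::Internal(format!("{e:?}")),
            // Weak subjectivity conflicts are documented as peer faults.
            BlockProcessError::ImportError(BlockImportError::WeakSubjectivityConflict) => {
                Self::Invalid("weak subjectivity conflict".to_string())
            }
            BlockProcessError::ImportError(e) => Self::Internal(format!("{e:?}")),
            // Availability failures keep their internal/malicious split.
            BlockProcessError::AvailabilityCheckError(e) => match e.category() {
                AvailabilityCheckErrorCategory::Malicious => Self::Invalid(format!("{e:?}")),
                AvailabilityCheckErrorCategory::Internal => Self::Internal(format!("{e:?}")),
            },
        }
    }
}

impl From<BlobProcessError> for BlockComponentProcessError {
    fn from(value: BlobProcessError) -> Self {
        match value {
            BlobProcessError::AlreadyImported(root) => Self::AlreadyImported(root),
            BlobProcessError::BeaconChainError(e) => Self::Internal(format!("{e:?}")),
            BlobProcessError::ImportError(e) => Self::Internal(format!("{e:?}")),
            BlobProcessError::AvailabilityCheckError(e) => match e.category() {
                AvailabilityCheckErrorCategory::Malicious => Self::Invalid(format!("{e:?}")),
                AvailabilityCheckErrorCategory::Internal => Self::Internal(format!("{e:?}")),
            },
        }
    }
}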