diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs
index 813eb64d903..f6935fc90e1 100644
--- a/beacon_node/beacon_chain/src/block_verification_types.rs
+++ b/beacon_node/beacon_chain/src/block_verification_types.rs
@@ -84,8 +84,8 @@ enum RpcBlockInner<E: EthSpec> {
     /// This variant is used with parent lookups and by-range responses. It should have all blobs
     /// ordered, all block roots matching, and the correct number of blobs for this block.
     BlockAndBlobs(Arc<SignedBeaconBlock<E>>, BlobSidecarList<E>),
-    /// This variant is used with parent lookups and by-range responses. It should have all data columns
-    /// ordered, all block roots matching, and the correct number of data columns for this block.
+    /// This variant is used with parent lookups and by-range responses. It should have all
+    /// requested data columns, with all block roots matching, for this block.
     BlockAndDataColumns(Arc<SignedBeaconBlock<E>>, DataColumnSidecarList<E>),
 }
diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs
index b4e1f724403..743b84e61ea 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -259,47 +259,40 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
         block: RpcBlock<T::EthSpec>,
     ) -> Result<MaybeAvailableBlock<T::EthSpec>, AvailabilityCheckError> {
         let (block_root, block, blobs, data_columns) = block.deconstruct();
-        match (blobs, data_columns) {
-            (None, None) => {
-                if self.blobs_required_for_block(&block) {
-                    Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block })
-                } else {
-                    Ok(MaybeAvailableBlock::Available(AvailableBlock {
-                        block_root,
-                        block,
-                        blobs: None,
-                        data_columns: None,
-                    }))
-                }
+        if self.blobs_required_for_block(&block) {
+            if let Some(blob_list) = blobs.as_ref() {
+                let kzg = self
+                    .kzg
+                    .as_ref()
+                    .ok_or(AvailabilityCheckError::KzgNotInitialized)?;
+                verify_kzg_for_blob_list(blob_list.iter(), kzg)
+                    .map_err(AvailabilityCheckError::Kzg)?;
+                return Ok(MaybeAvailableBlock::Available(AvailableBlock {
+                    block_root,
+                    block,
+                    blobs,
+                    data_columns: None,
+                }));
             }
-            (maybe_blob_list, maybe_data_column_list) => {
-                let (verified_blobs, verified_data_column) =
-                    if self.blobs_required_for_block(&block) {
-                        let kzg = self
-                            .kzg
-                            .as_ref()
-                            .ok_or(AvailabilityCheckError::KzgNotInitialized)?;
-
-                        if let Some(blob_list) = maybe_blob_list.as_ref() {
-                            verify_kzg_for_blob_list(blob_list.iter(), kzg)
-                                .map_err(AvailabilityCheckError::Kzg)?;
-                        }
-                        if let Some(data_column_list) = maybe_data_column_list.as_ref() {
-                            verify_kzg_for_data_column_list(data_column_list.iter(), kzg)
-                                .map_err(AvailabilityCheckError::Kzg)?;
-                        }
-                        (maybe_blob_list, maybe_data_column_list)
-                    } else {
-                        (None, None)
-                    };
-                Ok(MaybeAvailableBlock::Available(AvailableBlock {
+        }
+        if self.data_columns_required_for_block(&block) {
+            if let Some(data_column_list) = data_columns.as_ref() {
+                let kzg = self
+                    .kzg
+                    .as_ref()
+                    .ok_or(AvailabilityCheckError::KzgNotInitialized)?;
+                verify_kzg_for_data_column_list(data_column_list.iter(), kzg)
+                    .map_err(AvailabilityCheckError::Kzg)?;
+                return Ok(MaybeAvailableBlock::Available(AvailableBlock {
                     block_root,
                     block,
-                    blobs: verified_blobs,
-                    data_columns: verified_data_column,
-                }))
+                    blobs: None,
+                    data_columns,
+                }));
             }
         }
+
+        Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block })
     }

     /// Checks if a vector of blocks are available. Returns a vector of `MaybeAvailableBlock`
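Note on the rewrite above: `verify_kzg_for_rpc_block` now sends each block down exactly one of three paths — verify blobs and return `Available`, verify data columns and return `Available`, or fall through to `AvailabilityPending`. Below is a minimal, self-contained sketch of that branching only (KZG verification elided); `classify`, `Availability`, and the boolean parameters are illustrative stand-ins, not Lighthouse types.

// Sketch only: models the branch order of `verify_kzg_for_rpc_block`.
#[derive(Debug, PartialEq)]
enum Availability {
    Available { via_blobs: bool },
    Pending,
}

fn classify(
    blobs_required: bool,
    blobs_present: bool,
    columns_required: bool,
    columns_present: bool,
) -> Availability {
    // Pre-PeerDAS path: the block's DA data travels as blob sidecars.
    if blobs_required && blobs_present {
        return Availability::Available { via_blobs: true };
    }
    // Post-PeerDAS path: the block's DA data travels as data columns.
    if columns_required && columns_present {
        return Availability::Available { via_blobs: false };
    }
    // Required data has not arrived yet, so availability stays pending.
    Availability::Pending
}

fn main() {
    // Blob-era block whose blobs arrived alongside it.
    assert_eq!(
        classify(true, true, false, false),
        Availability::Available { via_blobs: true }
    );
    // PeerDAS-era block still waiting for its custody columns.
    assert_eq!(classify(false, false, true, false), Availability::Pending);
}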
@@ -313,6 +306,11 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
         blocks: Vec<RpcBlock<T::EthSpec>>,
     ) -> Result<Vec<MaybeAvailableBlock<T::EthSpec>>, AvailabilityCheckError> {
         let mut results = Vec::with_capacity(blocks.len());
+        let kzg = self
+            .kzg
+            .as_ref()
+            .ok_or(AvailabilityCheckError::KzgNotInitialized)?;
+
         let all_blobs: BlobSidecarList<T::EthSpec> = blocks
             .iter()
             .filter(|block| self.blobs_required_for_block(block.as_block()))
@@ -324,43 +322,29 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {

         // verify kzg for all blobs at once
         if !all_blobs.is_empty() {
-            let kzg = self
-                .kzg
-                .as_ref()
-                .ok_or(AvailabilityCheckError::KzgNotInitialized)?;
             verify_kzg_for_blob_list(all_blobs.iter(), kzg)?;
         }

+        // TODO(das) verify kzg for all data columns
+
         for block in blocks {
             let (block_root, block, blobs, data_columns) = block.deconstruct();
-            match (blobs, data_columns) {
-                (None, None) => {
-                    if self.blobs_required_for_block(&block) {
-                        results.push(MaybeAvailableBlock::AvailabilityPending { block_root, block })
-                    } else {
-                        results.push(MaybeAvailableBlock::Available(AvailableBlock {
-                            block_root,
-                            block,
-                            blobs: None,
-                            data_columns: None,
-                        }))
-                    }
-                }
-                (maybe_blob_list, maybe_data_column_list) => {
-                    let (verified_blobs, verified_data_columns) =
-                        if self.blobs_required_for_block(&block) {
-                            (maybe_blob_list, maybe_data_column_list)
-                        } else {
-                            (None, None)
-                        };
-                    // already verified kzg for all blobs
-                    results.push(MaybeAvailableBlock::Available(AvailableBlock {
-                        block_root,
-                        block,
-                        blobs: verified_blobs,
-                        data_columns: verified_data_columns,
-                    }))
-                }
+            if self.blobs_required_for_block(&block) && blobs.is_some() {
+                results.push(MaybeAvailableBlock::Available(AvailableBlock {
+                    block_root,
+                    block,
+                    blobs,
+                    data_columns: None,
+                }))
+            } else if self.data_columns_required_for_block(&block) && data_columns.is_some() {
+                results.push(MaybeAvailableBlock::Available(AvailableBlock {
+                    block_root,
+                    block,
+                    blobs: None,
+                    data_columns,
+                }))
+            } else {
+                results.push(MaybeAvailableBlock::AvailabilityPending { block_root, block })
             }
         }

@@ -370,7 +354,25 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
     /// Determines the blob requirements for a block. If the block is pre-deneb, no blobs are required.
     /// If the block's epoch is from prior to the data availability boundary, no blobs are required.
     fn blobs_required_for_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool {
-        block.num_expected_blobs() > 0 && self.da_check_required_for_epoch(block.epoch())
+        block.num_expected_blobs() > 0
+            && self.da_check_required_for_epoch(block.epoch())
+            && !self.is_peer_das_enabled_for_epoch(block.epoch())
+    }
+
+    /// Determines the data column requirements for a block.
+    /// - If the block is pre-peerdas, no data columns are required.
+    /// - If the block's epoch is from prior to the data availability boundary, no data columns are required.
+    fn data_columns_required_for_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool {
+        block.num_expected_blobs() > 0
+            && self.da_check_required_for_epoch(block.epoch())
+            && self.is_peer_das_enabled_for_epoch(block.epoch())
+    }
+
+    /// Returns true if the given epoch is greater than or equal to the `PEER_DAS_EPOCH`.
+    fn is_peer_das_enabled_for_epoch(&self, block_epoch: Epoch) -> bool {
+        self.spec
+            .peer_das_epoch
+            .map_or(false, |peer_das_epoch| block_epoch >= peer_das_epoch)
     }

     /// The epoch at which we require a data availability check in block processing.
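The three new predicates above partition blocks cleanly: for any epoch, `blobs_required_for_block` and `data_columns_required_for_block` cannot both hold, because one negates `is_peer_das_enabled_for_epoch` while the other requires it. A freestanding sketch of that property, using plain `u64` epochs in place of the `Epoch` type (`SpecSketch` and the argument names are invented for illustration):

// Sketch only: `peer_das_epoch: None` means PeerDAS is not scheduled,
// so the data-column path can never activate.
struct SpecSketch {
    peer_das_epoch: Option<u64>,
}

impl SpecSketch {
    fn is_peer_das_enabled_for_epoch(&self, block_epoch: u64) -> bool {
        self.peer_das_epoch
            .map_or(false, |peer_das_epoch| block_epoch >= peer_das_epoch)
    }

    // Blobs are required only while PeerDAS is inactive for the epoch.
    fn blobs_required(&self, expected_blobs: usize, da_check: bool, epoch: u64) -> bool {
        expected_blobs > 0 && da_check && !self.is_peer_das_enabled_for_epoch(epoch)
    }

    // Data columns take over once the epoch crosses `peer_das_epoch`.
    fn data_columns_required(&self, expected_blobs: usize, da_check: bool, epoch: u64) -> bool {
        expected_blobs > 0 && da_check && self.is_peer_das_enabled_for_epoch(epoch)
    }
}

fn main() {
    let spec = SpecSketch { peer_das_epoch: Some(10) };
    // Epoch 9: still on the blob path.
    assert!(spec.blobs_required(3, true, 9));
    assert!(!spec.data_columns_required(3, true, 9));
    // Epoch 10: the same block shape now requires columns instead.
    assert!(!spec.blobs_required(3, true, 10));
    assert!(spec.data_columns_required(3, true, 10));
}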
diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
index c8712538f2d..211ec06c151 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
@@ -691,10 +691,9 @@ pub struct OverflowLRUCache<T: BeaconChainTypes> {
     /// The capacity of the LRU cache
     capacity: NonZeroUsize,
     /// The number of data columns the node is custodying.
-    // FIXME(das): Using `Option` as temporary workaround to disable custody requirement checks in
-    // tests. To be removed once we implement proper fork / epoch transition.
     custody_column_count: Option<usize>,
     log: Logger,
+    spec: ChainSpec,
 }

 impl<T: BeaconChainTypes> OverflowLRUCache<T> {
@@ -711,11 +710,12 @@
         Ok(Self {
             critical: RwLock::new(critical),
             overflow_store,
-            state_cache: StateLRUCache::new(beacon_store, spec),
+            state_cache: StateLRUCache::new(beacon_store, spec.clone()),
             maintenance_lock: Mutex::new(()),
             capacity,
             custody_column_count,
             log,
+            spec,
         })
     }

@@ -1388,8 +1388,14 @@ mod test {
         let test_store = harness.chain.store.clone();
         let capacity_non_zero = new_non_zero_usize(capacity);
         let cache = Arc::new(
-            OverflowLRUCache::<T>::new(capacity_non_zero, test_store, None, spec.clone())
-                .expect("should create cache"),
+            OverflowLRUCache::<T>::new(
+                capacity_non_zero,
+                test_store,
+                None,
+                harness.logger().clone(),
+                spec.clone(),
+            )
+            .expect("should create cache"),
         );
         (harness, cache, chain_db_path)
     }
@@ -1894,6 +1900,7 @@ mod test {
             new_non_zero_usize(capacity),
             harness.chain.store.clone(),
             None,
+            harness.logger().clone(),
             harness.chain.spec.clone(),
         )
         .expect("should recover cache");
diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs
index 58987ba63d2..50f9e4444ff 100644
--- a/beacon_node/network/src/service.rs
+++ b/beacon_node/network/src/service.rs
@@ -737,6 +737,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
         }

         if !self.subscribe_all_data_column_subnets {
+            // TODO(das): subscribe after `PEER_DAS_EPOCH`
             for column_subnet in DataColumnSubnetId::compute_custody_subnets::<T::EthSpec>(
                 self.network_globals.local_enr().node_id().raw().into(),
                 self.network_globals
diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index 4b792b838e9..2f9945e8e2d 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -182,6 +182,7 @@ pub struct ChainSpec {
     /*
      * DAS params
      */
+    pub peer_das_epoch: Option<Epoch>,
     pub custody_requirement: u64,

     /*
@@ -722,6 +723,7 @@ impl ChainSpec {
             /*
              * DAS params
              */
+            peer_das_epoch: None,
             custody_requirement: 1,

             /*
@@ -1004,6 +1006,7 @@ impl ChainSpec {
             /*
              * DAS params
              */
+            peer_das_epoch: None,
             custody_requirement: 1,
             /*
              * Network specific
diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs
index 757f0eb7ccf..83fbd19df11 100644
--- a/consensus/types/src/eth_spec.rs
+++ b/consensus/types/src/eth_spec.rs
@@ -512,13 +512,13 @@ mod test {
     use super::*;

     #[test]
-    fn test_peerdas_config_all_specs() {
-        test_peerdas_config::<MainnetEthSpec>();
-        test_peerdas_config::<GnosisEthSpec>();
-        test_peerdas_config::<MinimalEthSpec>();
+    fn test_peer_das_config_all_specs() {
+        test_peer_das_config::<MainnetEthSpec>();
+        test_peer_das_config::<GnosisEthSpec>();
+        test_peer_das_config::<MinimalEthSpec>();
     }

-    fn test_peerdas_config<E: EthSpec>() {
+    fn test_peer_das_config<E: EthSpec>() {
         assert_eq!(
             E::data_columns_per_subnet(),
             E::number_of_columns() / E::data_column_subnet_count()
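For reference, the renamed `test_peer_das_config` asserts that the preset's column count divides evenly across data-column subnets. The same invariant in a freestanding sketch; the constants `NUMBER_OF_COLUMNS = 128` and `DATA_COLUMN_SUBNET_COUNT = 32` are assumed illustrative values, not read from the Lighthouse presets:

// Sketch only: mirrors the invariant checked by `test_peer_das_config`.
const NUMBER_OF_COLUMNS: usize = 128; // assumption, not a preset value
const DATA_COLUMN_SUBNET_COUNT: usize = 32; // assumption, not a preset value

fn data_columns_per_subnet() -> usize {
    NUMBER_OF_COLUMNS / DATA_COLUMN_SUBNET_COUNT
}

fn main() {
    // Every column must belong to exactly one subnet: the division is exact,
    // so per-subnet counts reassemble the full column set with no remainder.
    assert_eq!(NUMBER_OF_COLUMNS % DATA_COLUMN_SUBNET_COUNT, 0);
    assert_eq!(
        data_columns_per_subnet() * DATA_COLUMN_SUBNET_COUNT,
        NUMBER_OF_COLUMNS
    );
}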