From 3d4e6e263ec7f63b6325c3dbdd1d529f4701926c Mon Sep 17 00:00:00 2001
From: realbigsean
Date: Thu, 11 Apr 2024 11:33:54 -0400
Subject: [PATCH] Remove availability view trait (#5544)

* Move processing cache out of DA
* Merge branch 'sigp/unstable' into non-da-processing-cach
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into non-da-processing-cache
* remove unused file, remove outdated TODO, add is_deneb check to missing blob id calculations
* remove availability view trait
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into remove-availability-view-trait
* fix lints
---
 .../beacon_chain/src/blob_verification.rs    |   3 +
 .../src/data_availability_checker.rs         |   4 -
 .../availability_view.rs                     | 465 ------
 .../overflow_lru_cache.rs                    | 334 ++++
 .../state_lru_cache.rs                       |  10 +
 5 files changed, 346 insertions(+), 470 deletions(-)
 delete mode 100644 beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs

diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs
index a69f2b74524..1fb61702006 100644
--- a/beacon_node/beacon_chain/src/blob_verification.rs
+++ b/beacon_node/beacon_chain/src/blob_verification.rs
@@ -284,6 +284,9 @@ impl<E: EthSpec> KzgVerifiedBlob<E> {
     pub fn as_blob(&self) -> &BlobSidecar<E> {
         &self.blob
     }
+    pub fn get_commitment(&self) -> &KzgCommitment {
+        &self.blob.kzg_commitment
+    }
     /// This is cheap as we're calling clone on an Arc
     pub fn clone_blob(&self) -> Arc<BlobSidecar<E>> {
         self.blob.clone()
diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs
index 1d4e7e57e92..3ef105c6d34 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -2,9 +2,6 @@ use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, Kzg
 use crate::block_verification_types::{
     AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock,
 };
-pub use crate::data_availability_checker::availability_view::{
-    AvailabilityView, GetCommitment, GetCommitments,
-};
 pub use crate::data_availability_checker::child_components::ChildComponents;
 use crate::data_availability_checker::overflow_lru_cache::OverflowLRUCache;
 use crate::{BeaconChain, BeaconChainTypes, BeaconStore};
@@ -21,7 +18,6 @@ use task_executor::TaskExecutor;
 use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
 use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock};
 
-mod availability_view;
 mod child_components;
 mod error;
 mod overflow_lru_cache;
diff --git a/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs b/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs
deleted file mode 100644
index d4e5ca34492..00000000000
--- a/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs
+++ /dev/null
@@ -1,465 +0,0 @@
-use super::state_lru_cache::DietAvailabilityPendingExecutedBlock;
-use crate::blob_verification::KzgVerifiedBlob;
-use crate::block_verification_types::AsBlock;
-use crate::data_availability_checker::overflow_lru_cache::PendingComponents;
-use kzg::KzgCommitment;
-use ssz_types::FixedVector;
-use std::sync::Arc;
-use types::beacon_block_body::KzgCommitments;
-use types::{BlobSidecar, EthSpec, SignedBeaconBlock};
-
-/// Defines an interface for managing data availability with two key invariants:
-///
-/// 1.
If we haven't seen a block yet, we will insert the first blob for a given (block_root, index) -/// but we won't insert subsequent blobs for the same (block_root, index) if they have a different -/// commitment. -/// 2. On block insertion, any non-matching blob commitments are evicted. -/// -/// Types implementing this trait can be used for validating and managing availability -/// of blocks and blobs in a cache-like data structure. -pub trait AvailabilityView { - /// The type representing a block in the implementation. - type BlockType: GetCommitments; - - /// The type representing a blob in the implementation. Must implement `Clone`. - type BlobType: Clone + GetCommitment; - - /// Returns an immutable reference to the cached block. - fn get_cached_block(&self) -> &Option; - - /// Returns an immutable reference to the fixed vector of cached blobs. - fn get_cached_blobs(&self) -> &FixedVector, E::MaxBlobsPerBlock>; - - /// Returns a mutable reference to the cached block. - fn get_cached_block_mut(&mut self) -> &mut Option; - - /// Returns a mutable reference to the fixed vector of cached blobs. - fn get_cached_blobs_mut( - &mut self, - ) -> &mut FixedVector, E::MaxBlobsPerBlock>; - - /// Checks if a block exists in the cache. - /// - /// Returns: - /// - `true` if a block exists. - /// - `false` otherwise. - fn block_exists(&self) -> bool { - self.get_cached_block().is_some() - } - - /// Checks if a blob exists at the given index in the cache. - /// - /// Returns: - /// - `true` if a blob exists at the given index. - /// - `false` otherwise. - fn blob_exists(&self, blob_index: usize) -> bool { - self.get_cached_blobs() - .get(blob_index) - .map(|b| b.is_some()) - .unwrap_or(false) - } - - /// Returns the number of blobs that are expected to be present. Returns `None` if we don't have a - /// block. - /// - /// This corresponds to the number of commitments that are present in a block. - fn num_expected_blobs(&self) -> Option { - self.get_cached_block() - .as_ref() - .map(|b| b.get_commitments().len()) - } - - /// Returns the number of blobs that have been received and are stored in the cache. - fn num_received_blobs(&self) -> usize { - self.get_cached_blobs().iter().flatten().count() - } - - /// Inserts a block into the cache. - fn insert_block(&mut self, block: Self::BlockType) { - *self.get_cached_block_mut() = Some(block) - } - - /// Inserts a blob at a specific index in the cache. - /// - /// Existing blob at the index will be replaced. - fn insert_blob_at_index(&mut self, blob_index: usize, blob: Self::BlobType) { - if let Some(b) = self.get_cached_blobs_mut().get_mut(blob_index) { - *b = Some(blob); - } - } - - /// Merges a given set of blobs into the cache. - /// - /// Blobs are only inserted if: - /// 1. The blob entry at the index is empty and no block exists. - /// 2. The block exists and its commitment matches the blob's commitment. - fn merge_blobs(&mut self, blobs: FixedVector, E::MaxBlobsPerBlock>) { - for (index, blob) in blobs.iter().cloned().enumerate() { - let Some(blob) = blob else { continue }; - self.merge_single_blob(index, blob); - } - } - - /// Merges a single blob into the cache. - /// - /// Blobs are only inserted if: - /// 1. The blob entry at the index is empty and no block exists, or - /// 2. The block exists and its commitment matches the blob's commitment. 
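An illustrative sketch of the two invariants described above, kept separate from the patch itself: `Commitment`, `Blob`, and `Cache` are simplified stand-ins for `KzgCommitment`, `KzgVerifiedBlob<E>`, and the real cache entry, and a plain `Vec` replaces the `FixedVector` bounded by `E::MaxBlobsPerBlock`.

// Standalone sketch of the blob-merge rules documented above.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Commitment(u64);

#[derive(Clone, Debug)]
struct Blob {
    commitment: Commitment,
}

struct Cache {
    block_commitments: Option<Vec<Commitment>>, // `None` until a block has been seen
    blobs: Vec<Option<Blob>>,                   // fixed length in the real code
}

impl Cache {
    fn new(max_blobs: usize) -> Self {
        Self {
            block_commitments: None,
            blobs: vec![None; max_blobs],
        }
    }

    // Mirrors `merge_single_blob`: with no block cached, only fill an empty slot;
    // once a block is cached, only accept a blob whose commitment matches it.
    fn merge_single_blob(&mut self, index: usize, blob: Blob) {
        let Some(slot) = self.blobs.get_mut(index) else {
            return;
        };
        match &self.block_commitments {
            Some(commitments) => {
                if commitments.get(index) == Some(&blob.commitment) {
                    *slot = Some(blob);
                }
            }
            None => {
                if slot.is_none() {
                    *slot = Some(blob);
                }
            }
        }
    }

    // Mirrors `merge_block`: cache the block, then re-validate the existing blobs
    // against it so that any blob with a non-matching commitment is evicted.
    fn merge_block(&mut self, commitments: Vec<Commitment>) {
        self.block_commitments = Some(commitments);
        let len = self.blobs.len();
        let reinsert = std::mem::replace(&mut self.blobs, vec![None; len]);
        for (index, blob) in reinsert.into_iter().enumerate() {
            if let Some(blob) = blob {
                self.merge_single_blob(index, blob);
            }
        }
    }
}

fn main() {
    let mut cache = Cache::new(2);
    cache.merge_single_blob(0, Blob { commitment: Commitment(1) }); // accepted: no block yet
    cache.merge_single_blob(0, Blob { commitment: Commitment(9) }); // ignored: slot already filled
    cache.merge_block(vec![Commitment(1), Commitment(2)]);          // blob 0 matches and is kept
    assert!(cache.blobs[0].is_some());
    assert!(cache.blobs[1].is_none());
}

Evicting and re-validating on `merge_block` is what lets the cache accept blobs optimistically before the block arrives without ever holding a blob whose commitment contradicts the block.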
- fn merge_single_blob(&mut self, index: usize, blob: Self::BlobType) { - if let Some(cached_block) = self.get_cached_block() { - let block_commitment_opt = cached_block.get_commitments().get(index).copied(); - if let Some(block_commitment) = block_commitment_opt { - if block_commitment == *blob.get_commitment() { - self.insert_blob_at_index(index, blob) - } - } - } else if !self.blob_exists(index) { - self.insert_blob_at_index(index, blob) - } - } - - /// Inserts a new block and revalidates the existing blobs against it. - /// - /// Blobs that don't match the new block's commitments are evicted. - fn merge_block(&mut self, block: Self::BlockType) { - self.insert_block(block); - let reinsert = std::mem::take(self.get_cached_blobs_mut()); - self.merge_blobs(reinsert); - } - - /// Checks if the block and all of its expected blobs are available in the cache. - /// - /// Returns `true` if both the block exists and the number of received blobs matches the number - /// of expected blobs. - fn is_available(&self) -> bool { - if let Some(num_expected_blobs) = self.num_expected_blobs() { - num_expected_blobs == self.num_received_blobs() - } else { - false - } - } -} - -/// Implements the `AvailabilityView` trait for a given struct. -/// -/// - `$struct_name`: The name of the struct for which to implement `AvailabilityView`. -/// - `$block_type`: The type to use for `BlockType` in the `AvailabilityView` trait. -/// - `$blob_type`: The type to use for `BlobType` in the `AvailabilityView` trait. -/// - `$block_field`: The field name in the struct that holds the cached block. -/// - `$blob_field`: The field name in the struct that holds the cached blobs. -#[macro_export] -macro_rules! impl_availability_view { - ($struct_name:ident, $block_type:ty, $blob_type:ty, $block_field:ident, $blob_field:ident) => { - impl AvailabilityView for $struct_name { - type BlockType = $block_type; - type BlobType = $blob_type; - - fn get_cached_block(&self) -> &Option { - &self.$block_field - } - - fn get_cached_blobs( - &self, - ) -> &FixedVector, E::MaxBlobsPerBlock> { - &self.$blob_field - } - - fn get_cached_block_mut(&mut self) -> &mut Option { - &mut self.$block_field - } - - fn get_cached_blobs_mut( - &mut self, - ) -> &mut FixedVector, E::MaxBlobsPerBlock> { - &mut self.$blob_field - } - } - }; -} - -impl_availability_view!( - PendingComponents, - DietAvailabilityPendingExecutedBlock, - KzgVerifiedBlob, - executed_block, - verified_blobs -); - -pub trait GetCommitments { - fn get_commitments(&self) -> KzgCommitments; -} - -pub trait GetCommitment { - fn get_commitment(&self) -> &KzgCommitment; -} - -impl GetCommitment for KzgCommitment { - fn get_commitment(&self) -> &KzgCommitment { - self - } -} - -// These implementations are required to implement `AvailabilityView` for `PendingComponents`. -impl GetCommitments for DietAvailabilityPendingExecutedBlock { - fn get_commitments(&self) -> KzgCommitments { - self.as_block() - .message() - .body() - .blob_kzg_commitments() - .cloned() - .unwrap_or_default() - } -} - -impl GetCommitment for KzgVerifiedBlob { - fn get_commitment(&self) -> &KzgCommitment { - &self.as_blob().kzg_commitment - } -} - -// These implementations are required to implement `AvailabilityView` for `ChildComponents`. 
-impl GetCommitments for Arc> { - fn get_commitments(&self) -> KzgCommitments { - self.message() - .body() - .blob_kzg_commitments() - .ok() - .cloned() - .unwrap_or_default() - } -} -impl GetCommitment for Arc> { - fn get_commitment(&self) -> &KzgCommitment { - &self.kzg_commitment - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use crate::block_verification_types::BlockImportData; - use crate::eth1_finalization_cache::Eth1FinalizationData; - use crate::test_utils::{generate_rand_block_and_blobs, NumBlobs}; - use crate::AvailabilityPendingExecutedBlock; - use crate::PayloadVerificationOutcome; - use fork_choice::PayloadVerificationStatus; - use rand::rngs::StdRng; - use rand::SeedableRng; - use state_processing::ConsensusContext; - use types::test_utils::TestRandom; - use types::{BeaconState, ChainSpec, ForkName, MainnetEthSpec, Slot}; - - type E = MainnetEthSpec; - - type Setup = ( - SignedBeaconBlock, - FixedVector>>, ::MaxBlobsPerBlock>, - FixedVector>>, ::MaxBlobsPerBlock>, - ); - - pub fn pre_setup() -> Setup { - let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); - let (block, blobs_vec) = - generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng); - let mut blobs: FixedVector<_, ::MaxBlobsPerBlock> = FixedVector::default(); - - for blob in blobs_vec { - if let Some(b) = blobs.get_mut(blob.index as usize) { - *b = Some(Arc::new(blob)); - } - } - - let mut invalid_blobs: FixedVector< - Option>>, - ::MaxBlobsPerBlock, - > = FixedVector::default(); - for (index, blob) in blobs.iter().enumerate() { - if let Some(invalid_blob) = blob { - let mut blob_copy = invalid_blob.as_ref().clone(); - blob_copy.kzg_commitment = KzgCommitment::random_for_test(&mut rng); - *invalid_blobs.get_mut(index).unwrap() = Some(Arc::new(blob_copy)); - } - } - - (block, blobs, invalid_blobs) - } - - type PendingComponentsSetup = ( - DietAvailabilityPendingExecutedBlock, - FixedVector>, ::MaxBlobsPerBlock>, - FixedVector>, ::MaxBlobsPerBlock>, - ); - - pub fn setup_pending_components( - block: SignedBeaconBlock, - valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, - invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, - ) -> PendingComponentsSetup { - let blobs = FixedVector::from( - valid_blobs - .iter() - .map(|blob_opt| { - blob_opt - .as_ref() - .map(|blob| KzgVerifiedBlob::__assumed_valid(blob.clone())) - }) - .collect::>(), - ); - let invalid_blobs = FixedVector::from( - invalid_blobs - .iter() - .map(|blob_opt| { - blob_opt - .as_ref() - .map(|blob| KzgVerifiedBlob::__assumed_valid(blob.clone())) - }) - .collect::>(), - ); - let dummy_parent = block.clone_as_blinded(); - let block = AvailabilityPendingExecutedBlock { - block: Arc::new(block), - import_data: BlockImportData { - block_root: Default::default(), - state: BeaconState::new(0, Default::default(), &ChainSpec::minimal()), - parent_block: dummy_parent, - parent_eth1_finalization_data: Eth1FinalizationData { - eth1_data: Default::default(), - eth1_deposit_index: 0, - }, - confirmed_state_roots: vec![], - consensus_context: ConsensusContext::new(Slot::new(0)), - }, - payload_verification_outcome: PayloadVerificationOutcome { - payload_verification_status: PayloadVerificationStatus::Verified, - is_valid_merge_transition_block: false, - }, - }; - (block.into(), blobs, invalid_blobs) - } - - pub fn assert_cache_consistent>(cache: V) { - if let Some(cached_block) = cache.get_cached_block() { - let cached_block_commitments = cached_block.get_commitments(); - for index in 0..E::max_blobs_per_block() { - let block_commitment 
= cached_block_commitments.get(index).copied(); - let blob_commitment_opt = cache.get_cached_blobs().get(index).unwrap(); - let blob_commitment = blob_commitment_opt.as_ref().map(|b| *b.get_commitment()); - assert_eq!(block_commitment, blob_commitment); - } - } else { - panic!("No cached block") - } - } - - pub fn assert_empty_blob_cache>(cache: V) { - for blob in cache.get_cached_blobs().iter() { - assert!(blob.is_none()); - } - } - - #[macro_export] - macro_rules! generate_tests { - ($module_name:ident, $type_name:ty, $block_field:ident, $blob_field:ident, $setup_fn:ident) => { - mod $module_name { - use super::*; - use types::Hash256; - - #[test] - fn valid_block_invalid_blobs_valid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); - let (block_commitments, blobs, random_blobs) = - $setup_fn(block_commitments, blobs, random_blobs); - let block_root = Hash256::zero(); - let mut cache = <$type_name>::empty(block_root); - cache.merge_block(block_commitments); - cache.merge_blobs(random_blobs); - cache.merge_blobs(blobs); - - assert_cache_consistent(cache); - } - - #[test] - fn invalid_blobs_block_valid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); - let (block_commitments, blobs, random_blobs) = - $setup_fn(block_commitments, blobs, random_blobs); - let block_root = Hash256::zero(); - let mut cache = <$type_name>::empty(block_root); - cache.merge_blobs(random_blobs); - cache.merge_block(block_commitments); - cache.merge_blobs(blobs); - - assert_cache_consistent(cache); - } - - #[test] - fn invalid_blobs_valid_blobs_block() { - let (block_commitments, blobs, random_blobs) = pre_setup(); - let (block_commitments, blobs, random_blobs) = - $setup_fn(block_commitments, blobs, random_blobs); - - let block_root = Hash256::zero(); - let mut cache = <$type_name>::empty(block_root); - cache.merge_blobs(random_blobs); - cache.merge_blobs(blobs); - cache.merge_block(block_commitments); - - assert_empty_blob_cache(cache); - } - - #[test] - fn block_valid_blobs_invalid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); - let (block_commitments, blobs, random_blobs) = - $setup_fn(block_commitments, blobs, random_blobs); - - let block_root = Hash256::zero(); - let mut cache = <$type_name>::empty(block_root); - cache.merge_block(block_commitments); - cache.merge_blobs(blobs); - cache.merge_blobs(random_blobs); - - assert_cache_consistent(cache); - } - - #[test] - fn valid_blobs_block_invalid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); - let (block_commitments, blobs, random_blobs) = - $setup_fn(block_commitments, blobs, random_blobs); - - let block_root = Hash256::zero(); - let mut cache = <$type_name>::empty(block_root); - cache.merge_blobs(blobs); - cache.merge_block(block_commitments); - cache.merge_blobs(random_blobs); - - assert_cache_consistent(cache); - } - - #[test] - fn valid_blobs_invalid_blobs_block() { - let (block_commitments, blobs, random_blobs) = pre_setup(); - let (block_commitments, blobs, random_blobs) = - $setup_fn(block_commitments, blobs, random_blobs); - - let block_root = Hash256::zero(); - let mut cache = <$type_name>::empty(block_root); - cache.merge_blobs(blobs); - cache.merge_blobs(random_blobs); - cache.merge_block(block_commitments); - - assert_cache_consistent(cache); - } - } - }; - } - - generate_tests!( - pending_components_tests, - PendingComponents, - executed_block, - verified_blobs, - setup_pending_components - ); -} diff --git 
a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 8dcbe9aa450..edd981e6ddb 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -33,7 +33,6 @@ use crate::blob_verification::KzgVerifiedBlob; use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableBlock, AvailableExecutedBlock, }; -use crate::data_availability_checker::availability_view::AvailabilityView; use crate::data_availability_checker::{Availability, AvailabilityCheckError}; use crate::store::{DBColumn, KeyValueStore}; use crate::BeaconChainTypes; @@ -59,6 +58,126 @@ pub struct PendingComponents { } impl PendingComponents { + /// Returns an immutable reference to the cached block. + pub fn get_cached_block(&self) -> &Option> { + &self.executed_block + } + + /// Returns an immutable reference to the fixed vector of cached blobs. + pub fn get_cached_blobs( + &self, + ) -> &FixedVector>, E::MaxBlobsPerBlock> { + &self.verified_blobs + } + + /// Returns a mutable reference to the cached block. + pub fn get_cached_block_mut(&mut self) -> &mut Option> { + &mut self.executed_block + } + + /// Returns a mutable reference to the fixed vector of cached blobs. + pub fn get_cached_blobs_mut( + &mut self, + ) -> &mut FixedVector>, E::MaxBlobsPerBlock> { + &mut self.verified_blobs + } + + /// Checks if a blob exists at the given index in the cache. + /// + /// Returns: + /// - `true` if a blob exists at the given index. + /// - `false` otherwise. + pub fn blob_exists(&self, blob_index: usize) -> bool { + self.get_cached_blobs() + .get(blob_index) + .map(|b| b.is_some()) + .unwrap_or(false) + } + + /// Returns the number of blobs that are expected to be present. Returns `None` if we don't have a + /// block. + /// + /// This corresponds to the number of commitments that are present in a block. + pub fn num_expected_blobs(&self) -> Option { + self.get_cached_block() + .as_ref() + .map(|b| b.get_commitments().len()) + } + + /// Returns the number of blobs that have been received and are stored in the cache. + pub fn num_received_blobs(&self) -> usize { + self.get_cached_blobs().iter().flatten().count() + } + + /// Inserts a block into the cache. + pub fn insert_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { + *self.get_cached_block_mut() = Some(block) + } + + /// Inserts a blob at a specific index in the cache. + /// + /// Existing blob at the index will be replaced. + pub fn insert_blob_at_index(&mut self, blob_index: usize, blob: KzgVerifiedBlob) { + if let Some(b) = self.get_cached_blobs_mut().get_mut(blob_index) { + *b = Some(blob); + } + } + + /// Merges a given set of blobs into the cache. + /// + /// Blobs are only inserted if: + /// 1. The blob entry at the index is empty and no block exists. + /// 2. The block exists and its commitment matches the blob's commitment. + pub fn merge_blobs( + &mut self, + blobs: FixedVector>, E::MaxBlobsPerBlock>, + ) { + for (index, blob) in blobs.iter().cloned().enumerate() { + let Some(blob) = blob else { continue }; + self.merge_single_blob(index, blob); + } + } + + /// Merges a single blob into the cache. + /// + /// Blobs are only inserted if: + /// 1. The blob entry at the index is empty and no block exists, or + /// 2. The block exists and its commitment matches the blob's commitment. 
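An illustrative sketch, separate from the patch, of the availability rule that these counting helpers feed into (documented on `is_available` further down): a plain `Option<usize>` and a slice stand in for the cached block's commitments and the `FixedVector` of verified blobs.

// Sketch of the availability rule: a pending component is available once a
// block is present and one verified blob has been received per commitment.
fn is_available(expected_commitments: Option<usize>, received_blobs: &[Option<()>]) -> bool {
    match expected_commitments {
        // `num_expected_blobs` is `None` until the block arrives, so blobs
        // alone can never make the component available.
        None => false,
        // With a block cached, compare the commitment count against the filled
        // blob slots, as `num_received_blobs` does with `iter().flatten().count()`.
        Some(expected) => expected == received_blobs.iter().flatten().count(),
    }
}

fn main() {
    // No block yet: never available, even with blobs cached.
    assert!(!is_available(None, &[Some(()), None]));
    // Block with two commitments and two received blobs: available.
    assert!(is_available(Some(2), &[Some(()), Some(())]));
    // Block with no blob commitments: available as soon as the block is cached.
    assert!(is_available(Some(0), &[]));
}

Because the expected count comes from the block's own commitment list, a block that carries no blob commitments becomes available the moment the block itself is cached.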
+ pub fn merge_single_blob(&mut self, index: usize, blob: KzgVerifiedBlob) { + if let Some(cached_block) = self.get_cached_block() { + let block_commitment_opt = cached_block.get_commitments().get(index).copied(); + if let Some(block_commitment) = block_commitment_opt { + if block_commitment == *blob.get_commitment() { + self.insert_blob_at_index(index, blob) + } + } + } else if !self.blob_exists(index) { + self.insert_blob_at_index(index, blob) + } + } + + /// Inserts a new block and revalidates the existing blobs against it. + /// + /// Blobs that don't match the new block's commitments are evicted. + pub fn merge_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { + self.insert_block(block); + let reinsert = std::mem::take(self.get_cached_blobs_mut()); + self.merge_blobs(reinsert); + } + + /// Checks if the block and all of its expected blobs are available in the cache. + /// + /// Returns `true` if both the block exists and the number of received blobs matches the number + /// of expected blobs. + pub fn is_available(&self) -> bool { + if let Some(num_expected_blobs) = self.num_expected_blobs() { + num_expected_blobs == self.num_received_blobs() + } else { + false + } + } + + /// Returns an empty `PendingComponents` object with the given block root. pub fn empty(block_root: Hash256) -> Self { Self { block_root, @@ -118,6 +237,7 @@ impl PendingComponents { ))) } + /// Returns the epoch of the block if it is cached, otherwise returns the epoch of the first blob. pub fn epoch(&self) -> Option { self.executed_block .as_ref() @@ -1703,3 +1823,215 @@ mod test { ); } } + +#[cfg(test)] +mod pending_components_tests { + use super::*; + use crate::block_verification_types::BlockImportData; + use crate::eth1_finalization_cache::Eth1FinalizationData; + use crate::test_utils::{generate_rand_block_and_blobs, NumBlobs}; + use crate::PayloadVerificationOutcome; + use fork_choice::PayloadVerificationStatus; + use kzg::KzgCommitment; + use rand::rngs::StdRng; + use rand::SeedableRng; + use state_processing::ConsensusContext; + use types::test_utils::TestRandom; + use types::{BeaconState, ForkName, MainnetEthSpec, SignedBeaconBlock, Slot}; + + type E = MainnetEthSpec; + + type Setup = ( + SignedBeaconBlock, + FixedVector>>, ::MaxBlobsPerBlock>, + FixedVector>>, ::MaxBlobsPerBlock>, + ); + + pub fn pre_setup() -> Setup { + let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + let (block, blobs_vec) = + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng); + let mut blobs: FixedVector<_, ::MaxBlobsPerBlock> = FixedVector::default(); + + for blob in blobs_vec { + if let Some(b) = blobs.get_mut(blob.index as usize) { + *b = Some(Arc::new(blob)); + } + } + + let mut invalid_blobs: FixedVector< + Option>>, + ::MaxBlobsPerBlock, + > = FixedVector::default(); + for (index, blob) in blobs.iter().enumerate() { + if let Some(invalid_blob) = blob { + let mut blob_copy = invalid_blob.as_ref().clone(); + blob_copy.kzg_commitment = KzgCommitment::random_for_test(&mut rng); + *invalid_blobs.get_mut(index).unwrap() = Some(Arc::new(blob_copy)); + } + } + + (block, blobs, invalid_blobs) + } + + type PendingComponentsSetup = ( + DietAvailabilityPendingExecutedBlock, + FixedVector>, ::MaxBlobsPerBlock>, + FixedVector>, ::MaxBlobsPerBlock>, + ); + + pub fn setup_pending_components( + block: SignedBeaconBlock, + valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + ) -> PendingComponentsSetup { + let blobs = FixedVector::from( + 
valid_blobs + .iter() + .map(|blob_opt| { + blob_opt + .as_ref() + .map(|blob| KzgVerifiedBlob::__assumed_valid(blob.clone())) + }) + .collect::>(), + ); + let invalid_blobs = FixedVector::from( + invalid_blobs + .iter() + .map(|blob_opt| { + blob_opt + .as_ref() + .map(|blob| KzgVerifiedBlob::__assumed_valid(blob.clone())) + }) + .collect::>(), + ); + let dummy_parent = block.clone_as_blinded(); + let block = AvailabilityPendingExecutedBlock { + block: Arc::new(block), + import_data: BlockImportData { + block_root: Default::default(), + state: BeaconState::new(0, Default::default(), &ChainSpec::minimal()), + parent_block: dummy_parent, + parent_eth1_finalization_data: Eth1FinalizationData { + eth1_data: Default::default(), + eth1_deposit_index: 0, + }, + confirmed_state_roots: vec![], + consensus_context: ConsensusContext::new(Slot::new(0)), + }, + payload_verification_outcome: PayloadVerificationOutcome { + payload_verification_status: PayloadVerificationStatus::Verified, + is_valid_merge_transition_block: false, + }, + }; + (block.into(), blobs, invalid_blobs) + } + + pub fn assert_cache_consistent(cache: PendingComponents) { + if let Some(cached_block) = cache.get_cached_block() { + let cached_block_commitments = cached_block.get_commitments(); + for index in 0..E::max_blobs_per_block() { + let block_commitment = cached_block_commitments.get(index).copied(); + let blob_commitment_opt = cache.get_cached_blobs().get(index).unwrap(); + let blob_commitment = blob_commitment_opt.as_ref().map(|b| *b.get_commitment()); + assert_eq!(block_commitment, blob_commitment); + } + } else { + panic!("No cached block") + } + } + + pub fn assert_empty_blob_cache(cache: PendingComponents) { + for blob in cache.get_cached_blobs().iter() { + assert!(blob.is_none()); + } + } + + #[test] + fn valid_block_invalid_blobs_valid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + setup_pending_components(block_commitments, blobs, random_blobs); + let block_root = Hash256::zero(); + let mut cache = >::empty(block_root); + cache.merge_block(block_commitments); + cache.merge_blobs(random_blobs); + cache.merge_blobs(blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn invalid_blobs_block_valid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + setup_pending_components(block_commitments, blobs, random_blobs); + let block_root = Hash256::zero(); + let mut cache = >::empty(block_root); + cache.merge_blobs(random_blobs); + cache.merge_block(block_commitments); + cache.merge_blobs(blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn invalid_blobs_valid_blobs_block() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + setup_pending_components(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = >::empty(block_root); + cache.merge_blobs(random_blobs); + cache.merge_blobs(blobs); + cache.merge_block(block_commitments); + + assert_empty_blob_cache(cache); + } + + #[test] + fn block_valid_blobs_invalid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + setup_pending_components(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = >::empty(block_root); + cache.merge_block(block_commitments); + cache.merge_blobs(blobs); + 
cache.merge_blobs(random_blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn valid_blobs_block_invalid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + setup_pending_components(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = >::empty(block_root); + cache.merge_blobs(blobs); + cache.merge_block(block_commitments); + cache.merge_blobs(random_blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn valid_blobs_invalid_blobs_block() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + setup_pending_components(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = >::empty(block_root); + cache.merge_blobs(blobs); + cache.merge_blobs(random_blobs); + cache.merge_block(block_commitments); + + assert_cache_consistent(cache); + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index 35c114db542..c3492b53bda 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -10,6 +10,7 @@ use parking_lot::RwLock; use ssz_derive::{Decode, Encode}; use state_processing::{BlockReplayer, ConsensusContext, StateProcessingStrategy}; use std::sync::Arc; +use types::beacon_block_body::KzgCommitments; use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc}; use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; @@ -42,6 +43,15 @@ impl DietAvailabilityPendingExecutedBlock { .blob_kzg_commitments() .map_or(0, |commitments| commitments.len()) } + + pub fn get_commitments(&self) -> KzgCommitments { + self.as_block() + .message() + .body() + .blob_kzg_commitments() + .cloned() + .unwrap_or_default() + } } /// This LRU cache holds BeaconStates used for block import. If the cache overflows,