Skip to content

Commit

Permalink
Introduce peer_das_epoch and make blobs and columns mutually exclusive.
Browse files Browse the repository at this point in the history
  • Loading branch information
jimmygchen committed Apr 17, 2024
1 parent c5d0e31 commit 467da4a
Show file tree
Hide file tree
Showing 6 changed files with 94 additions and 81 deletions.
4 changes: 2 additions & 2 deletions beacon_node/beacon_chain/src/block_verification_types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -84,8 +84,8 @@ enum RpcBlockInner<E: EthSpec> {
/// This variant is used with parent lookups and by-range responses. It should have all blobs
/// ordered, all block roots matching, and the correct number of blobs for this block.
BlockAndBlobs(Arc<SignedBeaconBlock<E>>, BlobSidecarList<E>),
/// This variant is used with parent lookups and by-range responses. It should have all data columns
/// ordered, all block roots matching, and the correct number of data columns for this block.
/// This variant is used with parent lookups and by-range responses. It should have all
/// requested data columns, with all block roots matching, for this block.
BlockAndDataColumns(Arc<SignedBeaconBlock<E>>, DataColumnSidecarList<E>),
}

Expand Down
140 changes: 71 additions & 69 deletions beacon_node/beacon_chain/src/data_availability_checker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -259,47 +259,40 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
block: RpcBlock<T::EthSpec>,
) -> Result<MaybeAvailableBlock<T::EthSpec>, AvailabilityCheckError> {
let (block_root, block, blobs, data_columns) = block.deconstruct();
match (blobs, data_columns) {
(None, None) => {
if self.blobs_required_for_block(&block) {
Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block })
} else {
Ok(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs: None,
data_columns: None,
}))
}
if self.blobs_required_for_block(&block) {
if let Some(blob_list) = blobs.as_ref() {
let kzg = self
.kzg
.as_ref()
.ok_or(AvailabilityCheckError::KzgNotInitialized)?;
verify_kzg_for_blob_list(blob_list.iter(), kzg)
.map_err(AvailabilityCheckError::Kzg)?;
return Ok(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs,
data_columns: None,
}));
}
(maybe_blob_list, maybe_data_column_list) => {
let (verified_blobs, verified_data_column) =
if self.blobs_required_for_block(&block) {
let kzg = self
.kzg
.as_ref()
.ok_or(AvailabilityCheckError::KzgNotInitialized)?;

if let Some(blob_list) = maybe_blob_list.as_ref() {
verify_kzg_for_blob_list(blob_list.iter(), kzg)
.map_err(AvailabilityCheckError::Kzg)?;
}
if let Some(data_column_list) = maybe_data_column_list.as_ref() {
verify_kzg_for_data_column_list(data_column_list.iter(), kzg)
.map_err(AvailabilityCheckError::Kzg)?;
}
(maybe_blob_list, maybe_data_column_list)
} else {
(None, None)
};
Ok(MaybeAvailableBlock::Available(AvailableBlock {
}
if self.data_columns_required_for_block(&block) {
if let Some(data_column_list) = data_columns.as_ref() {
let kzg = self
.kzg
.as_ref()
.ok_or(AvailabilityCheckError::KzgNotInitialized)?;
verify_kzg_for_data_column_list(data_column_list.iter(), kzg)
.map_err(AvailabilityCheckError::Kzg)?;
return Ok(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs: verified_blobs,
data_columns: verified_data_column,
}))
blobs: None,
data_columns,
}));
}
}

Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block })
}

/// Checks if a vector of blocks are available. Returns a vector of `MaybeAvailableBlock`
Expand All @@ -313,6 +306,11 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
blocks: Vec<RpcBlock<T::EthSpec>>,
) -> Result<Vec<MaybeAvailableBlock<T::EthSpec>>, AvailabilityCheckError> {
let mut results = Vec::with_capacity(blocks.len());
let kzg = self
.kzg
.as_ref()
.ok_or(AvailabilityCheckError::KzgNotInitialized)?;

let all_blobs: BlobSidecarList<T::EthSpec> = blocks
.iter()
.filter(|block| self.blobs_required_for_block(block.as_block()))
Expand All @@ -324,43 +322,29 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {

// verify kzg for all blobs at once
if !all_blobs.is_empty() {
let kzg = self
.kzg
.as_ref()
.ok_or(AvailabilityCheckError::KzgNotInitialized)?;
verify_kzg_for_blob_list(all_blobs.iter(), kzg)?;
}

// TODO(das) verify kzg for all data columns

for block in blocks {
let (block_root, block, blobs, data_columns) = block.deconstruct();
match (blobs, data_columns) {
(None, None) => {
if self.blobs_required_for_block(&block) {
results.push(MaybeAvailableBlock::AvailabilityPending { block_root, block })
} else {
results.push(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs: None,
data_columns: None,
}))
}
}
(maybe_blob_list, maybe_data_column_list) => {
let (verified_blobs, verified_data_columns) =
if self.blobs_required_for_block(&block) {
(maybe_blob_list, maybe_data_column_list)
} else {
(None, None)
};
// already verified kzg for all blobs
results.push(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs: verified_blobs,
data_columns: verified_data_columns,
}))
}
if self.blobs_required_for_block(&block) && blobs.is_some() {
results.push(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs,
data_columns: None,
}))
} else if self.data_columns_required_for_block(&block) && data_columns.is_some() {
results.push(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs: None,
data_columns,
}))
} else {
results.push(MaybeAvailableBlock::AvailabilityPending { block_root, block })
}
}

Expand All @@ -370,7 +354,25 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
/// Determines the blob requirements for a block. If the block is pre-deneb, no blobs are required.
/// If the block's epoch is from prior to the data availability boundary, no blobs are required.
fn blobs_required_for_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool {
block.num_expected_blobs() > 0 && self.da_check_required_for_epoch(block.epoch())
block.num_expected_blobs() > 0
&& self.da_check_required_for_epoch(block.epoch())
&& !self.is_peer_das_enabled_for_epoch(block.epoch())
}

/// Determines the data column requirements for a block.
/// - If the block is pre-peerdas, no data columns are required.
/// - If the block's epoch is from prior to the data availability boundary, no data columns are required.
fn data_columns_required_for_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool {
    // Columns are only required when the block carries blobs, the epoch is inside the
    // data-availability window, and PeerDAS is active for that epoch.
    let block_epoch = block.epoch();
    block.num_expected_blobs() > 0
        && self.da_check_required_for_epoch(block_epoch)
        && self.is_peer_das_enabled_for_epoch(block_epoch)
}

/// Returns true if the given epoch is greater than or equal to the `PEER_DAS_EPOCH`.
fn is_peer_das_enabled_for_epoch(&self, block_epoch: Epoch) -> bool {
    // PeerDAS is disabled entirely when the spec does not schedule an activation epoch.
    match self.spec.peer_das_epoch {
        Some(peer_das_epoch) => block_epoch >= peer_das_epoch,
        None => false,
    }
}

/// The epoch at which we require a data availability check in block processing.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -691,10 +691,9 @@ pub struct OverflowLRUCache<T: BeaconChainTypes> {
/// The capacity of the LRU cache
capacity: NonZeroUsize,
/// The number of data columns the node is custodying.
// FIXME(das): Using `Option` as temporary workaround to disable custody requirement checks in
// tests. To be removed once we implement proper fork / epoch transition.
custody_column_count: Option<usize>,
log: Logger,
spec: ChainSpec,
}

impl<T: BeaconChainTypes> OverflowLRUCache<T> {
Expand All @@ -711,11 +710,12 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
Ok(Self {
critical: RwLock::new(critical),
overflow_store,
state_cache: StateLRUCache::new(beacon_store, spec),
state_cache: StateLRUCache::new(beacon_store, spec.clone()),
maintenance_lock: Mutex::new(()),
capacity,
custody_column_count,
log,
spec,
})
}

Expand Down Expand Up @@ -1388,8 +1388,14 @@ mod test {
let test_store = harness.chain.store.clone();
let capacity_non_zero = new_non_zero_usize(capacity);
let cache = Arc::new(
OverflowLRUCache::<T>::new(capacity_non_zero, test_store, None, spec.clone())
.expect("should create cache"),
OverflowLRUCache::<T>::new(
capacity_non_zero,
test_store,
None,
harness.logger().clone(),
spec.clone(),
)
.expect("should create cache"),
);
(harness, cache, chain_db_path)
}
Expand Down Expand Up @@ -1894,6 +1900,7 @@ mod test {
new_non_zero_usize(capacity),
harness.chain.store.clone(),
None,
harness.logger().clone(),
harness.chain.spec.clone(),
)
.expect("should recover cache");
Expand Down
1 change: 1 addition & 0 deletions beacon_node/network/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -737,6 +737,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
}

if !self.subscribe_all_data_column_subnets {
// TODO(das): subscribe after `PEER_DAS_EPOCH`
for column_subnet in DataColumnSubnetId::compute_custody_subnets::<T::EthSpec>(
self.network_globals.local_enr().node_id().raw().into(),
self.network_globals
Expand Down
3 changes: 3 additions & 0 deletions consensus/types/src/chain_spec.rs
Original file line number Diff line number Diff line change
Expand Up @@ -182,6 +182,7 @@ pub struct ChainSpec {
/*
* DAS params
*/
pub peer_das_epoch: Option<Epoch>,
pub custody_requirement: u64,

/*
Expand Down Expand Up @@ -722,6 +723,7 @@ impl ChainSpec {
/*
* DAS params
*/
peer_das_epoch: None,
custody_requirement: 1,

/*
Expand Down Expand Up @@ -1004,6 +1006,7 @@ impl ChainSpec {
/*
* DAS params
*/
peer_das_epoch: None,
custody_requirement: 1,
/*
* Network specific
Expand Down
10 changes: 5 additions & 5 deletions consensus/types/src/eth_spec.rs
Original file line number Diff line number Diff line change
Expand Up @@ -512,13 +512,13 @@ mod test {
use super::*;

#[test]
fn test_peerdas_config_all_specs() {
test_peerdas_config::<MainnetEthSpec>();
test_peerdas_config::<GnosisEthSpec>();
test_peerdas_config::<MinimalEthSpec>();
fn test_peer_das_config_all_specs() {
test_peer_das_config::<MainnetEthSpec>();
test_peer_das_config::<GnosisEthSpec>();
test_peer_das_config::<MinimalEthSpec>();
}

fn test_peerdas_config<E: EthSpec>() {
fn test_peer_das_config<E: EthSpec>() {
assert_eq!(
E::data_columns_per_subnet(),
E::number_of_columns() / E::data_column_subnet_count()
Expand Down

0 comments on commit 467da4a

Please sign in to comment.