Commit a6c4c09

PR reviews

dapplion committed Mar 26, 2024
1 parent 8c914c9 commit a6c4c09
Showing 2 changed files with 24 additions and 30 deletions.
beacon_node/beacon_chain/src/data_availability_checker.rs (9 additions, 17 deletions)
@@ -135,25 +135,17 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
         match block {
             Some(cached_block) => {
                 let block_commitments = cached_block.get_commitments();
-
-                let num_blobs_expected = block_commitments.len();
-                let mut blob_ids = Vec::with_capacity(num_blobs_expected);
-
-                // Zip here will always limit the number of iterations to the size of
-                // `block_commitment` because `blob_commitments` will always be populated
-                // with `Option` values up to `MAX_BLOBS_PER_BLOCK`.
-                for (index, (_, blob_commitment_opt)) in
-                    block_commitments.into_iter().zip(blobs.iter()).enumerate()
-                {
-                    // Always add a missing blob.
-                    if blob_commitment_opt.is_none() {
-                        blob_ids.push(BlobIdentifier {
+                let blob_ids = blobs
+                    .iter()
+                    .take(block_commitments.len())
+                    .enumerate()
+                    .filter_map(|(index, blob_commitment_opt)| {
+                        blob_commitment_opt.is_none().then_some(BlobIdentifier {
                             block_root,
                             index: index as u64,
-                        });
-                        continue;
-                    };
-                }
+                        })
+                    })
+                    .collect();
                 MissingBlobs::KnownMissing(blob_ids)
             }
             None => {
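
This hunk replaces the explicit for/zip loop with an iterator chain: look at no more blob slots than the block has commitments, keep only the slots that are still empty, and map each one to a BlobIdentifier. A minimal, self-contained sketch of that filter_map / then_some pattern, using simplified stand-in types rather than the actual Lighthouse ones, could look like this:

// Sketch only: `BlobIdentifier` and the blob list are simplified stand-ins,
// not the Lighthouse types; the iterator shape mirrors the diff above.
#[derive(Debug, PartialEq)]
struct BlobIdentifier {
    block_root: u64, // stand-in for Hash256
    index: u64,
}

fn missing_blob_ids(
    block_root: u64,
    num_commitments: usize,
    blobs: &[Option<()>], // Some(()) = blob already held, None = still missing
) -> Vec<BlobIdentifier> {
    blobs
        .iter()
        // Only the first `num_commitments` slots can ever be filled.
        .take(num_commitments)
        .enumerate()
        // `then_some` turns "slot is empty" into `Some(identifier)`, so
        // `filter_map` keeps exactly the missing indices.
        .filter_map(|(index, blob_opt)| {
            blob_opt.is_none().then_some(BlobIdentifier {
                block_root,
                index: index as u64,
            })
        })
        .collect()
}

fn main() {
    // The block commits to 3 blobs; index 1 is already held, so 0 and 2 are missing.
    let blobs = [None, Some(()), None, None];
    let ids = missing_blob_ids(0xab, 3, &blobs);
    assert_eq!(
        ids,
        vec![
            BlobIdentifier { block_root: 0xab, index: 0 },
            BlobIdentifier { block_root: 0xab, index: 2 },
        ]
    );
}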
beacon_node/network/src/sync/block_lookups/common.rs (15 additions, 13 deletions)
@@ -17,7 +17,7 @@ use std::ops::IndexMut;
 use std::sync::Arc;
 use std::time::Duration;
 use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList};
-use types::{BlobSidecar, ChainSpec, EthSpec, Hash256, SignedBeaconBlock};
+use types::{BlobSidecar, ChainSpec, Hash256, SignedBeaconBlock};
 
 #[derive(Debug, Copy, Clone)]
 pub enum ResponseType {
@@ -378,24 +378,26 @@ impl<L: Lookup, T: BeaconChainTypes> RequestState<L, T> for BlobRequestState<L,
         match blob {
             Some(blob) => {
                 let received_id = blob.id();
-                if !self.requested_ids.contains(&received_id) {
-                    self.state.register_failure_downloading();
-                    return Err(LookupVerifyError::UnrequestedBlobId);
-                }
 
-                blob.verify_blob_sidecar_inclusion_proof()
-                    .map_err(|_| LookupVerifyError::InvalidInclusionProof)?;
-                if blob.block_root() != expected_block_root {
-                    return Err(LookupVerifyError::UnrequestedHeader);
+                if !self.requested_ids.contains(&received_id) {
+                    Err(LookupVerifyError::UnrequestedBlobId)
+                } else if blob.verify_blob_sidecar_inclusion_proof().unwrap_or(false) {
+                    Err(LookupVerifyError::InvalidInclusionProof)
+                } else if blob.block_root() != expected_block_root {
+                    Err(LookupVerifyError::UnrequestedHeader)
+                } else {
+                    Ok(())
                 }
+                .map_err(|e| {
+                    self.state.register_failure_downloading();
+                    e
+                })?;
 
                 // State should remain downloading until we receive the stream terminator.
                 self.requested_ids.remove(&received_id);
-                let blob_index = blob.index;
 
-                if blob_index >= T::EthSpec::max_blobs_per_block() as u64 {
-                    return Err(LookupVerifyError::InvalidIndex(blob.index));
-                }
+                // The inclusion proof check above ensures `blob.index` is < MAX_BLOBS_PER_BLOCK
+                let blob_index = blob.index;
                 *self.blob_download_queue.index_mut(blob_index as usize) = Some(blob);
                 Ok(None)
             }
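
The second hunk collapses the separate early-return guards into a single if / else if / else expression that evaluates to a Result, so the failure bookkeeping runs once in a trailing map_err instead of before each return. A small stand-alone sketch of that shape, with illustrative stand-in names rather than the Lighthouse API (and with the inclusion-proof arm written so that an invalid proof is the error case; the diff's unwrap_or(false) condition reads with the opposite polarity, which may deserve a second look in review), might be:

// Sketch only: `State`, `VerifyError` and the boolean inputs are stand-ins
// for the Lighthouse lookup state, error enum, and real checks.
#[derive(Debug)]
enum VerifyError {
    UnrequestedBlobId,
    InvalidInclusionProof,
    UnrequestedHeader,
}

#[derive(Default)]
struct State {
    failed_downloads: u32,
}

impl State {
    fn register_failure_downloading(&mut self) {
        self.failed_downloads += 1;
    }
}

fn verify_blob(
    state: &mut State,
    requested: bool,
    proof_valid: bool,
    root_matches: bool,
) -> Result<(), VerifyError> {
    // The whole guard chain is one expression; exactly one arm is selected.
    if !requested {
        Err(VerifyError::UnrequestedBlobId)
    } else if !proof_valid {
        Err(VerifyError::InvalidInclusionProof)
    } else if !root_matches {
        Err(VerifyError::UnrequestedHeader)
    } else {
        Ok(())
    }
    // Every error arm funnels through this single bookkeeping hook.
    .map_err(|e| {
        state.register_failure_downloading();
        e
    })?;
    Ok(())
}

fn main() {
    let mut state = State::default();
    assert!(verify_blob(&mut state, true, true, true).is_ok());
    assert!(verify_blob(&mut state, true, false, true).is_err());
    assert_eq!(state.failed_downloads, 1);
}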

0 comments on commit a6c4c09
