Fix todos in deneb code (#4547)
* Low-hanging fruit

* Remove unnecessary todo

I think it's fine not to handle this here, since the calling functions already handle the error.
There is no specific reason to handle it in this function as well.

* Rename BlobError to GossipBlobError

I feel this better signifies what the error is for. The BlobError was only for failures when
gossip-verifying a blob. We cannot get this error when doing RPC validation.

* Remove the BlockError::BlobValidation variant

This error was only there to appease gossip verification before publish.
It's unclear how to peer-score this error since it cannot actually occur during any
block verification flow.
This commit introduces an additional error type, BlockContentsError, to better represent the
error type (see the sketch after this list).

* Add docs for peer scoring (or lack thereof) of AvailabilityCheck errors

* I do not see a non-convoluted way of doing this. It's okay to have some redundant code here.

* Removing this to catch the failure red-handed

* Fix compilation

* Cannot be deleted because some tests assume the trait impl

Also useful to have around for testing in the future, imo.

* Add some metrics and logs

* Only process the `Imported` variant in sync_methods

The only additional thing that might be useful for the other variants is logging. We can add that
later if required (see the sketch after this list).

* Convert to TryFrom

Not really sure where this would be used, but just did what the comment says.
We could consider just returning the Block variant for a deneb block in the From version (see the
sketch after this list).

* Unlikely to change now

* This is fine as this is max_rpc_size per RPC chunk (for blobs, it would be 128 KB max; see the size check after this list)

* Log the count instead of individual blobs; we can delete the log later if it becomes too annoying.

* Add block production blob verification timer

* Extend block_streamer test to deneb

* Remove dbg statement

* Fix tests
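
For the BlockContentsError bullet above, here is a minimal sketch of what such a wrapper type could look like, assuming it simply combines the block and gossip-blob failure domains; the exact definition in the repository may differ:

```rust
// Hypothetical sketch only; names follow the commit message, not necessarily
// the real definition in the repository.
pub enum BlockContentsError<E: EthSpec> {
    /// Failure while verifying the block itself.
    BlockError(BlockError<E>),
    /// Failure while gossip-verifying one of the accompanying blobs.
    BlobError(GossipBlobError<E>),
}

impl<E: EthSpec> From<BlockError<E>> for BlockContentsError<E> {
    fn from(e: BlockError<E>) -> Self {
        Self::BlockError(e)
    }
}

impl<E: EthSpec> From<GossipBlobError<E>> for BlockContentsError<E> {
    fn from(e: GossipBlobError<E>) -> Self {
        Self::BlobError(e)
    }
}
```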
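
For the `Imported`-variant bullet above, a rough sketch of the intent with stand-in types; the real handler in sync_methods works with Lighthouse's internal result and hash types:

```rust
// Illustrative only: stand-in aliases instead of the real Hash256/Slot types.
type Hash256 = [u8; 32];
type Slot = u64;

enum AvailabilityProcessingStatus {
    Imported(Hash256),
    MissingComponents(Slot, Hash256),
}

fn handle_processing_status(status: AvailabilityProcessingStatus) {
    match status {
        AvailabilityProcessingStatus::Imported(block_root) => {
            // Only the Imported case drives further sync work.
            println!("imported block {:?}", block_root);
        }
        // Other variants are ignored for now; logging can be added later if useful.
        _ => {}
    }
}
```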
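
For the TryFrom bullet above, a sketch with hypothetical, simplified types showing why TryFrom fits better than From here: the conversion has a genuine failure case for deneb blocks.

```rust
use std::convert::TryFrom;

// Hypothetical, simplified types purely for illustration.
struct SignedBeaconBlock {
    is_deneb: bool,
}

enum SignedBlockContents {
    Block(SignedBeaconBlock),
    BlockAndBlobs(SignedBeaconBlock, Vec<Vec<u8>>),
}

struct MissingBlobsError;

impl TryFrom<SignedBeaconBlock> for SignedBlockContents {
    type Error = MissingBlobsError;

    fn try_from(block: SignedBeaconBlock) -> Result<Self, Self::Error> {
        if block.is_deneb {
            // A deneb block alone is not enough (blobs are required), so the
            // conversion can fail. A From impl would instead have to just
            // return the Block variant here.
            Err(MissingBlobsError)
        } else {
            Ok(SignedBlockContents::Block(block))
        }
    }
}
```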
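
For the max_rpc_size bullet above, the 128 KB figure matches the per-blob size under the deneb mainnet parameters, assumed here to be 4096 field elements of 32 bytes each:

```rust
// Blob size under the assumed deneb/EIP-4844 mainnet parameters:
// FIELD_ELEMENTS_PER_BLOB * BYTES_PER_FIELD_ELEMENT.
const FIELD_ELEMENTS_PER_BLOB: usize = 4096;
const BYTES_PER_FIELD_ELEMENT: usize = 32;
const BYTES_PER_BLOB: usize = FIELD_ELEMENTS_PER_BLOB * BYTES_PER_FIELD_ELEMENT; // 131_072 bytes = 128 KiB
```
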
pawanjay176 authored Aug 4, 2023
1 parent 9c75d80 commit a36e34e
Showing 20 changed files with 211 additions and 123 deletions.
14 changes: 7 additions & 7 deletions beacon_node/beacon_chain/src/beacon_block_streamer.rs
@@ -715,21 +715,21 @@ mod tests {
}

#[tokio::test]
async fn check_all_blocks_from_altair_to_capella() {
async fn check_all_blocks_from_altair_to_deneb() {
let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize;
let num_epochs = 8;
let bellatrix_fork_epoch = 2usize;
let capella_fork_epoch = 4usize;
let deneb_fork_epoch = 6usize;
let num_blocks_produced = num_epochs * slots_per_epoch;

let mut spec = test_spec::<MinimalEthSpec>();
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64));
spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
//FIXME(sean) extend this to test deneb?
spec.deneb_fork_epoch = None;
spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));

let harness = get_harness(VALIDATOR_COUNT, spec);
let harness = get_harness(VALIDATOR_COUNT, spec.clone());
// go to bellatrix fork
harness
.extend_slots(bellatrix_fork_epoch * slots_per_epoch)
@@ -836,19 +836,19 @@ mod tests {
}

#[tokio::test]
async fn check_fallback_altair_to_capella() {
async fn check_fallback_altair_to_deneb() {
let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize;
let num_epochs = 8;
let bellatrix_fork_epoch = 2usize;
let capella_fork_epoch = 4usize;
let deneb_fork_epoch = 6usize;
let num_blocks_produced = num_epochs * slots_per_epoch;

let mut spec = test_spec::<MinimalEthSpec>();
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64));
spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
//FIXME(sean) extend this to test deneb?
spec.deneb_fork_epoch = None;
spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));

let harness = get_harness(VALIDATOR_COUNT, spec);

15 changes: 8 additions & 7 deletions beacon_node/beacon_chain/src/beacon_chain.rs
@@ -8,7 +8,7 @@ use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}
use crate::beacon_proposer_cache::compute_proposer_duties_from_head;
use crate::beacon_proposer_cache::BeaconProposerCache;
use crate::blob_cache::BlobCache;
use crate::blob_verification::{self, BlobError, GossipVerifiedBlob};
use crate::blob_verification::{self, GossipBlobError, GossipVerifiedBlob};
use crate::block_times_cache::BlockTimesCache;
use crate::block_verification::POS_PANDA_BANNER;
use crate::block_verification::{
@@ -2015,7 +2015,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self: &Arc<Self>,
blob_sidecar: SignedBlobSidecar<T::EthSpec>,
subnet_id: u64,
) -> Result<GossipVerifiedBlob<T>, BlobError<T::EthSpec>> {
) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self)
}

@@ -2834,7 +2834,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
notify_execution_layer,
)?;

//TODO(sean) error handling?
publish_fn()?;

let executed_block = self
@@ -3216,10 +3215,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

if let Some(blobs) = blobs {
if !blobs.is_empty() {
//FIXME(sean) using this for debugging for now
info!(
self.log, "Writing blobs to store";
"block_root" => ?block_root
"block_root" => %block_root,
"count" => blobs.len(),
);
ops.push(StoreOp::PutBlobs(block_root, blobs));
}
@@ -4948,8 +4947,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let (mut block, _) = block.deconstruct();
*block.state_root_mut() = state_root;

//FIXME(sean)
// - add a new timer for processing here
let blobs_verification_timer =
metrics::start_timer(&metrics::BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES);
if let (Some(blobs), Some(proofs)) = (blobs_opt, proofs_opt) {
let kzg = self
.kzg
@@ -5012,6 +5011,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.put(beacon_block_root, blob_sidecars);
}

drop(blobs_verification_timer);

metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES);

trace!(
69 changes: 43 additions & 26 deletions beacon_node/beacon_chain/src/blob_verification.rs
@@ -21,8 +21,9 @@ use types::{
Hash256, KzgCommitment, RelativeEpoch, SignedBlobSidecar, Slot,
};

/// An error occurred while validating a gossip blob.
#[derive(Debug)]
pub enum BlobError<T: EthSpec> {
pub enum GossipBlobError<T: EthSpec> {
/// The blob sidecar is from a slot that is later than the current slot (with respect to the
/// gossip clock disparity).
///
@@ -109,15 +110,30 @@ pub enum BlobError<T: EthSpec> {
},
}

impl<T: EthSpec> From<BeaconChainError> for BlobError<T> {
impl<T: EthSpec> std::fmt::Display for GossipBlobError<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
GossipBlobError::BlobParentUnknown(blob_sidecar) => {
write!(
f,
"BlobParentUnknown(parent_root:{})",
blob_sidecar.block_parent_root
)
}
other => write!(f, "{:?}", other),
}
}
}

impl<T: EthSpec> From<BeaconChainError> for GossipBlobError<T> {
fn from(e: BeaconChainError) -> Self {
BlobError::BeaconChainError(e)
GossipBlobError::BeaconChainError(e)
}
}

impl<T: EthSpec> From<BeaconStateError> for BlobError<T> {
impl<T: EthSpec> From<BeaconStateError> for GossipBlobError<T> {
fn from(e: BeaconStateError) -> Self {
BlobError::BeaconChainError(BeaconChainError::BeaconStateError(e))
GossipBlobError::BeaconChainError(BeaconChainError::BeaconStateError(e))
}
}

@@ -137,7 +153,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlob<T> {
pub fn new(
blob: SignedBlobSidecar<T::EthSpec>,
chain: &BeaconChain<T>,
) -> Result<Self, BlobError<T::EthSpec>> {
) -> Result<Self, GossipBlobError<T::EthSpec>> {
let blob_index = blob.message.index;
validate_blob_sidecar_for_gossip(blob, blob_index, chain)
}
@@ -162,7 +178,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
signed_blob_sidecar: SignedBlobSidecar<T::EthSpec>,
subnet: u64,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlob<T>, BlobError<T::EthSpec>> {
) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
let blob_slot = signed_blob_sidecar.message.slot;
let blob_index = signed_blob_sidecar.message.index;
let block_parent_root = signed_blob_sidecar.message.block_parent_root;
@@ -171,7 +187,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(

// Verify that the blob_sidecar was received on the correct subnet.
if blob_index != subnet {
return Err(BlobError::InvalidSubnet {
return Err(GossipBlobError::InvalidSubnet {
expected: blob_index,
received: subnet,
});
@@ -183,7 +199,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or(BeaconChainError::UnableToReadSlot)?;
if blob_slot > latest_permissible_slot {
return Err(BlobError::FutureSlot {
return Err(GossipBlobError::FutureSlot {
message_slot: blob_slot,
latest_permissible_slot,
});
@@ -196,7 +212,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.epoch
.start_slot(T::EthSpec::slots_per_epoch());
if blob_slot <= latest_finalized_slot {
return Err(BlobError::PastFinalizedSlot {
return Err(GossipBlobError::PastFinalizedSlot {
blob_slot,
finalized_slot: latest_finalized_slot,
});
@@ -207,9 +223,9 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.observed_blob_sidecars
.read()
.is_known(&signed_blob_sidecar.message)
.map_err(|e| BlobError::BeaconChainError(e.into()))?
.map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
{
return Err(BlobError::RepeatBlob {
return Err(GossipBlobError::RepeatBlob {
proposer: blob_proposer_index,
slot: blob_slot,
index: blob_index,
@@ -224,13 +240,15 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.get_block(&block_parent_root)
{
if parent_block.slot >= blob_slot {
return Err(BlobError::BlobIsNotLaterThanParent {
return Err(GossipBlobError::BlobIsNotLaterThanParent {
blob_slot,
parent_slot: parent_block.slot,
});
}
} else {
return Err(BlobError::BlobParentUnknown(signed_blob_sidecar.message));
return Err(GossipBlobError::BlobParentUnknown(
signed_blob_sidecar.message,
));
}

// Note: We check that the proposer_index matches against the shuffling first to avoid
Expand Down Expand Up @@ -301,9 +319,9 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(

let parent_block = chain
.get_blinded_block(&block_parent_root)
.map_err(BlobError::BeaconChainError)?
.map_err(GossipBlobError::BeaconChainError)?
.ok_or_else(|| {
BlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root))
GossipBlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root))
})?;

let mut parent_state = chain
Expand Down Expand Up @@ -338,7 +356,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
};

if proposer_index != blob_proposer_index as usize {
return Err(BlobError::ProposerIndexMismatch {
return Err(GossipBlobError::ProposerIndexMismatch {
sidecar: blob_proposer_index as usize,
local: proposer_index,
});
@@ -350,11 +368,11 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)
.map_err(BlobError::BeaconChainError)?;
.map_err(GossipBlobError::BeaconChainError)?;

let pubkey = pubkey_cache
.get(proposer_index)
.ok_or_else(|| BlobError::UnknownValidator(proposer_index as u64))?;
.ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?;

signed_blob_sidecar.verify_signature(
None,
@@ -366,7 +384,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
};

if !signature_is_valid {
return Err(BlobError::ProposerSignatureInvalid);
return Err(GossipBlobError::ProposerSignatureInvalid);
}

// Now the signature is valid, store the proposal so we don't accept another blob sidecar
@@ -384,9 +402,9 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.observed_blob_sidecars
.write()
.observe_sidecar(&signed_blob_sidecar.message)
.map_err(|e| BlobError::BeaconChainError(e.into()))?
.map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
{
return Err(BlobError::RepeatBlob {
return Err(GossipBlobError::RepeatBlob {
proposer: proposer_index as u64,
slot: blob_slot,
index: blob_index,
@@ -412,13 +430,12 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
///
/// Note: This is a copy of the `block_verification::cheap_state_advance_to_obtain_committees` to return
/// a BlobError error type instead.
/// TODO(pawan): try to unify the 2 functions.
fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
state: &'a mut BeaconState<E>,
state_root_opt: Option<Hash256>,
blob_slot: Slot,
spec: &ChainSpec,
) -> Result<Cow<'a, BeaconState<E>>, BlobError<E>> {
) -> Result<Cow<'a, BeaconState<E>>, GossipBlobError<E>> {
let block_epoch = blob_slot.epoch(E::slots_per_epoch());

if state.current_epoch() == block_epoch {
@@ -429,7 +446,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(

Ok(Cow::Borrowed(state))
} else if state.slot() > blob_slot {
Err(BlobError::BlobIsNotLaterThanParent {
Err(GossipBlobError::BlobIsNotLaterThanParent {
blob_slot,
parent_slot: state.slot(),
})
@@ -440,7 +457,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
// Advance the state into the same epoch as the block. Use the "partial" method since state
// roots are not important for proposer/attester shuffling.
partial_state_advance(&mut state, state_root_opt, target_slot, spec)
.map_err(|e| BlobError::BeaconChainError(BeaconChainError::from(e)))?;
.map_err(|e| GossipBlobError::BeaconChainError(BeaconChainError::from(e)))?;

state.build_committee_cache(RelativeEpoch::Previous, spec)?;
state.build_committee_cache(RelativeEpoch::Current, spec)?;