From 82b131d37fef84b93c4a3477ee8093a0b39f81e7 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Mon, 22 Apr 2024 19:21:21 -0500 Subject: [PATCH 01/16] Electra: Add New Containers (#5607) * Electra: Add New Containers --- consensus/types/src/consolidation.rs | 35 ++++++++++++++++++ consensus/types/src/deposit_receipt.rs | 37 +++++++++++++++++++ .../src/execution_layer_withdrawal_request.rs | 34 +++++++++++++++++ consensus/types/src/lib.rs | 14 +++++++ .../types/src/pending_balance_deposit.rs | 33 +++++++++++++++++ consensus/types/src/pending_consolidation.rs | 33 +++++++++++++++++ .../types/src/pending_partial_withdrawal.rs | 35 ++++++++++++++++++ consensus/types/src/signed_consolidation.rs | 32 ++++++++++++++++ 8 files changed, 253 insertions(+) create mode 100644 consensus/types/src/consolidation.rs create mode 100644 consensus/types/src/deposit_receipt.rs create mode 100644 consensus/types/src/execution_layer_withdrawal_request.rs create mode 100644 consensus/types/src/pending_balance_deposit.rs create mode 100644 consensus/types/src/pending_consolidation.rs create mode 100644 consensus/types/src/pending_partial_withdrawal.rs create mode 100644 consensus/types/src/signed_consolidation.rs diff --git a/consensus/types/src/consolidation.rs b/consensus/types/src/consolidation.rs new file mode 100644 index 00000000000..09a2d4bb0c3 --- /dev/null +++ b/consensus/types/src/consolidation.rs @@ -0,0 +1,35 @@ +use crate::test_utils::TestRandom; +use crate::Epoch; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct Consolidation { + #[serde(with = "serde_utils::quoted_u64")] + pub source_index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub target_index: u64, + pub epoch: Epoch, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(Consolidation); +} diff --git a/consensus/types/src/deposit_receipt.rs b/consensus/types/src/deposit_receipt.rs new file mode 100644 index 00000000000..6a08f717f3d --- /dev/null +++ b/consensus/types/src/deposit_receipt.rs @@ -0,0 +1,37 @@ +use crate::test_utils::TestRandom; +use crate::{Hash256, PublicKeyBytes, Signature}; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct DepositReceipt { + pub pubkey: PublicKeyBytes, + pub withdrawal_credentials: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub amount: u64, + pub signature: Signature, + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(DepositReceipt); +} diff --git a/consensus/types/src/execution_layer_withdrawal_request.rs b/consensus/types/src/execution_layer_withdrawal_request.rs new file mode 100644 index 00000000000..b1d814c2834 --- /dev/null +++ b/consensus/types/src/execution_layer_withdrawal_request.rs @@ -0,0 +1,34 @@ +use crate::test_utils::TestRandom; +use crate::{Address, PublicKeyBytes}; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use 
tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct ExecutionLayerWithdrawalRequest { + pub source_address: Address, + pub validator_pubkey: PublicKeyBytes, + #[serde(with = "serde_utils::quoted_u64")] + pub amount: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(ExecutionLayerWithdrawalRequest); +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 6551ebc1dda..dee55789398 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -29,16 +29,19 @@ pub mod bls_to_execution_change; pub mod builder_bid; pub mod chain_spec; pub mod checkpoint; +pub mod consolidation; pub mod consts; pub mod contribution_and_proof; pub mod deposit; pub mod deposit_data; pub mod deposit_message; +pub mod deposit_receipt; pub mod deposit_tree_snapshot; pub mod enr_fork_id; pub mod eth1_data; pub mod eth_spec; pub mod execution_block_hash; +pub mod execution_layer_withdrawal_request; pub mod execution_payload; pub mod execution_payload_header; pub mod fork; @@ -54,6 +57,9 @@ pub mod light_client_finality_update; pub mod light_client_optimistic_update; pub mod light_client_update; pub mod pending_attestation; +pub mod pending_balance_deposit; +pub mod pending_consolidation; +pub mod pending_partial_withdrawal; pub mod proposer_preparation_data; pub mod proposer_slashing; pub mod relative_epoch; @@ -63,6 +69,7 @@ pub mod signed_aggregate_and_proof; pub mod signed_beacon_block; pub mod signed_beacon_block_header; pub mod signed_bls_to_execution_change; +pub mod signed_consolidation; pub mod signed_contribution_and_proof; pub mod signed_voluntary_exit; pub mod signing_data; @@ -133,10 +140,12 @@ pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ ConfigAndPreset, ConfigAndPresetCapella, ConfigAndPresetDeneb, ConfigAndPresetElectra, }; +pub use crate::consolidation::Consolidation; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; pub use crate::deposit_message::DepositMessage; +pub use crate::deposit_receipt::DepositReceipt; pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; pub use crate::enr_fork_id::EnrForkId; pub use crate::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; @@ -144,6 +153,7 @@ pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::ExecutionBlockHeader; +pub use crate::execution_layer_withdrawal_request::ExecutionLayerWithdrawalRequest; pub use crate::execution_payload::{ ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadMerge, ExecutionPayloadRef, Transaction, Transactions, Withdrawals, @@ -189,6 +199,9 @@ pub use crate::payload::{ FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; +pub use crate::pending_balance_deposit::PendingBalanceDeposit; +pub use crate::pending_consolidation::PendingConsolidation; +pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; pub use crate::preset::{ AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, }; @@ -207,6 +220,7 @@ pub use crate::signed_beacon_block::{ }; pub use 
crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; +pub use crate::signed_consolidation::SignedConsolidation; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_data::{SignedRoot, SigningData}; diff --git a/consensus/types/src/pending_balance_deposit.rs b/consensus/types/src/pending_balance_deposit.rs new file mode 100644 index 00000000000..a2bce577f87 --- /dev/null +++ b/consensus/types/src/pending_balance_deposit.rs @@ -0,0 +1,33 @@ +use crate::test_utils::TestRandom; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct PendingBalanceDeposit { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub amount: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(PendingBalanceDeposit); +} diff --git a/consensus/types/src/pending_consolidation.rs b/consensus/types/src/pending_consolidation.rs new file mode 100644 index 00000000000..6e0b74a7383 --- /dev/null +++ b/consensus/types/src/pending_consolidation.rs @@ -0,0 +1,33 @@ +use crate::test_utils::TestRandom; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct PendingConsolidation { + #[serde(with = "serde_utils::quoted_u64")] + pub source_index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub target_index: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(PendingConsolidation); +} diff --git a/consensus/types/src/pending_partial_withdrawal.rs b/consensus/types/src/pending_partial_withdrawal.rs new file mode 100644 index 00000000000..e5ace7b2736 --- /dev/null +++ b/consensus/types/src/pending_partial_withdrawal.rs @@ -0,0 +1,35 @@ +use crate::test_utils::TestRandom; +use crate::Epoch; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct PendingPartialWithdrawal { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub amount: u64, + pub withdrawable_epoch: Epoch, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(PendingPartialWithdrawal); +} diff --git a/consensus/types/src/signed_consolidation.rs b/consensus/types/src/signed_consolidation.rs new file mode 100644 index 00000000000..f004ec23bd4 --- /dev/null +++ b/consensus/types/src/signed_consolidation.rs @@ -0,0 +1,32 @@ +use crate::test_utils::TestRandom; +use crate::{Consolidation, Signature}; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + 
Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct SignedConsolidation { + pub message: Consolidation, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(SignedConsolidation); +} From 72a33604b3f1bd5ba9e23812ae53b1df4f29e405 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 23 Apr 2024 23:13:34 +1000 Subject: [PATCH 02/16] Add timing for block availability (#5510) * Add timing for block availability * Attestation metrics analysis * Prettier printing * Add some metrics and timings to track late blocks * Update to latest unstable * fmt * Merge latest unstable * Small tweaks * Try pushing blob timing down into verification * Simplify for clippy --- beacon_node/beacon_chain/src/beacon_chain.rs | 65 +++++----- .../beacon_chain/src/blob_verification.rs | 42 +++++-- .../beacon_chain/src/block_times_cache.rs | 89 +++++++++++++- .../beacon_chain/src/block_verification.rs | 3 +- .../beacon_chain/src/canonical_head.rs | 115 +++++++++++++++--- .../src/data_availability_checker.rs | 23 +++- .../src/data_availability_checker/error.rs | 4 +- .../overflow_lru_cache.rs | 7 ++ beacon_node/beacon_chain/src/metrics.rs | 72 ++++++----- beacon_node/execution_layer/src/lib.rs | 24 ++-- beacon_node/network/src/metrics.rs | 63 ++++------ .../gossip_methods.rs | 40 +++--- 12 files changed, 391 insertions(+), 156 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b3790024f81..c59c5e8ed10 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2953,7 +2953,7 @@ impl BeaconChain { } /// Wraps `process_block` in logic to cache the block's commitments in the processing cache - /// and evict if the block was imported or erred. + /// and evict if the block was imported or errored. pub async fn process_block_with_early_caching>( self: &Arc, block_root: Hash256, @@ -2998,22 +2998,20 @@ impl BeaconChain { // Increment the Prometheus counter for block processing requests. metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); + let block_slot = unverified_block.block().slot(); + // Set observed time if not already set. Usually this should be set by gossip or RPC, // but just in case we set it again here (useful for tests). - if let (Some(seen_timestamp), Some(current_slot)) = - (self.slot_clock.now_duration(), self.slot_clock.now()) - { + if let Some(seen_timestamp) = self.slot_clock.now_duration() { self.block_times_cache.write().set_time_observed( block_root, - current_slot, + block_slot, seen_timestamp, None, None, ); } - let block_slot = unverified_block.block().slot(); - // A small closure to group the verification and import errors. let chain = self.clone(); let import_block = async move { @@ -3024,6 +3022,15 @@ impl BeaconChain { )?; publish_fn()?; let executed_block = chain.into_executed_block(execution_pending).await?; + // Record the time it took to ask the execution layer. 
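+            // (Per the `set_execution_time` logic added in block_times_cache.rs, the cache
+            // keeps the earliest execution timestamp recorded for a given block root, so
+            // re-processing the same block will not overwrite the time at which the EL
+            // verdict first came back.)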
+ if let Some(seen_timestamp) = self.slot_clock.now_duration() { + self.block_times_cache.write().set_execution_time( + block_root, + block_slot, + seen_timestamp, + ) + } + match executed_block { ExecutedBlock::Available(block) => { self.import_available_block(Box::new(block)).await @@ -3090,8 +3097,8 @@ impl BeaconChain { } } - /// Accepts a fully-verified block and awaits on it's payload verification handle to - /// get a fully `ExecutedBlock` + /// Accepts a fully-verified block and awaits on its payload verification handle to + /// get a fully `ExecutedBlock`. /// /// An error is returned if the verification handle couldn't be awaited. pub async fn into_executed_block( @@ -3224,10 +3231,6 @@ impl BeaconChain { ) -> Result> { match availability { Availability::Available(block) => { - // This is the time since start of the slot where all the components of the block have become available - let delay = - get_slot_delay_ms(timestamp_now(), block.block.slot(), &self.slot_clock); - metrics::observe_duration(&metrics::BLOCK_AVAILABILITY_DELAY, delay); // Block is fully available, import into fork choice self.import_available_block(block).await } @@ -3256,6 +3259,15 @@ impl BeaconChain { consensus_context, } = import_data; + // Record the time at which this block's blobs became available. + if let Some(blobs_available) = block.blobs_available_timestamp() { + self.block_times_cache.write().set_time_blob_observed( + block_root, + block.slot(), + blobs_available, + ); + } + // import let chain = self.clone(); let block_root = self @@ -3396,6 +3408,14 @@ impl BeaconChain { "Early attester cache insert failed"; "error" => ?e ); + } else { + let attestable_timestamp = + self.slot_clock.now_duration().unwrap_or_default(); + self.block_times_cache.write().set_time_attestable( + block_root, + signed_block.slot(), + attestable_timestamp, + ) } } else { warn!( @@ -3885,25 +3905,6 @@ impl BeaconChain { ); } - // Do not store metrics if the block was > 4 slots old, this helps prevent noise during - // sync. - if block_delay_total < self.slot_clock.slot_duration() * 4 { - // Observe the delay between when we observed the block and when we imported it. 
- let block_delays = self.block_times_cache.read().get_block_delays( - block_root, - self.slot_clock - .start_of(current_slot) - .unwrap_or_else(|| Duration::from_secs(0)), - ); - - metrics::observe_duration( - &metrics::BEACON_BLOCK_IMPORTED_OBSERVED_DELAY_TIME, - block_delays - .imported - .unwrap_or_else(|| Duration::from_secs(0)), - ); - } - if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_block_subscribers() { event_handler.register(EventKind::Block(SseBlock { diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 1fb61702006..a1ae260d930 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -14,6 +14,7 @@ use merkle_proof::MerkleTreeError; use slog::{debug, warn}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; +use std::time::Duration; use tree_hash::TreeHash; use types::blob_sidecar::BlobIdentifier; use types::{ @@ -214,7 +215,10 @@ impl GossipVerifiedBlob { pub fn __assumed_valid(blob: Arc>) -> Self { Self { block_root: blob.block_root(), - blob: KzgVerifiedBlob { blob }, + blob: KzgVerifiedBlob { + blob, + seen_timestamp: Duration::from_secs(0), + }, } } pub fn id(&self) -> BlobIdentifier { @@ -260,6 +264,8 @@ impl GossipVerifiedBlob { #[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedBlob { blob: Arc>, + #[ssz(skip_serializing, skip_deserializing)] + seen_timestamp: Duration, } impl PartialOrd for KzgVerifiedBlob { @@ -275,8 +281,12 @@ impl Ord for KzgVerifiedBlob { } impl KzgVerifiedBlob { - pub fn new(blob: Arc>, kzg: &Kzg) -> Result { - verify_kzg_for_blob(blob, kzg) + pub fn new( + blob: Arc>, + kzg: &Kzg, + seen_timestamp: Duration, + ) -> Result { + verify_kzg_for_blob(blob, kzg, seen_timestamp) } pub fn to_blob(self) -> Arc> { self.blob @@ -294,12 +304,18 @@ impl KzgVerifiedBlob { pub fn blob_index(&self) -> u64 { self.blob.index } + pub fn seen_timestamp(&self) -> Duration { + self.seen_timestamp + } /// Construct a `KzgVerifiedBlob` that is assumed to be valid. /// /// This should ONLY be used for testing. #[cfg(test)] pub fn __assumed_valid(blob: Arc>) -> Self { - Self { blob } + Self { + blob, + seen_timestamp: Duration::from_secs(0), + } } } @@ -309,9 +325,13 @@ impl KzgVerifiedBlob { pub fn verify_kzg_for_blob( blob: Arc>, kzg: &Kzg, + seen_timestamp: Duration, ) -> Result, KzgError> { validate_blob::(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof)?; - Ok(KzgVerifiedBlob { blob }) + Ok(KzgVerifiedBlob { + blob, + seen_timestamp, + }) } pub struct KzgVerifiedBlobList { @@ -322,13 +342,17 @@ impl KzgVerifiedBlobList { pub fn new>>>( blob_list: I, kzg: &Kzg, + seen_timestamp: Duration, ) -> Result { let blobs = blob_list.into_iter().collect::>(); verify_kzg_for_blob_list(blobs.iter(), kzg)?; Ok(Self { verified_blobs: blobs .into_iter() - .map(|blob| KzgVerifiedBlob { blob }) + .map(|blob| KzgVerifiedBlob { + blob, + seen_timestamp, + }) .collect(), }) } @@ -374,6 +398,8 @@ pub fn validate_blob_sidecar_for_gossip( let blob_epoch = blob_slot.epoch(T::EthSpec::slots_per_epoch()); let signed_block_header = &blob_sidecar.signed_block_header; + let seen_timestamp = chain.slot_clock.now_duration().unwrap_or_default(); + // This condition is not possible if we have received the blob from the network // since we only subscribe to `MaxBlobsPerBlock` subnets over gossip network. // We include this check only for completeness. 
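The gossip arrival time captured above is threaded through KZG verification in the next hunk, and ultimately feeds the `BlockDelays` computation in `block_times_cache.rs` below: a block only counts as available once the execution layer has verified it and every blob has been seen. A minimal, self-contained sketch of that availability rule, with all timestamps illustrative:

use std::time::Duration;

fn main() {
    // UNIX-epoch timestamps, as stored in `Timestamps` (values illustrative).
    let slot_start = Duration::from_secs(1_700_000_000);
    let execution_time = slot_start + Duration::from_millis(2900); // EL verdict received
    let all_blobs_observed = slot_start + Duration::from_millis(2500); // last blob seen

    // A block is available at the later of the two events.
    let available_time = std::cmp::max(execution_time, all_blobs_observed);
    // Delay relative to the slot start; `checked_sub` yields `None` on clock skew.
    let available_delay = available_time.checked_sub(slot_start);
    assert_eq!(available_delay, Some(Duration::from_millis(2900)));
}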
@@ -641,8 +667,8 @@ pub fn validate_blob_sidecar_for_gossip(
         .kzg
         .as_ref()
         .ok_or(GossipBlobError::KzgNotInitialized)?;
-    let kzg_verified_blob =
-        KzgVerifiedBlob::new(blob_sidecar, kzg).map_err(GossipBlobError::KzgError)?;
+    let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar, kzg, seen_timestamp)
+        .map_err(GossipBlobError::KzgError)?;

     Ok(GossipVerifiedBlob {
         block_root,
diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs
index c5293bcb0ee..db547a1186c 100644
--- a/beacon_node/beacon_chain/src/block_times_cache.rs
+++ b/beacon_node/beacon_chain/src/block_times_cache.rs
@@ -18,6 +18,9 @@ type BlockRoot = Hash256;
 #[derive(Clone, Default)]
 pub struct Timestamps {
     pub observed: Option<Duration>,
+    pub all_blobs_observed: Option<Duration>,
+    pub execution_time: Option<Duration>,
+    pub attestable: Option<Duration>,
     pub imported: Option<Duration>,
     pub set_as_head: Option<Duration>,
 }
@@ -25,8 +28,25 @@ pub struct Timestamps {
 // Helps arrange delay data so it is more relevant to metrics.
 #[derive(Debug, Default)]
 pub struct BlockDelays {
+    /// Time after the start of the slot when we saw the block.
     pub observed: Option<Duration>,
+    /// The time after the start of the slot when we saw all blobs.
+    pub all_blobs_observed: Option<Duration>,
+    /// The time it took to get verification from the EL for the block.
+    pub execution_time: Option<Duration>,
+    /// The delay from the start of the slot until the block became available.
+    ///
+    /// Equal to max(`observed + execution_time`, `all_blobs_observed`).
+    pub available: Option<Duration>,
+    /// Time after `available`.
+    pub attestable: Option<Duration>,
+    /// Time after `available`, like `attestable`.
+    ///
+    /// We need to use `available` again rather than `attestable` to handle the case where the block
+    /// does not get added to the early-attester cache.
     pub imported: Option<Duration>,
+    /// Time after `imported`.
     pub set_as_head: Option<Duration>,
 }
@@ -35,14 +55,34 @@ impl BlockDelays {
         let observed = times
             .observed
             .and_then(|observed_time| observed_time.checked_sub(slot_start_time));
+        let all_blobs_observed = times
+            .all_blobs_observed
+            .and_then(|all_blobs_observed| all_blobs_observed.checked_sub(slot_start_time));
+        let execution_time = times
+            .execution_time
+            .and_then(|execution_time| execution_time.checked_sub(times.observed?));
+        // Duration since UNIX epoch at which block became available.
+        let available_time = times.execution_time.map(|execution_time| {
+            std::cmp::max(execution_time, times.all_blobs_observed.unwrap_or_default())
+        });
+        // Duration from the start of the slot until the block became available.
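+        // (If the EL result never arrived, `available_time` is `None` and so is the
+        // delay below; `checked_sub` likewise yields `None` rather than panicking when
+        // a timestamp precedes the slot start.)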
+ let available_delay = + available_time.and_then(|available_time| available_time.checked_sub(slot_start_time)); + let attestable = times + .attestable + .and_then(|attestable_time| attestable_time.checked_sub(slot_start_time)); let imported = times .imported - .and_then(|imported_time| imported_time.checked_sub(times.observed?)); + .and_then(|imported_time| imported_time.checked_sub(available_time?)); let set_as_head = times .set_as_head .and_then(|set_as_head_time| set_as_head_time.checked_sub(times.imported?)); BlockDelays { observed, + all_blobs_observed, + execution_time, + available: available_delay, + attestable, imported, set_as_head, } @@ -109,6 +149,53 @@ impl BlockTimesCache { } } + pub fn set_time_blob_observed( + &mut self, + block_root: BlockRoot, + slot: Slot, + timestamp: Duration, + ) { + let block_times = self + .cache + .entry(block_root) + .or_insert_with(|| BlockTimesCacheValue::new(slot)); + if block_times + .timestamps + .all_blobs_observed + .map_or(true, |prev| timestamp > prev) + { + block_times.timestamps.all_blobs_observed = Some(timestamp); + } + } + + pub fn set_execution_time(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { + let block_times = self + .cache + .entry(block_root) + .or_insert_with(|| BlockTimesCacheValue::new(slot)); + if block_times + .timestamps + .execution_time + .map_or(true, |prev| timestamp < prev) + { + block_times.timestamps.execution_time = Some(timestamp); + } + } + + pub fn set_time_attestable(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { + let block_times = self + .cache + .entry(block_root) + .or_insert_with(|| BlockTimesCacheValue::new(slot)); + if block_times + .timestamps + .attestable + .map_or(true, |prev| timestamp < prev) + { + block_times.timestamps.attestable = Some(timestamp); + } + } + pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { let block_times = self .cache diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 3cd8a7f259b..38648949f9c 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -666,8 +666,7 @@ type PayloadVerificationHandle = /// - Parent is known /// - Signatures /// - State root check -/// - Per block processing -/// - Blobs sidecar has been validated if present +/// - Block processing /// /// Note: a `ExecutionPendingBlock` is not _forever_ valid to be imported, it may later become invalid /// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index ced4eda05cf..734575d2c0d 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -1405,13 +1405,6 @@ fn observe_head_block_delays( // Do not store metrics if the block was > 4 slots old, this helps prevent noise during // sync. if !block_from_sync { - // Observe the total block delay. This is the delay between the time the slot started - // and when the block was set as head. - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME, - block_delay_total, - ); - // Observe the delay between when we imported the block and when we set the block as // head. 
        let block_delays = block_times_cache.get_block_delays(
            block_root,
            slot_clock
                .start_of(head_block_slot)
                .unwrap_or_else(|| Duration::from_secs(0)),
        );
-        metrics::observe_duration(
-            &metrics::BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME,
+        // Update all the metrics.
+
+        // Convention here is to use "Time" to indicate the duration of the event and "Delay"
+        // to indicate the time since the start of the slot.
+        //
+        // Observe the total block delay. This is the delay between the time the slot started
+        // and when the block was set as head.
+        metrics::set_gauge(
+            &metrics::BEACON_BLOCK_DELAY_TOTAL,
+            block_delay_total.as_millis() as i64,
+        );
+
+        // The delay between the start of the slot and when the beacon block was first observed.
+        metrics::set_gauge(
+            &metrics::BEACON_BLOCK_DELAY_OBSERVED_SLOT_START,
             block_delays
                 .observed
-                .unwrap_or_else(|| Duration::from_secs(0)),
+                .unwrap_or_else(|| Duration::from_secs(0))
+                .as_millis() as i64,
+        );
+
+        // The delay from the start of the slot until all blobs were observed. Technically this
+        // is the time we last saw a blob related to this block/slot.
+        metrics::set_gauge(
+            &metrics::BEACON_BLOB_DELAY_ALL_OBSERVED_SLOT_START,
+            block_delays
+                .all_blobs_observed
+                .unwrap_or_else(|| Duration::from_secs(0))
+                .as_millis() as i64,
         );
-        metrics::observe_duration(
-            &metrics::BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME,
+        // The time it took to check the block's validity with the EL.
+        metrics::set_gauge(
+            &metrics::BEACON_BLOCK_DELAY_EXECUTION_TIME,
+            block_delays
+                .execution_time
+                .unwrap_or_else(|| Duration::from_secs(0))
+                .as_millis() as i64,
+        );
+
+        // The delay from the start of the slot until the block became available. Available here
+        // means that all the blobs have arrived and the block has been verified by the
+        // execution layer.
+        metrics::set_gauge(
+            &metrics::BEACON_BLOCK_DELAY_AVAILABLE_SLOT_START,
+            block_delays
+                .available
+                .unwrap_or_else(|| Duration::from_secs(0))
+                .as_millis() as i64,
+        );
+
+        // The delay from the start of the slot until the block became attestable.
+        metrics::set_gauge(
+            &metrics::BEACON_BLOCK_DELAY_ATTESTABLE_SLOT_START,
+            block_delays
+                .attestable
+                .unwrap_or_else(|| Duration::from_secs(0))
+                .as_millis() as i64,
+        );
+
+        // The time between the block becoming available and being imported.
+        metrics::set_gauge(
+            &metrics::BEACON_BLOCK_DELAY_IMPORTED_TIME,
+            block_delays
+                .imported
+                .unwrap_or_else(|| Duration::from_secs(0))
+                .as_millis() as i64,
+        );
+
+        // The time between the block being imported and being set as head.
+        metrics::set_gauge(
+            &metrics::BEACON_BLOCK_DELAY_HEAD_IMPORTED_TIME,
             block_delays
                 .set_as_head
-                .unwrap_or_else(|| Duration::from_secs(0)),
+                .unwrap_or_else(|| Duration::from_secs(0))
+                .as_millis() as i64,
         );

         // If the block was enshrined as head too late for attestations to be created for it,
         // log a debug warning and increment a metric.
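+        // (`format_delay`, defined next, renders an optional delay as integer
+        // milliseconds, falling back to "unknown" when a stage was never timestamped.)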
+ let format_delay = |delay: &Option| { + delay.map_or("unknown".to_string(), |d| format!("{}", d.as_millis())) + }; if late_head { - metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL); + metrics::inc_counter(&metrics::BEACON_BLOCK_DELAY_HEAD_SLOT_START_EXCEEDED_TOTAL); debug!( log, "Delayed head block"; "block_root" => ?head_block_root, "proposer_index" => head_block_proposer_index, "slot" => head_block_slot, - "block_delay" => ?block_delay_total, - "observed_delay" => ?block_delays.observed, - "imported_delay" => ?block_delays.imported, - "set_as_head_delay" => ?block_delays.set_as_head, + "total_delay_ms" => block_delay_total.as_millis(), + "observed_delay_ms" => format_delay(&block_delays.observed), + "blob_delay_ms" => format_delay(&block_delays.all_blobs_observed), + "execution_time_ms" => format_delay(&block_delays.execution_time), + "available_delay_ms" => format_delay(&block_delays.available), + "attestable_delay_ms" => format_delay(&block_delays.attestable), + "imported_time_ms" => format_delay(&block_delays.imported), + "set_as_head_time_ms" => format_delay(&block_delays.set_as_head), + ); + } else { + debug!( + log, + "On-time head block"; + "block_root" => ?head_block_root, + "proposer_index" => head_block_proposer_index, + "slot" => head_block_slot, + "total_delay_ms" => block_delay_total.as_millis(), + "observed_delay_ms" => format_delay(&block_delays.observed), + "blob_delay_ms" => format_delay(&block_delays.all_blobs_observed), + "execution_time_ms" => format_delay(&block_delays.execution_time), + "available_delay_ms" => format_delay(&block_delays.available), + "attestable_delay_ms" => format_delay(&block_delays.attestable), + "imported_time_ms" => format_delay(&block_delays.imported), + "set_as_head_time_ms" => format_delay(&block_delays.set_as_head), ); } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 3ef105c6d34..dd0d97b1dae 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -14,6 +14,7 @@ use std::fmt; use std::fmt::Debug; use std::num::NonZeroUsize; use std::sync::Arc; +use std::time::Duration; use task_executor::TaskExecutor; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; @@ -176,8 +177,14 @@ impl DataAvailabilityChecker { return Err(AvailabilityCheckError::KzgNotInitialized); }; - let verified_blobs = KzgVerifiedBlobList::new(Vec::from(blobs).into_iter().flatten(), kzg) - .map_err(AvailabilityCheckError::Kzg)?; + let seen_timestamp = self + .slot_clock + .now_duration() + .ok_or(AvailabilityCheckError::SlotClockError)?; + + let verified_blobs = + KzgVerifiedBlobList::new(Vec::from(blobs).into_iter().flatten(), kzg, seen_timestamp) + .map_err(AvailabilityCheckError::Kzg)?; self.availability_cache .put_kzg_verified_blobs(block_root, verified_blobs) @@ -225,6 +232,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: None, + blobs_available_timestamp: None, })) } } @@ -244,6 +252,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: verified_blobs, + blobs_available_timestamp: None, })) } } @@ -289,6 +298,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: None, + blobs_available_timestamp: None, })) } } @@ -303,6 +313,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: verified_blobs, + blobs_available_timestamp: 
None, })) } } @@ -462,6 +473,8 @@ pub struct AvailableBlock { block_root: Hash256, block: Arc>, blobs: Option>, + /// Timestamp at which this block first became available (UNIX timestamp, time since 1970). + blobs_available_timestamp: Option, } impl AvailableBlock { @@ -474,6 +487,7 @@ impl AvailableBlock { block_root, block, blobs, + blobs_available_timestamp: None, } } @@ -488,6 +502,10 @@ impl AvailableBlock { self.blobs.as_ref() } + pub fn blobs_available_timestamp(&self) -> Option { + self.blobs_available_timestamp + } + pub fn deconstruct( self, ) -> ( @@ -499,6 +517,7 @@ impl AvailableBlock { block_root, block, blobs, + blobs_available_timestamp: _, } = self; (block_root, block, blobs) } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index 0804fe3b9ab..6c524786bfa 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -19,6 +19,7 @@ pub enum Error { ParentStateMissing(Hash256), BlockReplayError(state_processing::BlockReplayError), RebuildingStateCaches(BeaconStateError), + SlotClockError, } pub enum ErrorCategory { @@ -39,7 +40,8 @@ impl Error { | Error::Unexpected | Error::ParentStateMissing(_) | Error::BlockReplayError(_) - | Error::RebuildingStateCaches(_) => ErrorCategory::Internal, + | Error::RebuildingStateCaches(_) + | Error::SlotClockError => ErrorCategory::Internal, Error::Kzg(_) | Error::BlobIndexInvalid(_) | Error::KzgCommitmentMismatch { .. } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index edd981e6ddb..f4c1bc308c0 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -204,6 +204,12 @@ impl PendingComponents { executed_block, } = self; + let blobs_available_timestamp = verified_blobs + .iter() + .flatten() + .map(|blob| blob.seen_timestamp()) + .max(); + let Some(diet_executed_block) = executed_block else { return Err(AvailabilityCheckError::Unexpected); }; @@ -231,6 +237,7 @@ impl PendingComponents { block_root, block, blobs: Some(verified_blobs), + blobs_available_timestamp, }; Ok(Availability::Available(Box::new( AvailableExecutedBlock::new(available_block, import_data, payload_verification_outcome), diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 6cb0b6fd766..df718413cc0 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -847,37 +847,55 @@ lazy_static! { "Number of attester slashings seen", &["src", "validator"] ); +} + +// Prevent recursion limit +lazy_static! 
{
    /*
     * Block Delay Metrics
     */
-    pub static ref BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
-        "beacon_block_observed_slot_start_delay_time",
-        "Duration between the start of the block's slot and the time the block was observed.",
-        // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50]
-        decimal_buckets(-1,2)
-    );
-    pub static ref BEACON_BLOCK_IMPORTED_OBSERVED_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
-        "beacon_block_imported_observed_delay_time",
-        "Duration between the time the block was observed and the time when it was imported.",
-        // [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5]
-        decimal_buckets(-2,0)
-    );
-    pub static ref BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
-        "beacon_block_head_imported_delay_time",
-        "Duration between the time the block was imported and the time when it was set as head.",
-        // [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5]
-        decimal_buckets(-2,-1)
-    );
-    pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
-        "beacon_block_head_slot_start_delay_time",
+    pub static ref BEACON_BLOCK_DELAY_TOTAL: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_total",
         "Duration between the start of the block's slot and the time when it was set as head.",
-        // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50]
-        decimal_buckets(-1,2)
     );
-    pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL: Result<IntCounter> = try_create_int_counter(
-        "beacon_block_head_slot_start_delay_exceeded_total",
-        "Triggered when the duration between the start of the block's slot and the current time \
+
+    pub static ref BEACON_BLOCK_DELAY_OBSERVED_SLOT_START: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_observed_slot_start",
+        "Duration between the start of the block's slot and the time the block was observed.",
+    );
+
+    pub static ref BEACON_BLOB_DELAY_ALL_OBSERVED_SLOT_START: Result<IntGauge> = try_create_int_gauge(
+        "beacon_blob_delay_all_observed_slot_start",
+        "Duration between the start of the block's slot and the time all blobs were observed.",
+    );
+
+    pub static ref BEACON_BLOCK_DELAY_EXECUTION_TIME: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_execution_time",
+        "The duration of verifying the block with the execution layer.",
+    );
+
+    pub static ref BEACON_BLOCK_DELAY_AVAILABLE_SLOT_START: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_available_slot_start",
+        "Duration between the start of the slot and the time the block became available.",
+    );
+    pub static ref BEACON_BLOCK_DELAY_ATTESTABLE_SLOT_START: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_attestable_slot_start",
+        "Duration between the start of the slot and the time the block became attestable.",
+    );
+
+    pub static ref BEACON_BLOCK_DELAY_IMPORTED_TIME: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_imported_time",
+        "Duration between the time the block became available and the time when it was imported.",
+    );
+
+    pub static ref BEACON_BLOCK_DELAY_HEAD_IMPORTED_TIME: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_head_imported_time",
+        "Duration between the time the block was imported and the time when it was set as head.",
+    );
+    pub static ref BEACON_BLOCK_DELAY_HEAD_SLOT_START_EXCEEDED_TOTAL: Result<IntCounter> = try_create_int_counter(
+        "beacon_block_delay_head_slot_start_exceeded_total",
+        "A counter that is incremented when the duration between the start of the block's slot and the current time \
         will result in failed attestations.",
     );
@@ -1130,11 +1148,9 @@ lazy_static!
{ /* * Availability related metrics */ - pub static ref BLOCK_AVAILABILITY_DELAY: Result = try_create_histogram_with_buckets( + pub static ref BLOCK_AVAILABILITY_DELAY: Result = try_create_int_gauge( "block_availability_delay", "Duration between start of the slot and the time at which all components of the block are available.", - // Create a custom bucket list for greater granularity in block delay - Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) ); /* diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 22410976c9d..3e7bf7f561d 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -24,7 +24,7 @@ use payload_status::process_payload_status; pub use payload_status::PayloadStatus; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; -use slog::{crit, debug, error, info, trace, warn, Logger}; +use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::collections::HashMap; use std::fmt; @@ -1331,15 +1331,11 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metrics::NEW_PAYLOAD], ); + let timer = std::time::Instant::now(); + let block_number = new_payload_request.block_number(); let block_hash = new_payload_request.block_hash(); - trace!( - self.log(), - "Issuing engine_newPayload"; - "parent_hash" => ?new_payload_request.parent_hash(), - "block_hash" => ?block_hash, - "block_number" => ?new_payload_request.block_number(), - ); + let parent_hash = new_payload_request.parent_hash(); let result = self .engine() @@ -1347,9 +1343,19 @@ impl ExecutionLayer { .await; if let Ok(status) = &result { + let status_str = <&'static str>::from(status.status); metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_PAYLOAD_STATUS, - &["new_payload", status.status.into()], + &["new_payload", status_str], + ); + debug!( + self.log(), + "Processed engine_newPayload"; + "status" => status_str, + "parent_hash" => ?parent_hash, + "block_hash" => ?block_hash, + "block_number" => block_number, + "response_time_ms" => timer.elapsed().as_millis() ); } *self.inner.last_new_payload_errored.write().await = result.is_err(); diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index d19a41a28fc..d3804fbed8d 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -248,50 +248,41 @@ lazy_static! { /* * Block Delay Metrics */ - pub static ref BEACON_BLOCK_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: Result = try_create_histogram_with_buckets( - "beacon_block_gossip_propagation_verification_delay_time", - "Duration between when the block is received and when it is verified for propagation.", - // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5] - decimal_buckets(-3,-1) - ); - pub static ref BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( - "beacon_block_gossip_slot_start_delay_time", - "Duration between when the block is received and the start of the slot it belongs to.", - // Create a custom bucket list for greater granularity in block delay - Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) - // NOTE: Previous values, which we may want to switch back to. 
-        // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50]
-        //decimal_buckets(-1,2)
-
+    pub static ref BEACON_BLOCK_DELAY_GOSSIP: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_gossip",
+        "The delay from the start of the slot until we first see this block on gossip."
+    );
+    pub static ref BEACON_BLOCK_DELAY_GOSSIP_VERIFICATION: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_gossip_verification",
+        "The delay from the start of the slot until we verify and propagate the block."
     );
-    pub static ref BEACON_BLOCK_LAST_DELAY: Result<IntGauge> = try_create_int_gauge(
-        "beacon_block_last_delay",
-        "Keeps track of the last block's delay from the start of the slot"
+    pub static ref BEACON_BLOCK_DELAY_FULL_VERIFICATION: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_full_verification",
+        "The time it takes to fully verify a beacon block."
     );
-    pub static ref BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL: Result<IntCounter> = try_create_int_counter(
-        "beacon_block_gossip_arrived_late_total",
+    pub static ref BEACON_BLOCK_DELAY_GOSSIP_ARRIVED_LATE_TOTAL: Result<IntCounter> = try_create_int_counter(
+        "beacon_block_delay_gossip_arrived_late_total",
         "Count of times when a gossip block arrived from the network later than the attestation deadline.",
     );

    /*
     * Blob Delay Metrics
     */
-    pub static ref BEACON_BLOB_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
-        "beacon_blob_gossip_propagation_verification_delay_time",
-        "Duration between when the blob is received over gossip and when it is verified for propagation.",
-        // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5]
-        decimal_buckets(-3,-1)
-    );
-    pub static ref BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
-        "beacon_blob_gossip_slot_start_delay_time",
-        "Duration between when the blob is received over gossip and the start of the slot it belongs to.",
-        // Create a custom bucket list for greater granularity in block delay
-        Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0])
-        // NOTE: Previous values, which we may want to switch back to.
-        // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50]
-        //decimal_buckets(-1,2)
+    pub static ref BEACON_BLOB_DELAY_GOSSIP: Result<IntGauge> = try_create_int_gauge(
+        "beacon_blob_delay_gossip",
+        "The delay from the start of the slot until we first see this blob on gossip."
+    );
+
+    pub static ref BEACON_BLOB_DELAY_GOSSIP_VERIFICATION: Result<IntGauge> = try_create_int_gauge(
+        "beacon_blob_delay_gossip_verification",
+        "The delay from the start of the slot until we verify and propagate the blob."
+    );
+    pub static ref BEACON_BLOB_DELAY_FULL_VERIFICATION: Result<IntGauge> = try_create_int_gauge(
+        "beacon_blob_delay_full_verification",
+        "The time it takes to fully verify a beacon blob."
+    );
+
+    pub static ref BEACON_BLOB_RPC_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
+        "beacon_blob_rpc_slot_start_delay_time",
+        "Duration between when a blob is received over rpc and the start of the slot it belongs to.",
@@ -302,10 +293,6 @@ lazy_static!
{ //decimal_buckets(-1,2) ); - pub static ref BEACON_BLOB_LAST_DELAY: Result = try_create_int_gauge( - "beacon_blob_last_delay", - "Keeps track of the last blob's delay from the start of the slot" - ); pub static ref BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL: Result = try_create_int_counter( "beacon_blob_gossip_arrived_late_total", diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index f7bba900372..7b8826bd853 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -27,7 +27,7 @@ use std::fs; use std::io::Write; use std::path::PathBuf; use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ @@ -615,8 +615,7 @@ impl NetworkBeaconProcessor { let commitment = blob_sidecar.kzg_commitment; let delay = get_slot_delay_ms(seen_duration, slot, &self.chain.slot_clock); // Log metrics to track delay from other nodes on the network. - metrics::observe_duration(&metrics::BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME, delay); - metrics::set_gauge(&metrics::BEACON_BLOB_LAST_DELAY, delay.as_millis() as i64); + metrics::set_gauge(&metrics::BEACON_BLOB_DELAY_GOSSIP, delay.as_millis() as i64); match self .chain .verify_blob_sidecar_for_gossip(blob_sidecar, blob_index) @@ -654,9 +653,9 @@ impl NetworkBeaconProcessor { .ok() .and_then(|now| now.checked_sub(seen_duration)) { - metrics::observe_duration( - &metrics::BEACON_BLOB_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME, - duration, + metrics::set_gauge( + &metrics::BEACON_BLOB_DELAY_GOSSIP_VERIFICATION, + duration.as_millis() as i64, ); } self.process_gossip_verified_blob(peer_id, gossip_verified_blob, seen_duration) @@ -747,9 +746,9 @@ impl NetworkBeaconProcessor { self: &Arc, peer_id: PeerId, verified_blob: GossipVerifiedBlob, - // This value is not used presently, but it might come in handy for debugging. _seen_duration: Duration, ) { + let processing_start_time = Instant::now(); let block_root = verified_blob.block_root(); let blob_slot = verified_blob.slot(); let blob_index = verified_blob.id().index; @@ -764,6 +763,11 @@ impl NetworkBeaconProcessor { "block_root" => %block_root ); self.chain.recompute_head_at_current_slot().await; + + metrics::set_gauge( + &metrics::BEACON_BLOB_DELAY_FULL_VERIFICATION, + processing_start_time.elapsed().as_millis() as i64, + ); } Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { trace!( @@ -865,12 +869,9 @@ impl NetworkBeaconProcessor { let block_delay = get_block_delay_ms(seen_duration, block.message(), &self.chain.slot_clock); // Log metrics to track delay from other nodes on the network. 
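        // (`block_delay` runs from the start of the block's slot to the moment the
        // gossip message arrived, so a late proposer and slow propagation both show
        // up here.)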
- metrics::observe_duration( - &metrics::BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME, - block_delay, - ); + metrics::set_gauge( - &metrics::BEACON_BLOCK_LAST_DELAY, + &metrics::BEACON_BLOCK_DELAY_GOSSIP, block_delay.as_millis() as i64, ); @@ -898,7 +899,7 @@ impl NetworkBeaconProcessor { let verified_block = match verification_result { Ok(verified_block) => { if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() { - metrics::inc_counter(&metrics::BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL); + metrics::inc_counter(&metrics::BEACON_BLOCK_DELAY_GOSSIP_ARRIVED_LATE_TOTAL); debug!( self.log, "Gossip block arrived late"; @@ -923,9 +924,9 @@ impl NetworkBeaconProcessor { .ok() .and_then(|now| now.checked_sub(seen_duration)) { - metrics::observe_duration( - &metrics::BEACON_BLOCK_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME, - duration, + metrics::set_gauge( + &metrics::BEACON_BLOCK_DELAY_GOSSIP_VERIFICATION, + duration.as_millis() as i64, ); } @@ -1130,9 +1131,9 @@ impl NetworkBeaconProcessor { verified_block: GossipVerifiedBlock, reprocess_tx: mpsc::Sender, invalid_block_storage: InvalidBlockStorage, - // This value is not used presently, but it might come in handy for debugging. _seen_duration: Duration, ) { + let processing_start_time = Instant::now(); let block = verified_block.block.block_cloned(); let block_root = verified_block.block_root; @@ -1168,6 +1169,11 @@ impl NetworkBeaconProcessor { ); self.chain.recompute_head_at_current_slot().await; + + metrics::set_gauge( + &metrics::BEACON_BLOCK_DELAY_FULL_VERIFICATION, + processing_start_time.elapsed().as_millis() as i64, + ); } Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { trace!( From 05fbbdd840304e34a34216ed8970cc32fc971f1e Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Tue, 23 Apr 2024 09:46:49 -0500 Subject: [PATCH 03/16] Electra: Add Preset, Constants, & Config (#5606) * Electra: Add Presets, Constants, & Config --- consensus/types/src/chain_spec.rs | 93 ++++++++++++++++++++++++++++++- consensus/types/src/eth_spec.rs | 81 ++++++++++++++++++++++++--- 2 files changed, 165 insertions(+), 9 deletions(-) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index e9345ab14ea..e4f27d6873c 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -25,6 +25,7 @@ pub enum Domain { SyncCommittee, ContributionAndProof, SyncCommitteeSelectionProof, + Consolidation, ApplicationMask(ApplicationDomain), } @@ -76,6 +77,7 @@ pub struct ChainSpec { pub genesis_fork_version: [u8; 4], pub bls_withdrawal_prefix_byte: u8, pub eth1_address_withdrawal_prefix_byte: u8, + pub compounding_withdrawal_prefix_byte: u8, /* * Time parameters @@ -108,6 +110,7 @@ pub struct ChainSpec { pub(crate) domain_voluntary_exit: u32, pub(crate) domain_selection_proof: u32, pub(crate) domain_aggregate_and_proof: u32, + pub(crate) domain_consolidation: u32, /* * Fork choice @@ -177,6 +180,15 @@ pub struct ChainSpec { pub electra_fork_version: [u8; 4], /// The Electra fork epoch is optional, with `None` representing "Electra never happens". 
pub electra_fork_epoch: Option, + pub unset_deposit_receipts_start_index: u64, + pub full_exit_request_amount: u64, + pub min_activation_balance: u64, + pub max_effective_balance_electra: u64, + pub min_slashing_penalty_quotient_electra: u64, + pub whistleblower_reward_quotient_electra: u64, + pub max_pending_partials_per_withdrawals_sweep: u64, + pub min_per_epoch_churn_limit_electra: u64, + pub max_per_epoch_activation_exit_churn_limit: u64, /* * Networking @@ -364,7 +376,9 @@ impl ChainSpec { state: &BeaconState, ) -> u64 { let fork_name = state.fork_name_unchecked(); - if fork_name >= ForkName::Merge { + if fork_name >= ForkName::Electra { + self.min_slashing_penalty_quotient_electra + } else if fork_name >= ForkName::Merge { self.min_slashing_penalty_quotient_bellatrix } else if fork_name >= ForkName::Altair { self.min_slashing_penalty_quotient_altair @@ -418,6 +432,7 @@ impl ChainSpec { Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(), Domain::BlsToExecutionChange => self.domain_bls_to_execution_change, + Domain::Consolidation => self.domain_consolidation, } } @@ -602,6 +617,7 @@ impl ChainSpec { genesis_fork_version: [0; 4], bls_withdrawal_prefix_byte: 0x00, eth1_address_withdrawal_prefix_byte: 0x01, + compounding_withdrawal_prefix_byte: 0x02, /* * Time parameters @@ -635,6 +651,7 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, + domain_consolidation: 0x0B, /* * Fork choice @@ -709,6 +726,30 @@ impl ChainSpec { */ electra_fork_version: [0x05, 00, 00, 00], electra_fork_epoch: None, + unset_deposit_receipts_start_index: u64::MAX, + full_exit_request_amount: 0, + min_activation_balance: option_wrapper(|| { + u64::checked_pow(2, 5)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + max_effective_balance_electra: option_wrapper(|| { + u64::checked_pow(2, 11)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + min_slashing_penalty_quotient_electra: u64::checked_pow(2, 12) + .expect("pow does not overflow"), + whistleblower_reward_quotient_electra: u64::checked_pow(2, 12) + .expect("pow does not overflow"), + max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 3) + .expect("pow does not overflow"), + min_per_epoch_churn_limit_electra: option_wrapper(|| { + u64::checked_pow(2, 7)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + max_per_epoch_activation_exit_churn_limit: option_wrapper(|| { + u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), /* * Network specific @@ -874,6 +915,7 @@ impl ChainSpec { genesis_fork_version: [0x00, 0x00, 0x00, 0x64], bls_withdrawal_prefix_byte: 0x00, eth1_address_withdrawal_prefix_byte: 0x01, + compounding_withdrawal_prefix_byte: 0x02, /* * Time parameters @@ -907,6 +949,7 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, + domain_consolidation: 0x0B, /* * Fork choice @@ -983,6 +1026,30 @@ impl ChainSpec { */ electra_fork_version: [0x05, 0x00, 0x00, 0x64], electra_fork_epoch: None, + unset_deposit_receipts_start_index: u64::MAX, + full_exit_request_amount: 0, + min_activation_balance: option_wrapper(|| { + u64::checked_pow(2, 5)?.checked_mul(u64::checked_pow(10, 9)?) 
+ }) + .expect("calculation does not overflow"), + max_effective_balance_electra: option_wrapper(|| { + u64::checked_pow(2, 11)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + min_slashing_penalty_quotient_electra: u64::checked_pow(2, 12) + .expect("pow does not overflow"), + whistleblower_reward_quotient_electra: u64::checked_pow(2, 12) + .expect("pow does not overflow"), + max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 3) + .expect("pow does not overflow"), + min_per_epoch_churn_limit_electra: option_wrapper(|| { + u64::checked_pow(2, 7)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + max_per_epoch_activation_exit_churn_limit: option_wrapper(|| { + u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), /* * Network specific @@ -1206,6 +1273,13 @@ pub struct Config { #[serde(default = "default_blob_sidecar_subnet_count")] #[serde(with = "serde_utils::quoted_u64")] blob_sidecar_subnet_count: u64, + + #[serde(default = "default_min_per_epoch_churn_limit_electra")] + #[serde(with = "serde_utils::quoted_u64")] + min_per_epoch_churn_limit_electra: u64, + #[serde(default = "default_max_per_epoch_activation_exit_churn_limit")] + #[serde(with = "serde_utils::quoted_u64")] + max_per_epoch_activation_exit_churn_limit: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1320,6 +1394,14 @@ const fn default_blob_sidecar_subnet_count() -> u64 { 6 } +const fn default_min_per_epoch_churn_limit_electra() -> u64 { + 128_000_000_000 +} + +const fn default_max_per_epoch_activation_exit_churn_limit() -> u64 { + 256_000_000_000 +} + const fn default_epochs_per_subnet_subscription() -> u64 { 256 } @@ -1496,6 +1578,10 @@ impl Config { max_request_blob_sidecars: spec.max_request_blob_sidecars, min_epochs_for_blob_sidecars_requests: spec.min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count: spec.blob_sidecar_subnet_count, + + min_per_epoch_churn_limit_electra: spec.min_per_epoch_churn_limit_electra, + max_per_epoch_activation_exit_churn_limit: spec + .max_per_epoch_activation_exit_churn_limit, } } @@ -1563,6 +1649,8 @@ impl Config { max_request_blob_sidecars, min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count, + min_per_epoch_churn_limit_electra, + max_per_epoch_activation_exit_churn_limit, } = self; if preset_base != E::spec_name().to_string().as_str() { @@ -1623,6 +1711,8 @@ impl Config { max_request_blob_sidecars, min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count, + min_per_epoch_churn_limit_electra, + max_per_epoch_activation_exit_churn_limit, // We need to re-derive any values that might have changed in the config. 
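        // (For example, `max_blocks_by_root_request` just below is derived from
        // `max_request_blocks` via `max_blocks_by_root_request_common`, so it is
        // recomputed rather than taken verbatim.)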
max_blocks_by_root_request: max_blocks_by_root_request_common(max_request_blocks), @@ -1695,6 +1785,7 @@ mod tests { &spec, ); test_domain(Domain::SyncCommittee, spec.domain_sync_committee, &spec); + test_domain(Domain::Consolidation, spec.domain_consolidation, &spec); // The builder domain index is zero let builder_domain_pre_mask = [0; 4]; diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index a2972b722a2..a700c5e9abb 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -3,8 +3,9 @@ use crate::*; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ - bit::B0, UInt, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U16, U16777216, - U2, U2048, U256, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, U8192, + bit::B0, UInt, U0, U1, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U134217728, + U16, U16777216, U2, U2048, U256, U262144, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, + U8192, }; use ssz_types::typenum::{U17, U9}; use std::fmt::{self, Debug}; @@ -137,7 +138,14 @@ pub trait EthSpec: /* * New in Electra */ - type ElectraPlaceholder: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type PendingBalanceDepositsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type PendingPartialWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type PendingConsolidationsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxConsolidations: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxDepositReceiptsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxAttesterSlashingsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxAttestationsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxWithdrawalRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; fn default_spec() -> ChainSpec; @@ -284,8 +292,44 @@ pub trait EthSpec: Self::KzgCommitmentInclusionProofDepth::to_usize() } - fn electra_placeholder() -> usize { - Self::ElectraPlaceholder::to_usize() + /// Returns the `PENDING_BALANCE_DEPOSITS_LIMIT` constant for this specification. + fn pending_balance_deposits_limit() -> usize { + Self::PendingBalanceDepositsLimit::to_usize() + } + + /// Returns the `PENDING_PARTIAL_WITHDRAWALS_LIMIT` constant for this specification. + fn pending_partial_withdrawals_limit() -> usize { + Self::PendingPartialWithdrawalsLimit::to_usize() + } + + /// Returns the `PENDING_CONSOLIDATIONS_LIMIT` constant for this specification. + fn pending_consolidations_limit() -> usize { + Self::PendingConsolidationsLimit::to_usize() + } + + /// Returns the `MAX_CONSOLIDATIONS` constant for this specification. + fn max_consolidations() -> usize { + Self::MaxConsolidations::to_usize() + } + + /// Returns the `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` constant for this specification. + fn max_deposit_receipts_per_payload() -> usize { + Self::MaxDepositReceiptsPerPayload::to_usize() + } + + /// Returns the `MAX_ATTESTER_SLASHINGS_ELECTRA` constant for this specification. + fn max_attester_slashings_electra() -> usize { + Self::MaxAttesterSlashingsElectra::to_usize() + } + + /// Returns the `MAX_ATTESTATIONS_ELECTRA` constant for this specification. + fn max_attestations_electra() -> usize { + Self::MaxAttestationsElectra::to_usize() + } + + /// Returns the `MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD` constant for this specification. 
+ fn max_withdrawal_requests_per_payload() -> usize { + Self::MaxWithdrawalRequestsPerPayload::to_usize() } } @@ -337,7 +381,14 @@ impl EthSpec for MainnetEthSpec { type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U16; - type ElectraPlaceholder = U16; + type PendingBalanceDepositsLimit = U134217728; + type PendingPartialWithdrawalsLimit = U134217728; + type PendingConsolidationsLimit = U262144; + type MaxConsolidations = U1; + type MaxDepositReceiptsPerPayload = U8192; + type MaxAttesterSlashingsElectra = U1; + type MaxAttestationsElectra = U8; + type MaxWithdrawalRequestsPerPayload = U16; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -390,7 +441,14 @@ impl EthSpec for MinimalEthSpec { MaxBlsToExecutionChanges, MaxBlobsPerBlock, BytesPerFieldElement, - ElectraPlaceholder + PendingBalanceDepositsLimit, + PendingPartialWithdrawalsLimit, + PendingConsolidationsLimit, + MaxConsolidations, + MaxDepositReceiptsPerPayload, + MaxAttesterSlashingsElectra, + MaxAttestationsElectra, + MaxWithdrawalRequestsPerPayload }); fn default_spec() -> ChainSpec { @@ -442,7 +500,14 @@ impl EthSpec for GnosisEthSpec { type BytesPerFieldElement = U32; type BytesPerBlob = U131072; type KzgCommitmentInclusionProofDepth = U17; - type ElectraPlaceholder = U16; + type PendingBalanceDepositsLimit = U134217728; + type PendingPartialWithdrawalsLimit = U134217728; + type PendingConsolidationsLimit = U262144; + type MaxConsolidations = U1; + type MaxDepositReceiptsPerPayload = U8192; + type MaxAttesterSlashingsElectra = U1; + type MaxAttestationsElectra = U8; + type MaxWithdrawalRequestsPerPayload = U16; fn default_spec() -> ChainSpec { ChainSpec::gnosis() From 76460ba838cf806613cb788d489020ae71c11ea0 Mon Sep 17 00:00:00 2001 From: antondlr Date: Tue, 23 Apr 2024 19:58:47 +0200 Subject: [PATCH 04/16] Only `portable` builds (docker) (#5614) * portable builds by default, build multiarch lcli --- .github/workflows/docker.yml | 113 ++++++++++++++++++----------------- lcli/Dockerfile.cross | 6 ++ 2 files changed, 65 insertions(+), 54 deletions(-) create mode 100644 lcli/Dockerfile.cross diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 54b355e631d..d1a8c9f6144 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -15,8 +15,6 @@ concurrency: env: DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - IMAGE_NAME: ${{ github.repository_owner}}/lighthouse - LCLI_IMAGE_NAME: ${{ github.repository_owner }}/lcli # Enable self-hosted runners for the sigp repo only. SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} @@ -49,19 +47,15 @@ jobs: VERSION: ${{ env.VERSION }} VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }} build-docker-single-arch: - name: build-docker-${{ matrix.binary }}${{ matrix.features.version_suffix }} + name: build-docker-${{ matrix.binary }}-${{ matrix.cpu_arch }}${{ matrix.features.version_suffix }} # Use self-hosted runners only on the sigp repo. 
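    # The expression below emulates a ternary: GitHub Actions expressions have no
    # if/else, so `cond && A || B` evaluates to A when `cond` is truthy and to B
    # otherwise, and `fromJson` is needed so that the self-hosted branch yields a
    # label *array* rather than a string. A hedged minimal sketch of the same
    # pattern (`cond` is a placeholder, not a real context value):
    #
    #   runs-on: ${{ cond && fromJson('["self-hosted"]') || 'ubuntu-latest' }}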
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release"]') || 'ubuntu-22.04' }} strategy: matrix: - binary: [aarch64, - aarch64-portable, - x86_64, - x86_64-portable] - features: [ - {version_suffix: "", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc"}, - {version_suffix: "-dev", env: "jemalloc,spec-minimal"} - ] + binary: [lighthouse, + lcli] + cpu_arch: [aarch64, + x86_64] include: - profile: maxperf @@ -69,7 +63,6 @@ jobs: env: VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} - FEATURE_SUFFIX: ${{ matrix.features.version_suffix }} steps: - uses: actions/checkout@v4 - name: Update Rust @@ -78,27 +71,40 @@ jobs: - name: Dockerhub login run: | echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin - - name: Cross build Lighthouse binary + + - name: Sets env vars for Lighthouse + if: startsWith(matrix.binary, 'lighthouse') + run: | + echo "CROSS_FEATURES=gnosis,spec-minimal,slasher-lmdb,jemalloc" >> $GITHUB_ENV + + - name: Set `make` command for lighthouse + if: startsWith(matrix.binary, 'lighthouse') + run: | + echo "MAKE_CMD=build-${{ matrix.cpu_arch }}-portable" >> $GITHUB_ENV + + - name: Set `make` command for lcli + if: startsWith(matrix.binary, 'lcli') + run: | + echo "MAKE_CMD=build-lcli-${{ matrix.cpu_arch }}" >> $GITHUB_ENV + + - name: Cross build binaries run: | cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ matrix.features.env }} make build-${{ matrix.binary }} + env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ env.CROSS_FEATURES }} make ${{ env.MAKE_CMD }} + - name: Make bin dir run: mkdir ./bin - - name: Move cross-built binary into Docker scope (if ARM) - if: startsWith(matrix.binary, 'aarch64') - run: mv ./target/aarch64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin - - name: Move cross-built binary into Docker scope (if x86_64) - if: startsWith(matrix.binary, 'x86_64') - run: mv ./target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin + + - name: Move cross-built binary into Docker scope + run: mv ./target/${{ matrix.cpu_arch }}-unknown-linux-gnu/${{ matrix.profile }}/${{ matrix.binary }} ./bin + - name: Map aarch64 to arm64 short arch - if: startsWith(matrix.binary, 'aarch64') + if: startsWith(matrix.cpu_arch, 'aarch64') run: echo "SHORT_ARCH=arm64" >> $GITHUB_ENV + - name: Map x86_64 to amd64 short arch - if: startsWith(matrix.binary, 'x86_64') + if: startsWith(matrix.cpu_arch, 'x86_64') run: echo "SHORT_ARCH=amd64" >> $GITHUB_ENV; - - name: Set modernity suffix - if: endsWith(matrix.binary, '-portable') != true - run: echo "MODERNITY_SUFFIX=-modern" >> $GITHUB_ENV; - name: Install QEMU if: env.SELF_HOSTED_RUNNERS == 'false' @@ -108,22 +114,41 @@ jobs: if: env.SELF_HOSTED_RUNNERS == 'false' uses: docker/setup-buildx-action@v3 - - name: Build and push + - name: Build and push (Lighthouse) + if: startsWith(matrix.binary, 'lighthouse') uses: docker/build-push-action@v5 with: file: ./Dockerfile.cross context: . 
platforms: linux/${{ env.SHORT_ARCH }} push: true - tags: ${{ env.IMAGE_NAME }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}${{ env.MODERNITY_SUFFIX }}${{ env.FEATURE_SUFFIX }} + tags: | + ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }} + ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-dev + ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-modern + ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-modern-dev + + - name: Build and push (lcli) + if: startsWith(matrix.binary, 'lcli') + uses: docker/build-push-action@v5 + with: + file: ./lcli/Dockerfile.cross + context: . + platforms: linux/${{ env.SHORT_ARCH }} + push: true + + tags: | + ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }} + build-docker-multiarch: - name: build-docker-multiarch${{ matrix.modernity }} + name: build-docker-${{ matrix.binary }}-multiarch runs-on: ubuntu-22.04 - needs: [build-docker-single-arch, extract-version] strategy: matrix: - modernity: ["", "-modern"] + binary: [lighthouse, + lcli] + needs: [build-docker-single-arch, extract-version] env: VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} @@ -135,29 +160,9 @@ jobs: run: | echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin - - name: Create and push multiarch manifest + - name: Create and push multiarch manifests run: | - docker buildx imagetools create -t ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} \ - ${IMAGE_NAME}:${VERSION}-arm64${VERSION_SUFFIX}${{ matrix.modernity }} \ - ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }}; + docker buildx imagetools create -t ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}${VERSION_SUFFIX} \ + ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \ + ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX}; - build-docker-lcli: - runs-on: ubuntu-22.04 - needs: [extract-version] - env: - VERSION: ${{ needs.extract-version.outputs.VERSION }} - VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} - steps: - - uses: actions/checkout@v4 - - name: Dockerhub login - run: | - echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin - - name: Build lcli and push - uses: docker/build-push-action@v5 - with: - build-args: | - FEATURES=portable - context: . - push: true - file: ./lcli/Dockerfile - tags: ${{ env.LCLI_IMAGE_NAME }}:${{ env.VERSION }}${{ env.VERSION_SUFFIX }} diff --git a/lcli/Dockerfile.cross b/lcli/Dockerfile.cross new file mode 100644 index 00000000000..979688c9cf6 --- /dev/null +++ b/lcli/Dockerfile.cross @@ -0,0 +1,6 @@ +# This image is meant to enable cross-architecture builds. +# It assumes the lcli binary has already been +# compiled for `$TARGETPLATFORM` and moved to `./bin`. 
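+#
+# A hedged usage sketch (the profile directory and image tag below are
+# illustrative; the CI workflow above is authoritative):
+#
+#   make build-lcli-x86_64
+#   mkdir -p bin && mv target/x86_64-unknown-linux-gnu/release/lcli ./bin
+#   docker buildx build --platform linux/amd64 -f lcli/Dockerfile.cross -t lcli:local .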
+FROM --platform=$TARGETPLATFORM ubuntu:22.04 +RUN apt update && apt -y upgrade && apt clean && rm -rf /var/lib/apt/lists/* +COPY ./bin/lcli /usr/local/bin/lcli From 4cad1fcbbe1315e2d7d6171a797db7ce4e806f72 Mon Sep 17 00:00:00 2001 From: antondlr Date: Tue, 23 Apr 2024 22:12:55 +0200 Subject: [PATCH 05/16] Add missing lcli targets in makefile (#5633) * Add missing lcli targets to Makefile --- Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Makefile b/Makefile index 4b2d0f6c5d5..4072ab1e6d8 100644 --- a/Makefile +++ b/Makefile @@ -82,6 +82,11 @@ build-aarch64: build-aarch64-portable: cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked +build-lcli-x86_64: + cross build --bin lcli --target x86_64-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked +build-lcli-aarch64: + cross build --bin lcli --target aarch64-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked + # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary cp $(1)/lighthouse $(BIN_DIR)/lighthouse From 61962898e207e03bb2826f44fd994986c9bfaa20 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 24 Apr 2024 11:22:36 +1000 Subject: [PATCH 06/16] In-memory tree states (#5533) * Consensus changes * EF tests * lcli * common and watch * account manager * cargo * fork choice * promise cache * beacon chain * interop genesis * http api * lighthouse * op pool * beacon chain misc * parallel state cache * store * fix issues in store * IT COMPILES * Remove some unnecessary module qualification * Revert Arced pubkey optimization (#5536) * Merge remote-tracking branch 'origin/unstable' into tree-states-memory * Fix caching, rebasing and some tests * Remove unused deps * Merge remote-tracking branch 'origin/unstable' into tree-states-memory * Small cleanups * Revert shuffling cache/promise cache changes * Fix state advance bugs * Fix shuffling tests * Remove some resolved FIXMEs * Remove StateProcessingStrategy * Optimise withdrawals calculation * Don't reorg if state cache is missed * Remove inconsistent state func * Fix beta compiler * Rebase early, rebase often * Fix state caching behaviour * Update to milhouse release * Fix on-disk consensus context format * Merge remote-tracking branch 'origin/unstable' into tree-states-memory * Squashed commit of the following: commit 3a16649023e2d6d40d704dbe958cec712544aba9 Author: Michael Sproul Date: Thu Apr 18 14:26:09 2024 +1000 Fix on-disk consensus context format * Keep indexed attestations, thanks Sean * Merge branch 'on-disk-consensus-context' into tree-states-memory * Merge branch 'unstable' into tree-states-memory * Address half of Sean's review * More simplifications from Sean's review * Cache state after get_advanced_hot_state --- Cargo.lock | 86 +- Cargo.toml | 12 +- beacon_node/beacon_chain/Cargo.toml | 78 +- beacon_node/beacon_chain/src/beacon_chain.rs | 325 +++----- .../beacon_chain/src/beacon_proposer_cache.rs | 8 +- .../beacon_chain/src/beacon_snapshot.rs | 25 +- .../beacon_chain/src/blob_verification.rs | 125 +-- .../beacon_chain/src/block_verification.rs | 152 ++-- beacon_node/beacon_chain/src/builder.rs | 19 +- .../beacon_chain/src/canonical_head.rs | 114 +-- beacon_node/beacon_chain/src/chain_config.rs | 3 - .../overflow_lru_cache.rs | 2 +- .../state_lru_cache.rs | 3 +- beacon_node/beacon_chain/src/errors.rs | 4 + beacon_node/beacon_chain/src/eth1_chain.rs | 29 +- 
beacon_node/beacon_chain/src/fork_revert.rs | 3 +- beacon_node/beacon_chain/src/lib.rs | 1 - beacon_node/beacon_chain/src/metrics.rs | 17 +- .../beacon_chain/src/shuffling_cache.rs | 2 +- .../beacon_chain/src/state_advance_timer.rs | 185 ++--- .../src/sync_committee_rewards.rs | 42 +- beacon_node/beacon_chain/src/test_utils.rs | 17 +- .../src/validator_pubkey_cache.rs | 5 +- .../beacon_chain/tests/block_verification.rs | 5 +- .../tests/payload_invalidation.rs | 4 +- beacon_node/beacon_chain/tests/rewards.rs | 28 +- beacon_node/beacon_chain/tests/store_tests.rs | 72 +- beacon_node/genesis/src/interop.rs | 10 +- beacon_node/http_api/src/attester_duties.rs | 7 +- .../http_api/src/block_packing_efficiency.rs | 2 +- beacon_node/http_api/src/lib.rs | 14 +- beacon_node/http_api/src/proposer_duties.rs | 5 +- beacon_node/http_api/src/state_id.rs | 5 +- beacon_node/http_api/tests/fork_tests.rs | 5 +- beacon_node/http_api/tests/tests.rs | 24 +- beacon_node/operation_pool/src/lib.rs | 8 +- beacon_node/src/config.rs | 7 +- beacon_node/store/Cargo.toml | 4 + beacon_node/store/src/chunked_vector.rs | 68 +- beacon_node/store/src/config.rs | 4 + beacon_node/store/src/errors.rs | 16 +- beacon_node/store/src/hot_cold_store.rs | 333 +++++--- beacon_node/store/src/impls/beacon_state.rs | 4 +- beacon_node/store/src/iter.rs | 10 +- beacon_node/store/src/lib.rs | 1 + beacon_node/store/src/partial_beacon_state.rs | 33 +- beacon_node/store/src/reconstruct.rs | 3 +- beacon_node/store/src/state_cache.rs | 303 +++++++ common/task_executor/Cargo.toml | 1 + common/task_executor/src/test_utils.rs | 3 +- consensus/state_processing/src/all_caches.rs | 6 +- .../state_processing/src/block_replayer.rs | 100 +-- .../src/common/initiate_validator_exit.rs | 3 +- consensus/state_processing/src/genesis.rs | 6 +- consensus/state_processing/src/lib.rs | 2 +- .../src/per_block_processing.rs | 6 +- .../src/per_block_processing/errors.rs | 7 + .../process_operations.rs | 7 +- .../src/per_block_processing/tests.rs | 7 +- .../altair/participation_flag_updates.rs | 9 +- .../capella/historical_summaries_update.rs | 3 + .../effective_balance_updates.rs | 6 +- .../epoch_processing_summary.rs | 16 +- .../src/per_epoch_processing/errors.rs | 9 +- .../historical_roots_update.rs | 2 +- .../src/per_epoch_processing/resets.rs | 4 +- .../src/per_epoch_processing/single_pass.rs | 66 +- .../state_processing/src/upgrade/altair.rs | 9 +- .../state_processing/src/upgrade/capella.rs | 5 +- .../state_processing/src/upgrade/deneb.rs | 1 - .../state_processing/src/upgrade/electra.rs | 1 - .../state_processing/src/upgrade/merge.rs | 1 - consensus/types/Cargo.toml | 4 +- consensus/types/benches/benches.rs | 46 +- consensus/types/examples/clone_state.rs | 51 -- consensus/types/examples/ssz_encode_state.rs | 54 -- consensus/types/examples/tree_hash_state.rs | 56 -- consensus/types/src/beacon_state.rs | 777 ++++++++++++------ .../types/src/beacon_state/clone_config.rs | 47 -- .../types/src/beacon_state/committee_cache.rs | 7 +- .../src/beacon_state/committee_cache/tests.rs | 2 +- .../types/src/beacon_state/exit_cache.rs | 11 +- consensus/types/src/beacon_state/iter.rs | 4 +- .../types/src/beacon_state/pubkey_cache.rs | 16 +- .../types/src/beacon_state/slashings_cache.rs | 6 +- consensus/types/src/beacon_state/tests.rs | 240 +----- .../types/src/beacon_state/tree_hash_cache.rs | 645 --------------- consensus/types/src/blob_sidecar.rs | 3 +- .../types/src/execution_payload_header.rs | 24 +- consensus/types/src/historical_batch.rs | 6 +- consensus/types/src/lib.rs 
| 6 +- consensus/types/src/light_client_bootstrap.rs | 7 +- .../types/src/light_client_finality_update.rs | 1 + consensus/types/src/light_client_update.rs | 7 + consensus/types/src/test_utils/test_random.rs | 2 +- consensus/types/src/tree_hash_impls.rs | 165 ---- lcli/src/new_testnet.rs | 2 +- lcli/src/replace_state_pubkeys.rs | 7 +- lcli/src/skip_slots.rs | 4 +- lcli/src/transition_blocks.rs | 7 +- lighthouse/tests/beacon_node.rs | 20 - testing/ef_tests/src/case_result.rs | 3 + .../src/cases/merkle_proof_validity.rs | 5 +- testing/ef_tests/src/cases/sanity_blocks.rs | 4 +- testing/ef_tests/src/cases/ssz_generic.rs | 28 +- testing/ef_tests/src/cases/ssz_static.rs | 1 - testing/ef_tests/src/cases/transition.rs | 3 +- testing/state_transition_vectors/src/exit.rs | 5 +- 108 files changed, 2032 insertions(+), 2756 deletions(-) create mode 100644 beacon_node/store/src/state_cache.rs delete mode 100644 consensus/types/examples/clone_state.rs delete mode 100644 consensus/types/examples/ssz_encode_state.rs delete mode 100644 consensus/types/examples/tree_hash_state.rs delete mode 100644 consensus/types/src/beacon_state/clone_config.rs delete mode 100644 consensus/types/src/beacon_state/tree_hash_cache.rs delete mode 100644 consensus/types/src/tree_hash_impls.rs diff --git a/Cargo.lock b/Cargo.lock index 4e3557c7efb..d63773d9a94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -361,6 +361,15 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +[[package]] +name = "archery" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a8da9bc4c4053ee067669762bcaeea6e241841295a2b6c948312dad6ef4cc02" +dependencies = [ + "static_assertions", +] + [[package]] name = "ark-ff" version = "0.3.0" @@ -2810,13 +2819,13 @@ dependencies = [ [[package]] name = "ethereum_hashing" -version = "1.0.0-beta.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233dc6f434ce680dbabf4451ee3380cec46cb3c45d66660445a435619710dd35" +checksum = "6ea7b408432c13f71af01197b1d3d0069c48a27bfcfbe72a81fc346e47f6defb" dependencies = [ "cpufeatures", "lazy_static", - "ring 0.16.20", + "ring 0.17.8", "sha2 0.10.8", ] @@ -5456,6 +5465,29 @@ dependencies = [ "quote", ] +[[package]] +name = "milhouse" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3826d3602a3674b07e080ce1982350e454ec253d73f156bd927ac1b652293f4d" +dependencies = [ + "arbitrary", + "derivative", + "ethereum-types 0.14.1", + "ethereum_hashing", + "ethereum_ssz", + "ethereum_ssz_derive", + "itertools", + "parking_lot 0.12.1", + "rayon", + "serde", + "smallvec", + "tree_hash", + "triomphe", + "typenum", + "vec_map", +] + [[package]] name = "mime" version = "0.3.17" @@ -7109,6 +7141,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "rpds" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ef5140bcb576bfd6d56cd2de709a7d17851ac1f3805e67fe9d99e42a11821f" +dependencies = [ + "archery", +] + [[package]] name = "rtnetlink" version = "0.10.1" @@ -8123,9 +8164,9 @@ dependencies = [ [[package]] name = "ssz_types" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "382939886cb24ee8ac885d09116a60f6262d827c7a9e36012b4f6d3d0116d0b3" +checksum = "625b20de2d4b3891e6972f4ce5061cb11bd52b3479270c4b177c134b571194a9" dependencies = [ 
"arbitrary", "derivative", @@ -8139,6 +8180,12 @@ dependencies = [ "typenum", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "state_processing" version = "0.2.0" @@ -8189,6 +8236,7 @@ name = "store" version = "0.2.0" dependencies = [ "beacon_chain", + "bls", "db-key", "directory", "ethereum_ssz", @@ -8197,11 +8245,14 @@ dependencies = [ "lazy_static", "leveldb", "lighthouse_metrics", + "logging", "lru", "parking_lot 0.12.1", + "safe_arith", "serde", "slog", "sloggers", + "smallvec", "state_processing", "strum", "tempfile", @@ -8267,9 +8318,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "superstruct" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5" +checksum = "6f4e1f478a7728f8855d7e620e9a152cf8932c6614f86564c886f9b8141f3201" dependencies = [ "darling 0.13.4", "itertools", @@ -8415,6 +8466,7 @@ dependencies = [ "futures", "lazy_static", "lighthouse_metrics", + "logging", "slog", "sloggers", "tokio", @@ -8955,9 +9007,9 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c998ac5fe2b07c025444bdd522e6258110b63861c6698eedc610c071980238d" +checksum = "134d6b24a5b829f30b5ee7de05ba7384557f5f6b00e29409cdf2392f93201bfa" dependencies = [ "ethereum-types 0.14.1", "ethereum_hashing", @@ -8966,9 +9018,9 @@ dependencies = [ [[package]] name = "tree_hash_derive" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84303a9c7cda5f085a3ed9cd241d1e95e04d88aab1d679b02f212e653537ba86" +checksum = "9ce7bccc538359a213436af7bc95804bdbf1c2a21d80e22953cbe9e096837ff1" dependencies = [ "darling 0.13.4", "quote", @@ -8985,6 +9037,16 @@ dependencies = [ "rlp", ] +[[package]] +name = "triomphe" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee8098afad3fb0c54a9007aab6804558410503ad676d4633f9c2559a00ac0f" +dependencies = [ + "serde", + "stable_deref_trait", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -9024,12 +9086,14 @@ dependencies = [ "maplit", "merkle_proof", "metastruct", + "milhouse", "parking_lot 0.12.1", "paste", "rand", "rand_xorshift", "rayon", "regex", + "rpds", "rusqlite", "safe_arith", "serde", diff --git a/Cargo.toml b/Cargo.toml index 69f6835f575..296dadc1f66 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -114,7 +114,7 @@ discv5 = { version = "0.4.1", features = ["libp2p"] } env_logger = "0.9" error-chain = "0.12" ethereum-types = "0.14" -ethereum_hashing = "1.0.0-beta.2" +ethereum_hashing = "0.6.0" ethereum_serde_utils = "0.5.2" ethereum_ssz = "0.5" ethereum_ssz_derive = "0.5" @@ -132,6 +132,7 @@ libsecp256k1 = "0.7" log = "0.4" lru = "0.12" maplit = "1" +milhouse = "0.1" num_cpus = "1" parking_lot = "0.12" paste = "1" @@ -144,6 +145,7 @@ rayon = "1.7" regex = "1" reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } ring = "0.16" +rpds = "0.11" rusqlite = { version = "0.28", features = ["bundled"] } serde = { version = "1", features = ["derive"] } serde_json = "1" @@ -156,9 +158,9 @@ slog-term = "2" sloggers = { version = "2", 
features = ["json"] } smallvec = "1.11.2" snap = "1" -ssz_types = "0.5" +ssz_types = "0.6" strum = { version = "0.24", features = ["derive"] } -superstruct = "0.6" +superstruct = "0.7" syn = "1" sysinfo = "0.26" tempfile = "3" @@ -170,8 +172,8 @@ tracing-appender = "0.2" tracing-core = "0.1" tracing-log = "0.2" tracing-subscriber = { version = "0.3", features = ["env-filter"] } -tree_hash = "0.5" -tree_hash_derive = "0.5" +tree_hash = "0.6" +tree_hash_derive = "0.6" url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } warp = { version = "0.3.7", default-features = false, features = ["tls"] } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 43c2c896f71..9c7c7febc5f 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -19,58 +19,58 @@ environment = { workspace = true } serde_json = { workspace = true } [dependencies] -serde_json = { workspace = true } +bitvec = { workspace = true } +bls = { workspace = true } +derivative = { workspace = true } +eth1 = { workspace = true } +eth2 = { workspace = true } eth2_network_config = { workspace = true } -merkle_proof = { workspace = true } -store = { workspace = true } -parking_lot = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +execution_layer = { workspace = true } +fork_choice = { workspace = true } +futures = { workspace = true } +genesis = { workspace = true } +hex = { workspace = true } +int_to_bytes = { workspace = true } +itertools = { workspace = true } +kzg = { workspace = true } lazy_static = { workspace = true } -smallvec = { workspace = true } lighthouse_metrics = { workspace = true } +logging = { workspace = true } +lru = { workspace = true } +merkle_proof = { workspace = true } +oneshot_broadcast = { path = "../../common/oneshot_broadcast/" } operation_pool = { workspace = true } +parking_lot = { workspace = true } +proto_array = { workspace = true } +rand = { workspace = true } rayon = { workspace = true } +safe_arith = { workspace = true } +sensitive_url = { workspace = true } serde = { workspace = true } -ethereum_serde_utils = { workspace = true } +serde_json = { workspace = true } +slasher = { workspace = true } slog = { workspace = true } +slog-async = { workspace = true } +slog-term = { workspace = true } sloggers = { workspace = true } slot_clock = { workspace = true } -ethereum_hashing = { workspace = true } -ethereum_ssz = { workspace = true } +smallvec = { workspace = true } ssz_types = { workspace = true } -ethereum_ssz_derive = { workspace = true } state_processing = { workspace = true } -tree_hash_derive = { workspace = true } -tree_hash = { workspace = true } -types = { workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } -eth1 = { workspace = true } -futures = { workspace = true } -genesis = { workspace = true } -int_to_bytes = { workspace = true } -rand = { workspace = true } -proto_array = { workspace = true } -lru = { workspace = true } -tempfile = { workspace = true } -bitvec = { workspace = true } -bls = { workspace = true } -kzg = { workspace = true } -safe_arith = { workspace = true } -fork_choice = { workspace = true } -task_executor = { workspace = true } -derivative = { workspace = true } -itertools = { workspace = true } -slasher = { workspace = true } -eth2 = { workspace = true } +store = { workspace = true } strum = { workspace = true } 
-logging = { workspace = true } -execution_layer = { workspace = true } -sensitive_url = { workspace = true } superstruct = { workspace = true } -hex = { workspace = true } -oneshot_broadcast = { path = "../../common/oneshot_broadcast/" } -slog-term = { workspace = true } -slog-async = { workspace = true } +task_executor = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } +types = { workspace = true } [[test]] name = "beacon_chain_tests" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c59c5e8ed10..c65dfdb449f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -58,7 +58,6 @@ use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_B use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; -use crate::snapshot_cache::{BlockProductionPreState, SnapshotCache}; use crate::sync_committee_verification::{ Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution, }; @@ -103,8 +102,7 @@ use state_processing::{ }, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, - BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, StateProcessingStrategy, - VerifyBlockRoot, VerifyOperation, + BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, VerifyBlockRoot, VerifyOperation, }; use std::borrow::Cow; use std::cmp::Ordering; @@ -130,9 +128,6 @@ pub type ForkChoiceError = fork_choice::Error; /// Alias to appease clippy. type HashBlockTuple = (Hash256, RpcBlock); -/// The time-out before failure during an operation to take a read/write RwLock on the block -/// processing cache. -pub const BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); /// The time-out before failure during an operation to take a read/write RwLock on the /// attestation cache. pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); @@ -176,6 +171,7 @@ pub const INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON: &str = "Finalized merge transition block is invalid."; /// Defines the behaviour when a block/block-root for a skipped slot is requested. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum WhenSlotSkipped { /// If the slot is a skip slot, return `None`. /// @@ -450,8 +446,6 @@ pub struct BeaconChain { pub event_handler: Option>, /// Used to track the heads of the beacon chain. pub(crate) head_tracker: Arc, - /// A cache dedicated to block processing. - pub(crate) snapshot_cache: TimeoutRwLock>, /// Caches the attester shuffling for a given epoch and shuffling key root. pub shuffling_cache: TimeoutRwLock, /// A cache of eth1 deposit data at epoch boundaries for deposit finalization @@ -492,11 +486,6 @@ pub struct BeaconChain { pub data_availability_checker: Arc>, /// The KZG trusted setup used by this chain. pub kzg: Option>, - /// State with complete tree hash cache, ready for block production. - /// - /// NB: We can delete this once we have tree-states. 
- #[allow(clippy::type_complexity)] - pub block_production_state: Arc)>>>, } pub enum BeaconBlockResponseWrapper { @@ -773,7 +762,7 @@ impl BeaconChain { let iter = self.store.forwards_block_roots_iterator( start_slot, - local_head.beacon_state.clone_with(CloneConfig::none()), + local_head.beacon_state.clone(), local_head.beacon_block_root, &self.spec, )?; @@ -803,12 +792,7 @@ impl BeaconChain { let iter = self.store.forwards_block_roots_iterator_until( start_slot, end_slot, - || { - Ok(( - head.beacon_state.clone_with_only_committee_caches(), - head.beacon_block_root, - )) - }, + || Ok((head.beacon_state.clone(), head.beacon_block_root)), &self.spec, )?; Ok(iter @@ -879,7 +863,7 @@ impl BeaconChain { let iter = self.store.forwards_state_roots_iterator( start_slot, local_head.beacon_state_root(), - local_head.beacon_state.clone_with(CloneConfig::none()), + local_head.beacon_state.clone(), &self.spec, )?; @@ -900,12 +884,7 @@ impl BeaconChain { let iter = self.store.forwards_state_roots_iterator_until( start_slot, end_slot, - || { - Ok(( - head.beacon_state.clone_with_only_committee_caches(), - head.beacon_state_root(), - )) - }, + || Ok((head.beacon_state.clone(), head.beacon_state_root())), &self.spec, )?; Ok(iter @@ -3561,29 +3540,6 @@ impl BeaconChain { }); } - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(Error::SnapshotCacheLockTimeout) - .map(|mut snapshot_cache| { - snapshot_cache.insert( - BeaconSnapshot { - beacon_state: state, - beacon_block: signed_block.clone(), - beacon_block_root: block_root, - }, - None, - &self.spec, - ) - }) - .unwrap_or_else(|e| { - error!( - self.log, - "Failed to insert snapshot"; - "error" => ?e, - "task" => "process block" - ); - }); - self.head_tracker .register_block(block_root, parent_root, slot); @@ -4145,22 +4101,22 @@ impl BeaconChain { self.wait_for_fork_choice_before_block_production(slot)?; drop(fork_choice_timer); - // Producing a block requires the tree hash cache, so clone a full state corresponding to - // the head from the snapshot cache. Unfortunately we can't move the snapshot out of the - // cache (which would be fast), because we need to re-process the block after it has been - // signed. If we miss the cache or we're producing a block that conflicts with the head, - // fall back to getting the head from `slot - 1`. let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); // Atomically read some values from the head whilst avoiding holding cached head `Arc` any // longer than necessary. - let (head_slot, head_block_root) = { + let (head_slot, head_block_root, head_state_root) = { let head = self.canonical_head.cached_head(); - (head.head_slot(), head.head_block_root()) + ( + head.head_slot(), + head.head_block_root(), + head.head_state_root(), + ) }; let (state, state_root_opt) = if head_slot < slot { // Attempt an aggressive re-org if configured and the conditions are right. - if let Some(re_org_state) = self.get_state_for_re_org(slot, head_slot, head_block_root) + if let Some((re_org_state, re_org_state_root)) = + self.get_state_for_re_org(slot, head_slot, head_block_root) { info!( self.log, @@ -4168,37 +4124,16 @@ impl BeaconChain { "slot" => slot, "head_to_reorg" => %head_block_root, ); - (re_org_state.pre_state, re_org_state.state_root) - } - // Normal case: proposing a block atop the current head using the cache. 
- else if let Some((_, cached_state)) = - self.get_state_from_block_production_cache(head_block_root) - { - (cached_state.pre_state, cached_state.state_root) - } - // Fall back to a direct read of the snapshot cache. - else if let Some(pre_state) = - self.get_state_from_snapshot_cache_for_block_production(head_block_root) - { - warn!( - self.log, - "Block production cache miss"; - "message" => "falling back to snapshot cache clone", - "slot" => slot - ); - (pre_state.pre_state, pre_state.state_root) + (re_org_state, Some(re_org_state_root)) } else { - warn!( - self.log, - "Block production cache miss"; - "message" => "this block is more likely to be orphaned", - "slot" => slot, - ); - let state = self - .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) - .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - - (state, None) + // Fetch the head state advanced through to `slot`, which should be present in the + // state cache thanks to the state advance timer. + let (state_root, state) = self + .store + .get_advanced_hot_state(head_block_root, slot, head_state_root) + .map_err(BlockProductionError::FailedToLoadState)? + .ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; + (state, Some(state_root)) } } else { warn!( @@ -4219,40 +4154,6 @@ impl BeaconChain { Ok((state, state_root_opt)) } - /// Get the state cached for block production *if* it matches `head_block_root`. - /// - /// This will clear the cache regardless of whether the block root matches, so only call this if - /// you think the `head_block_root` is likely to match! - fn get_state_from_block_production_cache( - &self, - head_block_root: Hash256, - ) -> Option<(Hash256, BlockProductionPreState)> { - // Take care to drop the lock as quickly as possible. - let mut lock = self.block_production_state.lock(); - let result = lock - .take() - .filter(|(cached_block_root, _)| *cached_block_root == head_block_root); - drop(lock); - result - } - - /// Get a state for block production from the snapshot cache. - fn get_state_from_snapshot_cache_for_block_production( - &self, - head_block_root: Hash256, - ) -> Option> { - if let Some(lock) = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - { - let result = lock.get_state_for_block_production(head_block_root); - drop(lock); - result - } else { - None - } - } - /// Fetch the beacon state to use for producing a block if a 1-slot proposer re-org is viable. /// /// This function will return `None` if proposer re-orgs are disabled. @@ -4261,7 +4162,7 @@ impl BeaconChain { slot: Slot, head_slot: Slot, canonical_head: Hash256, - ) -> Option> { + ) -> Option<(BeaconState, Hash256)> { let re_org_head_threshold = self.config.re_org_head_threshold?; let re_org_parent_threshold = self.config.re_org_parent_threshold?; @@ -4345,26 +4246,14 @@ impl BeaconChain { drop(proposer_head_timer); let re_org_parent_block = proposer_head.parent_node.root; - // Only attempt a re-org if we hit the block production cache or snapshot cache. 
- let pre_state = self - .get_state_from_block_production_cache(re_org_parent_block) - .map(|(_, state)| state) + let (state_root, state) = self + .store + .get_advanced_hot_state_from_cache(re_org_parent_block, slot) .or_else(|| { warn!( - self.log, - "Block production cache miss"; - "message" => "falling back to snapshot cache during re-org", - "slot" => slot, - "block_root" => ?re_org_parent_block - ); - self.get_state_from_snapshot_cache_for_block_production(re_org_parent_block) - }) - .or_else(|| { - debug!( self.log, "Not attempting re-org"; - "reason" => "missed snapshot cache", - "parent_block" => ?re_org_parent_block, + "reason" => "no state in cache" ); None })?; @@ -4378,7 +4267,7 @@ impl BeaconChain { "threshold_weight" => proposer_head.re_org_head_weight_threshold ); - Some(pre_state) + Some((state, state_root)) } /// Get the proposer index and `prev_randao` value for a proposal at slot `proposal_slot`. @@ -4506,20 +4395,6 @@ impl BeaconChain { let (unadvanced_state, unadvanced_state_root) = if cached_head.head_block_root() == parent_block_root { (Cow::Borrowed(head_state), cached_head.head_state_root()) - } else if let Some(snapshot) = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(Error::SnapshotCacheLockTimeout)? - .get_cloned(parent_block_root, CloneConfig::none()) - { - debug!( - self.log, - "Hit snapshot cache during withdrawals calculation"; - "slot" => proposal_slot, - "parent_block_root" => ?parent_block_root, - ); - let state_root = snapshot.beacon_state_root(); - (Cow::Owned(snapshot.beacon_state), state_root) } else { info!( self.log, @@ -4530,10 +4405,11 @@ impl BeaconChain { let block = self .get_blinded_block(&parent_block_root)? .ok_or(Error::MissingBeaconBlock(parent_block_root))?; - let state = self - .get_state(&block.state_root(), Some(block.slot()))? + let (state_root, state) = self + .store + .get_advanced_hot_state(parent_block_root, proposal_slot, block.state_root())? .ok_or(Error::MissingBeaconState(block.state_root()))?; - (Cow::Owned(state), block.state_root()) + (Cow::Owned(state), state_root) }; // Parent state epoch is the same as the proposal, we don't need to advance because the @@ -4910,6 +4786,7 @@ impl BeaconChain { drop(slot_timer); state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + state.apply_pending_mutations()?; let parent_root = if state.slot() > 0 { *state @@ -5383,7 +5260,6 @@ impl BeaconChain { &mut state, &block, signature_strategy, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &self.spec, @@ -6308,6 +6184,17 @@ impl BeaconChain { "head_block_root" => head_block_root.to_string(), ); + // If the block's state will be so far ahead of `shuffling_epoch` that even its + // previous epoch committee cache will be too new, then error. Callers of this function + // shouldn't be requesting such old shufflings for this `head_block_root`. + let head_block_epoch = head_block.slot.epoch(T::EthSpec::slots_per_epoch()); + if head_block_epoch > shuffling_epoch + 1 { + return Err(Error::InvalidStateForShuffling { + state_epoch: head_block_epoch, + shuffling_epoch, + }); + } + let state_read_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES); @@ -6318,71 +6205,52 @@ impl BeaconChain { // to copy the head is liable to race-conditions. 
let head_state_opt = self.with_head(|head| { if head.beacon_block_root == head_block_root { - Ok(Some(( - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), - head.beacon_state_root(), - ))) + Ok(Some((head.beacon_state.clone(), head.beacon_state_root()))) } else { Ok::<_, Error>(None) } })?; + // Compute the `target_slot` to advance the block's state to. + // + // Since there's a one-epoch look-ahead on the attester shuffling, it suffices to + // only advance into the first slot of the epoch prior to `shuffling_epoch`. + // + // If the `head_block` is already ahead of that slot, then we should load the state + // at that slot, as we've determined above that the `shuffling_epoch` cache will + // not be too far in the past. + let target_slot = std::cmp::max( + shuffling_epoch + .saturating_sub(1_u64) + .start_slot(T::EthSpec::slots_per_epoch()), + head_block.slot, + ); + // If the head state is useful for this request, use it. Otherwise, read a state from - // disk. + // disk that is advanced as close as possible to `target_slot`. let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt { (state, state_root) } else { - let block_state_root = head_block.state_root; - let max_slot = shuffling_epoch.start_slot(T::EthSpec::slots_per_epoch()); let (state_root, state) = self .store - .get_inconsistent_state_for_attestation_verification_only( - &head_block_root, - max_slot, - block_state_root, - )? - .ok_or(Error::MissingBeaconState(block_state_root))?; + .get_advanced_hot_state(head_block_root, target_slot, head_block.state_root)? + .ok_or(Error::MissingBeaconState(head_block.state_root))?; (state, state_root) }; - /* - * IMPORTANT - * - * Since it's possible that - * `Store::get_inconsistent_state_for_attestation_verification_only` was used to obtain - * the state, we cannot rely upon the following fields: - * - * - `state.state_roots` - * - `state.block_roots` - * - * These fields should not be used for the rest of this function. - */ - metrics::stop_timer(state_read_timer); let state_skip_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES); - // If the state is in an earlier epoch, advance it. If it's from a later epoch, reject - // it. + // If the state is still in an earlier epoch, advance it to the `target_slot` so + // that its next epoch committee cache matches the `shuffling_epoch`. if state.current_epoch() + 1 < shuffling_epoch { - // Since there's a one-epoch look-ahead on the attester shuffling, it suffices to - // only advance into the slot prior to the `shuffling_epoch`. - let target_slot = shuffling_epoch - .saturating_sub(1_u64) - .start_slot(T::EthSpec::slots_per_epoch()); - - // Advance the state into the required slot, using the "partial" method since the state - // roots are not relevant for the shuffling. + // Advance the state into the required slot, using the "partial" method since the + // state roots are not relevant for the shuffling. 
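+            // As a concrete example (hedged; mainnet's 32 slots per epoch assumed):
+            // for `shuffling_epoch = 10`, the one-epoch look-ahead means any state in
+            // epoch 9 already holds the epoch-10 shuffling, so `target_slot` above is
+            // `max(epoch 9 start = slot 288, head_block.slot)`. A head block at slot
+            // 100 is advanced to slot 288 here, while a head block at slot 300 never
+            // reaches this branch and is used as-is.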
partial_state_advance(&mut state, Some(state_root), target_slot, &self.spec)?; - } else if state.current_epoch() > shuffling_epoch { - return Err(Error::InvalidStateForShuffling { - state_epoch: state.current_epoch(), - shuffling_epoch, - }); } - metrics::stop_timer(state_skip_timer); + let committee_building_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES); @@ -6391,8 +6259,7 @@ impl BeaconChain { state.build_committee_cache(relative_epoch, &self.spec)?; - let committee_cache = state.take_committee_cache(relative_epoch)?; - let committee_cache = Arc::new(committee_cache); + let committee_cache = state.committee_cache(relative_epoch)?.clone(); let shuffling_decision_block = shuffling_id.shuffling_decision_block; self.shuffling_cache @@ -6412,29 +6279,27 @@ impl BeaconChain { /// /// This could be a very expensive operation and should only be done in testing/analysis /// activities. + /// + /// This dump function previously used a backwards iterator but has been swapped to a forwards + /// iterator as it allows for MUCH better caching and rebasing. Memory usage of some tests went + /// from 5GB per test to 90MB. #[allow(clippy::type_complexity)] pub fn chain_dump( &self, ) -> Result>>, Error> { let mut dump = vec![]; - let mut last_slot = { - let head = self.canonical_head.cached_head(); - BeaconSnapshot { - beacon_block: Arc::new(head.snapshot.beacon_block.clone_as_blinded()), - beacon_block_root: head.snapshot.beacon_block_root, - beacon_state: head.snapshot.beacon_state.clone(), - } - }; - - dump.push(last_slot.clone()); + let mut prev_block_root = None; + let mut prev_beacon_state = None; - loop { - let beacon_block_root = last_slot.beacon_block.parent_root(); + for res in self.forwards_iter_block_roots(Slot::new(0))? { + let (beacon_block_root, _) = res?; - if beacon_block_root == Hash256::zero() { - break; // Genesis has been reached. + // Do not include snapshots at skipped slots. + if Some(beacon_block_root) == prev_block_root { + continue; } + prev_block_root = Some(beacon_block_root); let beacon_block = self .store @@ -6443,25 +6308,31 @@ impl BeaconChain { Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) })?; let beacon_state_root = beacon_block.state_root(); - let beacon_state = self + + let mut beacon_state = self .store .get_state(&beacon_state_root, Some(beacon_block.slot()))? .ok_or_else(|| { Error::DBInconsistent(format!("Missing state {:?}", beacon_state_root)) })?; - let slot = BeaconSnapshot { + // This beacon state might come from the freezer DB, which means it could have pending + // updates or lots of untethered memory. We rebase it on the previous state in order to + // address this. + beacon_state.apply_pending_mutations()?; + if let Some(prev) = prev_beacon_state { + beacon_state.rebase_on(&prev, &self.spec)?; + } + beacon_state.build_caches(&self.spec)?; + prev_beacon_state = Some(beacon_state.clone()); + + let snapshot = BeaconSnapshot { beacon_block: Arc::new(beacon_block), beacon_block_root, beacon_state, }; - - dump.push(slot.clone()); - last_slot = slot; + dump.push(snapshot); } - - dump.reverse(); - Ok(dump) } @@ -6696,6 +6567,10 @@ impl BeaconChain { self.data_availability_checker.data_availability_boundary() } + pub fn logger(&self) -> &Logger { + &self.log + } + /// Gets the `LightClientBootstrap` object for a requested block root. /// /// Returns `None` when the state or block is not found in the database. 
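The `chain_dump` rewrite above deserves a note: it walks block roots forwards and rebases each state on its predecessor, so consecutive snapshots share unchanged tree nodes instead of holding full independent copies. A minimal sketch of that pattern follows; it is hedged and not part of the patch: `load_state_by_root` is a hypothetical stand-in for the store reads, while `apply_pending_mutations`, `rebase_on` and `build_caches` are the calls used in the hunk itself.

use types::{BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256};

/// Hedged sketch of the forwards-dump pattern used by `chain_dump` above.
fn dump_states_forwards<E: EthSpec>(
    block_roots: impl IntoIterator<Item = Hash256>,
    load_state_by_root: impl Fn(Hash256) -> Result<BeaconState<E>, BeaconStateError>,
    spec: &ChainSpec,
) -> Result<Vec<BeaconState<E>>, BeaconStateError> {
    let mut prev_state: Option<BeaconState<E>> = None;
    let mut dump = vec![];
    for root in block_roots {
        // Hypothetical helper standing in for the store lookups in `chain_dump`.
        let mut state = load_state_by_root(root)?;
        // Settle any copy-on-write updates so the state owns a clean tree.
        state.apply_pending_mutations()?;
        // Share unchanged tree nodes with the previous snapshot instead of
        // keeping a fully independent copy per slot.
        if let Some(prev) = &prev_state {
            state.rebase_on(prev, spec)?;
        }
        state.build_caches(spec)?;
        // Cloning a tree-backed state is cheap: it is structural sharing.
        prev_state = Some(state.clone());
        dump.push(state);
    }
    Ok(dump)
}

Structural sharing between consecutive snapshots is what the comment in `chain_dump` quotes: some tests dropped from roughly 5 GB to 90 MB per test because each additional snapshot only pays for the slots that actually changed.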
diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
index ca390712b13..d10bbfbbc5f 100644
--- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
+++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
@@ -17,8 +17,7 @@ use std::cmp::Ordering;
 use std::num::NonZeroUsize;
 use types::non_zero_usize::new_non_zero_usize;
 use types::{
-    BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot,
-    Unsigned,
+    BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned,
 };

 /// The number of sets of proposer indices that should be cached.
@@ -145,10 +144,7 @@ pub fn compute_proposer_duties_from_head<T: BeaconChainTypes>(
     let (mut state, head_state_root, head_block_root) = {
         let head = chain.canonical_head.cached_head();
         // Take a copy of the head state.
-        let head_state = head
-            .snapshot
-            .beacon_state
-            .clone_with(CloneConfig::committee_caches_only());
+        let head_state = head.snapshot.beacon_state.clone();
         let head_state_root = head.head_state_root();
         let head_block_root = head.head_block_root();
         (head_state, head_state_root, head_block_root)
diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs
index afb13247766..e9fde48ac67 100644
--- a/beacon_node/beacon_chain/src/beacon_snapshot.rs
+++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs
@@ -1,8 +1,8 @@
 use serde::Serialize;
 use std::sync::Arc;
 use types::{
-    beacon_state::CloneConfig, AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256,
-    SignedBeaconBlock,
+    AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256, SignedBeaconBlock,
+    SignedBlindedBeaconBlock,
 };

 /// Represents some block and its associated state. Generally, this will be used for tracking the
@@ -14,6 +14,19 @@ pub struct BeaconSnapshot<T: EthSpec, Payload: AbstractExecPayload<T> = FullPayload<T>> {
     pub beacon_state: BeaconState<T>,
 }

+/// This snapshot is to be used for verifying a child of `self.beacon_block`.
+#[derive(Debug)]
+pub struct PreProcessingSnapshot<T: EthSpec> {
+    /// This state is equivalent to the `self.beacon_block.state_root()` state that has been
+    /// advanced forward one slot using `per_slot_processing`. This state is "primed and ready" for
+    /// the application of another block.
+    pub pre_state: BeaconState<T>,
+    /// This value is only set to `Some` if the `pre_state` was *not* advanced forward.
+    pub beacon_state_root: Option<Hash256>,
+    pub beacon_block: SignedBlindedBeaconBlock<T>,
+    pub beacon_block_root: Hash256,
+}
+
 impl<T: EthSpec, Payload: AbstractExecPayload<T>> BeaconSnapshot<T, Payload> {
     /// Create a new checkpoint.
pub fn new( @@ -48,12 +61,4 @@ impl> BeaconSnapshot { self.beacon_block_root = beacon_block_root; self.beacon_state = beacon_state; } - - pub fn clone_with(&self, clone_config: CloneConfig) -> Self { - Self { - beacon_block: self.beacon_block.clone(), - beacon_block_root: self.beacon_block_root, - beacon_state: self.beacon_state.clone_with(clone_config), - } - } } diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index a1ae260d930..263b9f9e013 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -2,7 +2,7 @@ use derivative::Derivative; use slot_clock::SlotClock; use std::sync::Arc; -use crate::beacon_chain::{BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT}; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use crate::block_verification::{ cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, BlockSlashInfo, @@ -11,15 +11,13 @@ use crate::kzg_utils::{validate_blob, validate_blobs}; use crate::{metrics, BeaconChainError}; use kzg::{Error as KzgError, Kzg, KzgCommitment}; use merkle_proof::MerkleTreeError; -use slog::{debug, warn}; +use slog::debug; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use std::time::Duration; use tree_hash::TreeHash; use types::blob_sidecar::BlobIdentifier; -use types::{ - BeaconStateError, BlobSidecar, CloneConfig, EthSpec, Hash256, SignedBeaconBlockHeader, Slot, -}; +use types::{BeaconStateError, BlobSidecar, EthSpec, Hash256, SignedBeaconBlockHeader, Slot}; /// An error occurred while validating a gossip blob. #[derive(Debug)] @@ -514,98 +512,43 @@ pub fn validate_blob_sidecar_for_gossip( "block_root" => %block_root, "index" => %blob_index, ); - if let Some(mut snapshot) = chain - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_cloned(block_parent_root, CloneConfig::committee_caches_only()) - }) - { - if snapshot.beacon_state.slot() == blob_slot { - debug!( - chain.log, - "Cloning snapshot cache state for blob verification"; - "block_root" => %block_root, - "index" => %blob_index, - ); - ( - snapshot - .beacon_state - .get_beacon_proposer_index(blob_slot, &chain.spec)?, - snapshot.beacon_state.fork(), - ) - } else { - debug!( - chain.log, - "Cloning and advancing snapshot cache state for blob verification"; - "block_root" => %block_root, - "index" => %blob_index, - ); - let state = - cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( - &mut snapshot.beacon_state, - Some(snapshot.beacon_block_root), - blob_slot, - &chain.spec, - )?; - ( - state.get_beacon_proposer_index(blob_slot, &chain.spec)?, - state.fork(), - ) - } - } - // Need to advance the state to get the proposer index - else { - warn!( - chain.log, - "Snapshot cache miss for blob verification"; - "block_root" => %block_root, - "index" => %blob_index, - ); - - let parent_block = chain - .get_blinded_block(&block_parent_root) - .map_err(GossipBlobError::BeaconChainError)? - .ok_or_else(|| { - GossipBlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root)) - })?; - - let mut parent_state = chain - .get_state(&parent_block.state_root(), Some(parent_block.slot()))? 
- .ok_or_else(|| { - BeaconChainError::DBInconsistent(format!( - "Missing state {:?}", - parent_block.state_root() - )) - })?; - let state = cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( - &mut parent_state, - Some(parent_block.state_root()), - blob_slot, - &chain.spec, - )?; - - let proposers = state.get_beacon_proposer_indices(&chain.spec)?; - let proposer_index = *proposers - .get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize) - .ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?; - - let fork = state.fork(); - // Prime the proposer shuffling cache with the newly-learned value. - chain.beacon_proposer_cache.lock().insert( - blob_epoch, - proposer_shuffling_root, - proposers, - fork, - )?; - (proposer_index, fork) - } + let (parent_state_root, mut parent_state) = chain + .store + .get_advanced_hot_state(block_parent_root, blob_slot, parent_block.state_root) + .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? + .ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing state for parent block {block_parent_root:?}", + )) + })?; + + let state = cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( + &mut parent_state, + Some(parent_state_root), + blob_slot, + &chain.spec, + )?; + + let proposers = state.get_beacon_proposer_indices(&chain.spec)?; + let proposer_index = *proposers + .get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize) + .ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?; + + // Prime the proposer shuffling cache with the newly-learned value. + chain.beacon_proposer_cache.lock().insert( + blob_epoch, + proposer_shuffling_root, + proposers, + state.fork(), + )?; + (proposer_index, state.fork()) }; // Signature verify the signed block header. let signature_is_valid = { let pubkey_cache = get_validator_pubkey_cache(chain).map_err(|_| GossipBlobError::PubkeyCacheTimeout)?; + let pubkey = pubkey_cache .get(proposer_index) .ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 38648949f9c..866dde5a763 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -48,6 +48,7 @@ // returned alongside. 
#![allow(clippy::result_large_err)] +use crate::beacon_snapshot::PreProcessingSnapshot; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::block_verification_types::{ AsBlock, BlockContentsError, BlockImportData, GossipVerifiedBlockContents, RpcBlock, @@ -59,14 +60,10 @@ use crate::execution_payload::{ AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; use crate::observed_block_producers::SeenBlock; -use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ - beacon_chain::{ - BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, - VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, - }, + beacon_chain::{BeaconForkChoice, ForkChoiceError, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; @@ -86,22 +83,21 @@ use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, per_block_processing, per_slot_processing, state_advance::partial_state_advance, - BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, - StateProcessingStrategy, VerifyBlockRoot, + AllCaches, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, + VerifyBlockRoot, }; use std::borrow::Cow; use std::fmt::Debug; use std::fs; use std::io::Write; use std::sync::Arc; -use std::time::Duration; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, - ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ExecutionBlockHash, + Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, + SignedBeaconBlockHeader, Slot, }; use types::{BlobSidecar, ExecPayload}; @@ -578,7 +574,7 @@ pub fn signature_verify_chain_segment( } let (first_root, first_block) = chain_segment.remove(0); - let (mut parent, first_block) = load_parent(first_root, first_block, chain)?; + let (mut parent, first_block) = load_parent(first_block, chain)?; let slot = first_block.slot(); chain_segment.insert(0, (first_root, first_block)); @@ -893,7 +889,7 @@ impl GossipVerifiedBlock { } else { // The proposer index was *not* cached and we must load the parent in order to determine // the proposer index. - let (mut parent, block) = load_parent(block_root, block, chain)?; + let (mut parent, block) = load_parent(block, chain)?; debug!( chain.log, @@ -1042,7 +1038,7 @@ impl SignatureVerifiedBlock { // Check the anchor slot before loading the parent, to avoid spurious lookups. check_block_against_anchor_slot(block.message(), chain)?; - let (mut parent, block) = load_parent(block_root, block, chain)?; + let (mut parent, block) = load_parent(block, chain)?; let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, @@ -1092,7 +1088,7 @@ impl SignatureVerifiedBlock { let (mut parent, block) = if let Some(parent) = from.parent { (parent, from.block) } else { - load_parent(from.block_root, from.block, chain)? + load_parent(from.block, chain)? 
}; let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( @@ -1154,7 +1150,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) } else { - load_parent(self.block_root, self.block, chain) + load_parent(self.block, chain) .map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))? }; @@ -1386,8 +1382,18 @@ impl ExecutionPendingBlock { let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); // Stage a batch of operations to be completed atomically if this block is imported - // successfully. - let mut confirmed_state_roots = vec![]; + // successfully. We include the state root of the pre-state, which may be an advanced state + // that was stored in the DB with a `temporary` flag. + let mut state = parent.pre_state; + + let mut confirmed_state_roots = if state.slot() > parent.beacon_block.slot() { + // Advanced pre-state. Delete its temporary flag. + let pre_state_root = state.update_tree_hash_cache()?; + vec![pre_state_root] + } else { + // Pre state is parent state. It is already stored in the DB without temporary status. + vec![] + }; // The block must have a higher slot than its parent. if block.slot() <= parent.beacon_block.slot() { @@ -1397,14 +1403,6 @@ impl ExecutionPendingBlock { }); } - let mut summaries = vec![]; - - // Transition the parent state to the block slot. - // - // It is important to note that we're using a "pre-state" here, one that has potentially - // been advanced one slot forward from `parent.beacon_block.slot`. - let mut state = parent.pre_state; - // Perform a sanity check on the pre-state. let parent_slot = parent.beacon_block.slot(); if state.slot() < parent_slot || state.slot() > block.slot() { @@ -1423,6 +1421,12 @@ impl ExecutionPendingBlock { eth1_deposit_index: state.eth1_deposit_index(), }; + // Transition the parent state to the block slot. + // + // It is important to note that we're using a "pre-state" here, one that has potentially + // been advanced one slot forward from `parent.beacon_block.slot`. + let mut summaries = vec![]; + let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { let state_root = if parent.beacon_block.slot() == state.slot() { @@ -1522,8 +1526,7 @@ impl ExecutionPendingBlock { let committee_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_COMMITTEE); - state.build_committee_cache(RelativeEpoch::Previous, &chain.spec)?; - state.build_committee_cache(RelativeEpoch::Current, &chain.spec)?; + state.build_all_committee_caches(&chain.spec)?; metrics::stop_timer(committee_timer); @@ -1564,7 +1567,6 @@ impl ExecutionPendingBlock { block.as_block(), // Signatures were verified earlier in this function. BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut consensus_context, &chain.spec, @@ -1839,12 +1841,9 @@ fn verify_parent_block_is_known( /// whilst attempting the operation. #[allow(clippy::type_complexity)] fn load_parent>( - block_root: Hash256, block: B, chain: &BeaconChain, ) -> Result<(PreProcessingSnapshot, B), BlockError> { - let spec = &chain.spec; - // Reject any block if its parent is not known to fork choice. 
// // A block that is not in fork choice is either: @@ -1863,45 +1862,10 @@ fn load_parent>( return Err(BlockError::ParentUnknown(block.into_rpc_block())); } - let block_delay = chain - .block_times_cache - .read() - .get_block_delays( - block_root, - chain - .slot_clock - .start_of(block.slot()) - .unwrap_or_else(|| Duration::from_secs(0)), - ) - .observed; - let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ); - let result = if let Some((snapshot, cloned)) = chain - .snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|mut snapshot_cache| { - snapshot_cache.get_state_for_block_processing( - block.parent_root(), - block.slot(), - block_delay, - spec, - ) - }) { - if cloned { - metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_CLONES); - debug!( - chain.log, - "Cloned snapshot for late block/skipped slot"; - "slot" => %block.slot(), - "parent_slot" => %snapshot.beacon_block.slot(), - "parent_root" => ?block.parent_root(), - "block_delay" => ?block_delay, - ); - } - Ok((snapshot, block)) - } else { - // Load the blocks parent block from the database, returning invalid if that block is not + let result = { + // Load the block's parent block from the database, returning invalid if that block is not // found. // // We don't return a DBInconsistent error here since it's possible for a block to @@ -1925,7 +1889,7 @@ fn load_parent>( // Retrieve any state that is advanced through to at most `block.slot()`: this is // particularly important if `block` descends from the finalized/split block, but at a slot // prior to the finalized slot (which is invalid and inaccessible in our DB schema). - let (parent_state_root, parent_state) = chain + let (parent_state_root, state) = chain .store .get_advanced_hot_state(root, block.slot(), parent_block.state_root())? .ok_or_else(|| { @@ -1934,22 +1898,46 @@ fn load_parent>( ) })?; - metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES); - debug!( - chain.log, - "Missed snapshot cache"; - "slot" => block.slot(), - "parent_slot" => parent_block.slot(), - "parent_root" => ?block.parent_root(), - "block_delay" => ?block_delay, - ); + if !state.all_caches_built() { + debug!( + chain.log, + "Parent state lacks built caches"; + "block_slot" => block.slot(), + "state_slot" => state.slot(), + ); + } + + if block.slot() != state.slot() { + debug!( + chain.log, + "Parent state is not advanced"; + "block_slot" => block.slot(), + "state_slot" => state.slot(), + ); + } + + let beacon_state_root = if state.slot() == parent_block.slot() { + // Sanity check. 
+ if parent_state_root != parent_block.state_root() { + return Err(BeaconChainError::DBInconsistent(format!( + "Parent state at slot {} has the wrong state root: {:?} != {:?}", + state.slot(), + parent_state_root, + parent_block.state_root() + )) + .into()); + } + Some(parent_block.state_root()) + } else { + None + }; Ok(( PreProcessingSnapshot { beacon_block: parent_block, beacon_block_root: root, - pre_state: parent_state, - beacon_state_root: Some(parent_state_root), + pre_state: state, + beacon_state_root, }, block, )) @@ -2030,7 +2018,7 @@ pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: BlockBlobEr } else if state.slot() > block_slot { Err(Err::not_later_than_parent_error(block_slot, state.slot())) } else { - let mut state = state.clone_with(CloneConfig::committee_caches_only()); + let mut state = state.clone(); let target_slot = block_epoch.start_slot(E::slots_per_epoch()); // Advance the state into the same epoch as the block. Use the "partial" method since state diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 56381a7806f..be6b1f9b2bf 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -12,7 +12,6 @@ use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; -use crate::snapshot_cache::SnapshotCache; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}; use crate::validator_pubkey_cache::ValidatorPubkeyCache; @@ -32,7 +31,7 @@ use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; use slog::{crit, debug, error, info, o, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; -use state_processing::per_slot_processing; +use state_processing::{per_slot_processing, AllCaches}; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; @@ -462,7 +461,7 @@ where // Prime all caches before storing the state in the database and computing the tree hash // root. weak_subj_state - .build_caches(&self.spec) + .build_all_caches(&self.spec) .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; let weak_subj_state_root = weak_subj_state .update_tree_hash_cache() @@ -537,6 +536,13 @@ where // Write the state, block and blobs non-atomically, it doesn't matter if they're forgotten // about on a crash restart. + store + .update_finalized_state( + weak_subj_state_root, + weak_subj_block_root, + weak_subj_state.clone(), + ) + .map_err(|e| format!("Failed to set checkpoint state as finalized state: {:?}", e))?; store .put_state(&weak_subj_state_root, &weak_subj_state) .map_err(|e| format!("Failed to store weak subjectivity state: {e:?}"))?; @@ -851,10 +857,8 @@ where let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); let genesis_time = head_snapshot.beacon_state.genesis_time(); - let head_for_snapshot_cache = head_snapshot.clone(); let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); let shuffling_cache_size = self.chain_config.shuffling_cache_size; - let snapshot_cache_size = self.chain_config.snapshot_cache_size; // Calculate the weak subjectivity point in which to backfill blocks to. 
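The `load_parent` rewrite above only checks the loaded state's root against the root committed in the parent block when the state sits exactly at the parent block's slot; an advanced state has a root the parent block cannot know. A sketch of that decision with stand-in types (plain byte arrays rather than `Hash256`, `u64` rather than `Slot`):

/// Returns the state root to cache on the snapshot, or an error if a
/// non-advanced state disagrees with its parent block.
fn beacon_state_root_for(
    state_slot: u64,
    parent_block_slot: u64,
    parent_state_root: [u8; 32],
    parent_block_state_root: [u8; 32],
) -> Result<Option<[u8; 32]>, String> {
    if state_slot == parent_block_slot {
        // Non-advanced pre-state: its root must match the parent block.
        if parent_state_root != parent_block_state_root {
            return Err(format!(
                "parent state at slot {state_slot} has the wrong state root"
            ));
        }
        Ok(Some(parent_block_state_root))
    } else {
        // Advanced pre-state: the parent block does not commit to this root.
        Ok(None)
    }
}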
let genesis_backfill_slot = if self.chain_config.genesis_backfill { @@ -930,10 +934,6 @@ where fork_choice_signal_rx, event_handler: self.event_handler, head_tracker, - snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( - snapshot_cache_size, - head_for_snapshot_cache, - )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new( shuffling_cache_size, head_shuffling_ids, @@ -962,7 +962,6 @@ where .map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?, ), kzg: self.kzg.clone(), - block_production_state: Arc::new(Mutex::new(None)), }; let head = beacon_chain.head_snapshot(); diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 734575d2c0d..a84cfab298d 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -35,10 +35,7 @@ use crate::beacon_chain::ATTESTATION_CACHE_LOCK_TIMEOUT; use crate::persisted_fork_choice::PersistedForkChoice; use crate::shuffling_cache::BlockShufflingIds; use crate::{ - beacon_chain::{ - BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate, - BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY, - }, + beacon_chain::{BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate, FORK_CHOICE_DB_KEY}, block_times_cache::BlockTimesCache, events::ServerSentEventHandler, metrics, @@ -54,6 +51,7 @@ use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use slog::{crit, debug, error, warn, Logger}; use slot_clock::SlotClock; +use state_processing::AllCaches; use std::sync::Arc; use std::time::Duration; use store::{iter::StateRootsIterator, KeyValueStoreOp, StoreItem}; @@ -466,9 +464,7 @@ impl BeaconChain { pub fn head_beacon_state_cloned(&self) -> BeaconState { // Don't clone whilst holding the read-lock, take an Arc-clone to reduce lock contention. let snapshot: Arc<_> = self.head_snapshot(); - snapshot - .beacon_state - .clone_with(CloneConfig::committee_caches_only()) + snapshot.beacon_state.clone() } /// Execute the fork choice algorithm and enthrone the result as the canonical head. @@ -652,48 +648,31 @@ impl BeaconChain { let new_cached_head = if new_view.head_block_root != old_view.head_block_root { metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); - // Try and obtain the snapshot for `beacon_block_root` from the snapshot cache, falling - // back to a database read if that fails. - let new_snapshot = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_cloned( + let mut new_snapshot = { + let beacon_block = self + .store + .get_full_block(&new_view.head_block_root)? + .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; + + let (_, beacon_state) = self + .store + .get_advanced_hot_state( new_view.head_block_root, - CloneConfig::committee_caches_only(), - ) - }) - .map::, _>(Ok) - .unwrap_or_else(|| { - let beacon_block = self - .store - .get_full_block(&new_view.head_block_root)? - .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; - - let (_, beacon_state) = self - .store - .get_advanced_hot_state( - new_view.head_block_root, - current_slot, - beacon_block.state_root(), - )? 
- .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; - - Ok(BeaconSnapshot { - beacon_block: Arc::new(beacon_block), - beacon_block_root: new_view.head_block_root, - beacon_state, - }) - }) - .and_then(|mut snapshot| { - // Regardless of where we got the state from, attempt to build the committee - // caches. - snapshot - .beacon_state - .build_all_committee_caches(&self.spec) - .map_err(Into::into) - .map(|()| snapshot) - })?; + current_slot, + beacon_block.state_root(), + )? + .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; + + BeaconSnapshot { + beacon_block: Arc::new(beacon_block), + beacon_block_root: new_view.head_block_root, + beacon_state, + } + }; + + // Regardless of where we got the state from, attempt to build all the + // caches except the tree hash cache. + new_snapshot.beacon_state.build_all_caches(&self.spec)?; let new_cached_head = CachedHead { snapshot: Arc::new(new_snapshot), @@ -834,25 +813,6 @@ impl BeaconChain { .beacon_state .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); - // Update the snapshot cache with the latest head value. - // - // This *could* be done inside `recompute_head`, however updating the head on the snapshot - // cache is not critical so we avoid placing it on a critical path. Note that this function - // will not return an error if the update fails, it will just log an error. - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.update_head(new_snapshot.beacon_block_root); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "update head" - ); - }); - match BlockShufflingIds::try_from_head( new_snapshot.beacon_block_root, &new_snapshot.beacon_state, @@ -998,26 +958,6 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()), ); - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.prune(new_view.finalized_checkpoint.epoch); - debug!( - self.log, - "Snapshot cache pruned"; - "new_len" => snapshot_cache.len(), - "remaining_roots" => ?snapshot_cache.beacon_block_roots(), - ); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "prune" - ); - }); - self.attester_cache .prune_below(new_view.finalized_checkpoint.epoch); diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 0772aff6710..255b8f00497 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -75,8 +75,6 @@ pub struct ChainConfig { pub optimistic_finalized_sync: bool, /// The size of the shuffling cache, pub shuffling_cache_size: usize, - /// The size of the snapshot cache. - pub snapshot_cache_size: usize, /// If using a weak-subjectivity sync, whether we should download blocks all the way back to /// genesis. pub genesis_backfill: bool, @@ -116,7 +114,6 @@ impl Default for ChainConfig { // This value isn't actually read except in tests. 
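A change repeated throughout this diff replaces `clone_with(CloneConfig::committee_caches_only())` with a plain `clone()`. With tree-states the heavyweight state fields are persistent structures whose clones share backing nodes, so a full clone copies pointers rather than validator records. A toy `Arc`-based analogy (the real sharing happens inside milhouse trees, not a single `Arc`):

use std::sync::Arc;

#[derive(Clone)]
struct ToyState {
    // Shared backing storage: cloning the state clones the Arc, not the Vec.
    validators: Arc<Vec<u64>>,
}

fn main() {
    let head = ToyState {
        validators: Arc::new((0..1_000_000).collect()),
    };
    let snapshot = head.clone();
    // Both states point at the same allocation; nothing was deep-copied.
    assert!(Arc::ptr_eq(&head.validators, &snapshot.validators));
}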
optimistic_finalized_sync: true, shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, - snapshot_cache_size: crate::snapshot_cache::DEFAULT_SNAPSHOT_CACHE_SIZE, genesis_backfill: false, always_prepare_payload: false, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index f4c1bc308c0..3a8bccbad5c 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -1059,7 +1059,7 @@ mod test { let chain = &harness.chain; let log = chain.log.clone(); let head = chain.head_snapshot(); - let parent_state = head.beacon_state.clone_with_only_committee_caches(); + let parent_state = head.beacon_state.clone(); let target_slot = chain.slot().expect("should get slot") + 1; let parent_root = head.beacon_block_root; diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index b6dbf2b952f..f8a243bd9e8 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -8,7 +8,7 @@ use crate::{ use lru::LruCache; use parking_lot::RwLock; use ssz_derive::{Decode, Encode}; -use state_processing::{BlockReplayer, StateProcessingStrategy}; +use state_processing::BlockReplayer; use std::sync::Arc; use store::OnDiskConsensusContext; use types::beacon_block_body::KzgCommitments; @@ -189,7 +189,6 @@ impl StateLRUCache { let block_replayer: BlockReplayer<'_, T::EthSpec, AvailabilityCheckError, _> = BlockReplayer::new(parent_state, &self.spec) .no_signature_verification() - .state_processing_strategy(StateProcessingStrategy::Accurate) .state_root_iter(state_roots.into_iter()) .minimal_block_root_verification(); diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 9c82e964cc0..340f1f9f797 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -31,6 +31,7 @@ use state_processing::{ use std::time::Duration; use task_executor::ShutdownReason; use tokio::task::JoinError; +use types::milhouse::Error as MilhouseError; use types::*; macro_rules! 
easy_from_to { @@ -224,6 +225,7 @@ pub enum BeaconChainError { AvailabilityCheckError(AvailabilityCheckError), LightClientError(LightClientError), UnsupportedFork, + MilhouseError(MilhouseError), } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -253,6 +255,7 @@ easy_from_to!(InconsistentFork, BeaconChainError); easy_from_to!(AvailabilityCheckError, BeaconChainError); easy_from_to!(EpochCacheError, BeaconChainError); easy_from_to!(LightClientError, BeaconChainError); +easy_from_to!(MilhouseError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { @@ -279,6 +282,7 @@ pub enum BlockProductionError { TerminalPoWBlockLookupFailed(execution_layer::Error), GetPayloadFailed(execution_layer::Error), FailedToReadFinalizedBlock(store::Error), + FailedToLoadState(store::Error), MissingFinalizedBlock(Hash256), BlockTooLarge(usize), ShuttingDown, diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 3ec39f9d192..31297244e3e 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1020,6 +1020,7 @@ mod test { mod collect_valid_votes { use super::*; + use types::List; fn get_eth1_data_vec(n: u64, block_number_offset: u64) -> Vec<(Eth1Data, BlockNumber)> { (0..n) @@ -1067,12 +1068,14 @@ mod test { let votes_to_consider = get_eth1_data_vec(slots, 0); - *state.eth1_data_votes_mut() = votes_to_consider[0..slots as usize / 4] - .iter() - .map(|(eth1_data, _)| eth1_data) - .cloned() - .collect::>() - .into(); + *state.eth1_data_votes_mut() = List::new( + votes_to_consider[0..slots as usize / 4] + .iter() + .map(|(eth1_data, _)| eth1_data) + .cloned() + .collect::>(), + ) + .unwrap(); let votes = collect_valid_votes(&state, &votes_to_consider.clone().into_iter().collect()); @@ -1096,12 +1099,14 @@ mod test { .expect("should have some eth1 data") .clone(); - *state.eth1_data_votes_mut() = vec![duplicate_eth1_data.clone(); 4] - .iter() - .map(|(eth1_data, _)| eth1_data) - .cloned() - .collect::>() - .into(); + *state.eth1_data_votes_mut() = List::new( + vec![duplicate_eth1_data.clone(); 4] + .iter() + .map(|(eth1_data, _)| eth1_data) + .cloned() + .collect::>(), + ) + .unwrap(); let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect()); assert_votes!( diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 084ae95e096..8d1c29f46f6 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -5,7 +5,7 @@ use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; use state_processing::{ per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext, - StateProcessingStrategy, VerifyBlockRoot, + VerifyBlockRoot, }; use std::sync::Arc; use std::time::Duration; @@ -175,7 +175,6 @@ pub fn reset_fork_choice_to_finalization, Cold: It &mut state, &block, BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 71c96d0fd55..c1df9ede87a 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -51,7 +51,6 @@ mod pre_finalization_cache; pub mod proposer_prep_service; pub mod schema_change; pub mod shuffling_cache; -pub mod snapshot_cache; pub mod state_advance_timer; pub mod sync_committee_rewards; pub mod 
sync_committee_verification; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index df718413cc0..fc3f032cdc3 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -4,12 +4,8 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use lazy_static::lazy_static; pub use lighthouse_metrics::*; use slot_clock::SlotClock; -use std::time::Duration; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; -/// The maximum time to wait for the snapshot cache lock during a metrics scrape. -const SNAPSHOT_CACHE_TIMEOUT: Duration = Duration::from_millis(100); - // Attestation simulator metrics pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL: &str = "validator_monitor_attestation_simulator_head_attester_hit_total"; @@ -1204,15 +1200,10 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { let attestation_stats = beacon_chain.op_pool.attestation_stats(); - if let Some(snapshot_cache) = beacon_chain - .snapshot_cache - .try_write_for(SNAPSHOT_CACHE_TIMEOUT) - { - set_gauge( - &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, - snapshot_cache.len() as i64, - ) - } + set_gauge_by_usize( + &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, + beacon_chain.store.state_cache_len(), + ); set_gauge_by_usize( &BEACON_REQRESP_PRE_IMPORT_CACHE_SIZE, diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index b3de6f91c92..04d58882639 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -339,7 +339,7 @@ mod test { .clone(); let committee_b = state.committee_cache(RelativeEpoch::Next).unwrap().clone(); assert!(committee_a != committee_b); - (Arc::new(committee_a), Arc::new(committee_b)) + (committee_a, committee_b) } /// Builds a deterministic but incoherent shuffling ID from a `u64`. diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 39d35f81113..1f928a16e42 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -15,9 +15,7 @@ //! 2. There's a possibility that the head block is never built upon, causing wasted CPU cycles. use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::{ - beacon_chain::{ATTESTATION_CACHE_LOCK_TIMEOUT, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT}, - chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, - snapshot_cache::StateAdvance, + beacon_chain::ATTESTATION_CACHE_LOCK_TIMEOUT, chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, BeaconChain, BeaconChainError, BeaconChainTypes, }; use slog::{debug, error, warn, Logger}; @@ -27,9 +25,10 @@ use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, }; +use store::KeyValueStore; use task_executor::TaskExecutor; use tokio::time::{sleep, sleep_until, Instant}; -use types::{AttestationShufflingId, EthSpec, Hash256, RelativeEpoch, Slot}; +use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform /// the state advancement. @@ -45,14 +44,13 @@ const MAX_ADVANCE_DISTANCE: u64 = 4; /// impact whilst having 8 epochs without a block is a comfortable grace period. const MAX_FORK_CHOICE_DISTANCE: u64 = 256; -/// Drop any unused block production state cache after this many slots. 
-const MAX_BLOCK_PRODUCTION_CACHE_DISTANCE: u64 = 4; - #[derive(Debug)] enum Error { BeaconChain(BeaconChainError), // We don't use the inner value directly, but it's used in the Debug impl. HeadMissingFromSnapshotCache(#[allow(dead_code)] Hash256), + BeaconState(#[allow(dead_code)] BeaconStateError), + Store(#[allow(dead_code)] store::Error), MaxDistanceExceeded { current_slot: Slot, head_slot: Slot, @@ -72,6 +70,18 @@ impl From for Error { } } +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } +} + +impl From for Error { + fn from(e: store::Error) -> Self { + Self::Store(e) + } +} + /// Provides a simple thread-safe lock to be used for task co-ordination. Practically equivalent to /// `Mutex<()>`. #[derive(Clone)] @@ -231,7 +241,7 @@ async fn state_advance_timer( // Prepare proposers so that the node can send payload attributes in the case where // it decides to abandon a proposer boost re-org. - let proposer_head = beacon_chain + beacon_chain .prepare_beacon_proposer(current_slot) .await .unwrap_or_else(|e| { @@ -248,56 +258,6 @@ async fn state_advance_timer( // in `ForkChoiceSignalTx`. beacon_chain.task_executor.clone().spawn_blocking( move || { - // If we're proposing, clone the head state preemptively so that it isn't on - // the hot path of proposing. We can delete this once we have tree-states. - if let Some(proposer_head) = proposer_head { - let mut cache = beacon_chain.block_production_state.lock(); - - // Avoid holding two states in memory. It's OK to hold the lock because - // we always lock the block production cache before the snapshot cache - // and we prefer for block production to wait for the block production - // cache if a clone is in-progress. - if cache - .as_ref() - .map_or(false, |(cached_head, _)| *cached_head != proposer_head) - { - drop(cache.take()); - } - if let Some(proposer_state) = beacon_chain - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(proposer_head) - }) - { - *cache = Some((proposer_head, proposer_state)); - debug!( - log, - "Cloned state ready for block production"; - "head_block_root" => ?proposer_head, - "slot" => next_slot - ); - } else { - warn!( - log, - "Block production state missing from snapshot cache"; - "head_block_root" => ?proposer_head, - "slot" => next_slot - ); - } - } else { - // If we aren't proposing, drop any old block production cache to save - // memory. - let mut cache = beacon_chain.block_production_state.lock(); - if let Some((_, state)) = &*cache { - if state.pre_state.slot() + MAX_BLOCK_PRODUCTION_CACHE_DISTANCE - <= current_slot - { - drop(cache.take()); - } - } - } - // Signal block proposal for the next slot (if it happens to be waiting). if let Some(tx) = &beacon_chain.fork_choice_signal_tx { if let Err(e) = tx.notify_fork_choice_complete(next_slot) { @@ -318,9 +278,9 @@ async fn state_advance_timer( } } -/// Reads the `snapshot_cache` from the `beacon_chain` and attempts to take a clone of the +/// Reads the `state_cache` from the `beacon_chain` and attempts to take a clone of the /// `BeaconState` of the head block. If it obtains this clone, the state will be advanced a single -/// slot then placed back in the `snapshot_cache` to be used for block verification. +/// slot then placed in the `state_cache` to be used for block verification. /// /// See the module-level documentation for rationale. 
fn advance_head(
@@ -345,46 +305,42 @@ fn advance_head(
        }
    }

-    let head_root = beacon_chain.head_beacon_block_root();
+    let (head_block_root, head_block_state_root) = {
+        let snapshot = beacon_chain.head_snapshot();
+        (snapshot.beacon_block_root, snapshot.beacon_state_root())
+    };

-    let (head_slot, head_state_root, mut state) = match beacon_chain
-        .snapshot_cache
-        .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
-        .ok_or(BeaconChainError::SnapshotCacheLockTimeout)?
-        .get_for_state_advance(head_root)
-    {
-        StateAdvance::AlreadyAdvanced => {
+    let (head_state_root, mut state) = beacon_chain
+        .store
+        .get_advanced_hot_state(head_block_root, current_slot, head_block_state_root)?
+        .ok_or(Error::HeadMissingFromSnapshotCache(head_block_root))?;
+
+    // Protect against advancing a state more than a single slot.
+    //
+    // Advancing more than one slot without storing the intermediate state would corrupt the
+    // database. Future work might store temporary, intermediate states inside this function.
+    match state.slot().cmp(&state.latest_block_header().slot) {
+        std::cmp::Ordering::Equal => (),
+        std::cmp::Ordering::Greater => {
             return Err(Error::StateAlreadyAdvanced {
-                block_root: head_root,
-            })
+                block_root: head_block_root,
+            });
         }
-        StateAdvance::BlockNotFound => return Err(Error::HeadMissingFromSnapshotCache(head_root)),
-        StateAdvance::State {
-            state,
-            state_root,
-            block_slot,
-        } => (block_slot, state_root, *state),
-    };
+        std::cmp::Ordering::Less => {
+            return Err(Error::BadStateSlot {
+                _block_slot: state.latest_block_header().slot,
+                _state_slot: state.slot(),
+            });
+        }
+    }

    let initial_slot = state.slot();
    let initial_epoch = state.current_epoch();

-    let state_root = if state.slot() == head_slot {
-        Some(head_state_root)
-    } else {
-        // Protect against advancing a state more than a single slot.
-        //
-        // Advancing more than one slot without storing the intermediate state would corrupt the
-        // database. Future works might store temporary, intermediate states inside this function.
-        return Err(Error::BadStateSlot {
-            _block_slot: head_slot,
-            _state_slot: state.slot(),
-        });
-    };
-
    // Advance the state a single slot.
-    if let Some(summary) = per_slot_processing(&mut state, state_root, &beacon_chain.spec)
-        .map_err(BeaconChainError::from)?
+    if let Some(summary) =
+        per_slot_processing(&mut state, Some(head_state_root), &beacon_chain.spec)
+            .map_err(BeaconChainError::from)?
    {
        // Expose Prometheus metrics.
        if let Err(e) = summary.observe_metrics() {
@@ -418,7 +374,7 @@ fn advance_head(
        debug!(
            log,
            "Advanced head state one slot";
-            "head_root" => ?head_root,
+            "head_block_root" => ?head_block_root,
            "state_slot" => state.slot(),
            "current_slot" => current_slot,
        );
@@ -437,14 +393,14 @@ fn advance_head(
    if initial_epoch < state.current_epoch() {
        // Update the proposer cache.
        //
-        // We supply the `head_root` as the decision block since the prior `if` statement guarantees
+        // We supply the `head_block_root` as the decision block since the prior `if` statement guarantees
        // the head root is the latest block from the prior epoch.
        beacon_chain
            .beacon_proposer_cache
            .lock()
            .insert(
                state.current_epoch(),
-                head_root,
+                head_block_root,
                state
                    .get_beacon_proposer_indices(&beacon_chain.spec)
                    .map_err(BeaconChainError::from)?,
@@ -453,8 +409,9 @@ fn advance_head(
            .map_err(BeaconChainError::from)?;

        // Update the attester cache.
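The slot-ordering guard added to `advance_head` above replaces the old snapshot-cache bookkeeping: a state may only be advanced if it sits exactly at its latest block's slot. A self-contained sketch of the same three-way decision, with `u64` standing in for `Slot`:

use std::cmp::Ordering;

#[derive(Debug, PartialEq)]
enum AdvanceCheck {
    Ok,
    AlreadyAdvanced,
    BadStateSlot { block_slot: u64, state_slot: u64 },
}

/// Only a state at exactly its latest block's slot may be advanced; advancing
/// further without persisting intermediate states could corrupt the database.
fn check_single_slot_advance(state_slot: u64, latest_block_slot: u64) -> AdvanceCheck {
    match state_slot.cmp(&latest_block_slot) {
        Ordering::Equal => AdvanceCheck::Ok,
        Ordering::Greater => AdvanceCheck::AlreadyAdvanced,
        Ordering::Less => AdvanceCheck::BadStateSlot {
            block_slot: latest_block_slot,
            state_slot,
        },
    }
}

fn main() {
    assert_eq!(check_single_slot_advance(8, 8), AdvanceCheck::Ok);
    assert_eq!(check_single_slot_advance(9, 8), AdvanceCheck::AlreadyAdvanced);
}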
- let shuffling_id = AttestationShufflingId::new(head_root, &state, RelativeEpoch::Next) - .map_err(BeaconChainError::from)?; + let shuffling_id = + AttestationShufflingId::new(head_block_root, &state, RelativeEpoch::Next) + .map_err(BeaconChainError::from)?; let committee_cache = state .committee_cache(RelativeEpoch::Next) .map_err(BeaconChainError::from)?; @@ -467,7 +424,7 @@ fn advance_head( debug!( log, "Primed proposer and attester caches"; - "head_root" => ?head_root, + "head_block_root" => ?head_block_root, "next_epoch_shuffling_root" => ?shuffling_id.shuffling_decision_block, "state_epoch" => state.current_epoch(), "current_epoch" => current_slot.epoch(T::EthSpec::slots_per_epoch()), @@ -477,22 +434,13 @@ fn advance_head( // Apply the state to the attester cache, if the cache deems it interesting. beacon_chain .attester_cache - .maybe_cache_state(&state, head_root, &beacon_chain.spec) + .maybe_cache_state(&state, head_block_root, &beacon_chain.spec) .map_err(BeaconChainError::from)?; let final_slot = state.slot(); - // Insert the advanced state back into the snapshot cache. - beacon_chain - .snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::SnapshotCacheLockTimeout)? - .update_pre_state(head_root, state) - .ok_or(Error::HeadMissingFromSnapshotCache(head_root))?; - // If we have moved into the next slot whilst processing the state then this function is going - // to become ineffective and likely become a hindrance as we're stealing the tree hash cache - // from the snapshot cache (which may force the next block to rebuild a new one). + // to become ineffective. // // If this warning occurs very frequently on well-resourced machines then we should consider // starting it earlier in the slot. Otherwise, it's a good indication that the machine is too @@ -503,7 +451,7 @@ fn advance_head( warn!( log, "State advance too slow"; - "head_root" => %head_root, + "head_block_root" => %head_block_root, "advanced_slot" => final_slot, "current_slot" => current_slot, "starting_slot" => starting_slot, @@ -511,10 +459,25 @@ fn advance_head( ); } + // Write the advanced state to the database with a temporary flag that will be deleted when + // a block is imported on top of this state. We should delete this once we bring in the DB + // changes from tree-states that allow us to prune states without temporary flags. + let advanced_state_root = state.update_tree_hash_cache()?; + let txn_lock = beacon_chain.store.hot_db.begin_rw_transaction(); + let state_already_exists = beacon_chain + .store + .load_hot_state_summary(&advanced_state_root)? 
+ .is_some(); + let temporary = !state_already_exists; + beacon_chain + .store + .put_state_possibly_temporary(&advanced_state_root, &state, temporary)?; + drop(txn_lock); + debug!( log, "Completed state advance"; - "head_root" => ?head_root, + "head_block_root" => ?head_block_root, "advanced_slot" => final_slot, "initial_slot" => initial_slot, ); diff --git a/beacon_node/beacon_chain/src/sync_committee_rewards.rs b/beacon_node/beacon_chain/src/sync_committee_rewards.rs index 2221aa1d5eb..9b35cff9432 100644 --- a/beacon_node/beacon_chain/src/sync_committee_rewards.rs +++ b/beacon_node/beacon_chain/src/sync_committee_rewards.rs @@ -38,9 +38,26 @@ impl BeaconChain { })?; let mut balances = HashMap::::new(); + for &validator_index in &sync_committee_indices { + balances.insert( + validator_index, + *state + .balances() + .get(validator_index) + .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)?, + ); + } + + let proposer_index = block.proposer_index() as usize; + balances.insert( + proposer_index, + *state + .balances() + .get(proposer_index) + .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)?, + ); let mut total_proposer_rewards = 0; - let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?; // Apply rewards to participant balances. Keep track of proposer rewards for (validator_index, participant_bit) in sync_committee_indices @@ -48,15 +65,15 @@ impl BeaconChain { .zip(sync_aggregate.sync_committee_bits.iter()) { let participant_balance = balances - .entry(*validator_index) - .or_insert_with(|| state.balances()[*validator_index]); + .get_mut(validator_index) + .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)?; if participant_bit { participant_balance.safe_add_assign(participant_reward_value)?; balances - .entry(proposer_index) - .or_insert_with(|| state.balances()[proposer_index]) + .get_mut(&proposer_index) + .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)? .safe_add_assign(proposer_reward_per_bit)?; total_proposer_rewards.safe_add_assign(proposer_reward_per_bit)?; @@ -67,18 +84,17 @@ impl BeaconChain { Ok(balances .iter() - .filter_map(|(i, new_balance)| { - let reward = if *i != proposer_index { - *new_balance as i64 - state.balances()[*i] as i64 - } else if sync_committee_indices.contains(i) { - *new_balance as i64 - - state.balances()[*i] as i64 - - total_proposer_rewards as i64 + .filter_map(|(&i, &new_balance)| { + let initial_balance = *state.balances().get(i)? 
as i64; + let reward = if i != proposer_index { + new_balance as i64 - initial_balance + } else if sync_committee_indices.contains(&i) { + new_balance as i64 - initial_balance - total_proposer_rewards as i64 } else { return None; }; Some(SyncCommitteeReward { - validator_index: *i as u64, + validator_index: i as u64, reward, }) }) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index debc4881a60..d3eeff8a7d9 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -46,10 +46,7 @@ use slog_async::Async; use slog_term::{FullFormat, TermDecorator}; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; -use state_processing::{ - state_advance::{complete_state_advance, partial_state_advance}, - StateProcessingStrategy, -}; +use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; @@ -756,10 +753,7 @@ where pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { let head = self.chain.head_snapshot(); let state_root = head.beacon_state_root(); - ( - head.beacon_state.clone_with_only_committee_caches(), - state_root, - ) + (head.beacon_state.clone(), state_root) } pub fn head_slot(&self) -> Slot { @@ -802,8 +796,9 @@ where pub fn get_hot_state(&self, state_hash: BeaconStateHash) -> Option> { self.chain .store - .load_hot_state(&state_hash.into(), StateProcessingStrategy::Accurate) + .load_hot_state(&state_hash.into()) .unwrap() + .map(|(state, _)| state) } pub fn get_cold_state(&self, state_hash: BeaconStateHash) -> Option> { @@ -1017,9 +1012,7 @@ where return Err(BeaconChainError::CannotAttestToFutureState); } else if state.current_epoch() < epoch { let mut_state = state.to_mut(); - // Only perform a "partial" state advance since we do not require the state roots to be - // accurate. - partial_state_advance( + complete_state_advance( mut_state, Some(state_root), epoch.start_slot(E::slots_per_epoch()), diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 2cf0c326158..e1b50706286 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -81,8 +81,9 @@ impl ValidatorPubkeyCache { ) -> Result>, BeaconChainError> { if state.validators().len() > self.pubkeys.len() { self.import( - state.validators()[self.pubkeys.len()..] - .iter() + state + .validators() + .iter_from(self.pubkeys.len())? 
.map(|v| v.pubkey), ) } else { diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 4d37557f0d1..98a112daffe 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -15,8 +15,7 @@ use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ common::get_indexed_attestation, per_block_processing::{per_block_processing, BlockSignatureStrategy}, - per_slot_processing, BlockProcessingError, ConsensusContext, StateProcessingStrategy, - VerifyBlockRoot, + per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot, }; use std::marker::PhantomData; use std::sync::Arc; @@ -1309,7 +1308,6 @@ async fn add_base_block_to_altair_chain() { &mut state, &base_block, BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &harness.chain.spec, @@ -1445,7 +1443,6 @@ async fn add_altair_block_to_base_chain() { &mut state, &altair_block, BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &harness.chain.spec, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index f1262596f70..0ef348319af 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -223,7 +223,7 @@ impl InvalidPayloadRig { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); let head = self.harness.chain.head_snapshot(); - let state = head.beacon_state.clone_with_only_committee_caches(); + let state = head.beacon_state.clone(); let slot = slot_override.unwrap_or(state.slot() + 1); let ((block, blobs), post_state) = self.harness.make_block(state, slot).await; let block_root = block.canonical_root(); @@ -2048,7 +2048,7 @@ async fn weights_after_resetting_optimistic_status() { .fork_choice_read_lock() .get_block_weight(&head.head_block_root()) .unwrap(), - head.snapshot.beacon_state.validators()[0].effective_balance, + head.snapshot.beacon_state.validators().get(0).unwrap().effective_balance, "proposer boost should be removed from the head block and the vote of a single validator applied" ); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index a78463ef5d7..1c80525223a 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -105,8 +105,8 @@ async fn test_sync_committee_rewards() { .get_validator_index(&validator.pubkey) .unwrap() .unwrap(); - let pre_state_balance = parent_state.balances()[validator_index]; - let post_state_balance = state.balances()[validator_index]; + let pre_state_balance = *parent_state.balances().get(validator_index).unwrap(); + let post_state_balance = *state.balances().get(validator_index).unwrap(); let sync_committee_reward = rewards.get(&(validator_index as u64)).unwrap_or(&0); if validator_index == proposer_index { @@ -141,7 +141,7 @@ async fn test_verify_attestation_rewards_base() { ) .await; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); // extend slots to beginning of epoch N + 2 harness.extend_slots(E::slots_per_epoch() as usize).await; @@ -163,7 +163,7 @@ async fn test_verify_attestation_rewards_base() { let expected_balances = 
apply_attestation_rewards(&initial_balances, total_rewards); // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } @@ -185,7 +185,7 @@ async fn test_verify_attestation_rewards_base_inactivity_leak() { AttestationStrategy::SomeValidators(half_validators.clone()), ) .await; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); // extend slots to beginning of epoch N + 2 harness.advance_slot(); @@ -215,7 +215,7 @@ async fn test_verify_attestation_rewards_base_inactivity_leak() { let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } @@ -241,7 +241,7 @@ async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoc // advance to create first justification epoch and get initial balances harness.extend_slots(E::slots_per_epoch() as usize).await; target_epoch += 1; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); //assert previous_justified_checkpoint matches 0 as we were in inactivity leak from beginning assert_eq!( @@ -284,7 +284,7 @@ async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoc let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } @@ -298,7 +298,7 @@ async fn test_verify_attestation_rewards_altair() { harness .extend_slots((E::slots_per_epoch() * (target_epoch + 1)) as usize) .await; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map let mut proposal_rewards_map: HashMap = HashMap::new(); @@ -364,7 +364,7 @@ async fn test_verify_attestation_rewards_altair() { apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } @@ -386,7 +386,7 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { half_validators.clone(), ) .await; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map let mut proposal_rewards_map: HashMap = HashMap::new(); @@ -458,7 +458,7 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); // verify expected balances against actual balances - let balances: Vec = 
harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } @@ -492,7 +492,7 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep // advance for first justification epoch and get balances harness.extend_slots(E::slots_per_epoch() as usize).await; target_epoch += 1; - let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + let initial_balances: Vec = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map let mut proposal_rewards_map: HashMap = HashMap::new(); @@ -568,7 +568,7 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().clone().into(); + let balances: Vec = harness.get_current_state().balances().to_vec(); assert_eq!(expected_balances, balances); } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 51d62cf8a19..ba8a6bf7016 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -719,52 +719,6 @@ async fn forwards_iter_block_and_state_roots_until() { test_range(Slot::new(0), head_state.slot()); } -#[tokio::test] -async fn block_replay_with_inaccurate_state_roots() { - let num_blocks_produced = E::slots_per_epoch() * 3 + 31; - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - let chain = &harness.chain; - - harness - .extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - // Slot must not be 0 mod 32 or else no blocks will be replayed. - let (mut head_state, head_state_root) = harness.get_current_state_and_root(); - let head_block_root = harness.head_block_root(); - assert_ne!(head_state.slot() % 32, 0); - - let (_, mut fast_head_state) = store - .get_inconsistent_state_for_attestation_verification_only( - &head_block_root, - head_state.slot(), - head_state_root, - ) - .unwrap() - .unwrap(); - assert_eq!(head_state.validators(), fast_head_state.validators()); - - head_state.build_all_committee_caches(&chain.spec).unwrap(); - fast_head_state - .build_all_committee_caches(&chain.spec) - .unwrap(); - - assert_eq!( - head_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .unwrap(), - fast_head_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .unwrap() - ); -} - #[tokio::test] async fn block_replayer_hooks() { let db_path = tempdir().unwrap(); @@ -795,7 +749,7 @@ async fn block_replayer_hooks() { let mut post_block_slots = vec![]; let mut replay_state = BlockReplayer::::new(state, &chain.spec) - .pre_slot_hook(Box::new(|state| { + .pre_slot_hook(Box::new(|_, state| { pre_slots.push(state.slot()); Ok(()) })) @@ -834,6 +788,8 @@ async fn block_replayer_hooks() { assert_eq!(post_block_slots, block_slots); // States match. 
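The assertions that follow flush pending milhouse mutations before comparing states, since tree-backed collections stage copy-on-write updates until they are applied. A toy illustration, assuming milhouse's `List::new`/`push`/`apply_updates` API (only `List::new` appears verbatim elsewhere in this diff; treat the rest as an assumption):

use milhouse::List;
use typenum::U8;

fn main() -> Result<(), milhouse::Error> {
    // Two logically-identical lists built through different paths.
    let mut a = List::<u64, U8>::new(vec![1, 2, 3])?;
    let b = List::<u64, U8>::new(vec![1, 2, 3, 4])?;
    a.push(4)?; // staged copy-on-write update

    // Merge staged updates into the backing tree, as the test does for whole
    // states via `apply_pending_mutations`, then compare.
    a.apply_updates()?;
    assert_eq!(a, b);
    Ok(())
}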
+    end_state.apply_pending_mutations().unwrap();
+    replay_state.apply_pending_mutations().unwrap();
     end_state.drop_all_caches().unwrap();
     replay_state.drop_all_caches().unwrap();
     assert_eq!(end_state, replay_state);
 }
@@ -1219,9 +1175,17 @@ fn check_shuffling_compatible(
         |committee_cache, _| {
             let state_cache = head_state.committee_cache(RelativeEpoch::Current).unwrap();
             if current_epoch_shuffling_is_compatible {
-                assert_eq!(committee_cache, state_cache, "block at slot {slot}");
+                assert_eq!(
+                    committee_cache,
+                    state_cache.as_ref(),
+                    "block at slot {slot}"
+                );
             } else {
-                assert_ne!(committee_cache, state_cache, "block at slot {slot}");
+                assert_ne!(
+                    committee_cache,
+                    state_cache.as_ref(),
+                    "block at slot {slot}"
+                );
             }
             Ok(())
         },
@@ -1251,9 +1215,9 @@ fn check_shuffling_compatible(
         |committee_cache, _| {
             let state_cache = head_state.committee_cache(RelativeEpoch::Previous).unwrap();
             if previous_epoch_shuffling_is_compatible {
-                assert_eq!(committee_cache, state_cache);
+                assert_eq!(committee_cache, state_cache.as_ref());
             } else {
-                assert_ne!(committee_cache, state_cache);
+                assert_ne!(committee_cache, state_cache.as_ref());
             }
             Ok(())
         },
@@ -3605,16 +3569,16 @@ fn check_split_slot(harness: &TestHarness, store: Arc, L
 /// Check that all the states in a chain dump have the correct tree hash.
 fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
-    let chain_dump = harness.chain.chain_dump().unwrap();
+    let mut chain_dump = harness.chain.chain_dump().unwrap();
     let split_slot = harness.chain.store.get_split_slot();
     assert_eq!(chain_dump.len() as u64, expected_len);
-    for checkpoint in &chain_dump {
+    for checkpoint in &mut chain_dump {
         // Check that the tree hash of the stored state is as expected
         assert_eq!(
             checkpoint.beacon_state_root(),
-            checkpoint.beacon_state.tree_hash_root(),
+            checkpoint.beacon_state.update_tree_hash_cache().unwrap(),
             "tree hash of stored state is incorrect"
         );
diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs
index b4753e92f1f..4c78b8efd8f 100644
--- a/beacon_node/genesis/src/interop.rs
+++ b/beacon_node/genesis/src/interop.rs
@@ -178,13 +178,14 @@ mod test {
         }
 
         for v in state.validators() {
-            let creds = v.withdrawal_credentials.as_bytes();
+            let creds = v.withdrawal_credentials;
             assert_eq!(
-                creds[0], spec.bls_withdrawal_prefix_byte,
+                creds.as_bytes()[0],
+                spec.bls_withdrawal_prefix_byte,
                 "first byte of withdrawal creds should be bls prefix"
             );
             assert_eq!(
-                &creds[1..],
+                &creds.as_bytes()[1..],
                 &hash(&v.pubkey.as_ssz_bytes())[1..],
                 "rest of withdrawal creds should be pubkey hash"
             )
@@ -240,7 +241,8 @@ mod test {
         }
 
         for (index, v) in state.validators().iter().enumerate() {
-            let creds = v.withdrawal_credentials.as_bytes();
+            let withdrawal_credentials = v.withdrawal_credentials;
+            let creds = withdrawal_credentials.as_bytes();
             if index % 2 == 0 {
                 assert_eq!(
                     creds[0], spec.bls_withdrawal_prefix_byte,
diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs
index f3242a2b374..6c7dc3348c1 100644
--- a/beacon_node/http_api/src/attester_duties.rs
+++ b/beacon_node/http_api/src/attester_duties.rs
@@ -5,9 +5,7 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
 use eth2::types::{self as api_types};
 use slot_clock::SlotClock;
 use state_processing::state_advance::partial_state_advance;
-use types::{
-    AttestationDuty, BeaconState, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, RelativeEpoch,
-};
+use types::{AttestationDuty, BeaconState, ChainSpec, Epoch, 
EthSpec, Hash256, RelativeEpoch}; /// The struct that is returned to the requesting HTTP client. type ApiDuties = api_types::DutiesResponse>; @@ -90,8 +88,7 @@ fn compute_historic_attester_duties( if head.beacon_state.current_epoch() <= request_epoch { Some(( head.beacon_state_root(), - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), + head.beacon_state.clone(), execution_status.is_optimistic_or_invalid(), )) } else { diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index d78f1f7c66e..f105fdf0a7d 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -279,7 +279,7 @@ pub fn get_block_packing_efficiency( )); let pre_slot_hook = - |state: &mut BeaconState| -> Result<(), PackingEfficiencyError> { + |_, state: &mut BeaconState| -> Result<(), PackingEfficiencyError> { // Add attestations to `available_attestations`. handler.lock().add_attestations(state.slot())?; Ok(()) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index cc117c3fb92..46d3ad7569b 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -61,7 +61,6 @@ use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; pub use state_id::StateId; -use std::borrow::Cow; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; @@ -864,10 +863,10 @@ pub fn serve( None }; - let committee_cache = if let Some(ref shuffling) = + let committee_cache = if let Some(shuffling) = maybe_cached_shuffling { - Cow::Borrowed(&**shuffling) + shuffling } else { let possibly_built_cache = match RelativeEpoch::from_epoch(current_epoch, epoch) { @@ -876,16 +875,13 @@ pub fn serve( relative_epoch, ) => { - state - .committee_cache(relative_epoch) - .map(Cow::Borrowed) + state.committee_cache(relative_epoch).cloned() } _ => CommitteeCache::initialized( state, epoch, &chain.spec, - ) - .map(Cow::Owned), + ), } .map_err(|e| { match e { @@ -933,7 +929,7 @@ pub fn serve( { cache_write.insert_committee_cache( shuffling_id, - &*possibly_built_cache, + &possibly_built_cache, ); } } diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index c31dd9b1faa..ab8952976c8 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -10,7 +10,7 @@ use safe_arith::SafeArith; use slog::{debug, Logger}; use slot_clock::SlotClock; use std::cmp::Ordering; -use types::{CloneConfig, Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, EthSpec, Hash256, Slot}; /// The struct that is returned to the requesting HTTP client. 
type ApiDuties = api_types::DutiesResponse>; @@ -192,8 +192,7 @@ fn compute_historic_proposer_duties( if head.beacon_state.current_epoch() <= epoch { Some(( head.beacon_state_root(), - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), + head.beacon_state.clone(), execution_status.is_optimistic_or_invalid(), )) } else { diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 1a76333e2d4..fdc99fa954e 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -178,10 +178,7 @@ impl StateId { .head_and_execution_status() .map_err(warp_utils::reject::beacon_chain_error)?; return Ok(( - cached_head - .snapshot - .beacon_state - .clone_with_only_committee_caches(), + cached_head.snapshot.beacon_state.clone(), execution_status.is_optimistic_or_invalid(), false, )); diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 74b26475639..ad32ff1d579 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -128,17 +128,18 @@ async fn attestations_across_fork_with_skip_slots() { let all_validators = harness.get_all_validators(); let fork_slot = fork_epoch.start_slot(E::slots_per_epoch()); - let fork_state = harness + let mut fork_state = harness .chain .state_at_slot(fork_slot, StateSkipConfig::WithStateRoots) .unwrap(); + let fork_state_root = fork_state.update_tree_hash_cache().unwrap(); harness.set_current_slot(fork_slot); let attestations = harness.make_attestations( &all_validators, &fork_state, - fork_state.canonical_root(), + fork_state_root, (*fork_state.get_block_root(fork_slot - 1).unwrap()).into(), fork_slot, ); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index e4580e4ffdb..d44b9a688ce 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -806,7 +806,7 @@ impl ApiTester { let state_opt = state_id.state(&self.chain).ok(); let validators: Vec = match state_opt.as_ref() { Some((state, _execution_optimistic, _finalized)) => { - state.validators().clone().into() + state.validators().clone().to_vec() } None => vec![], }; @@ -822,7 +822,7 @@ impl ApiTester { ValidatorId::PublicKey( validators .get(i as usize) - .map_or(PublicKeyBytes::empty(), |val| val.pubkey.clone()), + .map_or(PublicKeyBytes::empty(), |val| val.pubkey), ) }) .collect::>(); @@ -865,7 +865,7 @@ impl ApiTester { if i < state.balances().len() as u64 { validators.push(ValidatorBalanceData { index: i as u64, - balance: state.balances()[i as usize], + balance: *state.balances().get(i as usize).unwrap(), }); } } @@ -892,7 +892,7 @@ impl ApiTester { .ok() .map(|(state, _execution_optimistic, _finalized)| state); let validators: Vec = match state_opt.as_ref() { - Some(state) => state.validators().clone().into(), + Some(state) => state.validators().to_vec(), None => vec![], }; let validator_index_ids = validator_indices @@ -907,7 +907,7 @@ impl ApiTester { ValidatorId::PublicKey( validators .get(i as usize) - .map_or(PublicKeyBytes::empty(), |val| val.pubkey.clone()), + .map_or(PublicKeyBytes::empty(), |val| val.pubkey), ) }) .collect::>(); @@ -955,7 +955,7 @@ impl ApiTester { if i >= state.validators().len() as u64 { continue; } - let validator = state.validators()[i as usize].clone(); + let validator = state.validators().get(i as usize).unwrap().clone(); let status = ValidatorStatus::from_validator( &validator, epoch, @@ -967,7 +967,7 @@ impl ApiTester { { 
validators.push(ValidatorData { index: i as u64, - balance: state.balances()[i as usize], + balance: *state.balances().get(i as usize).unwrap(), status, validator, }); @@ -995,13 +995,13 @@ impl ApiTester { .ok() .map(|(state, _execution_optimistic, _finalized)| state); let validators = match state_opt.as_ref() { - Some(state) => state.validators().clone().into(), + Some(state) => state.validators().to_vec(), None => vec![], }; for (i, validator) in validators.into_iter().enumerate() { let validator_ids = &[ - ValidatorId::PublicKey(validator.pubkey.clone()), + ValidatorId::PublicKey(validator.pubkey), ValidatorId::Index(i as u64), ]; @@ -1025,7 +1025,7 @@ impl ApiTester { ValidatorData { index: i as u64, - balance: state.balances()[i], + balance: *state.balances().get(i).unwrap(), status: ValidatorStatus::from_validator( &validator, epoch, @@ -2360,7 +2360,7 @@ impl ApiTester { .unwrap() { let expected = AttesterData { - pubkey: state.validators()[i as usize].pubkey.clone().into(), + pubkey: state.validators().get(i as usize).unwrap().pubkey, validator_index: i, committees_at_slot: duty.committees_at_slot, committee_index: duty.index, @@ -2465,7 +2465,7 @@ impl ApiTester { let index = state .get_beacon_proposer_index(slot, &self.chain.spec) .unwrap(); - let pubkey = state.validators()[index].pubkey.clone().into(); + let pubkey = state.validators().get(index).unwrap().pubkey; ProposerData { pubkey, diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 7cd40a4bb60..6e744ccf62a 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1272,7 +1272,7 @@ mod release_tests { // Each validator will have a multiple of 1_000_000_000 wei. // Safe from overflow unless there are about 18B validators (2^64 / 1_000_000_000). for i in 0..state.validators().len() { - state.validators_mut()[i].effective_balance = 1_000_000_000 * i as u64; + state.validators_mut().get_mut(i).unwrap().effective_balance = 1_000_000_000 * i as u64; } let num_validators = num_committees @@ -1530,9 +1530,9 @@ mod release_tests { let spec = &harness.spec; let mut state = harness.get_current_state(); let op_pool = OperationPool::::new(); - state.validators_mut()[1].effective_balance = 17_000_000_000; - state.validators_mut()[2].effective_balance = 17_000_000_000; - state.validators_mut()[3].effective_balance = 17_000_000_000; + state.validators_mut().get_mut(1).unwrap().effective_balance = 17_000_000_000; + state.validators_mut().get_mut(2).unwrap().effective_balance = 17_000_000_000; + state.validators_mut().get_mut(3).unwrap().effective_balance = 17_000_000_000; let slashing_1 = harness.make_attester_slashing(vec![1, 2, 3]); let slashing_2 = harness.make_attester_slashing(vec![4, 5, 6]); diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index fd2cf473cb3..d31b576989c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -179,9 +179,6 @@ pub fn get_config( if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { client_config.chain.shuffling_cache_size = cache_size; } - if let Some(cache_size) = clap_utils::parse_optional(cli_args, "state-cache-size")? { - client_config.chain.snapshot_cache_size = cache_size; - } /* * Prometheus metrics HTTP server @@ -408,6 +405,10 @@ pub fn get_config( .map_err(|_| "block-cache-size is not a valid integer".to_string())?; } + if let Some(cache_size) = clap_utils::parse_optional(cli_args, "state-cache-size")? 
{ + client_config.store.state_cache_size = cache_size; + } + if let Some(historic_state_cache_size) = cli_args.value_of("historic-state-cache-size") { client_config.store.historic_state_cache_size = historic_state_cache_size .parse() diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 7bf1ef76bef..b7822670078 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -25,3 +25,7 @@ lru = { workspace = true } sloggers = { workspace = true } directory = { workspace = true } strum = { workspace = true } +safe_arith = { workspace = true } +bls = { workspace = true } +smallvec = { workspace = true } +logging = { workspace = true } diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index a0c50e5a2b5..4450989d590 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -1,6 +1,6 @@ //! Space-efficient storage for `BeaconState` vector fields. //! -//! This module provides logic for splitting the `FixedVector` fields of a `BeaconState` into +//! This module provides logic for splitting the `Vector` fields of a `BeaconState` into //! chunks, and storing those chunks in contiguous ranges in the on-disk database. The motiviation //! for doing this is avoiding massive duplication in every on-disk state. For example, rather than //! storing the whole `historical_roots` vector, which is updated once every couple of thousand @@ -60,12 +60,13 @@ fn genesis_value_key() -> [u8; 8] { /// type-level. We require their value-level witnesses to be `Copy` so that we can avoid the /// turbofish when calling functions like `store_updated_vector`. pub trait Field: Copy { - /// The type of value stored in this field: the `T` from `FixedVector`. + /// The type of value stored in this field: the `T` from `Vector`. /// /// The `Default` impl will be used to fill extra vector entries. - type Value: Decode + Encode + Default + Clone + PartialEq + std::fmt::Debug; + type Value: Default + std::fmt::Debug + milhouse::Value; + // Decode + Encode + Default + Clone + PartialEq + std::fmt::Debug - /// The length of this field: the `N` from `FixedVector`. + /// The length of this field: the `N` from `Vector`. type Length: Unsigned; /// The database column where the integer-indexed chunks for this field should be stored. @@ -273,10 +274,10 @@ pub trait Field: Copy { } } -/// Marker trait for fixed-length fields (`FixedVector`). +/// Marker trait for fixed-length fields (`Vector`). pub trait FixedLengthField: Field {} -/// Marker trait for variable-length fields (`VariableList`). +/// Marker trait for variable-length fields (`List`). pub trait VariableLengthField: Field {} /// Macro to implement the `Field` trait on a new unit struct type. 
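Note: the `chunked_vector` changes above keep the same storage scheme while switching to milhouse `Vector`/`List` types: each field is split into fixed-size chunks keyed by chunk index, so consecutive states share the chunks they have in common on disk. A sketch of the assumed index arithmetic (the chunk size here is illustrative):

    const CHUNK_SIZE: usize = 8;

    // The database key for an element is its chunk index, so overlapping
    // ranges of consecutive states map onto the same keys and are stored once.
    fn chunk_index(vector_index: usize) -> usize {
        vector_index / CHUNK_SIZE
    }

    fn chunk_range(start_vindex: usize, end_vindex: usize) -> std::ops::Range<usize> {
        chunk_index(start_vindex)..chunk_index(end_vindex.saturating_sub(1)) + 1
    }

    fn main() {
        // Elements 5..19 touch chunks 0, 1 and 2.
        assert_eq!(chunk_range(5, 19), 0..3);
    }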
@@ -331,7 +332,7 @@ field!( activation_slot: Some(Slot::new(0)), deactivation_slot: None }, - |state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index) + |state: &BeaconState<_>, index, _| safe_modulo_vector_index(state.block_roots(), index) ); field!( @@ -345,7 +346,7 @@ field!( activation_slot: Some(Slot::new(0)), deactivation_slot: None, }, - |state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index) + |state: &BeaconState<_>, index, _| safe_modulo_vector_index(state.state_roots(), index) ); field!( @@ -361,7 +362,7 @@ field!( .capella_fork_epoch .map(|fork_epoch| fork_epoch.start_slot(E::slots_per_epoch())), }, - |state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index) + |state: &BeaconState<_>, index, _| safe_modulo_list_index(state.historical_roots(), index) ); field!( @@ -371,7 +372,7 @@ field!( E::EpochsPerHistoricalVector, DBColumn::BeaconRandaoMixes, |_| OncePerEpoch { lag: 1 }, - |state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index) + |state: &BeaconState<_>, index, _| safe_modulo_vector_index(state.randao_mixes(), index) ); field!( @@ -387,7 +388,7 @@ field!( .map(|fork_epoch| fork_epoch.start_slot(E::slots_per_epoch())), deactivation_slot: None, }, - |state: &BeaconState<_>, index, _| safe_modulo_index( + |state: &BeaconState<_>, index, _| safe_modulo_list_index( state .historical_summaries() .map_err(|_| ChunkError::InvalidFork)?, @@ -565,7 +566,7 @@ pub fn load_vector_from_db, E: EthSpec, S: KeyValueStore< store: &S, slot: Slot, spec: &ChainSpec, -) -> Result, Error> { +) -> Result, Error> { // Do a range query let chunk_size = F::chunk_size(); let (start_vindex, end_vindex) = F::start_and_end_vindex(slot, spec); @@ -589,7 +590,7 @@ pub fn load_vector_from_db, E: EthSpec, S: KeyValueStore< default, )?; - Ok(result.into()) + Ok(Vector::new(result).map_err(ChunkError::Milhouse)?) } /// The historical roots are stored in vector chunks, despite not actually being a vector. @@ -597,7 +598,7 @@ pub fn load_variable_list_from_db, E: EthSpec, S: KeyV store: &S, slot: Slot, spec: &ChainSpec, -) -> Result, Error> { +) -> Result, Error> { let chunk_size = F::chunk_size(); let (start_vindex, end_vindex) = F::start_and_end_vindex(slot, spec); let start_cindex = start_vindex / chunk_size; @@ -617,15 +618,35 @@ pub fn load_variable_list_from_db, E: EthSpec, S: KeyV } } - Ok(result.into()) + Ok(List::new(result).map_err(ChunkError::Milhouse)?) } -/// Index into a field of the state, avoiding out of bounds and division by 0. -fn safe_modulo_index(values: &[T], index: u64) -> Result { +/// Index into a `List` field of the state, avoiding out of bounds and division by 0. 
+fn safe_modulo_list_index( + values: &List, + index: u64, +) -> Result { + if values.is_empty() { + Err(ChunkError::ZeroLengthList) + } else { + values + .get(index as usize % values.len()) + .copied() + .ok_or(ChunkError::IndexOutOfBounds { index }) + } +} + +fn safe_modulo_vector_index( + values: &Vector, + index: u64, +) -> Result { if values.is_empty() { Err(ChunkError::ZeroLengthVector) } else { - Ok(values[index as usize % values.len()]) + values + .get(index as usize % values.len()) + .copied() + .ok_or(ChunkError::IndexOutOfBounds { index }) } } @@ -712,6 +733,10 @@ where #[derive(Debug, PartialEq)] pub enum ChunkError { ZeroLengthVector, + ZeroLengthList, + IndexOutOfBounds { + index: u64, + }, InvalidSize { chunk_index: usize, expected: usize, @@ -744,6 +769,13 @@ pub enum ChunkError { length: usize, }, InvalidFork, + Milhouse(milhouse::Error), +} + +impl From for ChunkError { + fn from(e: milhouse::Error) -> ChunkError { + Self::Milhouse(e) + } } #[cfg(test)] diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 681d424e282..d43999d8220 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -9,6 +9,7 @@ use types::{EthSpec, MinimalEthSpec}; pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(5); +pub const DEFAULT_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(1); pub const DEFAULT_EPOCHS_PER_BLOB_PRUNE: u64 = 1; pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0; @@ -22,6 +23,8 @@ pub struct StoreConfig { pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. pub block_cache_size: NonZeroUsize, + /// Maximum number of states to store in the in-memory state cache. + pub state_cache_size: NonZeroUsize, /// Maximum number of states from freezer database to store in the in-memory state cache. pub historic_state_cache_size: NonZeroUsize, /// Whether to compact the database on initialization. 
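Note: the `safe_modulo_list_index`/`safe_modulo_vector_index` helpers above close both holes in the old slice indexing: a modulo-by-zero on an empty field, and a panicking `[]` access. The same shape, written generically over a slice as a hedged stand-alone example:

    #[derive(Debug, PartialEq)]
    enum IndexError {
        ZeroLength,
        OutOfBounds { index: u64 },
    }

    // Reject empty containers (modulo by zero) and index with `get` instead
    // of `[]`, so a bug surfaces as an error rather than a panic.
    fn safe_modulo_index<T: Copy>(values: &[T], index: u64) -> Result<T, IndexError> {
        if values.is_empty() {
            Err(IndexError::ZeroLength)
        } else {
            values
                .get(index as usize % values.len())
                .copied()
                .ok_or(IndexError::OutOfBounds { index })
        }
    }

    fn main() {
        assert_eq!(safe_modulo_index(&[10u64, 20, 30], 7), Ok(20));
        assert_eq!(safe_modulo_index::<u64>(&[], 7), Err(IndexError::ZeroLength));
    }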
@@ -57,6 +60,7 @@ impl Default for StoreConfig { slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64, slots_per_restore_point_set_explicitly: false, block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, + state_cache_size: DEFAULT_STATE_CACHE_SIZE, historic_state_cache_size: DEFAULT_HISTORIC_STATE_CACHE_SIZE, compact_on_init: false, compact_on_prune: true, diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 96e02b80ff8..91e6a920ba3 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -3,7 +3,7 @@ use crate::config::StoreConfigError; use crate::hot_cold_store::HotColdDBError; use ssz::DecodeError; use state_processing::BlockReplayError; -use types::{BeaconStateError, Hash256, InconsistentFork, Slot}; +use types::{BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot}; pub type Result = std::result::Result; @@ -49,6 +49,14 @@ pub enum Error { InvalidBytes, UnableToDowngrade, InconsistentFork(InconsistentFork), + CacheBuildError(EpochCacheError), + RandaoMixOutOfBounds, + FinalizedStateDecreasingSlot, + FinalizedStateUnaligned, + StateForCacheHasPendingUpdates { + state_root: Hash256, + slot: Slot, + }, } pub trait HandleUnavailable { @@ -113,6 +121,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: EpochCacheError) -> Error { + Error::CacheBuildError(e) + } +} + #[derive(Debug)] pub struct DBError { pub message: String, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 70e02164e08..484a1139bf9 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -17,6 +17,7 @@ use crate::metadata::{ PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::metrics; +use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ get_key_for_col, ChunkWriter, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem, StoreOp, @@ -30,7 +31,8 @@ use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ - BlockProcessingError, BlockReplayer, SlotProcessingError, StateProcessingStrategy, + block_replayer::PreSlotHook, AllCaches, BlockProcessingError, BlockReplayer, + SlotProcessingError, }; use std::cmp::min; use std::marker::PhantomData; @@ -66,12 +68,16 @@ pub struct HotColdDB, Cold: ItemStore> { pub hot_db: Hot, /// LRU cache of deserialized blocks and blobs. Updated whenever a block or blob is loaded. block_cache: Mutex>, + /// Cache of beacon states. + /// + /// LOCK ORDERING: this lock must always be locked *after* the `split` if both are required. + state_cache: Mutex>, /// LRU cache of replayed states. - state_cache: Mutex>>, + historic_state_cache: Mutex>>, /// Chain spec. pub(crate) spec: ChainSpec, /// Logger. - pub(crate) log: Logger, + pub log: Logger, /// Mere vessel for E. 
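Note: the `LOCK ORDERING` comment above is load-bearing: two mutexes acquired in inconsistent orders by different threads can deadlock. A toy version of the discipline, with plain `std` mutexes standing in for the store's locks:

    use std::sync::Mutex;

    struct Store {
        split: Mutex<u64>,            // stand-in for the split point
        state_cache: Mutex<Vec<u64>>, // stand-in for the state cache
    }

    impl Store {
        fn correct_order(&self) {
            // `state_cache` is always locked *after* `split`; if every
            // thread does this, the two locks cannot deadlock each other.
            let split = self.split.lock().unwrap();
            let mut cache = self.state_cache.lock().unwrap();
            cache.push(*split);
        }
    }

    fn main() {
        let store = Store { split: Mutex::new(0), state_cache: Mutex::new(Vec::new()) };
        store.correct_order();
        assert_eq!(store.state_cache.lock().unwrap().len(), 1);
    }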
_phantom: PhantomData, } @@ -178,7 +184,8 @@ impl HotColdDB, MemoryStore> { blobs_db: MemoryStore::open(), hot_db: MemoryStore::open(), block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), - state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), + state_cache: Mutex::new(StateCache::new(config.state_cache_size)), + historic_state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, log, @@ -192,8 +199,6 @@ impl HotColdDB, MemoryStore> { impl HotColdDB, LevelDB> { /// Open a new or existing database, with the given paths to the hot and cold DBs. /// - /// The `slots_per_restore_point` parameter must be a divisor of `SLOTS_PER_HISTORICAL_ROOT`. - /// /// The `migrate_schema` function is passed in so that the parent `BeaconChain` can provide /// context and access `BeaconChain`-level code without creating a circular dependency. pub fn open( @@ -215,7 +220,8 @@ impl HotColdDB, LevelDB> { blobs_db: LevelDB::open(blobs_db_path)?, hot_db: LevelDB::open(hot_path)?, block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), - state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), + state_cache: Mutex::new(StateCache::new(config.state_cache_size)), + historic_state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, log, @@ -352,6 +358,21 @@ impl HotColdDB, LevelDB> { } impl, Cold: ItemStore> HotColdDB { + pub fn update_finalized_state( + &self, + state_root: Hash256, + block_root: Hash256, + state: BeaconState, + ) -> Result<(), Error> { + self.state_cache + .lock() + .update_finalized_state(state_root, block_root, state) + } + + pub fn state_cache_len(&self) -> usize { + self.state_cache.lock().len() + } + /// Store a block and update the LRU cache. pub fn put_block( &self, @@ -615,11 +636,26 @@ impl, Cold: ItemStore> HotColdDB /// Store a state in the store. pub fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { + self.put_state_possibly_temporary(state_root, state, false) + } + + /// Store a state in the store. + /// + /// The `temporary` flag indicates whether this state should be considered canonical. + pub fn put_state_possibly_temporary( + &self, + state_root: &Hash256, + state: &BeaconState, + temporary: bool, + ) -> Result<(), Error> { let mut ops: Vec = Vec::new(); if state.slot() < self.get_split_slot() { self.store_cold_state(state_root, state, &mut ops)?; self.cold_db.do_atomically(ops) } else { + if temporary { + ops.push(TemporaryFlag.as_kv_store_op(*state_root)); + } self.store_hot_state(state_root, state, &mut ops)?; self.hot_db.do_atomically(ops) } @@ -648,45 +684,16 @@ impl, Cold: ItemStore> HotColdDB // chain. This way we avoid returning a state that doesn't match `state_root`. self.load_cold_state(state_root) } else { - self.load_hot_state(state_root, StateProcessingStrategy::Accurate) + self.get_hot_state(state_root) } } else { - match self.load_hot_state(state_root, StateProcessingStrategy::Accurate)? { + match self.get_hot_state(state_root)? { Some(state) => Ok(Some(state)), None => self.load_cold_state(state_root), } } } - /// Fetch a state from the store, but don't compute all of the values when replaying blocks - /// upon that state (e.g., state roots). Additionally, only states from the hot store are - /// returned. - /// - /// See `Self::get_advanced_hot_state` for information about `max_slot`. 
- /// - /// ## Warning - /// - /// The returned state **is not a valid beacon state**, it can only be used for obtaining - /// shuffling to process attestations. At least the following components of the state will be - /// broken/invalid: - /// - /// - `state.state_roots` - /// - `state.block_roots` - pub fn get_inconsistent_state_for_attestation_verification_only( - &self, - block_root: &Hash256, - max_slot: Slot, - state_root: Hash256, - ) -> Result)>, Error> { - metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT); - self.get_advanced_hot_state_with_strategy( - *block_root, - max_slot, - state_root, - StateProcessingStrategy::Inconsistent, - ) - } - /// Get a state with `latest_block_root == block_root` advanced through to at most `max_slot`. /// /// The `state_root` argument is used to look up the block's un-advanced state in case an @@ -697,35 +704,29 @@ impl, Cold: ItemStore> HotColdDB /// - `result_state_root == state.canonical_root()` /// - `state.slot() <= max_slot` /// - `state.get_latest_block_root(result_state_root) == block_root` - /// - /// Presently this is only used to avoid loading the un-advanced split state, but in future will - /// be expanded to return states from an in-memory cache. pub fn get_advanced_hot_state( &self, block_root: Hash256, max_slot: Slot, state_root: Hash256, ) -> Result)>, Error> { - self.get_advanced_hot_state_with_strategy( - block_root, - max_slot, - state_root, - StateProcessingStrategy::Accurate, - ) - } + if let Some(cached) = self.get_advanced_hot_state_from_cache(block_root, max_slot) { + return Ok(Some(cached)); + } - /// Same as `get_advanced_hot_state` but taking a `StateProcessingStrategy`. - pub fn get_advanced_hot_state_with_strategy( - &self, - block_root: Hash256, - max_slot: Slot, - state_root: Hash256, - state_processing_strategy: StateProcessingStrategy, - ) -> Result)>, Error> { // Hold a read lock on the split point so it can't move while we're trying to load the // state. let split = self.split.read_recursive(); + if state_root != split.state_root { + warn!( + self.log, + "State cache missed"; + "state_root" => ?state_root, + "block_root" => ?block_root, + ); + } + // Sanity check max-slot against the split slot. if max_slot < split.slot { return Err(HotColdDBError::FinalizedStateNotInHotDatabase { @@ -741,11 +742,40 @@ impl, Cold: ItemStore> HotColdDB } else { state_root }; - let state = self - .load_hot_state(&state_root, state_processing_strategy)? - .map(|state| (state_root, state)); + let mut opt_state = self + .load_hot_state(&state_root)? + .map(|(state, _block_root)| (state_root, state)); + + if let Some((state_root, state)) = opt_state.as_mut() { + state.update_tree_hash_cache()?; + state.build_all_caches(&self.spec)?; + self.state_cache + .lock() + .put_state(*state_root, block_root, state)?; + debug!( + self.log, + "Cached state"; + "state_root" => ?state_root, + "slot" => state.slot(), + ); + } drop(split); - Ok(state) + Ok(opt_state) + } + + /// Same as `get_advanced_hot_state` but will return `None` if no compatible state is cached. + /// + /// If this function returns `Some(state)` then that `state` will always have + /// `latest_block_header` matching `block_root` but may not be advanced all the way through to + /// `max_slot`. 
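Note: `get_advanced_hot_state` above now follows a classic read-through pattern: consult the in-memory cache, fall back to the (slow) disk load, then populate the cache so the next caller hits. Roughly, with toy types in place of `BeaconState`:

    use std::collections::HashMap;
    use std::sync::Mutex;

    struct Db {
        cache: Mutex<HashMap<u64, String>>,
    }

    impl Db {
        // Stand-in for the expensive path: loading and replaying from disk.
        fn load_from_disk(&self, root: u64) -> Option<String> {
            Some(format!("state-{root}"))
        }

        fn get_state(&self, root: u64) -> Option<String> {
            if let Some(state) = self.cache.lock().unwrap().get(&root) {
                return Some(state.clone()); // cache hit
            }
            let state = self.load_from_disk(root)?;
            self.cache.lock().unwrap().insert(root, state.clone());
            Some(state)
        }
    }

    fn main() {
        let db = Db { cache: Mutex::new(HashMap::new()) };
        assert_eq!(db.get_state(7).as_deref(), Some("state-7")); // miss: loads and caches
        assert_eq!(db.get_state(7).as_deref(), Some("state-7")); // hit
    }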
+ pub fn get_advanced_hot_state_from_cache( + &self, + block_root: Hash256, + max_slot: Slot, + ) -> Option<(Hash256, BeaconState)> { + self.state_cache + .lock() + .get_by_block_root(block_root, max_slot) } /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. @@ -755,17 +785,10 @@ impl, Cold: ItemStore> HotColdDB /// (which are frozen, and won't be deleted), or valid descendents of the finalized checkpoint /// (which will be deleted by this function but shouldn't be). pub fn delete_state(&self, state_root: &Hash256, slot: Slot) -> Result<(), Error> { - // Delete the state summary. - self.hot_db - .key_delete(DBColumn::BeaconStateSummary.into(), state_root.as_bytes())?; - - // Delete the full state if it lies on an epoch boundary. - if slot % E::slots_per_epoch() == 0 { - self.hot_db - .key_delete(DBColumn::BeaconState.into(), state_root.as_bytes())?; - } - - Ok(()) + self.do_atomically_with_block_and_blobs_cache(vec![StoreOp::DeleteState( + *state_root, + Some(slot), + )]) } pub fn forwards_block_roots_iterator( @@ -833,17 +856,9 @@ impl, Cold: ItemStore> HotColdDB }) = self.load_hot_state_summary(state_root)? { // NOTE: minor inefficiency here because we load an unnecessary hot state summary - // - // `StateProcessingStrategy` should be irrelevant here since we never replay blocks for an epoch - // boundary state in the hot DB. - let state = self - .load_hot_state( - &epoch_boundary_state_root, - StateProcessingStrategy::Accurate, - )? - .ok_or(HotColdDBError::MissingEpochBoundaryState( - epoch_boundary_state_root, - ))?; + let (state, _) = self.load_hot_state(&epoch_boundary_state_root)?.ok_or( + HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root), + )?; Ok(Some(state)) } else { // Try the cold DB @@ -1029,11 +1044,14 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteBlock(block_root) => { guard.delete_block(&block_root); + self.state_cache.lock().delete_block_states(&block_root); } - StoreOp::DeleteBlobs(_) => (), + StoreOp::DeleteState(state_root, _) => { + self.state_cache.lock().delete_state(&state_root) + } - StoreOp::DeleteState(_, _) => (), + StoreOp::DeleteBlobs(_) => (), StoreOp::DeleteExecutionPayload(_) => (), @@ -1070,6 +1088,26 @@ impl, Cold: ItemStore> HotColdDB state: &BeaconState, ops: &mut Vec, ) -> Result<(), Error> { + // Put the state in the cache. + let block_root = state.get_latest_block_root(*state_root); + + // Avoid storing states in the database if they already exist in the state cache. + // The exception to this is the finalized state, which must exist in the cache before it + // is stored on disk. + if let PutStateOutcome::Duplicate = + self.state_cache + .lock() + .put_state(*state_root, block_root, state)? + { + debug!( + self.log, + "Skipping storage of cached state"; + "slot" => state.slot(), + "state_root" => ?state_root + ); + return Ok(()); + } + // On the epoch boundary, store the full state. if state.slot() % E::slots_per_epoch() == 0 { trace!( @@ -1091,14 +1129,51 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } + /// Get a post-finalization state from the database or store. + pub fn get_hot_state(&self, state_root: &Hash256) -> Result>, Error> { + if let Some(state) = self.state_cache.lock().get_by_state_root(*state_root) { + return Ok(Some(state)); + } + + if *state_root != self.get_split_info().state_root { + // Do not warn on start up when loading the split state. 
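Note: `store_hot_state` above uses the `PutStateOutcome` returned by the cache to skip disk writes for states that are already cached (and hence, with the finalized-state exception called out in the comment, already persisted). The shape of that dedupe, sketched with toy types:

    use std::collections::HashMap;

    enum PutOutcome {
        Duplicate,
        New,
    }

    struct Cache(HashMap<u64, String>);

    impl Cache {
        // Report whether the state was already present.
        fn put(&mut self, root: u64, state: &str) -> PutOutcome {
            if self.0.contains_key(&root) {
                PutOutcome::Duplicate
            } else {
                self.0.insert(root, state.to_string());
                PutOutcome::New
            }
        }
    }

    fn store_hot_state(cache: &mut Cache, root: u64, state: &str) -> bool {
        if let PutOutcome::Duplicate = cache.put(root, state) {
            return false; // already cached => assume already persisted; skip the write
        }
        true // ... perform the disk write here ...
    }

    fn main() {
        let mut cache = Cache(HashMap::new());
        assert!(store_hot_state(&mut cache, 1, "s"));  // first put: written
        assert!(!store_hot_state(&mut cache, 1, "s")); // duplicate: skipped
    }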
+ warn!( + self.log, + "State cache missed"; + "state_root" => ?state_root, + ); + } + + let state_from_disk = self.load_hot_state(state_root)?; + + if let Some((mut state, block_root)) = state_from_disk { + state.update_tree_hash_cache()?; + state.build_all_caches(&self.spec)?; + self.state_cache + .lock() + .put_state(*state_root, block_root, &state)?; + debug!( + self.log, + "Cached state"; + "state_root" => ?state_root, + "slot" => state.slot(), + ); + Ok(Some(state)) + } else { + Ok(None) + } + } + /// Load a post-finalization state from the hot database. /// /// Will replay blocks from the nearest epoch boundary. + /// + /// Return the `(state, latest_block_root)` where `latest_block_root` is the root of the last + /// block applied to `state`. pub fn load_hot_state( &self, state_root: &Hash256, - state_processing_strategy: StateProcessingStrategy, - ) -> Result>, Error> { + ) -> Result, Hash256)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); // If the state is marked as temporary, do not return it. It will become visible @@ -1113,16 +1188,47 @@ impl, Cold: ItemStore> HotColdDB epoch_boundary_state_root, }) = self.load_hot_state_summary(state_root)? { - let boundary_state = + let mut boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root, &self.spec)?.ok_or( HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root), )?; + // Immediately rebase the state from disk on the finalized state so that we can reuse + // parts of the tree for state root calculation in `replay_blocks`. + self.state_cache + .lock() + .rebase_on_finalized(&mut boundary_state, &self.spec)?; + // Optimization to avoid even *thinking* about replaying blocks if we're already // on an epoch boundary. - let state = if slot % E::slots_per_epoch() == 0 { + let mut state = if slot % E::slots_per_epoch() == 0 { boundary_state } else { + // Cache ALL intermediate states that are reached during block replay. We may want + // to restrict this in future to only cache epoch boundary states. At worst we will + // cache up to 32 states for each state loaded, which should not flush out the cache + // entirely. + let state_cache_hook = |state_root, state: &mut BeaconState| { + // Ensure all caches are built before attempting to cache. + state.update_tree_hash_cache()?; + state.build_all_caches(&self.spec)?; + + let latest_block_root = state.get_latest_block_root(state_root); + let state_slot = state.slot(); + if let PutStateOutcome::New = + self.state_cache + .lock() + .put_state(state_root, latest_block_root, state)? + { + debug!( + self.log, + "Cached ancestor state"; + "state_root" => ?state_root, + "slot" => state_slot, + ); + } + Ok(()) + }; let blocks = self.load_blocks_to_replay(boundary_state.slot(), slot, latest_block_root)?; self.replay_blocks( @@ -1130,11 +1236,12 @@ impl, Cold: ItemStore> HotColdDB blocks, slot, no_state_root_iter(), - state_processing_strategy, + Some(Box::new(state_cache_hook)), )? }; + state.apply_pending_mutations()?; - Ok(Some(state)) + Ok(Some((state, latest_block_root))) } else { Ok(None) } @@ -1233,7 +1340,9 @@ impl, Cold: ItemStore> HotColdDB partial_state.load_randao_mixes(&self.cold_db, &self.spec)?; partial_state.load_historical_summaries(&self.cold_db, &self.spec)?; - partial_state.try_into() + let mut state: BeaconState = partial_state.try_into()?; + state.apply_pending_mutations()?; + Ok(state) } /// Load a restore point state by its `restore_point_index`. 
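Note: the `state_cache_hook` above caches every intermediate state reached during block replay, bounded at one epoch's worth (up to 32 states) per load. A stripped-down version of the replay-with-hook control flow; the types are stand-ins, not the real API:

    type Hook<'a> = Box<dyn FnMut(u64, &Vec<u64>) + 'a>;

    fn replay(mut state: Vec<u64>, target_slot: u64, mut hook: Option<Hook>) -> Vec<u64> {
        let mut slot = state.len() as u64;
        while slot < target_slot {
            if let Some(h) = hook.as_mut() {
                h(slot, &state); // e.g. insert a clone into the cache, keyed by root
            }
            state.push(slot); // stand-in for per-slot/per-block processing
            slot += 1;
        }
        state
    }

    fn main() {
        let mut cached = 0usize;
        let hook: Hook = Box::new(|_slot, _state| cached += 1);
        let state = replay(Vec::new(), 32, Some(hook));
        assert_eq!(state.len(), 32);
        assert_eq!(cached, 32); // at most one epoch of states cached per load
    }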
@@ -1247,7 +1356,7 @@ impl, Cold: ItemStore> HotColdDB /// Load a frozen state that lies between restore points. fn load_cold_intermediate_state(&self, slot: Slot) -> Result, Error> { - if let Some(state) = self.state_cache.lock().get(&slot) { + if let Some(state) = self.historic_state_cache.lock().get(&slot) { return Ok(state.clone()); } @@ -1261,7 +1370,7 @@ impl, Cold: ItemStore> HotColdDB let mut low_state: Option> = None; // Try to get a more recent state from the cache to avoid massive blocks replay. - for (s, state) in self.state_cache.lock().iter() { + for (s, state) in self.historic_state_cache.lock().iter() { if s.as_u64() / self.config.slots_per_restore_point == low_restore_point_idx && *s < slot && low_slot < *s @@ -1299,16 +1408,11 @@ impl, Cold: ItemStore> HotColdDB &self.spec, )?; - let state = self.replay_blocks( - low_state, - blocks, - slot, - Some(state_root_iter), - StateProcessingStrategy::Accurate, - )?; + let mut state = self.replay_blocks(low_state, blocks, slot, Some(state_root_iter), None)?; + state.apply_pending_mutations()?; // If state is not error, put it in the cache. - self.state_cache.lock().put(slot, state.clone()); + self.historic_state_cache.lock().put(slot, state.clone()); Ok(state) } @@ -1390,16 +1494,15 @@ impl, Cold: ItemStore> HotColdDB /// /// Will skip slots as necessary. The returned state is not guaranteed /// to have any caches built, beyond those immediately required by block processing. - fn replay_blocks( + pub fn replay_blocks( &self, state: BeaconState, blocks: Vec>>, target_slot: Slot, state_root_iter: Option>>, - state_processing_strategy: StateProcessingStrategy, + pre_slot_hook: Option>, ) -> Result, Error> { let mut block_replayer = BlockReplayer::new(state, &self.spec) - .state_processing_strategy(state_processing_strategy) .no_signature_verification() .minimal_block_root_verification(); @@ -1408,17 +1511,20 @@ impl, Cold: ItemStore> HotColdDB block_replayer = block_replayer.state_root_iter(state_root_iter); } + if let Some(pre_slot_hook) = pre_slot_hook { + block_replayer = block_replayer.pre_slot_hook(pre_slot_hook); + } + block_replayer .apply_blocks(blocks, Some(target_slot)) .map(|block_replayer| { if have_state_root_iterator && block_replayer.state_root_miss() { warn!( self.log, - "State root iterator miss"; + "State root cache miss during block replay"; "slot" => target_slot, ); } - block_replayer.into_state() }) } @@ -2213,7 +2319,7 @@ impl, Cold: ItemStore> HotColdDB } /// This function fills in missing block roots between last restore point slot and split - /// slot, if any. + /// slot, if any. pub fn heal_freezer_block_roots_at_split(&self) -> Result<(), Error> { let split = self.get_split_info(); let last_restore_point_slot = (split.slot - 1) / self.config.slots_per_restore_point @@ -2528,15 +2634,22 @@ pub fn migrate_database, Cold: ItemStore>( }; store.hot_db.put_sync(&SPLIT_KEY, &split)?; - // Split point is now persisted in the hot database on disk. The in-memory split point - // hasn't been modified elsewhere since we keep a write lock on it. It's safe to update + // Split point is now persisted in the hot database on disk. The in-memory split point + // hasn't been modified elsewhere since we keep a write lock on it. It's safe to update // the in-memory split point now. *split_guard = split; } - // Delete the states from the hot database if we got this far. + // Delete the blocks and states from the hot database if we got this far. 
store.do_atomically_with_block_and_blobs_cache(hot_db_ops)?; + // Update the cache's view of the finalized state. + store.update_finalized_state( + finalized_state_root, + finalized_block_root, + finalized_state.clone(), + )?; + debug!( store.log, "Freezer migration complete"; diff --git a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs index d08bf564927..f752bf39795 100644 --- a/beacon_node/store/src/impls/beacon_state.rs +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -46,14 +46,14 @@ pub fn get_full_state, E: EthSpec>( #[derive(Encode)] pub struct StorageContainer { state: BeaconState, - committee_caches: Vec, + committee_caches: Vec>, } impl StorageContainer { /// Create a new instance for storing a `BeaconState`. pub fn new(state: &BeaconState) -> Self { Self { - state: state.clone_with(CloneConfig::none()), + state: state.clone(), committee_caches: state.committee_caches().to_vec(), } } diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index e459c1c3575..03090ca14c5 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -412,15 +412,16 @@ mod test { let mut hashes = (0..).map(Hash256::from_low_u64_be); let roots_a = state_a.block_roots_mut(); for i in 0..roots_a.len() { - roots_a[i] = hashes.next().unwrap() + *roots_a.get_mut(i).unwrap() = hashes.next().unwrap(); } let roots_b = state_b.block_roots_mut(); for i in 0..roots_b.len() { - roots_b[i] = hashes.next().unwrap() + *roots_b.get_mut(i).unwrap() = hashes.next().unwrap(); } let state_a_root = hashes.next().unwrap(); - state_b.state_roots_mut()[0] = state_a_root; + *state_b.state_roots_mut().get_mut(0).unwrap() = state_a_root; + state_a.apply_pending_mutations().unwrap(); store.put_state(&state_a_root, &state_a).unwrap(); let iter = BlockRootsIterator::new(&store, &state_b); @@ -472,6 +473,9 @@ mod test { let state_a_root = Hash256::from_low_u64_be(slots_per_historical_root as u64); let state_b_root = Hash256::from_low_u64_be(slots_per_historical_root as u64 * 2); + state_a.apply_pending_mutations().unwrap(); + state_b.apply_pending_mutations().unwrap(); + store.put_state(&state_a_root, &state_a).unwrap(); store.put_state(&state_b_root, &state_b).unwrap(); diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index c3136a910db..66032d89c52 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -26,6 +26,7 @@ pub mod metadata; pub mod metrics; mod partial_beacon_state; pub mod reconstruct; +pub mod state_cache; pub mod iter; diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 4e5a2b8e64b..25438fc7e0a 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -34,42 +34,42 @@ where pub latest_block_header: BeaconBlockHeader, #[ssz(skip_serializing, skip_deserializing)] - pub block_roots: Option>, + pub block_roots: Option>, #[ssz(skip_serializing, skip_deserializing)] - pub state_roots: Option>, + pub state_roots: Option>, #[ssz(skip_serializing, skip_deserializing)] - pub historical_roots: Option>, + pub historical_roots: Option>, // Ethereum 1.0 chain data pub eth1_data: Eth1Data, - pub eth1_data_votes: VariableList, + pub eth1_data_votes: List, pub eth1_deposit_index: u64, // Registry - pub validators: VariableList, - pub balances: VariableList, + pub validators: List, + pub balances: List, // Shuffling /// Randao value from the current slot, for patching into the per-epoch 
randao vector. pub latest_randao_value: Hash256, #[ssz(skip_serializing, skip_deserializing)] - pub randao_mixes: Option>, + pub randao_mixes: Option>, // Slashings - slashings: FixedVector, + slashings: Vector, // Attestations (genesis fork only) #[superstruct(only(Base))] - pub previous_epoch_attestations: VariableList, E::MaxPendingAttestations>, + pub previous_epoch_attestations: List, E::MaxPendingAttestations>, #[superstruct(only(Base))] - pub current_epoch_attestations: VariableList, E::MaxPendingAttestations>, + pub current_epoch_attestations: List, E::MaxPendingAttestations>, // Participation (Altair and later) #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub previous_epoch_participation: VariableList, + pub previous_epoch_participation: List, #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub current_epoch_participation: VariableList, + pub current_epoch_participation: List, // Finality pub justification_bits: BitVector, @@ -79,7 +79,7 @@ where // Inactivity #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub inactivity_scores: VariableList, + pub inactivity_scores: List, // Light-client sync committees #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] @@ -117,7 +117,7 @@ where #[ssz(skip_serializing, skip_deserializing)] #[superstruct(only(Capella, Deneb, Electra))] - pub historical_summaries: Option>, + pub historical_summaries: Option>, } /// Implement the conversion function from BeaconState -> PartialBeaconState. @@ -369,7 +369,9 @@ impl PartialBeaconState { // Patch the value for the current slot into the index for the current epoch let current_epoch = self.slot().epoch(E::slots_per_epoch()); let len = randao_mixes.len(); - randao_mixes[current_epoch.as_usize() % len] = *self.latest_randao_value(); + *randao_mixes + .get_mut(current_epoch.as_usize() % len) + .ok_or(Error::RandaoMixOutOfBounds)? = *self.latest_randao_value(); *self.randao_mixes_mut() = Some(randao_mixes) } @@ -422,7 +424,6 @@ macro_rules! impl_try_into_beacon_state { exit_cache: <_>::default(), slashings_cache: <_>::default(), epoch_cache: <_>::default(), - tree_hash_cache: <_>::default(), // Variant-specific fields $( diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 8fe13777ac4..8ef4886565c 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -5,7 +5,7 @@ use itertools::{process_results, Itertools}; use slog::info; use state_processing::{ per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext, - StateProcessingStrategy, VerifyBlockRoot, + VerifyBlockRoot, }; use std::sync::Arc; use types::{EthSpec, Hash256}; @@ -94,7 +94,6 @@ where &mut state, &block, BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &self.spec, diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs new file mode 100644 index 00000000000..5c1faa7f2fd --- /dev/null +++ b/beacon_node/store/src/state_cache.rs @@ -0,0 +1,303 @@ +use crate::Error; +use lru::LruCache; +use std::collections::{BTreeMap, HashMap, HashSet}; +use std::num::NonZeroUsize; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot}; + +/// Fraction of the LRU cache to leave intact during culling. 
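Note: expanding on the doc comment above: during culling, the most-recently-used fraction of the cache is exempt from eviction, with a floor of one entry. A sketch of the arithmetic implied by the `CULL_EXEMPT_*` constants defined just below:

    const CULL_EXEMPT_NUMERATOR: usize = 1;
    const CULL_EXEMPT_DENOMINATOR: usize = 10;

    // With the default 128-entry cache, the 12 most-recently-used states are
    // never considered for eviction; tiny caches still protect one entry.
    fn cull_exempt(len: usize) -> usize {
        std::cmp::max(1, len * CULL_EXEMPT_NUMERATOR / CULL_EXEMPT_DENOMINATOR)
    }

    fn main() {
        assert_eq!(cull_exempt(5), 1);
        assert_eq!(cull_exempt(128), 12);
    }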
+const CULL_EXEMPT_NUMERATOR: usize = 1; +const CULL_EXEMPT_DENOMINATOR: usize = 10; + +/// States that are less than or equal to this many epochs old *could* become finalized and will not +/// be culled from the cache. +const EPOCH_FINALIZATION_LIMIT: u64 = 4; + +#[derive(Debug)] +pub struct FinalizedState { + state_root: Hash256, + state: BeaconState, +} + +/// Map from block_root -> slot -> state_root. +#[derive(Debug, Default)] +pub struct BlockMap { + blocks: HashMap, +} + +/// Map from slot -> state_root. +#[derive(Debug, Default)] +pub struct SlotMap { + slots: BTreeMap, +} + +#[derive(Debug)] +pub struct StateCache { + finalized_state: Option>, + states: LruCache>, + block_map: BlockMap, + max_epoch: Epoch, +} + +#[derive(Debug)] +pub enum PutStateOutcome { + Finalized, + Duplicate, + New, +} + +#[allow(clippy::len_without_is_empty)] +impl StateCache { + pub fn new(capacity: NonZeroUsize) -> Self { + StateCache { + finalized_state: None, + states: LruCache::new(capacity), + block_map: BlockMap::default(), + max_epoch: Epoch::new(0), + } + } + + pub fn len(&self) -> usize { + self.states.len() + } + + pub fn capacity(&self) -> usize { + self.states.cap().get() + } + + pub fn update_finalized_state( + &mut self, + state_root: Hash256, + block_root: Hash256, + state: BeaconState, + ) -> Result<(), Error> { + if state.slot() % E::slots_per_epoch() != 0 { + return Err(Error::FinalizedStateUnaligned); + } + + if self + .finalized_state + .as_ref() + .map_or(false, |finalized_state| { + state.slot() < finalized_state.state.slot() + }) + { + return Err(Error::FinalizedStateDecreasingSlot); + } + + // Add to block map. + self.block_map.insert(block_root, state.slot(), state_root); + + // Prune block map. + let state_roots_to_prune = self.block_map.prune(state.slot()); + + // Delete states. + for state_root in state_roots_to_prune { + self.states.pop(&state_root); + } + + // Update finalized state. + self.finalized_state = Some(FinalizedState { state_root, state }); + Ok(()) + } + + /// Rebase the given state on the finalized state in order to reduce its memory consumption. + /// + /// This function should only be called on states that are likely not to already share tree + /// nodes with the finalized state, e.g. states loaded from disk. + /// + /// If the finalized state is not initialized this function is a no-op. + pub fn rebase_on_finalized( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), Error> { + if let Some(finalized_state) = &self.finalized_state { + state.rebase_on(&finalized_state.state, spec)?; + } + Ok(()) + } + + /// Return a status indicating whether the state already existed in the cache. + pub fn put_state( + &mut self, + state_root: Hash256, + block_root: Hash256, + state: &BeaconState, + ) -> Result { + if self + .finalized_state + .as_ref() + .map_or(false, |finalized_state| { + finalized_state.state_root == state_root + }) + { + return Ok(PutStateOutcome::Finalized); + } + + if self.states.peek(&state_root).is_some() { + return Ok(PutStateOutcome::Duplicate); + } + + // Refuse states with pending mutations: we want cached states to be as small as possible + // i.e. stored entirely as a binary merkle tree with no updates overlaid. + if state.has_pending_mutations() { + return Err(Error::StateForCacheHasPendingUpdates { + state_root, + slot: state.slot(), + }); + } + + // Update the cache's idea of the max epoch. 
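Note: `update_finalized_state` above enforces two invariants before swapping in a new finalized state: the state must sit on an epoch boundary, and finalization must never move backwards. The checks in isolation, with slots as plain integers:

    const SLOTS_PER_EPOCH: u64 = 32;

    #[derive(Debug, PartialEq)]
    enum FinalizeError {
        Unaligned,
        DecreasingSlot,
    }

    fn check_finalized(new_slot: u64, current: Option<u64>) -> Result<(), FinalizeError> {
        // Finalized checkpoints always fall on epoch boundaries.
        if new_slot % SLOTS_PER_EPOCH != 0 {
            return Err(FinalizeError::Unaligned);
        }
        // Finality is monotonic: never accept an older finalized state.
        if current.map_or(false, |cur| new_slot < cur) {
            return Err(FinalizeError::DecreasingSlot);
        }
        Ok(())
    }

    fn main() {
        assert_eq!(check_finalized(64, Some(32)), Ok(()));
        assert_eq!(check_finalized(65, Some(32)), Err(FinalizeError::Unaligned));
        assert_eq!(check_finalized(32, Some(64)), Err(FinalizeError::DecreasingSlot));
    }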
+ self.max_epoch = std::cmp::max(state.current_epoch(), self.max_epoch); + + // If the cache is full, use the custom cull routine to make room. + if let Some(over_capacity) = self.len().checked_sub(self.capacity()) { + self.cull(over_capacity + 1); + } + + // Insert the full state into the cache. + self.states.put(state_root, state.clone()); + + // Record the connection from block root and slot to this state. + let slot = state.slot(); + self.block_map.insert(block_root, slot, state_root); + + Ok(PutStateOutcome::New) + } + + pub fn get_by_state_root(&mut self, state_root: Hash256) -> Option> { + if let Some(ref finalized_state) = self.finalized_state { + if state_root == finalized_state.state_root { + return Some(finalized_state.state.clone()); + } + } + self.states.get(&state_root).cloned() + } + + pub fn get_by_block_root( + &mut self, + block_root: Hash256, + slot: Slot, + ) -> Option<(Hash256, BeaconState)> { + let slot_map = self.block_map.blocks.get(&block_root)?; + + // Find the state at `slot`, or failing that the most recent ancestor. + let state_root = slot_map + .slots + .iter() + .rev() + .find_map(|(ancestor_slot, state_root)| { + (*ancestor_slot <= slot).then_some(*state_root) + })?; + + let state = self.get_by_state_root(state_root)?; + Some((state_root, state)) + } + + pub fn delete_state(&mut self, state_root: &Hash256) { + self.states.pop(state_root); + self.block_map.delete(state_root); + } + + pub fn delete_block_states(&mut self, block_root: &Hash256) { + if let Some(slot_map) = self.block_map.delete_block_states(block_root) { + for state_root in slot_map.slots.values() { + self.states.pop(state_root); + } + } + } + + /// Cull approximately `count` states from the cache. + /// + /// States are culled LRU, with the following extra order imposed: + /// + /// - Advanced states. + /// - Mid-epoch unadvanced states. + /// - Epoch-boundary states that are too old to be finalized. + /// - Epoch-boundary states that could be finalized. + pub fn cull(&mut self, count: usize) { + let cull_exempt = std::cmp::max( + 1, + self.len() * CULL_EXEMPT_NUMERATOR / CULL_EXEMPT_DENOMINATOR, + ); + + // Stage 1: gather states to cull. + let mut advanced_state_roots = vec![]; + let mut mid_epoch_state_roots = vec![]; + let mut old_boundary_state_roots = vec![]; + let mut good_boundary_state_roots = vec![]; + for (&state_root, state) in self.states.iter().skip(cull_exempt) { + let is_advanced = state.slot() > state.latest_block_header().slot; + let is_boundary = state.slot() % E::slots_per_epoch() == 0; + let could_finalize = + (self.max_epoch - state.current_epoch()) <= EPOCH_FINALIZATION_LIMIT; + + if is_boundary { + if could_finalize { + good_boundary_state_roots.push(state_root); + } else { + old_boundary_state_roots.push(state_root); + } + } else if is_advanced { + advanced_state_roots.push(state_root); + } else { + mid_epoch_state_roots.push(state_root); + } + + // Terminate early in the common case where we've already found enough junk to cull. + if advanced_state_roots.len() == count { + break; + } + } + + // Stage 2: delete. + // This could probably be more efficient in how it interacts with the block map. 
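Note: `get_by_block_root` above finds the state at the requested slot, or failing that its most recent cached ancestor, by walking the per-block `BTreeMap` from the highest slot downwards. The lookup in isolation:

    use std::collections::BTreeMap;

    // Take the first entry at or below `slot`, scanning from the top.
    fn most_recent_at_or_before(slots: &BTreeMap<u64, u64>, slot: u64) -> Option<u64> {
        slots
            .iter()
            .rev()
            .find_map(|(ancestor_slot, state_root)| (*ancestor_slot <= slot).then_some(*state_root))
    }

    fn main() {
        let slots: BTreeMap<u64, u64> = [(32, 0xa), (64, 0xb), (96, 0xc)].into_iter().collect();
        assert_eq!(most_recent_at_or_before(&slots, 80), Some(0xb)); // ancestor at slot 64
        assert_eq!(most_recent_at_or_before(&slots, 10), None);      // nothing old enough
    }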
+ for state_root in advanced_state_roots + .iter() + .chain(mid_epoch_state_roots.iter()) + .chain(old_boundary_state_roots.iter()) + .chain(good_boundary_state_roots.iter()) + .take(count) + { + self.delete_state(state_root); + } + } +} + +impl BlockMap { + fn insert(&mut self, block_root: Hash256, slot: Slot, state_root: Hash256) { + let slot_map = self.blocks.entry(block_root).or_default(); + slot_map.slots.insert(slot, state_root); + } + + fn prune(&mut self, finalized_slot: Slot) -> HashSet { + let mut pruned_states = HashSet::new(); + + self.blocks.retain(|_, slot_map| { + slot_map.slots.retain(|slot, state_root| { + let keep = *slot >= finalized_slot; + if !keep { + pruned_states.insert(*state_root); + } + keep + }); + + !slot_map.slots.is_empty() + }); + + pruned_states + } + + fn delete(&mut self, state_root_to_delete: &Hash256) { + self.blocks.retain(|_, slot_map| { + slot_map + .slots + .retain(|_, state_root| state_root != state_root_to_delete); + !slot_map.slots.is_empty() + }); + } + + fn delete_block_states(&mut self, block_root: &Hash256) -> Option { + self.blocks.remove(block_root) + } +} diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index b3d58fa5ea8..cc9a2c5097b 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -12,3 +12,4 @@ futures = { workspace = true } lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } sloggers = { workspace = true } +logging = { workspace = true } diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs index 6e372d97575..ec8f45d850e 100644 --- a/common/task_executor/src/test_utils.rs +++ b/common/task_executor/src/test_utils.rs @@ -1,4 +1,5 @@ use crate::TaskExecutor; +use logging::test_logger; use slog::Logger; use sloggers::{null::NullLoggerBuilder, Build}; use std::sync::Arc; @@ -26,7 +27,7 @@ impl Default for TestRuntime { fn default() -> Self { let (runtime_shutdown, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let log = null_logger().unwrap(); + let log = test_logger(); let (runtime, handle) = if let Ok(handle) = runtime::Handle::try_current() { (None, handle) diff --git a/consensus/state_processing/src/all_caches.rs b/consensus/state_processing/src/all_caches.rs index 106692c63aa..b915091405b 100644 --- a/consensus/state_processing/src/all_caches.rs +++ b/consensus/state_processing/src/all_caches.rs @@ -9,12 +9,14 @@ use types::{BeaconState, ChainSpec, EpochCacheError, EthSpec, Hash256, RelativeE pub trait AllCaches { /// Build all caches. /// - /// Note that this excludes the tree-hash cache. That needs to be managed separately. + /// Note that this excludes milhouse's intrinsic tree-hash cache. That needs to be managed + /// separately. fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), EpochCacheError>; /// Return true if all caches are built. /// - /// Note that this excludes the tree-hash cache. That needs to be managed separately. + /// Note that this excludes milhouse's intrinsic tree-hash cache. That needs to be managed + /// separately. 
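Note: the staged eviction above drains candidates in a fixed priority order and stops as soon as `count` states have been culled, so epoch-boundary states that could still become finalized are evicted last. Simplified, with state roots as plain integers:

    // Victims are taken from the priority buckets in order: advanced states
    // first, then mid-epoch states, then stale boundary states, and only as
    // a last resort boundary states that might still be finalized.
    fn pick_victims(
        advanced: &[u64],
        mid_epoch: &[u64],
        old_boundary: &[u64],
        good_boundary: &[u64],
        count: usize,
    ) -> Vec<u64> {
        advanced
            .iter()
            .chain(mid_epoch)
            .chain(old_boundary)
            .chain(good_boundary)
            .take(count)
            .copied()
            .collect()
    }

    fn main() {
        let victims = pick_victims(&[1, 2], &[3], &[4], &[5], 3);
        assert_eq!(victims, vec![1, 2, 3]); // boundary states survive the cull
    }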
fn all_caches_built(&self) -> bool; } diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index f502d7f692c..d7621ebf18b 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -6,19 +6,23 @@ use crate::{ use itertools::Itertools; use std::iter::Peekable; use std::marker::PhantomData; -use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{ + BeaconState, BeaconStateError, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, + Slot, +}; -type PreBlockHook<'a, E, Error> = Box< +pub type PreBlockHook<'a, E, Error> = Box< dyn FnMut(&mut BeaconState, &SignedBeaconBlock>) -> Result<(), Error> + 'a, >; -type PostBlockHook<'a, E, Error> = PreBlockHook<'a, E, Error>; -type PreSlotHook<'a, E, Error> = Box) -> Result<(), Error> + 'a>; -type PostSlotHook<'a, E, Error> = Box< +pub type PostBlockHook<'a, E, Error> = PreBlockHook<'a, E, Error>; +pub type PreSlotHook<'a, E, Error> = + Box) -> Result<(), Error> + 'a>; +pub type PostSlotHook<'a, E, Error> = Box< dyn FnMut(&mut BeaconState, Option>, bool) -> Result<(), Error> + 'a, >; -type StateRootIterDefault = std::iter::Empty>; +pub type StateRootIterDefault = std::iter::Empty>; /// Efficiently apply blocks to a state while configuring various parameters. /// @@ -31,7 +35,6 @@ pub struct BlockReplayer< > { state: BeaconState, spec: &'a ChainSpec, - state_processing_strategy: StateProcessingStrategy, block_sig_strategy: BlockSignatureStrategy, verify_block_root: Option, pre_block_hook: Option>, @@ -45,9 +48,9 @@ pub struct BlockReplayer< #[derive(Debug)] pub enum BlockReplayError { - NoBlocks, SlotProcessing(SlotProcessingError), BlockProcessing(BlockProcessingError), + BeaconState(BeaconStateError), } impl From for BlockReplayError { @@ -62,14 +65,10 @@ impl From for BlockReplayError { } } -/// Defines how state roots should be computed and whether to perform all state transitions during block replay. -#[derive(PartialEq, Clone, Copy)] -pub enum StateProcessingStrategy { - /// Perform all transitions faithfully to the specification. - Accurate, - /// Don't compute state roots and process withdrawals, eventually computing an invalid beacon - /// state that can only be used for obtaining shuffling. - Inconsistent, +impl From for BlockReplayError { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } } impl<'a, E, Error, StateRootIter> BlockReplayer<'a, E, Error, StateRootIter> @@ -89,7 +88,6 @@ where Self { state, spec, - state_processing_strategy: StateProcessingStrategy::Accurate, block_sig_strategy: BlockSignatureStrategy::VerifyBulk, verify_block_root: Some(VerifyBlockRoot::True), pre_block_hook: None, @@ -102,18 +100,6 @@ where } } - /// Set the replayer's state processing strategy different from the default. - pub fn state_processing_strategy( - mut self, - state_processing_strategy: StateProcessingStrategy, - ) -> Self { - if state_processing_strategy == StateProcessingStrategy::Inconsistent { - self.verify_block_root = None; - } - self.state_processing_strategy = state_processing_strategy; - self - } - /// Set the replayer's block signature verification strategy. pub fn block_signature_strategy(mut self, block_sig_strategy: BlockSignatureStrategy) -> Self { self.block_sig_strategy = block_sig_strategy; @@ -175,21 +161,24 @@ where self } - /// Compute the state root for `slot` as efficiently as possible. 
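Note: the hook aliases above are made `pub` so the store can pass a caching hook into `BlockReplayer`, and `PreSlotHook` gains a first parameter: the state root computed for the slot about to be processed. A compiling sketch with deliberately simplified types (`u64` roots, `Vec<u64>` states), not the real signatures:

    type PreSlotHook<'a, E> = Box<dyn FnMut(u64, &mut Vec<u64>) -> Result<(), E> + 'a>;

    fn run_pre_slot<E>(
        hook: &mut PreSlotHook<'_, E>,
        root: u64,
        state: &mut Vec<u64>,
    ) -> Result<(), E> {
        hook(root, state)
    }

    fn main() {
        let mut state = Vec::new();
        let mut hook: PreSlotHook<'static, ()> = Box::new(|root, state| {
            state.push(root); // e.g. put the (root, state) pair into a cache
            Ok(())
        });
        run_pre_slot(&mut hook, 0xdead, &mut state).unwrap();
        assert_eq!(state, vec![0xdead]);
    }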
+ /// Compute the state root for `self.state` as efficiently as possible. + /// + /// This function MUST only be called when `self.state` is a post-state, i.e. it MUST not be + /// called between advancing a state with `per_slot_processing` and applying the block for that + /// slot. /// /// The `blocks` should be the full list of blocks being applied and `i` should be the index of /// the next block that will be applied, or `blocks.len()` if all blocks have already been /// applied. + /// + /// If the state root is not available from the state root iterator or the blocks then it will + /// be computed from `self.state` and a state root iterator miss will be recorded. fn get_state_root( &mut self, - slot: Slot, blocks: &[SignedBeaconBlock>], i: usize, - ) -> Result, Error> { - // If we don't care about state roots then return immediately. - if self.state_processing_strategy == StateProcessingStrategy::Inconsistent { - return Ok(Some(Hash256::zero())); - } + ) -> Result { + let slot = self.state.slot(); // If a state root iterator is configured, use it to find the root. if let Some(ref mut state_root_iter) = self.state_root_iter { @@ -199,7 +188,7 @@ where .transpose()?; if let Some((root, _)) = opt_root { - return Ok(Some(root)); + return Ok(root); } } @@ -207,13 +196,17 @@ where if let Some(prev_i) = i.checked_sub(1) { if let Some(prev_block) = blocks.get(prev_i) { if prev_block.slot() == slot { - return Ok(Some(prev_block.state_root())); + return Ok(prev_block.state_root()); } } } self.state_root_miss = true; - Ok(None) + let state_root = self + .state + .update_tree_hash_cache() + .map_err(BlockReplayError::from)?; + Ok(state_root) } /// Apply `blocks` atop `self.state`, taking care of slot processing. @@ -232,12 +225,13 @@ where } while self.state.slot() < block.slot() { + let state_root = self.get_state_root(&blocks, i)?; + if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { - pre_slot_hook(&mut self.state)?; + pre_slot_hook(state_root, &mut self.state)?; } - let state_root = self.get_state_root(self.state.slot(), &blocks, i)?; - let summary = per_slot_processing(&mut self.state, state_root, self.spec) + let summary = per_slot_processing(&mut self.state, Some(state_root), self.spec) .map_err(BlockReplayError::from)?; if let Some(ref mut post_slot_hook) = self.post_slot_hook { @@ -250,15 +244,11 @@ where pre_block_hook(&mut self.state, block)?; } - let verify_block_root = self.verify_block_root.unwrap_or_else(|| { - // If no explicit policy is set, verify only the first 1 or 2 block roots if using - // accurate state roots. Inaccurate state roots require block root verification to - // be off. - if i <= 1 && self.state_processing_strategy == StateProcessingStrategy::Accurate { - VerifyBlockRoot::True - } else { - VerifyBlockRoot::False - } + // If no explicit policy is set, verify only the first 1 or 2 block roots. + let verify_block_root = self.verify_block_root.unwrap_or(if i <= 1 { + VerifyBlockRoot::True + } else { + VerifyBlockRoot::False }); // Proposer index was already checked when this block was originally processed, we // can omit recomputing it during replay. 
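Note: `get_state_root` above resolves roots in a fixed order: the optional state-root iterator first, then the previous block's `state_root` if it lands on this slot, and only then a full tree hash, recording a miss. The same cascade with toy types:

    struct Replayer {
        state_root_miss: bool,
    }

    impl Replayer {
        fn get_state_root(
            &mut self,
            slot: u64,
            iter_root: Option<u64>,
            prev_block: Option<(u64, u64)>, // (slot, state_root)
        ) -> u64 {
            // 1. A configured state-root iterator wins outright.
            if let Some(root) = iter_root {
                return root;
            }
            // 2. The previous block already carries this slot's state root.
            if let Some((block_slot, root)) = prev_block {
                if block_slot == slot {
                    return root;
                }
            }
            // 3. Fall back to hashing the state and record the miss.
            self.state_root_miss = true;
            slot.wrapping_mul(0x9e3779b97f4a7c15) // stand-in for tree hashing
        }
    }

    fn main() {
        let mut r = Replayer { state_root_miss: false };
        assert_eq!(r.get_state_root(3, Some(42), None), 42); // iterator hit
        r.get_state_root(3, None, Some((2, 7)));             // miss: hashes the state
        assert!(r.state_root_miss);
    }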
@@ -268,7 +258,6 @@ where &mut self.state, block, self.block_sig_strategy, - self.state_processing_strategy, verify_block_root, &mut ctxt, self.spec, @@ -282,12 +271,13 @@ where if let Some(target_slot) = target_slot { while self.state.slot() < target_slot { + let state_root = self.get_state_root(&blocks, blocks.len())?; + if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { - pre_slot_hook(&mut self.state)?; + pre_slot_hook(state_root, &mut self.state)?; } - let state_root = self.get_state_root(self.state.slot(), &blocks, blocks.len())?; - let summary = per_slot_processing(&mut self.state, state_root, self.spec) + let summary = per_slot_processing(&mut self.state, Some(state_root), self.spec) .map_err(BlockReplayError::from)?; if let Some(ref mut post_slot_hook) = self.post_slot_hook { diff --git a/consensus/state_processing/src/common/initiate_validator_exit.rs b/consensus/state_processing/src/common/initiate_validator_exit.rs index 84656d9c890..a40a9dfd398 100644 --- a/consensus/state_processing/src/common/initiate_validator_exit.rs +++ b/consensus/state_processing/src/common/initiate_validator_exit.rs @@ -30,13 +30,14 @@ pub fn initiate_validator_exit( exit_queue_epoch.safe_add_assign(1)?; } - let validator = state.get_validator_mut(index)?; + let validator = state.get_validator_cow(index)?; // Return if the validator already initiated exit if validator.exit_epoch != spec.far_future_epoch { return Ok(()); } + let validator = validator.into_mut()?; validator.exit_epoch = exit_queue_epoch; validator.withdrawable_epoch = exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 036ab23498c..5ab9a99a3b1 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -28,7 +28,7 @@ pub fn initialize_beacon_state_from_eth1( let mut state = BeaconState::new(genesis_time, eth1_data, spec); // Seed RANDAO with Eth1 entropy - state.fill_randao_mixes_with(eth1_block_hash); + state.fill_randao_mixes_with(eth1_block_hash)?; let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); @@ -152,7 +152,9 @@ pub fn process_activations( spec: &ChainSpec, ) -> Result<(), Error> { let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut(); - for (index, validator) in validators.iter_mut().enumerate() { + let mut validators_iter = validators.iter_cow(); + while let Some((index, validator)) = validators_iter.next_cow() { + let validator = validator.into_mut()?; let balance = balances .get(index) .copied() diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index 7d84c426e8c..74f9d84bb11 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -30,7 +30,7 @@ pub mod upgrade; pub mod verify_operation; pub use all_caches::AllCaches; -pub use block_replayer::{BlockReplayError, BlockReplayer, StateProcessingStrategy}; +pub use block_replayer::{BlockReplayError, BlockReplayer}; pub use consensus_context::{ConsensusContext, ContextError}; pub use genesis::{ eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index b370ec6216b..a8447b7714a 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -40,7 +40,6 
@@ mod verify_exit; mod verify_proposer_slashing; use crate::common::decrease_balance; -use crate::StateProcessingStrategy; use crate::common::update_progressive_balances_cache::{ initialize_progressive_balances_cache, update_progressive_balances_metrics, @@ -102,7 +101,6 @@ pub fn per_block_processing<E: EthSpec, Payload: AbstractExecPayload<E>>( state: &mut BeaconState<E>, signed_block: &SignedBeaconBlock<E, Payload>, block_signature_strategy: BlockSignatureStrategy, - state_processing_strategy: StateProcessingStrategy, verify_block_root: VerifyBlockRoot, ctxt: &mut ConsensusContext<E>, spec: &ChainSpec, @@ -172,9 +170,7 @@ pub fn per_block_processing<E: EthSpec, Payload: AbstractExecPayload<E>>( // previous block. if is_execution_enabled(state, block.body()) { let body = block.body(); - if state_processing_strategy == StateProcessingStrategy::Accurate { - process_withdrawals::<E, Payload>(state, body.execution_payload()?, spec)?; - } + process_withdrawals::<E, Payload>(state, body.execution_payload()?, spec)?; process_execution_payload::<E, Payload>(state, body, spec)?; } diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 28d36dbc518..336895514f9 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -82,6 +82,7 @@ pub enum BlockProcessingError { }, ExecutionInvalid, ConsensusContext(ContextError), + MilhouseError(milhouse::Error), EpochCacheError(EpochCacheError), WithdrawalsRootMismatch { expected: Hash256, @@ -138,6 +139,12 @@ impl From for BlockProcessingError { } } +impl From<milhouse::Error> for BlockProcessingError { + fn from(e: milhouse::Error) -> Self { + Self::MilhouseError(e) + } +} + impl From<BlockOperationError<HeaderInvalid>> for BlockProcessingError { fn from(e: BlockOperationError<HeaderInvalid>) -> BlockProcessingError { match e { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 63b7c9e01fb..d812306a0f7 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -128,7 +128,7 @@ pub mod altair_deneb { let previous_epoch = ctxt.previous_epoch; let current_epoch = ctxt.current_epoch; - let attesting_indices = &verify_attestation_for_block_inclusion( + let attesting_indices = verify_attestation_for_block_inclusion( state, attestation, ctxt, @@ -136,7 +136,8 @@ pub mod altair_deneb { spec, ) .map_err(|e| e.into_with_index(att_index))? - .attesting_indices; + .attesting_indices + .clone(); // Matching roots, participation flag indices let data = &attestation.data; @@ -146,7 +147,7 @@ pub mod altair_deneb { // Update epoch participation flags.
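// [Editor's note] Sketch, not part of the patch: the `.clone()` added above copies the
// attesting indices out of the verification result so that no borrow tied to it is held
// across the participation-flag writes that follow. A self-contained toy version of the
// borrow pattern (all types here are hypothetical stand-ins):
struct State { participation: Vec<bool>, indices: Vec<u64> }
struct Verified<'a> { attesting_indices: &'a [u64] }

fn verify(state: &State) -> Verified<'_> {
    Verified { attesting_indices: &state.indices }
}

fn process(state: &mut State) {
    // Cloning ends the immutable borrow of `state` before the mutation below.
    let attesting_indices = verify(state).attesting_indices.to_vec();
    for &i in &attesting_indices {
        if let Some(flag) = state.participation.get_mut(i as usize) {
            *flag = true;
        }
    }
}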
let mut proposer_reward_numerator = 0; - for index in attesting_indices { + for index in &attesting_indices { let index = *index as usize; let validator_effective_balance = state.epoch_cache().get_effective_balance(index)?; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 2a2b67e30da..f0055fa80dd 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -5,7 +5,7 @@ use crate::per_block_processing::errors::{ DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex, ProposerSlashingInvalid, }; -use crate::{per_block_processing, BlockReplayError, BlockReplayer, StateProcessingStrategy}; +use crate::{per_block_processing, BlockReplayError, BlockReplayer}; use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, @@ -72,7 +72,6 @@ async fn valid_block_ok() { &mut state, &block, BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -98,7 +97,6 @@ async fn invalid_block_header_state_slot() { &mut state, &SignedBeaconBlock::from_block(block, signature), BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -131,7 +129,6 @@ async fn invalid_parent_block_root() { &mut state, &SignedBeaconBlock::from_block(block, signature), BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -165,7 +162,6 @@ async fn invalid_block_signature() { &mut state, &SignedBeaconBlock::from_block(block, Signature::empty()), BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -199,7 +195,6 @@ async fn invalid_randao_reveal_signature() { &mut state, &signed_block, BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs index dd1b2dfcd86..fc55fb11144 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs @@ -2,17 +2,14 @@ use crate::EpochProcessingError; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; use types::participation_flags::ParticipationFlags; -use types::VariableList; +use types::List; pub fn process_participation_flag_updates<E: EthSpec>( state: &mut BeaconState<E>, ) -> Result<(), EpochProcessingError> { *state.previous_epoch_participation_mut()? = std::mem::take(state.current_epoch_participation_mut()?); - *state.current_epoch_participation_mut()? = VariableList::new(vec![ - ParticipationFlags::default( - ); - state.validators().len() - ])?; + *state.current_epoch_participation_mut()?
= + List::repeat(ParticipationFlags::default(), state.validators().len())?; Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs index 7490f276567..00adabdcfe9 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs @@ -13,6 +13,9 @@ pub fn process_historical_summaries_update<E: EthSpec>( .safe_rem((E::slots_per_historical_root() as u64).safe_div(E::slots_per_epoch())?)? == 0 { + // We need to flush any pending mutations before hashing. + state.block_roots_mut().apply_updates()?; + state.state_roots_mut().apply_updates()?; let summary = HistoricalSummary::new(state); return state .historical_summaries_mut()? diff --git a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs index 7bd62c40816..73881e932b7 100644 --- a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs @@ -21,7 +21,9 @@ pub fn process_effective_balance_updates<E: EthSpec>( let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?; let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?; let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut(); - for (index, validator) in validators.iter_mut().enumerate() { + let mut validators_iter = validators.iter_cow(); + + while let Some((index, validator)) = validators_iter.next_cow() { let balance = balances .get(index) .copied() @@ -44,7 +46,7 @@ pub fn process_effective_balance_updates<E: EthSpec>( } if new_effective_balance != validator.effective_balance { - validator.effective_balance = new_effective_balance; + validator.into_mut()?.effective_balance = new_effective_balance; } } diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 65a946e7bff..6f48050e161 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -3,8 +3,8 @@ use crate::metrics; use std::sync::Arc; use types::{ consts::altair::{TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX}, - BeaconStateError, Epoch, EthSpec, ParticipationFlags, ProgressiveBalancesCache, SyncCommittee, - Validator, VariableList, + BeaconStateError, Epoch, EthSpec, List, ParticipationFlags, ProgressiveBalancesCache, + SyncCommittee, Validator, }; /// Provides a summary of validator participation during the epoch. @@ -25,20 +25,20 @@ pub enum EpochProcessingSummary<E: EthSpec> { #[derive(PartialEq, Debug)] pub struct ParticipationEpochSummary<E: EthSpec> { /// Copy of the validator registry prior to mutation. - validators: VariableList<Validator, E::ValidatorRegistryLimit>, + validators: List<Validator, E::ValidatorRegistryLimit>, /// Copy of the participation flags for the previous epoch. - previous_epoch_participation: VariableList<ParticipationFlags, E::ValidatorRegistryLimit>, + previous_epoch_participation: List<ParticipationFlags, E::ValidatorRegistryLimit>, /// Copy of the participation flags for the current epoch.
- current_epoch_participation: VariableList<ParticipationFlags, E::ValidatorRegistryLimit>, + current_epoch_participation: List<ParticipationFlags, E::ValidatorRegistryLimit>, previous_epoch: Epoch, current_epoch: Epoch, } impl<E: EthSpec> ParticipationEpochSummary<E> { pub fn new( - validators: VariableList<Validator, E::ValidatorRegistryLimit>, - previous_epoch_participation: VariableList<ParticipationFlags, E::ValidatorRegistryLimit>, - current_epoch_participation: VariableList<ParticipationFlags, E::ValidatorRegistryLimit>, + validators: List<Validator, E::ValidatorRegistryLimit>, + previous_epoch_participation: List<ParticipationFlags, E::ValidatorRegistryLimit>, + current_epoch_participation: List<ParticipationFlags, E::ValidatorRegistryLimit>, previous_epoch: Epoch, current_epoch: Epoch, ) -> Self { diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index c18e1303b26..de481ec6767 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -1,4 +1,4 @@ -use types::{BeaconStateError, EpochCacheError, InconsistentFork}; +use types::{milhouse, BeaconStateError, EpochCacheError, InconsistentFork}; #[derive(Debug, PartialEq)] pub enum EpochProcessingError { @@ -23,6 +23,7 @@ pub enum EpochProcessingError { InconsistentStateFork(InconsistentFork), InvalidJustificationBit(ssz_types::Error), InvalidFlagIndex(usize), + MilhouseError(milhouse::Error), EpochCache(EpochCacheError), } @@ -50,6 +51,12 @@ impl From for EpochProcessingError { } } +impl From<milhouse::Error> for EpochProcessingError { + fn from(e: milhouse::Error) -> Self { + Self::MilhouseError(e) + } +} + impl From<EpochCacheError> for EpochProcessingError { fn from(e: EpochCacheError) -> Self { EpochProcessingError::EpochCache(e) diff --git a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs index 6d06b4d7ca5..7686932192f 100644 --- a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs +++ b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs @@ -14,7 +14,7 @@ pub fn process_historical_roots_update<E: EthSpec>( .safe_rem(E::SlotsPerHistoricalRoot::to_u64().safe_div(E::slots_per_epoch())?)? == 0 { - let historical_batch = state.historical_batch(); + let historical_batch = state.historical_batch()?; state .historical_roots_mut() .push(historical_batch.tree_hash_root())?; diff --git a/consensus/state_processing/src/per_epoch_processing/resets.rs b/consensus/state_processing/src/per_epoch_processing/resets.rs index d577c52e6a5..c9f69c3c95e 100644 --- a/consensus/state_processing/src/per_epoch_processing/resets.rs +++ b/consensus/state_processing/src/per_epoch_processing/resets.rs @@ -2,7 +2,7 @@ use super::errors::EpochProcessingError; use safe_arith::SafeArith; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; -use types::{Unsigned, VariableList}; +use types::{List, Unsigned}; pub fn process_eth1_data_reset<E: EthSpec>( state: &mut BeaconState<E>, ) -> Result<(), EpochProcessingError> { .safe_rem(E::SlotsPerEth1VotingPeriod::to_u64())?
== 0 { - *state.eth1_data_votes_mut() = VariableList::empty(); + *state.eth1_data_votes_mut() = List::empty(); } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 380484046c3..7a95de3317e 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -12,6 +12,7 @@ use types::{ NUM_FLAG_INDICES, PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, + milhouse::Cow, ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ExitCache, ForkName, ParticipationFlags, ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, }; @@ -173,9 +174,9 @@ pub fn process_epoch_single_pass<E: EthSpec>( let effective_balances_ctxt = &EffectiveBalancesContext::new(spec)?; // Iterate over the validators and related fields in one pass. - let mut validators_iter = validators.iter_mut(); - let mut balances_iter = balances.iter_mut(); - let mut inactivity_scores_iter = inactivity_scores.iter_mut(); + let mut validators_iter = validators.iter_cow(); + let mut balances_iter = balances.iter_cow(); + let mut inactivity_scores_iter = inactivity_scores.iter_cow(); // Values computed for the next epoch transition. let mut next_epoch_total_active_balance = 0; @@ -186,14 +187,14 @@ pub fn process_epoch_single_pass<E: EthSpec>( previous_epoch_participation.iter(), current_epoch_participation.iter(), ) { - let validator = validators_iter - .next() + let (_, mut validator) = validators_iter + .next_cow() .ok_or(BeaconStateError::UnknownValidator(index))?; - let balance = balances_iter - .next() + let (_, mut balance) = balances_iter + .next_cow() .ok_or(BeaconStateError::UnknownValidator(index))?; - let inactivity_score = inactivity_scores_iter - .next() + let (_, mut inactivity_score) = inactivity_scores_iter + .next_cow() .ok_or(BeaconStateError::UnknownValidator(index))?; let is_active_current_epoch = validator.is_active_at(current_epoch); @@ -223,7 +224,7 @@ pub fn process_epoch_single_pass<E: EthSpec>( // `process_inactivity_updates` if conf.inactivity_updates { process_single_inactivity_update( - inactivity_score, + &mut inactivity_score, validator_info, state_ctxt, spec, @@ -233,8 +234,8 @@ // `process_rewards_and_penalties` if conf.rewards_and_penalties { process_single_reward_and_penalty( - balance, - inactivity_score, + &mut balance, + &inactivity_score, validator_info, rewards_ctxt, state_ctxt, @@ -246,7 +247,7 @@ // `process_registry_updates` if conf.registry_updates { process_single_registry_update( - validator, + &mut validator, validator_info, exit_cache, activation_queue, @@ -258,14 +259,14 @@ // `process_slashings` if conf.slashings { - process_single_slashing(balance, validator, slashings_ctxt, state_ctxt, spec)?; + process_single_slashing(&mut balance, &validator, slashings_ctxt, state_ctxt, spec)?; } // `process_effective_balance_updates` if conf.effective_balance_updates { process_single_effective_balance_update( *balance, - validator, + &mut validator, validator_info, &mut next_epoch_total_active_balance, &mut next_epoch_cache, @@ -290,7 +291,7 @@ } fn process_single_inactivity_update( - inactivity_score: &mut u64, + inactivity_score: &mut Cow<u64>, validator_info: &ValidatorInfo, state_ctxt: &StateContext, spec: &ChainSpec, @@ -303,25
+304,27 @@ fn process_single_inactivity_update( if validator_info.is_unslashed_participating_index(TIMELY_TARGET_FLAG_INDEX)? { // Avoid mutating when the inactivity score is 0 and can't go any lower -- the common // case. - if *inactivity_score == 0 { + if **inactivity_score == 0 { return Ok(()); } - inactivity_score.safe_sub_assign(1)?; + inactivity_score.make_mut()?.safe_sub_assign(1)?; } else { - inactivity_score.safe_add_assign(spec.inactivity_score_bias)?; + inactivity_score + .make_mut()? + .safe_add_assign(spec.inactivity_score_bias)?; } // Decrease the score of all validators for forgiveness when not during a leak if !state_ctxt.is_in_inactivity_leak { - let deduction = min(spec.inactivity_score_recovery_rate, *inactivity_score); - inactivity_score.safe_sub_assign(deduction)?; + let deduction = min(spec.inactivity_score_recovery_rate, **inactivity_score); + inactivity_score.make_mut()?.safe_sub_assign(deduction)?; } Ok(()) } fn process_single_reward_and_penalty( - balance: &mut u64, + balance: &mut Cow<u64>, inactivity_score: &u64, validator_info: &ValidatorInfo, rewards_ctxt: &RewardsAndPenaltiesContext, @@ -351,6 +354,7 @@ fn process_single_reward_and_penalty( )?; if delta.rewards != 0 || delta.penalties != 0 { + let balance = balance.make_mut()?; balance.safe_add_assign(delta.rewards)?; *balance = balance.saturating_sub(delta.penalties); } @@ -452,7 +456,7 @@ impl RewardsAndPenaltiesContext { } fn process_single_registry_update( - validator: &mut Validator, + validator: &mut Cow<Validator>, validator_info: &ValidatorInfo, exit_cache: &mut ExitCache, activation_queue: &BTreeSet<usize>, @@ -463,7 +467,7 @@ fn process_single_registry_update( let current_epoch = state_ctxt.current_epoch; if validator.is_eligible_for_activation_queue(spec) { - validator.activation_eligibility_epoch = current_epoch.safe_add(1)?; + validator.make_mut()?.activation_eligibility_epoch = current_epoch.safe_add(1)?; } if validator.is_active_at(current_epoch) && validator.effective_balance <= spec.ejection_balance { @@ -472,7 +476,8 @@ } if activation_queue.contains(&validator_info.index) { - validator.activation_epoch = spec.compute_activation_exit_epoch(current_epoch)?; + validator.make_mut()?.activation_epoch = + spec.compute_activation_exit_epoch(current_epoch)?; } // Caching: add to speculative activation queue for next epoch. @@ -487,7 +492,7 @@ } fn initiate_validator_exit( - validator: &mut Validator, + validator: &mut Cow<Validator>, exit_cache: &mut ExitCache, state_ctxt: &StateContext, spec: &ChainSpec, @@ -508,6 +513,7 @@ exit_queue_epoch.safe_add_assign(1)?; } + let validator = validator.make_mut()?; validator.exit_epoch = exit_queue_epoch; validator.withdrawable_epoch = exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; @@ -540,7 +546,7 @@ impl SlashingsContext { } fn process_single_slashing( - balance: &mut u64, + balance: &mut Cow<u64>, validator: &Validator, slashings_ctxt: &SlashingsContext, state_ctxt: &StateContext, @@ -557,7 +563,7 @@ fn process_single_slashing( .safe_div(state_ctxt.total_active_balance)? .safe_mul(increment)?; - *balance = balance.saturating_sub(penalty); + *balance.make_mut()?
= balance.saturating_sub(penalty); } Ok(()) } @@ -581,7 +587,7 @@ impl EffectiveBalancesContext { #[allow(clippy::too_many_arguments)] fn process_single_effective_balance_update( balance: u64, - validator: &mut Validator, + validator: &mut Cow<Validator>, validator_info: &ValidatorInfo, next_epoch_total_active_balance: &mut u64, next_epoch_cache: &mut PreEpochCache, @@ -611,7 +617,7 @@ fn process_single_effective_balance_update( } if new_effective_balance != old_effective_balance { - validator.effective_balance = new_effective_balance; + validator.make_mut()?.effective_balance = new_effective_balance; // Update progressive balances cache for the *current* epoch, which will soon become the // previous epoch once the epoch transition completes. diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index cfbc6eba9e9..872560db3df 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -4,13 +4,13 @@ use std::mem; use std::sync::Arc; use types::{ BeaconState, BeaconStateAltair, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, - Fork, ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, VariableList, + Fork, List, ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, }; /// Translate the participation information from the epoch prior to the fork into Altair's format. pub fn translate_participation<E: EthSpec>( state: &mut BeaconState<E>, - pending_attestations: &VariableList<PendingAttestation<E>, E::MaxPendingAttestations>, + pending_attestations: &List<PendingAttestation<E>, E::MaxPendingAttestations>, spec: &ChainSpec, ) -> Result<(), Error> { // Previous epoch committee cache is required for `get_attesting_indices`. @@ -51,8 +51,8 @@ pub fn upgrade_to_altair<E: EthSpec>( let pre = pre_state.as_base_mut()?; let default_epoch_participation = - VariableList::new(vec![ParticipationFlags::default(); pre.validators.len()])?; - let inactivity_scores = VariableList::new(vec![0; pre.validators.len()])?; + List::new(vec![ParticipationFlags::default(); pre.validators.len()])?; + let inactivity_scores = List::new(vec![0; pre.validators.len()])?; let temp_sync_committee = Arc::new(SyncCommittee::temporary()); @@ -108,7 +108,6 @@ pub fn upgrade_to_altair<E: EthSpec>( exit_cache: mem::take(&mut pre.exit_cache), slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); // Fill in previous epoch participation from the pre state's pending attestations. diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index 87b40abebdd..51e29d10f3c 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -1,7 +1,7 @@ use std::mem; use types::{ BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, - Fork, VariableList, + Fork, List, }; /// Transform a `Merge` state into an `Capella` state.
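// [Editor's note] Sketch, not part of the patch: the single-pass helpers above take
// `Cow<u64>`/`Cow<Validator>` handles so that a leaf of the tree-backed list is only
// copied (via `make_mut()`) when a value actually changes. A minimal illustration using
// the milhouse calls seen in this diff (`iter_cow`, `next_cow`, `make_mut`,
// `apply_updates`); the capacity type `U1024` and import paths are assumptions.
use milhouse::List;
use typenum::U1024;

fn halve_nonzero(balances: &mut List<u64, U1024>) -> Result<(), milhouse::Error> {
    let mut iter = balances.iter_cow();
    while let Some((_index, mut balance)) = iter.next_cow() {
        // Reads go through `Deref` and leave the shared leaf untouched.
        if *balance == 0 {
            continue;
        }
        // Writes materialise a private copy and queue a pending update.
        *balance.make_mut()? /= 2;
    }
    // Merge the queued mutations back into the tree.
    balances.apply_updates()
}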
@@ -61,7 +61,7 @@ pub fn upgrade_to_capella<E: EthSpec>( // Capella next_withdrawal_index: 0, next_withdrawal_validator_index: 0, - historical_summaries: VariableList::default(), + historical_summaries: List::default(), // Caches total_active_balance: pre.total_active_balance, progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), @@ -70,7 +70,6 @@ pub fn upgrade_to_capella<E: EthSpec>( exit_cache: mem::take(&mut pre.exit_cache), slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); *pre_state = post; diff --git a/consensus/state_processing/src/upgrade/deneb.rs b/consensus/state_processing/src/upgrade/deneb.rs index 43fe5d9dc3d..c21e1361a5a 100644 --- a/consensus/state_processing/src/upgrade/deneb.rs +++ b/consensus/state_processing/src/upgrade/deneb.rs @@ -71,7 +71,6 @@ pub fn upgrade_to_deneb<E: EthSpec>( exit_cache: mem::take(&mut pre.exit_cache), slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); *pre_state = post; diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index a37d0fc3beb..f64228f050b 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -70,7 +70,6 @@ pub fn upgrade_to_electra<E: EthSpec>( exit_cache: mem::take(&mut pre.exit_cache), slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); *pre_state = post; diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs index 147c97ac29e..02705743ceb 100644 --- a/consensus/state_processing/src/upgrade/merge.rs +++ b/consensus/state_processing/src/upgrade/merge.rs @@ -66,7 +66,6 @@ pub fn upgrade_to_bellatrix<E: EthSpec>( exit_cache: mem::take(&mut pre.exit_cache), slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); *pre_state = post; diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index db15f53537e..4b7d9f2b98d 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -52,6 +52,8 @@ serde_json = { workspace = true } smallvec = { workspace = true } maplit = { workspace = true } strum = { workspace = true } +milhouse = { workspace = true } +rpds = { workspace = true } [dev-dependencies] criterion = { workspace = true } @@ -68,4 +70,4 @@ sqlite = [] # The `arbitrary-fuzz` feature is a no-op provided for backwards compatibility. # For simplicity `Arbitrary` is now derived regardless of the feature's presence.
arbitrary-fuzz = [] -portable = ["bls/supranational-portable"] \ No newline at end of file +portable = ["bls/supranational-portable"] diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index bb2b527109f..5c1036a4c5a 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -2,6 +2,7 @@ use criterion::Criterion; use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use milhouse::List; use rayon::prelude::*; use ssz::Encode; use std::sync::Arc; @@ -27,21 +28,23 @@ fn get_state(validator_count: usize) -> BeaconState<E> { .expect("should add balance"); } - *state.validators_mut() = (0..validator_count) - .collect::<Vec<_>>() - .par_iter() - .map(|&i| Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: spec.max_effective_balance, - slashed: false, - activation_eligibility_epoch: Epoch::new(0), - activation_epoch: Epoch::new(0), - exit_epoch: Epoch::from(u64::max_value()), - withdrawable_epoch: Epoch::from(u64::max_value()), - }) - .collect::<Vec<_>>() - .into(); + *state.validators_mut() = List::new( + (0..validator_count) + .collect::<Vec<_>>() + .par_iter() + .map(|&i| Validator { + pubkey: generate_deterministic_keypair(i).pk.compress(), + withdrawal_credentials: Hash256::from_low_u64_le(i as u64), + effective_balance: spec.max_effective_balance, + slashed: false, + activation_eligibility_epoch: Epoch::new(0), + activation_epoch: Epoch::new(0), + exit_epoch: Epoch::from(u64::max_value()), + withdrawable_epoch: Epoch::from(u64::max_value()), + }) + .collect(), + ) + .unwrap(); state } @@ -96,19 +99,6 @@ fn all_benches(c: &mut Criterion) { .sample_size(10), ); - let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("clone/tree_hash_cache", move |b| { - b.iter_batched_ref( - || inner_state.clone(), - |state| black_box(state.tree_hash_cache().clone()), - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - let inner_state = state.clone(); c.bench( &format!("{}_validators", validator_count), diff --git a/consensus/types/examples/clone_state.rs b/consensus/types/examples/clone_state.rs deleted file mode 100644 index a7e80cf4078..00000000000 --- a/consensus/types/examples/clone_state.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! These examples only really exist so we can use them for flamegraph. If they get annoying to -//! maintain, feel free to delete.
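// [Editor's note] Sketch, not part of the patch: the benchmark change above replaces
// `VariableList`'s infallible `From<Vec<_>>` conversion (`.into()`) with the fallible
// `List::new`, and `clone_state.rs` below is deletable because cloning a tree-backed
// `List` shares structure instead of copying elements. Capacity type is an assumption.
use milhouse::List;
use typenum::U1024;

fn cheap_snapshot() -> Result<(), milhouse::Error> {
    let balances = List::<u64, U1024>::new((0..1024).collect())?;
    // This clone shares the underlying tree nodes rather than copying all
    // 1024 values, which is what made the old whole-state clone benchmark moot.
    let snapshot = balances.clone();
    assert_eq!(snapshot.len(), balances.len());
    Ok(())
}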
- -use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Eth1Data, EthSpec, Hash256, - MinimalEthSpec, Validator, -}; - -type E = MinimalEthSpec; - -fn get_state(validator_count: usize) -> BeaconState<E> { - let spec = &E::default_spec(); - let eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }; - - let mut state = BeaconState::new(0, eth1_data, spec); - - for i in 0..validator_count { - state - .balances_mut() - .push(i as u64) - .expect("should add balance"); - state - .validators_mut() - .push(Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: i as u64, - slashed: i % 2 == 0, - activation_eligibility_epoch: i.into(), - activation_epoch: i.into(), - exit_epoch: i.into(), - withdrawable_epoch: i.into(), - }) - .expect("should add validator"); - } - - state -} - -fn main() { - let validator_count = 1_024; - let state = get_state(validator_count); - - for _ in 0..100_000 { - let _ = state.clone(); - } -} diff --git a/consensus/types/examples/ssz_encode_state.rs b/consensus/types/examples/ssz_encode_state.rs deleted file mode 100644 index 5d0a2db17c7..00000000000 --- a/consensus/types/examples/ssz_encode_state.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! These examples only really exist so we can use them for flamegraph. If they get annoying to -//! maintain, feel free to delete. - -use ssz::Encode; -use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Eth1Data, EthSpec, Hash256, - MinimalEthSpec, Validator, -}; - -type E = MinimalEthSpec; - -fn get_state(validator_count: usize) -> BeaconState<E> { - let spec = &E::default_spec(); - let eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }; - - let mut state = BeaconState::new(0, eth1_data, spec); - - for i in 0..validator_count { - state - .balances_mut() - .push(i as u64) - .expect("should add balance"); - state - .validators_mut() - .push(Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: i as u64, - slashed: i % 2 == 0, - activation_eligibility_epoch: i.into(), - activation_epoch: i.into(), - exit_epoch: i.into(), - withdrawable_epoch: i.into(), - }) - .expect("should add validator"); - } - - state -} - -fn main() { - let validator_count = 1_024; - let state = get_state(validator_count); - - for _ in 0..1_024 { - let state_bytes = state.as_ssz_bytes(); - let _: BeaconState<E> = - BeaconState::from_ssz_bytes(&state_bytes, &E::default_spec()).expect("should decode"); - } -} diff --git a/consensus/types/examples/tree_hash_state.rs b/consensus/types/examples/tree_hash_state.rs deleted file mode 100644 index 26777b25912..00000000000 --- a/consensus/types/examples/tree_hash_state.rs +++ /dev/null @@ -1,56 +0,0 @@ -//! These examples only really exist so we can use them for flamegraph. If they get annoying to -//! maintain, feel free to delete.
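// [Editor's note] Sketch, not part of the patch: these flamegraph examples exercised the
// old `BeaconTreeHashCache`; with tree-backed lists, hashing is incremental in the list
// itself (see `update_tree_hash_cache` further down) -- flush pending copy-on-write
// updates, then re-hash, and only modified branches are recomputed. The `Hash256` path,
// `get_cow` availability, and capacity type are assumptions based on usage in this diff.
use milhouse::List;
use tree_hash::TreeHash;
use typenum::U1024;

fn mutate_and_rehash(list: &mut List<u64, U1024>) -> Result<tree_hash::Hash256, milhouse::Error> {
    if let Some(mut cow) = list.get_cow(0) {
        *cow.make_mut()? = 42; // queued as a pending update
    }
    list.apply_updates()?; // merge pending updates into the tree
    Ok(list.tree_hash_root()) // unchanged subtree hashes are reused
}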
- -use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Eth1Data, EthSpec, Hash256, - MinimalEthSpec, Validator, -}; - -type E = MinimalEthSpec; - -fn get_state(validator_count: usize) -> BeaconState { - let spec = &E::default_spec(); - let eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }; - - let mut state = BeaconState::new(0, eth1_data, spec); - - for i in 0..validator_count { - state - .balances_mut() - .push(i as u64) - .expect("should add balance"); - state - .validators_mut() - .push(Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: i as u64, - slashed: i % 2 == 0, - activation_eligibility_epoch: i.into(), - activation_epoch: i.into(), - exit_epoch: i.into(), - withdrawable_epoch: i.into(), - }) - .expect("should add validator"); - } - - state -} - -fn main() { - let validator_count = 1_024; - let mut state = get_state(validator_count); - state.update_tree_hash_cache().expect("should update cache"); - - actual_thing::(&mut state); -} - -fn actual_thing(state: &mut BeaconState) { - for _ in 0..200_024 { - let _ = state.update_tree_hash_cache().expect("should update cache"); - } -} diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 02572b0efbd..5da81f6a752 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -7,6 +7,7 @@ use compare_fields_derive::CompareFields; use derivative::Derivative; use ethereum_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; +use metastruct::{metastruct, NumFields}; pub use pubkey_cache::PubkeyCache; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; @@ -28,28 +29,25 @@ pub use crate::beacon_state::balance::Balance; pub use crate::beacon_state::exit_cache::ExitCache; pub use crate::beacon_state::progressive_balances_cache::*; pub use crate::beacon_state::slashings_cache::SlashingsCache; -pub use clone_config::CloneConfig; pub use eth_spec::*; pub use iter::BlockRootsIter; -pub use tree_hash_cache::BeaconTreeHashCache; +pub use milhouse::{interface::Interface, List, Vector}; #[macro_use] mod committee_cache; mod balance; -mod clone_config; mod exit_cache; mod iter; mod progressive_balances_cache; mod pubkey_cache; mod slashings_cache; mod tests; -mod tree_hash_cache; pub const CACHED_EPOCHS: usize = 3; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; -pub type Validators = VariableList::ValidatorRegistryLimit>; -pub type Balances = VariableList::ValidatorRegistryLimit>; +pub type Validators = List::ValidatorRegistryLimit>; +pub type Balances = List::ValidatorRegistryLimit>; #[derive(Debug, PartialEq, Clone)] pub enum Error { @@ -144,6 +142,20 @@ pub enum Error { current_epoch: Epoch, epoch: Epoch, }, + MilhouseError(milhouse::Error), + CommitteeCacheDiffInvalidEpoch { + prev_current_epoch: Epoch, + current_epoch: Epoch, + }, + CommitteeCacheDiffUninitialized { + expected_epoch: Epoch, + }, + DiffAcrossFork { + prev_fork: ForkName, + current_fork: ForkName, + }, + TotalActiveBalanceDiffUninitialized, + MissingImmutableValidator(usize), IndexNotSupported(usize), InvalidFlagIndex(usize), MerkleTreeError(merkle_proof::MerkleTreeError), @@ -207,16 +219,105 @@ impl From for Hash256 { TreeHash, TestRandom, CompareFields, - arbitrary::Arbitrary + arbitrary::Arbitrary, ), serde(bound = "E: EthSpec", deny_unknown_fields), arbitrary(bound = "E: EthSpec"), derivative(Clone), 
), + specific_variant_attributes( + Base(metastruct( + mappings( + map_beacon_state_base_fields(), + map_beacon_state_base_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_base_tree_list_fields_immutable(groups(tree_lists)), + ), + bimappings(bimap_beacon_state_base_tree_list_fields( + other_type = "BeaconStateBase", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )), + Altair(metastruct( + mappings( + map_beacon_state_altair_fields(), + map_beacon_state_altair_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_altair_tree_list_fields_immutable(groups(tree_lists)), + ), + bimappings(bimap_beacon_state_altair_tree_list_fields( + other_type = "BeaconStateAltair", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )), + Merge(metastruct( + mappings( + map_beacon_state_bellatrix_fields(), + map_beacon_state_bellatrix_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_bellatrix_tree_list_fields_immutable(groups(tree_lists)), + ), + bimappings(bimap_beacon_state_bellatrix_tree_list_fields( + other_type = "BeaconStateMerge", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )), + Capella(metastruct( + mappings( + map_beacon_state_capella_fields(), + map_beacon_state_capella_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_capella_tree_list_fields_immutable(groups(tree_lists)), + ), + bimappings(bimap_beacon_state_capella_tree_list_fields( + other_type = "BeaconStateCapella", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )), + Deneb(metastruct( + mappings( + map_beacon_state_deneb_fields(), + map_beacon_state_deneb_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_deneb_tree_list_fields_immutable(groups(tree_lists)), + ), + bimappings(bimap_beacon_state_deneb_tree_list_fields( + other_type = "BeaconStateDeneb", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )), + Electra(metastruct( + mappings( + map_beacon_state_electra_fields(), + map_beacon_state_electra_tree_list_fields(mutable, fallible, groups(tree_lists)), + map_beacon_state_electra_tree_list_fields_immutable(groups(tree_lists)), + ), + bimappings(bimap_beacon_state_electra_tree_list_fields( + other_type = "BeaconStateElectra", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), + )) + ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + map_ref_mut_into(BeaconStateRef) +)] +#[derive( + Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash, arbitrary::Arbitrary, )] -#[derive(Debug, PartialEq, Serialize, Deserialize, Encode, TreeHash, arbitrary::Arbitrary)] #[serde(untagged)] #[serde(bound = "E: EthSpec")] #[arbitrary(bound = "E: EthSpec")] @@ -228,76 +329,102 @@ where { // Versioning #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub genesis_validators_root: Hash256, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub slot: Slot, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub fork: Fork, // History + #[metastruct(exclude_from(tree_lists))] pub 
latest_block_header: BeaconBlockHeader, - #[compare_fields(as_slice)] - pub block_roots: FixedVector, - #[compare_fields(as_slice)] - pub state_roots: FixedVector, + #[test_random(default)] + #[compare_fields(as_iter)] + pub block_roots: Vector, + #[test_random(default)] + #[compare_fields(as_iter)] + pub state_roots: Vector, // Frozen in Capella, replaced by historical_summaries - pub historical_roots: VariableList, + #[test_random(default)] + #[compare_fields(as_iter)] + pub historical_roots: List, // Ethereum 1.0 chain data + #[metastruct(exclude_from(tree_lists))] pub eth1_data: Eth1Data, - pub eth1_data_votes: VariableList, + #[test_random(default)] + pub eth1_data_votes: List, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub eth1_deposit_index: u64, // Registry - #[compare_fields(as_slice)] - pub validators: VariableList, - #[compare_fields(as_slice)] + #[test_random(default)] + pub validators: List, #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - pub balances: VariableList, + #[compare_fields(as_iter)] + #[test_random(default)] + pub balances: List, // Randomness - pub randao_mixes: FixedVector, + #[test_random(default)] + pub randao_mixes: Vector, // Slashings + #[test_random(default)] #[serde(with = "ssz_types::serde_utils::quoted_u64_fixed_vec")] - pub slashings: FixedVector, + pub slashings: Vector, // Attestations (genesis fork only) #[superstruct(only(Base))] - pub previous_epoch_attestations: VariableList, E::MaxPendingAttestations>, + #[test_random(default)] + pub previous_epoch_attestations: List, E::MaxPendingAttestations>, #[superstruct(only(Base))] - pub current_epoch_attestations: VariableList, E::MaxPendingAttestations>, + #[test_random(default)] + pub current_epoch_attestations: List, E::MaxPendingAttestations>, // Participation (Altair and later) #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub previous_epoch_participation: VariableList, + #[test_random(default)] + pub previous_epoch_participation: List, #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub current_epoch_participation: VariableList, + #[test_random(default)] + pub current_epoch_participation: List, // Finality #[test_random(default)] + #[metastruct(exclude_from(tree_lists))] pub justification_bits: BitVector, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub previous_justified_checkpoint: Checkpoint, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub current_justified_checkpoint: Checkpoint, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub finalized_checkpoint: Checkpoint, // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] - pub inactivity_scores: VariableList, + #[test_random(default)] + pub inactivity_scores: List, // Light-client sync committees #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[metastruct(exclude_from(tree_lists))] pub current_sync_committee: Arc>, #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[metastruct(exclude_from(tree_lists))] pub next_sync_committee: Arc>, // Execution @@ -305,89 +432,85 @@ where only(Merge), partial_getter(rename = "latest_execution_payload_header_merge") )] + #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, #[superstruct( only(Capella), partial_getter(rename = 
"latest_execution_payload_header_capella") )] + #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, #[superstruct( only(Deneb), partial_getter(rename = "latest_execution_payload_header_deneb") )] + #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderDeneb, #[superstruct( only(Electra), partial_getter(rename = "latest_execution_payload_header_electra") )] + #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderElectra, // Capella #[superstruct(only(Capella, Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] + #[metastruct(exclude_from(tree_lists))] pub next_withdrawal_index: u64, #[superstruct(only(Capella, Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] + #[metastruct(exclude_from(tree_lists))] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. #[superstruct(only(Capella, Deneb, Electra))] - pub historical_summaries: VariableList, + #[test_random(default)] + pub historical_summaries: List, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] + #[metastruct(exclude)] pub total_active_balance: Option<(Epoch, u64)>, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] - pub progressive_balances_cache: ProgressiveBalancesCache, + #[metastruct(exclude)] + pub committee_caches: [Arc; CACHED_EPOCHS], #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] - pub committee_caches: [CommitteeCache; CACHED_EPOCHS], + #[metastruct(exclude)] + pub progressive_balances_cache: ProgressiveBalancesCache, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] + #[metastruct(exclude)] pub pubkey_cache: PubkeyCache, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] + #[metastruct(exclude)] pub exit_cache: ExitCache, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] + #[metastruct(exclude)] pub slashings_cache: SlashingsCache, /// Epoch cache of values that are useful for block processing that are static over an epoch. 
#[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] + #[metastruct(exclude)] pub epoch_cache: EpochCache, - #[serde(skip_serializing, skip_deserializing)] - #[ssz(skip_serializing, skip_deserializing)] - #[tree_hash(skip_hashing)] - #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] - pub tree_hash_cache: BeaconTreeHashCache, -} - -impl Clone for BeaconState { - fn clone(&self) -> Self { - self.clone_with(CloneConfig::all()) - } } impl BeaconState { @@ -395,6 +518,7 @@ impl BeaconState { /// /// Not a complete genesis state, see `initialize_beacon_state_from_eth1`. pub fn new(genesis_time: u64, eth1_data: Eth1Data, spec: &ChainSpec) -> Self { + let default_committee_cache = Arc::new(CommitteeCache::default()); BeaconState::Base(BeaconStateBase { // Versioning genesis_time, @@ -408,28 +532,28 @@ impl BeaconState { // History latest_block_header: BeaconBlock::::empty(spec).temporary_block_header(), - block_roots: FixedVector::from_elem(Hash256::zero()), - state_roots: FixedVector::from_elem(Hash256::zero()), - historical_roots: VariableList::empty(), + block_roots: Vector::default(), + state_roots: Vector::default(), + historical_roots: List::default(), // Eth1 eth1_data, - eth1_data_votes: VariableList::empty(), + eth1_data_votes: List::default(), eth1_deposit_index: 0, // Validator registry - validators: VariableList::empty(), // Set later. - balances: VariableList::empty(), // Set later. + validators: List::default(), // Set later. + balances: List::default(), // Set later. // Randomness - randao_mixes: FixedVector::from_elem(Hash256::zero()), + randao_mixes: Vector::default(), // Slashings - slashings: FixedVector::from_elem(0), + slashings: Vector::default(), // Attestations - previous_epoch_attestations: VariableList::empty(), - current_epoch_attestations: VariableList::empty(), + previous_epoch_attestations: List::default(), + current_epoch_attestations: List::default(), // Finality justification_bits: BitVector::new(), @@ -441,15 +565,14 @@ impl BeaconState { total_active_balance: None, progressive_balances_cache: <_>::default(), committee_caches: [ - CommitteeCache::default(), - CommitteeCache::default(), - CommitteeCache::default(), + default_committee_cache.clone(), + default_committee_cache.clone(), + default_committee_cache, ], pubkey_cache: PubkeyCache::default(), exit_cache: ExitCache::default(), slashings_cache: SlashingsCache::default(), epoch_cache: EpochCache::default(), - tree_hash_cache: <_>::default(), }) } @@ -485,30 +608,6 @@ impl BeaconState { } } - /// Specialised deserialisation method that uses the `ChainSpec` as context. - #[allow(clippy::arithmetic_side_effects)] - pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { - // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). - let slot_start = ::ssz_fixed_len() + ::ssz_fixed_len(); - let slot_end = slot_start + ::ssz_fixed_len(); - - let slot_bytes = bytes - .get(slot_start..slot_end) - .ok_or(DecodeError::InvalidByteLength { - len: bytes.len(), - expected: slot_end, - })?; - - let slot = Slot::from_ssz_bytes(slot_bytes)?; - let fork_at_slot = spec.fork_name_at_slot::(slot); - - Ok(map_fork_name!( - fork_at_slot, - Self, - <_>::from_ssz_bytes(bytes)? - )) - } - /// Returns the `tree_hash_root` of the state. 
/// /// Spec v0.12.1 @@ -516,11 +615,15 @@ impl BeaconState { Hash256::from_slice(&self.tree_hash_root()[..]) } - pub fn historical_batch(&self) -> HistoricalBatch { - HistoricalBatch { + pub fn historical_batch(&mut self) -> Result, Error> { + // Updating before cloning makes the clone cheap and saves repeated hashing. + self.block_roots_mut().apply_updates()?; + self.state_roots_mut().apply_updates()?; + + Ok(HistoricalBatch { block_roots: self.block_roots().clone(), state_roots: self.state_roots().clone(), - } + }) } /// This method ensures the state's pubkey cache is fully up-to-date before checking if the validator @@ -1079,8 +1182,9 @@ impl BeaconState { } /// Fill `randao_mixes` with - pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) { - *self.randao_mixes_mut() = FixedVector::from_elem(index_root); + pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) -> Result<(), Error> { + *self.randao_mixes_mut() = Vector::from_elem(index_root)?; + Ok(()) } /// Safely obtains the index for `randao_mixes` @@ -1213,7 +1317,7 @@ impl BeaconState { } /// Get a reference to the entire `slashings` vector. - pub fn get_all_slashings(&self) -> &[u64] { + pub fn get_all_slashings(&self) -> &Vector { self.slashings() } @@ -1237,45 +1341,25 @@ impl BeaconState { } /// Convenience accessor for validators and balances simultaneously. - pub fn validators_and_balances_and_progressive_balances_mut( - &mut self, + pub fn validators_and_balances_and_progressive_balances_mut<'a>( + &'a mut self, ) -> ( - &mut Validators, - &mut Balances, - &mut ProgressiveBalancesCache, + &'a mut Validators, + &'a mut Balances, + &'a mut ProgressiveBalancesCache, ) { - match self { - BeaconState::Base(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - BeaconState::Altair(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - BeaconState::Merge(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - BeaconState::Capella(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - BeaconState::Deneb(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - BeaconState::Electra(state) => ( - &mut state.validators, - &mut state.balances, - &mut state.progressive_balances_cache, - ), - } + map_beacon_state_ref_mut_into_beacon_state_ref!(&'a _, self.to_mut(), |inner, cons| { + if false { + cons(&*inner); + unreachable!() + } else { + ( + &mut inner.validators, + &mut inner.balances, + &mut inner.progressive_balances_cache, + ) + } + }) } #[allow(clippy::type_complexity)] @@ -1285,9 +1369,9 @@ impl BeaconState { ( &mut Validators, &mut Balances, - &VariableList, - &VariableList, - &mut VariableList, + &List, + &List, + &mut List, &mut ProgressiveBalancesCache, &mut ExitCache, &mut EpochCache, @@ -1349,6 +1433,13 @@ impl BeaconState { } } + /// Get a mutable reference to the balance of a single validator. + pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + self.balances_mut() + .get_mut(validator_index) + .ok_or(Error::BalancesOutOfBounds(validator_index)) + } + /// Generate a seed for the given `epoch`. pub fn get_seed( &self, @@ -1398,6 +1489,16 @@ impl BeaconState { .ok_or(Error::UnknownValidator(validator_index)) } + /// Safe copy-on-write accessor for the `validators` list. 
+ pub fn get_validator_cow( + &mut self, + validator_index: usize, + ) -> Result, Error> { + self.validators_mut() + .get_cow(validator_index) + .ok_or(Error::UnknownValidator(validator_index)) + } + /// Return the effective balance for a validator with the given `validator_index`. pub fn get_effective_balance(&self, validator_index: usize) -> Result { self.get_validator(validator_index) @@ -1423,13 +1524,6 @@ impl BeaconState { .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) } - /// Get a mutable reference to the balance of a single validator. - pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { - self.balances_mut() - .get_mut(validator_index) - .ok_or(Error::BalancesOutOfBounds(validator_index)) - } - /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. /// /// Spec v0.12.1 @@ -1575,7 +1669,7 @@ impl BeaconState { epoch: Epoch, previous_epoch: Epoch, current_epoch: Epoch, - ) -> Result<&mut VariableList, Error> { + ) -> Result<&mut List, Error> { if epoch == current_epoch { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), @@ -1659,7 +1753,6 @@ impl BeaconState { self.drop_committee_cache(RelativeEpoch::Current)?; self.drop_committee_cache(RelativeEpoch::Next)?; self.drop_pubkey_cache(); - self.drop_tree_hash_cache(); self.drop_progressive_balances_cache(); *self.exit_cache_mut() = ExitCache::default(); *self.slashings_cache_mut() = SlashingsCache::default(); @@ -1718,7 +1811,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result { + ) -> Result, Error> { CommitteeCache::initialized(self, epoch, spec) } @@ -1732,7 +1825,7 @@ impl BeaconState { self.committee_caches_mut().rotate_left(1); let next = Self::committee_cache_index(RelativeEpoch::Next); - *self.committee_cache_at_index_mut(next)? = CommitteeCache::default(); + *self.committee_cache_at_index_mut(next)? = Arc::new(CommitteeCache::default()); Ok(()) } @@ -1747,21 +1840,24 @@ impl BeaconState { /// Get the committee cache for some `slot`. /// /// Return an error if the cache for the slot's epoch is not initialized. - fn committee_cache_at_slot(&self, slot: Slot) -> Result<&CommitteeCache, Error> { + fn committee_cache_at_slot(&self, slot: Slot) -> Result<&Arc, Error> { let epoch = slot.epoch(E::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; self.committee_cache(relative_epoch) } /// Get the committee cache at a given index. - fn committee_cache_at_index(&self, index: usize) -> Result<&CommitteeCache, Error> { + fn committee_cache_at_index(&self, index: usize) -> Result<&Arc, Error> { self.committee_caches() .get(index) .ok_or(Error::CommitteeCachesOutOfBounds(index)) } /// Get a mutable reference to the committee cache at a given index. - fn committee_cache_at_index_mut(&mut self, index: usize) -> Result<&mut CommitteeCache, Error> { + fn committee_cache_at_index_mut( + &mut self, + index: usize, + ) -> Result<&mut Arc, Error> { self.committee_caches_mut() .get_mut(index) .ok_or(Error::CommitteeCachesOutOfBounds(index)) @@ -1769,7 +1865,10 @@ impl BeaconState { /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been /// initialized. 
- pub fn committee_cache(&self, relative_epoch: RelativeEpoch) -> Result<&CommitteeCache, Error> { + pub fn committee_cache( + &self, + relative_epoch: RelativeEpoch, + ) -> Result<&Arc, Error> { let i = Self::committee_cache_index(relative_epoch); let cache = self.committee_cache_at_index(i)?; @@ -1780,30 +1879,10 @@ impl BeaconState { } } - /// Returns the cache for some `RelativeEpoch`, replacing the existing cache with an - /// un-initialized cache. Returns an error if the existing cache has not been initialized. - pub fn take_committee_cache( - &mut self, - relative_epoch: RelativeEpoch, - ) -> Result { - let i = Self::committee_cache_index(relative_epoch); - let current_epoch = self.current_epoch(); - let cache = self - .committee_caches_mut() - .get_mut(i) - .ok_or(Error::CommitteeCachesOutOfBounds(i))?; - - if cache.is_initialized_at(relative_epoch.into_epoch(current_epoch)) { - Ok(mem::take(cache)) - } else { - Err(Error::CommitteeCacheUninitialized(Some(relative_epoch))) - } - } - /// Drops the cache, leaving it in an uninitialized state. pub fn drop_committee_cache(&mut self, relative_epoch: RelativeEpoch) -> Result<(), Error> { *self.committee_cache_at_index_mut(Self::committee_cache_index(relative_epoch))? = - CommitteeCache::default(); + Arc::new(CommitteeCache::default()); Ok(()) } @@ -1813,13 +1892,11 @@ impl BeaconState { /// never re-add a pubkey. pub fn update_pubkey_cache(&mut self) -> Result<(), Error> { let mut pubkey_cache = mem::take(self.pubkey_cache_mut()); - for (i, validator) in self - .validators() - .iter() - .enumerate() - .skip(pubkey_cache.len()) - { - let success = pubkey_cache.insert(validator.pubkey, i); + let start_index = pubkey_cache.len(); + + for (i, validator) in self.validators().iter_from(start_index)?.enumerate() { + let index = start_index.safe_add(i)?; + let success = pubkey_cache.insert(validator.pubkey, index); if !success { return Err(Error::PubkeyCacheInconsistent); } @@ -1834,96 +1911,71 @@ impl BeaconState { *self.pubkey_cache_mut() = PubkeyCache::default() } + pub fn has_pending_mutations(&self) -> bool { + let mut any_pending_mutations = false; + match &self { + Self::Base(self_inner) => { + map_beacon_state_base_tree_list_fields_immutable!(self_inner, |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + }); + } + Self::Altair(self_inner) => { + map_beacon_state_altair_tree_list_fields_immutable!(self_inner, |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + }); + } + Self::Merge(self_inner) => { + map_beacon_state_bellatrix_tree_list_fields_immutable!( + self_inner, + |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + } + ); + } + Self::Capella(self_inner) => { + map_beacon_state_capella_tree_list_fields_immutable!( + self_inner, + |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + } + ); + } + Self::Deneb(self_inner) => { + map_beacon_state_deneb_tree_list_fields_immutable!(self_inner, |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + }); + } + Self::Electra(self_inner) => { + map_beacon_state_electra_tree_list_fields_immutable!( + self_inner, + |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + } + ); + } + }; + any_pending_mutations + } + /// Completely drops the `progressive_balances_cache` cache, replacing it with a new, empty cache. 
     /// Completely drops the `progressive_balances_cache` cache, replacing it with a new, empty cache.
     fn drop_progressive_balances_cache(&mut self) {
         *self.progressive_balances_cache_mut() = ProgressiveBalancesCache::default();
     }
 
-    /// Initialize but don't fill the tree hash cache, if it isn't already initialized.
-    pub fn initialize_tree_hash_cache(&mut self) {
-        if !self.tree_hash_cache().is_initialized() {
-            *self.tree_hash_cache_mut() = BeaconTreeHashCache::new(self)
-        }
-    }
-
     /// Compute the tree hash root of the state using the tree hash cache.
     ///
     /// Initialize the tree hash cache if it isn't already initialized.
     pub fn update_tree_hash_cache(&mut self) -> Result<Hash256, Error> {
-        self.initialize_tree_hash_cache();
-
-        let cache = self.tree_hash_cache_mut().take();
-
-        if let Some(mut cache) = cache {
-            // Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as
-            // None. There's no need to keep a cache that fails.
-            let root = cache.recalculate_tree_hash_root(self)?;
-            self.tree_hash_cache_mut().restore(cache);
-            Ok(root)
-        } else {
-            Err(Error::TreeHashCacheNotInitialized)
-        }
+        self.apply_pending_mutations()?;
+        Ok(self.tree_hash_root())
     }
 
     /// Compute the tree hash root of the validators using the tree hash cache.
     ///
     /// Initialize the tree hash cache if it isn't already initialized.
     pub fn update_validators_tree_hash_cache(&mut self) -> Result<Hash256, Error> {
-        self.initialize_tree_hash_cache();
-
-        let cache = self.tree_hash_cache_mut().take();
-
-        if let Some(mut cache) = cache {
-            // Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as
-            // None. There's no need to keep a cache that fails.
-            let root = cache.recalculate_validators_tree_hash_root(self.validators())?;
-            self.tree_hash_cache_mut().restore(cache);
-            Ok(root)
-        } else {
-            Err(Error::TreeHashCacheNotInitialized)
-        }
-    }
-
-    /// Completely drops the tree hash cache, replacing it with a new, empty cache.
-    pub fn drop_tree_hash_cache(&mut self) {
-        self.tree_hash_cache_mut().uninitialize();
-    }
-
-    /// Clone the state whilst preserving only the selected caches.
- pub fn clone_with(&self, config: CloneConfig) -> Self { - let mut res = match self { - BeaconState::Base(inner) => BeaconState::Base(inner.clone()), - BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), - BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), - BeaconState::Capella(inner) => BeaconState::Capella(inner.clone()), - BeaconState::Deneb(inner) => BeaconState::Deneb(inner.clone()), - BeaconState::Electra(inner) => BeaconState::Electra(inner.clone()), - }; - if config.committee_caches { - res.committee_caches_mut() - .clone_from(self.committee_caches()); - *res.total_active_balance_mut() = *self.total_active_balance(); - } - if config.pubkey_cache { - *res.pubkey_cache_mut() = self.pubkey_cache().clone(); - } - if config.exit_cache { - *res.exit_cache_mut() = self.exit_cache().clone(); - } - if config.slashings_cache { - *res.slashings_cache_mut() = self.slashings_cache().clone(); - } - if config.tree_hash_cache { - *res.tree_hash_cache_mut() = self.tree_hash_cache().clone(); - } - if config.progressive_balances_cache { - *res.progressive_balances_cache_mut() = self.progressive_balances_cache().clone(); - } - res - } - - pub fn clone_with_only_committee_caches(&self) -> Self { - self.clone_with(CloneConfig::committee_caches_only()) + self.validators_mut().apply_updates()?; + Ok(self.validators().tree_hash_root()) } /// Passing `previous_epoch` to this function rather than computing it internally provides @@ -1979,10 +2031,177 @@ impl BeaconState { self.epoch_cache().get_base_reward(validator_index) } - pub fn compute_merkle_proof( - &mut self, - generalized_index: usize, - ) -> Result, Error> { + #[allow(clippy::arithmetic_side_effects)] + pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { + // Required for macros (which use type-hints internally). + + match (&mut *self, base) { + (Self::Base(self_inner), Self::Base(base_inner)) => { + bimap_beacon_state_base_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Base(_), _) => (), + (Self::Altair(self_inner), Self::Altair(base_inner)) => { + bimap_beacon_state_altair_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Altair(_), _) => (), + (Self::Merge(self_inner), Self::Merge(base_inner)) => { + bimap_beacon_state_bellatrix_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Merge(_), _) => (), + (Self::Capella(self_inner), Self::Capella(base_inner)) => { + bimap_beacon_state_capella_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Capella(_), _) => (), + (Self::Deneb(self_inner), Self::Deneb(base_inner)) => { + bimap_beacon_state_deneb_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Deneb(_), _) => (), + (Self::Electra(self_inner), Self::Electra(base_inner)) => { + bimap_beacon_state_electra_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Electra(_), _) => (), + } + + // Use sync committees from `base` if they are equal. 
+        if let Ok(current_sync_committee) = self.current_sync_committee_mut() {
+            if let Ok(base_sync_committee) = base.current_sync_committee() {
+                if current_sync_committee == base_sync_committee {
+                    *current_sync_committee = base_sync_committee.clone();
+                }
+            }
+        }
+        if let Ok(next_sync_committee) = self.next_sync_committee_mut() {
+            if let Ok(base_sync_committee) = base.next_sync_committee() {
+                if next_sync_committee == base_sync_committee {
+                    *next_sync_committee = base_sync_committee.clone();
+                }
+            }
+        }
+
+        // Rebase caches like the committee caches and the pubkey cache, which are expensive to
+        // rebuild and likely to be re-usable from the base state.
+        self.rebase_caches_on(base, spec)?;
+
+        Ok(())
+    }
+
+    pub fn rebase_caches_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> {
+        // Use pubkey cache from `base` if it contains superior information (likely if our cache is
+        // uninitialized).
+        let num_validators = self.validators().len();
+        let pubkey_cache = self.pubkey_cache_mut();
+        let base_pubkey_cache = base.pubkey_cache();
+        if pubkey_cache.len() < base_pubkey_cache.len() && pubkey_cache.len() < num_validators {
+            *pubkey_cache = base_pubkey_cache.clone();
+        }
+
+        // Use committee caches from `base` if they are relevant.
+        let epochs = [
+            self.previous_epoch(),
+            self.current_epoch(),
+            self.next_epoch()?,
+        ];
+        for (index, epoch) in epochs.into_iter().enumerate() {
+            if let Ok(base_relative_epoch) = RelativeEpoch::from_epoch(base.current_epoch(), epoch)
+            {
+                *self.committee_cache_at_index_mut(index)? =
+                    base.committee_cache(base_relative_epoch)?.clone();
+
+                // Ensure total active balance cache remains built whenever current committee
+                // cache is built.
+                if epoch == self.current_epoch() {
+                    self.build_total_active_balance_cache(spec)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl<E: EthSpec> BeaconState<E> {
+    /// The number of fields of the `BeaconState` rounded up to the nearest power of two.
+    ///
+    /// This is relevant to tree-hashing of the `BeaconState`.
+    ///
+    /// We assume this value is stable across forks. This assumption is checked in the
+    /// `check_num_fields_pow2` test.
+    pub const NUM_FIELDS_POW2: usize = BeaconStateMerge::<E>::NUM_FIELDS.next_power_of_two();
+
+    /// Specialised deserialisation method that uses the `ChainSpec` as context.
+    #[allow(clippy::arithmetic_side_effects)]
+    pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result<Self, DecodeError> {
+        // Slot is after genesis_time (u64) and genesis_validators_root (Hash256).
+        let slot_start = <u64 as Decode>::ssz_fixed_len() + <Hash256 as Decode>::ssz_fixed_len();
+        let slot_end = slot_start + <Slot as Decode>::ssz_fixed_len();
+
+        let slot_bytes = bytes
+            .get(slot_start..slot_end)
+            .ok_or(DecodeError::InvalidByteLength {
+                len: bytes.len(),
+                expected: slot_end,
+            })?;
+
+        let slot = Slot::from_ssz_bytes(slot_bytes)?;
+        let fork_at_slot = spec.fork_name_at_slot::<E>(slot);
+
+        Ok(map_fork_name!(
+            fork_at_slot,
+            Self,
+            <_>::from_ssz_bytes(bytes)?
+        ))
+    }
+
+    #[allow(clippy::arithmetic_side_effects)]
+    pub fn apply_pending_mutations(&mut self) -> Result<(), Error> {
+        match self {
+            Self::Base(inner) => {
+                map_beacon_state_base_tree_list_fields!(inner, |_, x| { x.apply_updates() })
+            }
+            Self::Altair(inner) => {
+                map_beacon_state_altair_tree_list_fields!(inner, |_, x| { x.apply_updates() })
+            }
+            Self::Merge(inner) => {
+                map_beacon_state_bellatrix_tree_list_fields!(inner, |_, x| { x.apply_updates() })
+            }
+            Self::Capella(inner) => {
+                map_beacon_state_capella_tree_list_fields!(inner, |_, x| { x.apply_updates() })
+            }
+            Self::Deneb(inner) => {
+                map_beacon_state_deneb_tree_list_fields!(inner, |_, x| { x.apply_updates() })
+            }
+            Self::Electra(inner) => {
+                map_beacon_state_electra_tree_list_fields!(inner, |_, x| { x.apply_updates() })
+            }
+        }
+        Ok(())
+    }
+
+    pub fn compute_merkle_proof(&self, generalized_index: usize) -> Result<Vec<Hash256>, Error> {
         // 1. Convert generalized index to field index.
         let field_index = match generalized_index {
             light_client_update::CURRENT_SYNC_COMMITTEE_INDEX
             | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => {
                 // Sync committees are top-level fields, subtract off the generalized indices
                 // in the `BeaconState`:
                 // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate
                 generalized_index
-                    .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES)
+                    .checked_sub(Self::NUM_FIELDS_POW2)
                     .ok_or(Error::IndexNotSupported(generalized_index))?
             }
             light_client_update::FINALIZED_ROOT_INDEX => {
@@ -2002,20 +2221,47 @@ impl<E: EthSpec> BeaconState<E> {
                 // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches
                 // position of `finalized_checkpoint` in `BeaconState`.
                 finalized_checkpoint_generalized_index
-                    .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES)
+                    .checked_sub(Self::NUM_FIELDS_POW2)
                     .ok_or(Error::IndexNotSupported(generalized_index))?
             }
             _ => return Err(Error::IndexNotSupported(generalized_index)),
         };
 
         // 2. Get all `BeaconState` leaves.
-        self.initialize_tree_hash_cache();
-        let mut cache = self
-            .tree_hash_cache_mut()
-            .take()
-            .ok_or(Error::TreeHashCacheNotInitialized)?;
-        let leaves = cache.recalculate_tree_hash_leaves(self)?;
-        self.tree_hash_cache_mut().restore(cache);
+        let mut leaves = vec![];
+        #[allow(clippy::arithmetic_side_effects)]
+        match self {
+            BeaconState::Base(state) => {
+                map_beacon_state_base_fields!(state, |_, field| {
+                    leaves.push(field.tree_hash_root());
+                });
+            }
+            BeaconState::Altair(state) => {
+                map_beacon_state_altair_fields!(state, |_, field| {
+                    leaves.push(field.tree_hash_root());
+                });
+            }
+            BeaconState::Merge(state) => {
+                map_beacon_state_bellatrix_fields!(state, |_, field| {
+                    leaves.push(field.tree_hash_root());
+                });
+            }
+            BeaconState::Capella(state) => {
+                map_beacon_state_capella_fields!(state, |_, field| {
+                    leaves.push(field.tree_hash_root());
+                });
+            }
+            BeaconState::Deneb(state) => {
+                map_beacon_state_deneb_fields!(state, |_, field| {
+                    leaves.push(field.tree_hash_root());
+                });
+            }
+            BeaconState::Electra(state) => {
+                map_beacon_state_electra_fields!(state, |_, field| {
+                    leaves.push(field.tree_hash_root());
+                });
+            }
+        };
 
         // 3. Make deposit tree.
         // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`).
@@ -2074,9 +2320,10 @@ impl From<ArithError> for Error {
     }
 }
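// Aside: a worked example of the generalized-index arithmetic used by
// `compute_merkle_proof` above, with the altair light-client constants
// (CURRENT_SYNC_COMMITTEE_INDEX = 54, NEXT_SYNC_COMMITTEE_INDEX = 55,
// FINALIZED_ROOT_INDEX = 105) and NUM_FIELDS_POW2 = 32:
#[test]
fn generalized_index_to_field_index_example() {
    const NUM_FIELDS_POW2: usize = 32;
    // Top-level fields: subtract the padded leaf count directly.
    assert_eq!(54 - NUM_FIELDS_POW2, 22); // position of `current_sync_committee`
    assert_eq!(55 - NUM_FIELDS_POW2, 23); // position of `next_sync_committee`
    // `finalized_root` lives one level deeper, inside `finalized_checkpoint`,
    // so first step up to the parent node by halving the generalized index.
    assert_eq!(105 / 2 - NUM_FIELDS_POW2, 20); // position of `finalized_checkpoint`
}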
-/// Helper function for "cloning" a field by using its default value.
-fn clone_default<T: Default>(_value: &T) -> T {
-    T::default()
+impl From<milhouse::Error> for Error {
+    fn from(e: milhouse::Error) -> Self {
+        Self::MilhouseError(e)
+    }
 }
 
 impl<E: EthSpec> CompareFields for BeaconState<E> {
diff --git a/consensus/types/src/beacon_state/clone_config.rs b/consensus/types/src/beacon_state/clone_config.rs
deleted file mode 100644
index 27e066d5db6..00000000000
--- a/consensus/types/src/beacon_state/clone_config.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-/// Configuration struct for controlling which caches of a `BeaconState` should be cloned.
-#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)]
-pub struct CloneConfig {
-    pub committee_caches: bool,
-    pub pubkey_cache: bool,
-    pub exit_cache: bool,
-    pub slashings_cache: bool,
-    pub tree_hash_cache: bool,
-    pub progressive_balances_cache: bool,
-}
-
-impl CloneConfig {
-    pub fn all() -> Self {
-        Self {
-            committee_caches: true,
-            pubkey_cache: true,
-            exit_cache: true,
-            slashings_cache: true,
-            tree_hash_cache: true,
-            progressive_balances_cache: true,
-        }
-    }
-
-    pub fn none() -> Self {
-        Self::default()
-    }
-
-    pub fn committee_caches_only() -> Self {
-        Self {
-            committee_caches: true,
-            ..Self::none()
-        }
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[test]
-    fn sanity() {
-        assert!(CloneConfig::all().pubkey_cache);
-        assert!(!CloneConfig::none().tree_hash_cache);
-        assert!(CloneConfig::committee_caches_only().committee_caches);
-        assert!(!CloneConfig::committee_caches_only().exit_cache);
-    }
-}
diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs
index a6b12cf5af3..7913df8e00e 100644
--- a/consensus/types/src/beacon_state/committee_cache.rs
+++ b/consensus/types/src/beacon_state/committee_cache.rs
@@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize};
 use ssz::{four_byte_option_impl, Decode, DecodeError, Encode};
 use ssz_derive::{Decode, Encode};
 use std::ops::Range;
+use std::sync::Arc;
 use swap_or_not_shuffle::shuffle_list;
 
 mod tests;
@@ -65,7 +66,7 @@ impl CommitteeCache {
         state: &BeaconState<E>,
         epoch: Epoch,
         spec: &ChainSpec,
-    ) -> Result<CommitteeCache, Error> {
+    ) -> Result<Arc<CommitteeCache>, Error> {
         // Check that the cache is being built for an in-range epoch.
         //
         // We allow caches to be constructed for historic epochs, per:
@@ -115,13 +116,13 @@ impl CommitteeCache {
             .ok_or(Error::ShuffleIndexOutOfBounds(v))? = NonZeroUsize::new(i + 1).into();
         }
 
-        Ok(CommitteeCache {
+        Ok(Arc::new(CommitteeCache {
             initialized_epoch: Some(epoch),
             shuffling,
             shuffling_positions,
             committees_per_slot,
             slots_per_epoch: E::slots_per_epoch(),
-        })
+        }))
     }
 
     /// Returns `true` if the cache has been initialized at the supplied `epoch`.
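// Aside: the switch to `Arc<CommitteeCache>` here, together with the rpds
// structures adopted for the pubkey and slashings caches below, is what lets
// plain `BeaconState::clone` replace the old `CloneConfig` machinery: cloning
// shares structure instead of copying it. A self-contained sketch; the rpds
// calls `insert_mut`/`get` appear in this diff, while `new_sync` is an
// assumed constructor for the Sync variant:
use rpds::HashTrieMapSync;
use std::sync::Arc;

fn structural_sharing_demo() {
    // Cloning an `Arc` bumps a reference count; the cache itself is shared.
    let cache: Arc<Vec<u64>> = Arc::new(vec![1, 2, 3]);
    let clone = Arc::clone(&cache);
    assert!(Arc::ptr_eq(&cache, &clone));

    // Cloning a persistent map shares trie nodes; later in-place inserts on
    // one handle do not disturb earlier snapshots.
    let mut map: HashTrieMapSync<u64, u64> = HashTrieMapSync::new_sync();
    map.insert_mut(1, 10);
    let snapshot = map.clone();
    map.insert_mut(2, 20);
    assert_eq!(map.get(&2), Some(&20));
    assert_eq!(snapshot.get(&2), None);
}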
diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index a5effb9363b..a2274765691 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -92,7 +92,7 @@ async fn shuffles_for_the_right_epoch() { .map(|i| Hash256::from_low_u64_be(i as u64)) .collect(); - *state.randao_mixes_mut() = FixedVector::from(distinct_hashes); + *state.randao_mixes_mut() = Vector::try_from_iter(distinct_hashes).unwrap(); let previous_seed = state .get_seed(state.previous_epoch(), Domain::BeaconAttester, spec) diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index bda788e63b9..0bb984b6676 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -1,10 +1,9 @@ use super::{BeaconStateError, ChainSpec, Epoch, Validator}; use safe_arith::SafeArith; -use serde::{Deserialize, Serialize}; use std::cmp::Ordering; /// Map from exit epoch to the number of validators with that exit epoch. -#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct ExitCache { /// True if the cache has been initialized. initialized: bool, @@ -16,7 +15,11 @@ pub struct ExitCache { impl ExitCache { /// Initialize a new cache for the given list of validators. - pub fn new(validators: &[Validator], spec: &ChainSpec) -> Result { + pub fn new<'a, V, I>(validators: V, spec: &ChainSpec) -> Result + where + V: IntoIterator, + I: ExactSizeIterator + Iterator, + { let mut exit_cache = ExitCache { initialized: true, max_exit_epoch: Epoch::new(0), @@ -24,7 +27,7 @@ impl ExitCache { }; // Add all validators with a non-default exit epoch to the cache. validators - .iter() + .into_iter() .filter(|validator| validator.exit_epoch != spec.far_future_epoch) .try_for_each(|validator| exit_cache.record_validator_exit(validator.exit_epoch))?; Ok(exit_cache) diff --git a/consensus/types/src/beacon_state/iter.rs b/consensus/types/src/beacon_state/iter.rs index 2d3ad02c836..2caa0365e01 100644 --- a/consensus/types/src/beacon_state/iter.rs +++ b/consensus/types/src/beacon_state/iter.rs @@ -74,7 +74,7 @@ mod test { let mut state: BeaconState = BeaconState::new(0, <_>::default(), &spec); for i in 0..state.block_roots().len() { - state.block_roots_mut()[i] = root_slot(i).1; + *state.block_roots_mut().get_mut(i).unwrap() = root_slot(i).1; } assert_eq!( @@ -122,7 +122,7 @@ mod test { let mut state: BeaconState = BeaconState::new(0, <_>::default(), &spec); for i in 0..state.block_roots().len() { - state.block_roots_mut()[i] = root_slot(i).1; + *state.block_roots_mut().get_mut(i).unwrap() = root_slot(i).1; } assert_eq!( diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/beacon_state/pubkey_cache.rs index 0b61ea3c5f8..d58dd7bc1dd 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/beacon_state/pubkey_cache.rs @@ -1,21 +1,21 @@ use crate::*; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; +use rpds::HashTrieMapSync as HashTrieMap; type ValidatorIndex = usize; #[allow(clippy::len_without_is_empty)] -#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Default)] pub struct PubkeyCache { - /// Maintain the number of keys added to the map. 
It is not sufficient to just use the HashMap - /// len, as it does not increase when duplicate keys are added. Duplicate keys are used during - /// testing. + /// Maintain the number of keys added to the map. It is not sufficient to just use the + /// HashTrieMap len, as it does not increase when duplicate keys are added. Duplicate keys are + /// used during testing. len: usize, - map: HashMap, + map: HashTrieMap, } impl PubkeyCache { /// Returns the number of validator indices added to the map so far. + #[allow(clippy::len_without_is_empty)] pub fn len(&self) -> ValidatorIndex { self.len } @@ -26,7 +26,7 @@ impl PubkeyCache { /// that an index is never skipped. pub fn insert(&mut self, pubkey: PublicKeyBytes, index: ValidatorIndex) -> bool { if index == self.len { - self.map.insert(pubkey, index); + self.map.insert_mut(pubkey, index); self.len = self .len .checked_add(1) diff --git a/consensus/types/src/beacon_state/slashings_cache.rs b/consensus/types/src/beacon_state/slashings_cache.rs index cfdc349f86c..45d8f7e2129 100644 --- a/consensus/types/src/beacon_state/slashings_cache.rs +++ b/consensus/types/src/beacon_state/slashings_cache.rs @@ -1,13 +1,13 @@ use crate::{BeaconStateError, Slot, Validator}; use arbitrary::Arbitrary; -use std::collections::HashSet; +use rpds::HashTrieSetSync as HashTrieSet; /// Persistent (cheap to clone) cache of all slashed validator indices. #[derive(Debug, Default, Clone, PartialEq, Arbitrary)] pub struct SlashingsCache { latest_block_slot: Option, #[arbitrary(default)] - slashed_validators: HashSet, + slashed_validators: HashTrieSet, } impl SlashingsCache { @@ -49,7 +49,7 @@ impl SlashingsCache { validator_index: usize, ) -> Result<(), BeaconStateError> { self.check_initialized(block_slot)?; - self.slashed_validators.insert(validator_index); + self.slashed_validators.insert_mut(validator_index); Ok(()) } diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 00625a1788e..012c063afef 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -1,20 +1,14 @@ #![cfg(test)] -use crate::test_utils::*; -use beacon_chain::test_utils::{ - interop_genesis_state_with_eth1, test_spec, BeaconChainHarness, EphemeralHarnessType, - DEFAULT_ETH1_BLOCK_HASH, -}; +use crate::{test_utils::*, ForkName}; +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_chain::types::{ - test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError, - ChainSpec, CloneConfig, Domain, Epoch, EthSpec, FixedVector, Hash256, Keypair, MainnetEthSpec, - MinimalEthSpec, RelativeEpoch, Slot, + test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateCapella, + BeaconStateDeneb, BeaconStateElectra, BeaconStateError, BeaconStateMerge, ChainSpec, Domain, + Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, Slot, Vector, }; -use safe_arith::SafeArith; use ssz::Encode; -use state_processing::per_slot_processing; use std::ops::Mul; use swap_or_not_shuffle::compute_shuffled_index; -use tree_hash::TreeHash; pub const MAX_VALIDATOR_COUNT: usize = 129; pub const SLOT_OFFSET: Slot = Slot::new(1); @@ -101,7 +95,11 @@ async fn test_beacon_proposer_index() { // Test with two validators per slot, first validator has zero balance. 
let mut state = build_state::((E::slots_per_epoch() as usize).mul(2)).await; let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec); - state.validators_mut()[slot0_candidate0].effective_balance = 0; + state + .validators_mut() + .get_mut(slot0_candidate0) + .unwrap() + .effective_balance = 0; test(&state, Slot::new(0), 1); for i in 1..E::slots_per_epoch() { test(&state, Slot::from(i), 0); @@ -159,85 +157,6 @@ async fn cache_initialization() { test_cache_initialization(&mut state, RelativeEpoch::Next, &spec); } -fn test_clone_config(base_state: &BeaconState, clone_config: CloneConfig) { - let state = base_state.clone_with(clone_config); - if clone_config.committee_caches { - state - .committee_cache(RelativeEpoch::Previous) - .expect("committee cache exists"); - state - .committee_cache(RelativeEpoch::Current) - .expect("committee cache exists"); - state - .committee_cache(RelativeEpoch::Next) - .expect("committee cache exists"); - state - .total_active_balance() - .expect("total active balance exists"); - } else { - state - .committee_cache(RelativeEpoch::Previous) - .expect_err("shouldn't exist"); - state - .committee_cache(RelativeEpoch::Current) - .expect_err("shouldn't exist"); - state - .committee_cache(RelativeEpoch::Next) - .expect_err("shouldn't exist"); - } - if clone_config.pubkey_cache { - assert_ne!(state.pubkey_cache().len(), 0); - } else { - assert_eq!(state.pubkey_cache().len(), 0); - } - if clone_config.exit_cache { - state - .exit_cache() - .check_initialized() - .expect("exit cache exists"); - } else { - state - .exit_cache() - .check_initialized() - .expect_err("exit cache doesn't exist"); - } - if clone_config.tree_hash_cache { - assert!(state.tree_hash_cache().is_initialized()); - } else { - assert!( - !state.tree_hash_cache().is_initialized(), - "{:?}", - clone_config - ); - } -} - -#[tokio::test] -async fn clone_config() { - let spec = MinimalEthSpec::default_spec(); - - let mut state = build_state::(16).await; - - state.build_caches(&spec).unwrap(); - state - .update_tree_hash_cache() - .expect("should update tree hash cache"); - - let num_caches = 6; - let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig { - committee_caches: (i & 1) != 0, - pubkey_cache: ((i >> 1) & 1) != 0, - exit_cache: ((i >> 2) & 1) != 0, - slashings_cache: ((i >> 3) & 1) != 0, - tree_hash_cache: ((i >> 4) & 1) != 0, - progressive_balances_cache: ((i >> 5) & 1) != 0, - }); - - for config in all_configs { - test_clone_config(&state, config); - } -} - /// Tests committee-specific components #[cfg(test)] mod committees { @@ -328,10 +247,9 @@ mod committees { let harness = get_harness::(validator_count, slot).await; let mut new_head_state = harness.get_current_state(); - let distinct_hashes: Vec = (0..E::epochs_per_historical_vector()) - .map(|i| Hash256::from_low_u64_be(i as u64)) - .collect(); - *new_head_state.randao_mixes_mut() = FixedVector::from(distinct_hashes); + let distinct_hashes = + (0..E::epochs_per_historical_vector()).map(|i| Hash256::from_low_u64_be(i as u64)); + *new_head_state.randao_mixes_mut() = Vector::try_from_iter(distinct_hashes).unwrap(); new_head_state .force_build_committee_cache(RelativeEpoch::Previous, spec) @@ -487,120 +405,22 @@ fn decode_base_and_altair() { } #[test] -fn tree_hash_cache_linear_history() { - let mut rng = XorShiftRng::from_seed([42; 16]); - - let mut state: BeaconState = - BeaconState::Base(BeaconStateBase::random_for_test(&mut rng)); - - let root = state.update_tree_hash_cache().unwrap(); - - assert_eq!(root.as_bytes(), 
-        &state.tree_hash_root()[..]);
-
-    /*
-     * A cache should hash twice without updating the slot.
-     */
-
-    assert_eq!(
-        state.update_tree_hash_cache().unwrap(),
-        root,
-        "tree hash result should be identical on the same slot"
-    );
-
-    /*
-     * A cache should not hash after updating the slot but not updating the state roots.
-     */
-
-    // The tree hash cache needs to be rebuilt since it was dropped when it failed.
-    state
-        .update_tree_hash_cache()
-        .expect("should rebuild cache");
-
-    *state.slot_mut() += 1;
-
-    assert_eq!(
-        state.update_tree_hash_cache(),
-        Err(BeaconStateError::NonLinearTreeHashCacheHistory),
-        "should not build hash without updating the state root"
-    );
-
-    /*
-     * The cache should update if the slot and state root are updated.
-     */
-
-    // The tree hash cache needs to be rebuilt since it was dropped when it failed.
-    let root = state
-        .update_tree_hash_cache()
-        .expect("should rebuild cache");
-
-    *state.slot_mut() += 1;
-    state
-        .set_state_root(state.slot() - 1, root)
-        .expect("should set state root");
-
-    let root = state.update_tree_hash_cache().unwrap();
-    assert_eq!(root.as_bytes(), &state.tree_hash_root()[..]);
-}
-
-// Check how the cache behaves when there's a distance larger than `SLOTS_PER_HISTORICAL_ROOT`
-// since its last update.
-#[test]
-fn tree_hash_cache_linear_history_long_skip() {
-    let validator_count = 128;
-    let keypairs = generate_deterministic_keypairs(validator_count);
-
-    let spec = &test_spec::<MinimalEthSpec>();
-
-    // This state has a cache that advances normally each slot.
-    let mut state: BeaconState<MinimalEthSpec> = interop_genesis_state_with_eth1(
-        &keypairs,
-        0,
-        Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
-        None,
-        spec,
-    )
-    .unwrap();
-
-    state.update_tree_hash_cache().unwrap();
-
-    // This state retains its original cache until it is updated after a long skip.
-    let mut original_cache_state = state.clone();
-    assert!(original_cache_state.tree_hash_cache().is_initialized());
-
-    // Advance the states to a slot beyond the historical state root limit, using the state root
-    // from the first state to avoid touching the original state's cache.
-    let start_slot = state.slot();
-    let target_slot = start_slot
-        .safe_add(MinimalEthSpec::slots_per_historical_root() as u64 + 1)
-        .unwrap();
-
-    let mut prev_state_root;
-    while state.slot() < target_slot {
-        prev_state_root = state.update_tree_hash_cache().unwrap();
-        per_slot_processing(&mut state, None, spec).unwrap();
-        per_slot_processing(&mut original_cache_state, Some(prev_state_root), spec).unwrap();
+fn check_num_fields_pow2() {
+    use metastruct::NumFields;
+    pub type E = MainnetEthSpec;
+
+    for fork_name in ForkName::list_all() {
+        let num_fields = match fork_name {
+            ForkName::Base => BeaconStateBase::<E>::NUM_FIELDS,
+            ForkName::Altair => BeaconStateAltair::<E>::NUM_FIELDS,
+            ForkName::Merge => BeaconStateMerge::<E>::NUM_FIELDS,
+            ForkName::Capella => BeaconStateCapella::<E>::NUM_FIELDS,
+            ForkName::Deneb => BeaconStateDeneb::<E>::NUM_FIELDS,
+            ForkName::Electra => BeaconStateElectra::<E>::NUM_FIELDS,
+        };
+        assert_eq!(
+            num_fields.next_power_of_two(),
+            BeaconState::<E>::NUM_FIELDS_POW2
+        );
     }
-
-    // The state with the original cache should still be initialized at the starting slot.
-    assert_eq!(
-        original_cache_state
-            .tree_hash_cache()
-            .initialized_slot()
-            .unwrap(),
-        start_slot
-    );
-
-    // Updating the tree hash cache should be successful despite the long skip.
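// Aside: the invariant checked by `check_num_fields_pow2` above, in concrete
// numbers (field counts per fork from the consensus specs; the padded leaf
// count of 32 also appears in the `105/2 - 32 = 20` comment earlier):
#[test]
fn field_counts_fit_in_32_leaves_example() {
    // phase0 = 21, altair = 24, bellatrix = 25, capella/deneb = 28.
    for num_fields in [21usize, 24, 25, 28] {
        assert_eq!(num_fields.next_power_of_two(), 32);
    }
}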
- assert_eq!( - original_cache_state.update_tree_hash_cache().unwrap(), - state.update_tree_hash_cache().unwrap() - ); - - assert_eq!( - original_cache_state - .tree_hash_cache() - .initialized_slot() - .unwrap(), - target_slot - ); } diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs deleted file mode 100644 index 290020b1b35..00000000000 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ /dev/null @@ -1,645 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] -#![allow(clippy::disallowed_methods)] -#![allow(clippy::indexing_slicing)] - -use super::Error; -use crate::historical_summary::HistoricalSummaryCache; -use crate::{BeaconState, EthSpec, Hash256, ParticipationList, Slot, Unsigned, Validator}; -use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; -use rayon::prelude::*; -use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; -use std::cmp::Ordering; -use tree_hash::{mix_in_length, MerkleHasher, TreeHash}; - -/// The number of leaves (including padding) on the `BeaconState` Merkle tree. -/// -/// ## Note -/// -/// This constant is set with the assumption that there are `> 16` and `<= 32` fields on the -/// `BeaconState`. **Tree hashing will fail if this value is set incorrectly.** -pub const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32; - -/// The number of nodes in the Merkle tree of a validator record. -const NODES_PER_VALIDATOR: usize = 15; - -/// The number of validator record tree hash caches stored in each arena. -/// -/// This is primarily used for concurrency; if we have 16 validators and set `VALIDATORS_PER_ARENA -/// == 8` then it is possible to do a 2-core concurrent hash. -/// -/// Do not set to 0. -const VALIDATORS_PER_ARENA: usize = 4_096; - -#[derive(Debug, PartialEq, Clone, Encode, Decode)] -pub struct Eth1DataVotesTreeHashCache { - arena: CacheArena, - tree_hash_cache: TreeHashCache, - voting_period: u64, - roots: VariableList, -} - -impl Eth1DataVotesTreeHashCache { - /// Instantiates a new cache. - /// - /// Allocates the necessary memory to store all of the cached Merkle trees. Only the leaves are - /// hashed, leaving the internal nodes as all-zeros. - pub fn new(state: &BeaconState) -> Self { - let mut arena = CacheArena::default(); - let roots: VariableList<_, _> = state - .eth1_data_votes() - .iter() - .map(|eth1_data| eth1_data.tree_hash_root()) - .collect::>() - .into(); - let tree_hash_cache = roots.new_tree_hash_cache(&mut arena); - - Self { - arena, - tree_hash_cache, - voting_period: Self::voting_period(state.slot()), - roots, - } - } - - fn voting_period(slot: Slot) -> u64 { - slot.as_u64() / E::SlotsPerEth1VotingPeriod::to_u64() - } - - pub fn recalculate_tree_hash_root(&mut self, state: &BeaconState) -> Result { - if state.eth1_data_votes().len() < self.roots.len() - || Self::voting_period(state.slot()) != self.voting_period - { - *self = Self::new(state); - } - - state - .eth1_data_votes() - .iter() - .skip(self.roots.len()) - .try_for_each(|eth1_data| self.roots.push(eth1_data.tree_hash_root()))?; - - self.roots - .recalculate_tree_hash_root(&mut self.arena, &mut self.tree_hash_cache) - .map_err(Into::into) - } -} - -/// A cache that performs a caching tree hash of the entire `BeaconState` struct. -/// -/// This type is a wrapper around the inner cache, which does all the work. 
-#[derive(Debug, Default, PartialEq, Clone)] -pub struct BeaconTreeHashCache { - inner: Option>, -} - -impl BeaconTreeHashCache { - pub fn new(state: &BeaconState) -> Self { - Self { - inner: Some(BeaconTreeHashCacheInner::new(state)), - } - } - - pub fn is_initialized(&self) -> bool { - self.inner.is_some() - } - - /// Move the inner cache out so that the containing `BeaconState` can be borrowed. - pub fn take(&mut self) -> Option> { - self.inner.take() - } - - /// Restore the inner cache after using `take`. - pub fn restore(&mut self, inner: BeaconTreeHashCacheInner) { - self.inner = Some(inner); - } - - /// Make the cache empty. - pub fn uninitialize(&mut self) { - self.inner = None; - } - - /// Return the slot at which the cache was last updated. - /// - /// This should probably only be used during testing. - pub fn initialized_slot(&self) -> Option { - Some(self.inner.as_ref()?.previous_state?.1) - } -} - -#[derive(Debug, PartialEq, Clone)] -pub struct BeaconTreeHashCacheInner { - /// Tracks the previously generated state root to ensure the next state root provided descends - /// directly from this state. - previous_state: Option<(Hash256, Slot)>, - // Validators cache - validators: ValidatorsListTreeHashCache, - // Arenas - fixed_arena: CacheArena, - balances_arena: CacheArena, - slashings_arena: CacheArena, - // Caches - block_roots: TreeHashCache, - state_roots: TreeHashCache, - historical_roots: TreeHashCache, - historical_summaries: OptionalTreeHashCache, - balances: TreeHashCache, - randao_mixes: TreeHashCache, - slashings: TreeHashCache, - eth1_data_votes: Eth1DataVotesTreeHashCache, - inactivity_scores: OptionalTreeHashCache, - // Participation caches - previous_epoch_participation: OptionalTreeHashCache, - current_epoch_participation: OptionalTreeHashCache, -} - -impl BeaconTreeHashCacheInner { - /// Instantiates a new cache. - /// - /// Allocates the necessary memory to store all of the cached Merkle trees. Only the leaves are - /// hashed, leaving the internal nodes as all-zeros. 
- pub fn new(state: &BeaconState) -> Self { - let mut fixed_arena = CacheArena::default(); - let block_roots = state.block_roots().new_tree_hash_cache(&mut fixed_arena); - let state_roots = state.state_roots().new_tree_hash_cache(&mut fixed_arena); - let historical_roots = state - .historical_roots() - .new_tree_hash_cache(&mut fixed_arena); - let historical_summaries = OptionalTreeHashCache::new( - state - .historical_summaries() - .ok() - .map(HistoricalSummaryCache::new) - .as_ref(), - ); - - let randao_mixes = state.randao_mixes().new_tree_hash_cache(&mut fixed_arena); - - let validators = ValidatorsListTreeHashCache::new::(state.validators()); - - let mut balances_arena = CacheArena::default(); - let balances = state.balances().new_tree_hash_cache(&mut balances_arena); - - let mut slashings_arena = CacheArena::default(); - let slashings = state.slashings().new_tree_hash_cache(&mut slashings_arena); - - let inactivity_scores = OptionalTreeHashCache::new(state.inactivity_scores().ok()); - - let previous_epoch_participation = OptionalTreeHashCache::new( - state - .previous_epoch_participation() - .ok() - .map(ParticipationList::new) - .as_ref(), - ); - let current_epoch_participation = OptionalTreeHashCache::new( - state - .current_epoch_participation() - .ok() - .map(ParticipationList::new) - .as_ref(), - ); - - Self { - previous_state: None, - validators, - fixed_arena, - balances_arena, - slashings_arena, - block_roots, - state_roots, - historical_roots, - historical_summaries, - balances, - randao_mixes, - slashings, - inactivity_scores, - eth1_data_votes: Eth1DataVotesTreeHashCache::new(state), - previous_epoch_participation, - current_epoch_participation, - } - } - - pub fn recalculate_tree_hash_leaves( - &mut self, - state: &BeaconState, - ) -> Result, Error> { - let mut leaves = vec![ - // Genesis data leaves. - state.genesis_time().tree_hash_root(), - state.genesis_validators_root().tree_hash_root(), - // Current fork data leaves. - state.slot().tree_hash_root(), - state.fork().tree_hash_root(), - state.latest_block_header().tree_hash_root(), - // Roots leaves. - state - .block_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)?, - state - .state_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)?, - state - .historical_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)?, - // Eth1 Data leaves. - state.eth1_data().tree_hash_root(), - self.eth1_data_votes.recalculate_tree_hash_root(state)?, - state.eth1_deposit_index().tree_hash_root(), - // Validator leaves. 
- self.validators - .recalculate_tree_hash_root(state.validators())?, - state - .balances() - .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)?, - state - .randao_mixes() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)?, - state - .slashings() - .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?, - ]; - - // Participation - if let BeaconState::Base(state) = state { - leaves.push(state.previous_epoch_attestations.tree_hash_root()); - leaves.push(state.current_epoch_attestations.tree_hash_root()); - } else { - leaves.push( - self.previous_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.previous_epoch_participation()?, - ))?, - ); - leaves.push( - self.current_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.current_epoch_participation()?, - ))?, - ); - } - // Checkpoint leaves - leaves.push(state.justification_bits().tree_hash_root()); - leaves.push(state.previous_justified_checkpoint().tree_hash_root()); - leaves.push(state.current_justified_checkpoint().tree_hash_root()); - leaves.push(state.finalized_checkpoint().tree_hash_root()); - // Inactivity & light-client sync committees (Altair and later). - if let Ok(inactivity_scores) = state.inactivity_scores() { - leaves.push( - self.inactivity_scores - .recalculate_tree_hash_root(inactivity_scores)?, - ); - } - if let Ok(current_sync_committee) = state.current_sync_committee() { - leaves.push(current_sync_committee.tree_hash_root()); - } - - if let Ok(next_sync_committee) = state.next_sync_committee() { - leaves.push(next_sync_committee.tree_hash_root()); - } - - // Execution payload (merge and later). - if let Ok(payload_header) = state.latest_execution_payload_header() { - leaves.push(payload_header.tree_hash_root()); - } - - // Withdrawal indices (Capella and later). - if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { - leaves.push(next_withdrawal_index.tree_hash_root()); - } - if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { - leaves.push(next_withdrawal_validator_index.tree_hash_root()); - } - - // Historical roots/summaries (Capella and later). - if let Ok(historical_summaries) = state.historical_summaries() { - leaves.push( - self.historical_summaries.recalculate_tree_hash_root( - &HistoricalSummaryCache::new(historical_summaries), - )?, - ); - } - - Ok(leaves) - } - - /// Updates the cache and returns the tree hash root for the given `state`. - /// - /// The provided `state` should be a descendant of the last `state` given to this function, or - /// the `Self::new` function. If the state is more than `SLOTS_PER_HISTORICAL_ROOT` slots - /// after `self.previous_state` then the whole cache will be re-initialized. - pub fn recalculate_tree_hash_root(&mut self, state: &BeaconState) -> Result { - // If this cache has previously produced a root, ensure that it is in the state root - // history of this state. - // - // This ensures that the states applied have a linear history, this - // allows us to make assumptions about how the state changes over times and produce a more - // efficient algorithm. - if let Some((previous_root, previous_slot)) = self.previous_state { - // The previously-hashed state must not be newer than `state`. 
- if previous_slot > state.slot() { - return Err(Error::TreeHashCacheSkippedSlot { - cache: previous_slot, - state: state.slot(), - }); - } - - // If the state is newer, the previous root must be in the history of the given state. - // If the previous slot is out of range of the `state_roots` array (indicating a long - // gap between the cache's last use and the current state) then we re-initialize. - match state.get_state_root(previous_slot) { - Ok(state_previous_root) if *state_previous_root == previous_root => {} - Ok(_) => return Err(Error::NonLinearTreeHashCacheHistory), - Err(Error::SlotOutOfBounds) => { - *self = Self::new(state); - } - Err(e) => return Err(e), - } - } - - let mut hasher = MerkleHasher::with_leaves(NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES); - - let leaves = self.recalculate_tree_hash_leaves(state)?; - for leaf in leaves { - hasher.write(leaf.as_bytes())?; - } - - let root = hasher.finish()?; - - self.previous_state = Some((root, state.slot())); - - Ok(root) - } - - /// Updates the cache and provides the root of the given `validators`. - pub fn recalculate_validators_tree_hash_root( - &mut self, - validators: &[Validator], - ) -> Result { - self.validators.recalculate_tree_hash_root(validators) - } -} - -/// A specialized cache for computing the tree hash root of `state.validators`. -#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] -struct ValidatorsListTreeHashCache { - list_arena: CacheArena, - list_cache: TreeHashCache, - values: ParallelValidatorTreeHash, -} - -impl ValidatorsListTreeHashCache { - /// Instantiates a new cache. - /// - /// Allocates the necessary memory to store all of the cached Merkle trees but does perform any - /// hashing. - fn new(validators: &[Validator]) -> Self { - let mut list_arena = CacheArena::default(); - Self { - list_cache: TreeHashCache::new( - &mut list_arena, - int_log(E::ValidatorRegistryLimit::to_usize()), - validators.len(), - ), - list_arena, - values: ParallelValidatorTreeHash::new(validators), - } - } - - /// Updates the cache and returns the tree hash root for the given `state`. - /// - /// This function makes assumptions that the `validators` list will only change in accordance - /// with valid per-block/per-slot state transitions. - fn recalculate_tree_hash_root(&mut self, validators: &[Validator]) -> Result { - let mut list_arena = std::mem::take(&mut self.list_arena); - - let leaves = self.values.leaves(validators)?; - let num_leaves = leaves.iter().map(|arena| arena.len()).sum(); - - let leaves_iter = ForcedExactSizeIterator { - iter: leaves.into_iter().flatten().map(|h| h.to_fixed_bytes()), - len: num_leaves, - }; - - let list_root = self - .list_cache - .recalculate_merkle_root(&mut list_arena, leaves_iter)?; - - self.list_arena = list_arena; - - Ok(mix_in_length(&list_root, validators.len())) - } -} - -/// Provides a wrapper around some `iter` if the number of items in the iterator is known to the -/// programmer but not the compiler. This allows use of `ExactSizeIterator` in some occasions. -/// -/// Care should be taken to ensure `len` is accurate. -struct ForcedExactSizeIterator { - iter: I, - len: usize, -} - -impl> Iterator for ForcedExactSizeIterator { - type Item = V; - - fn next(&mut self) -> Option { - self.iter.next() - } -} - -impl> ExactSizeIterator for ForcedExactSizeIterator { - fn len(&self) -> usize { - self.len - } -} - -/// Provides a cache for each of the `Validator` objects in `state.validators` and computes the -/// roots of these using Rayon parallelization. 
-#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] -pub struct ParallelValidatorTreeHash { - /// Each arena and its associated sub-trees. - arenas: Vec<(CacheArena, Vec)>, -} - -impl ParallelValidatorTreeHash { - /// Instantiates a new cache. - /// - /// Allocates the necessary memory to store all of the cached Merkle trees but does perform any - /// hashing. - fn new(validators: &[Validator]) -> Self { - let num_arenas = std::cmp::max( - 1, - (validators.len() + VALIDATORS_PER_ARENA - 1) / VALIDATORS_PER_ARENA, - ); - - let mut arenas = (1..=num_arenas) - .map(|i| { - let num_validators = if i == num_arenas { - validators.len() % VALIDATORS_PER_ARENA - } else { - VALIDATORS_PER_ARENA - }; - NODES_PER_VALIDATOR * num_validators - }) - .map(|capacity| (CacheArena::with_capacity(capacity), vec![])) - .collect::>(); - - validators.iter().enumerate().for_each(|(i, v)| { - let (arena, caches) = &mut arenas[i / VALIDATORS_PER_ARENA]; - caches.push(v.new_tree_hash_cache(arena)) - }); - - Self { arenas } - } - - /// Returns the number of validators stored in self. - fn len(&self) -> usize { - self.arenas.last().map_or(0, |last| { - // Subtraction cannot underflow because `.last()` ensures the `.len() > 0`. - (self.arenas.len() - 1) * VALIDATORS_PER_ARENA + last.1.len() - }) - } - - /// Updates the caches for each `Validator` in `validators` and returns a list that maps 1:1 - /// with `validators` to the hash of each validator. - /// - /// This function makes assumptions that the `validators` list will only change in accordance - /// with valid per-block/per-slot state transitions. - fn leaves(&mut self, validators: &[Validator]) -> Result>, Error> { - match self.len().cmp(&validators.len()) { - Ordering::Less => validators.iter().skip(self.len()).for_each(|v| { - if self - .arenas - .last() - .map_or(true, |last| last.1.len() >= VALIDATORS_PER_ARENA) - { - let mut arena = CacheArena::default(); - let cache = v.new_tree_hash_cache(&mut arena); - self.arenas.push((arena, vec![cache])) - } else { - let (arena, caches) = &mut self - .arenas - .last_mut() - .expect("Cannot reach this block if arenas is empty."); - caches.push(v.new_tree_hash_cache(arena)) - } - }), - Ordering::Greater => { - return Err(Error::ValidatorRegistryShrunk); - } - Ordering::Equal => (), - } - - self.arenas - .par_iter_mut() - .enumerate() - .map(|(arena_index, (arena, caches))| { - caches - .iter_mut() - .enumerate() - .map(move |(cache_index, cache)| { - let val_index = (arena_index * VALIDATORS_PER_ARENA) + cache_index; - - let validator = validators - .get(val_index) - .ok_or(Error::TreeHashCacheInconsistent)?; - - validator - .recalculate_tree_hash_root(arena, cache) - .map_err(Error::CachedTreeHashError) - }) - .collect() - }) - .collect() - } -} - -#[derive(Debug, PartialEq, Clone)] -pub struct OptionalTreeHashCache { - inner: Option, -} - -#[derive(Debug, PartialEq, Clone)] -pub struct OptionalTreeHashCacheInner { - arena: CacheArena, - tree_hash_cache: TreeHashCache, -} - -impl OptionalTreeHashCache { - /// Initialize a new cache if `item.is_some()`. - fn new>(item: Option<&C>) -> Self { - let inner = item.map(OptionalTreeHashCacheInner::new); - Self { inner } - } - - /// Compute the tree hash root for the given `item`. - /// - /// This function will initialize the inner cache if necessary (e.g. when crossing the fork). 
- fn recalculate_tree_hash_root>( - &mut self, - item: &C, - ) -> Result { - let cache = self - .inner - .get_or_insert_with(|| OptionalTreeHashCacheInner::new(item)); - item.recalculate_tree_hash_root(&mut cache.arena, &mut cache.tree_hash_cache) - .map_err(Into::into) - } -} - -impl OptionalTreeHashCacheInner { - fn new>(item: &C) -> Self { - let mut arena = CacheArena::default(); - let tree_hash_cache = item.new_tree_hash_cache(&mut arena); - OptionalTreeHashCacheInner { - arena, - tree_hash_cache, - } - } -} - -impl arbitrary::Arbitrary<'_> for BeaconTreeHashCache { - fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - Ok(Self::default()) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::{MainnetEthSpec, ParticipationFlags}; - - #[test] - fn validator_node_count() { - let mut arena = CacheArena::default(); - let v = Validator::default(); - let _cache = v.new_tree_hash_cache(&mut arena); - assert_eq!(arena.backing_len(), NODES_PER_VALIDATOR); - } - - #[test] - fn participation_flags() { - type N = ::ValidatorRegistryLimit; - let len = 65; - let mut test_flag = ParticipationFlags::default(); - test_flag.add_flag(0).unwrap(); - let epoch_participation = VariableList::<_, N>::new(vec![test_flag; len]).unwrap(); - - let mut cache = OptionalTreeHashCache { inner: None }; - - let cache_root = cache - .recalculate_tree_hash_root(&ParticipationList::new(&epoch_participation)) - .unwrap(); - let recalc_root = cache - .recalculate_tree_hash_root(&ParticipationList::new(&epoch_participation)) - .unwrap(); - - assert_eq!(cache_root, recalc_root, "recalculated root should match"); - assert_eq!( - cache_root, - epoch_participation.tree_hash_root(), - "cached root should match uncached" - ); - } -} diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 31b1307aa7f..e54bc2f4f97 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{ beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, - EthSpec, Hash256, SignedBeaconBlockHeader, Slot, + EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList, }; use crate::{KzgProofs, SignedBeaconBlock}; use bls::Signature; @@ -16,7 +16,6 @@ use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::{FixedVector, VariableList}; use std::fmt::Debug; use std::hash::Hash; use std::sync::Arc; diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 3d0b0aca41d..02850304f1d 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -32,7 +32,8 @@ use tree_hash_derive::TreeHash; tree_hash(enum_behaviour = "transparent") ), cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + map_ref_into(ExecutionPayloadHeader) )] #[derive( Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, @@ -364,6 +365,27 @@ impl TryFrom> for ExecutionPayloadHeaderDe } } +impl<'a, E: EthSpec> ExecutionPayloadHeaderRefMut<'a, E> { + /// Mutate through + pub fn replace(self, header: ExecutionPayloadHeader) -> Result<(), BeaconStateError> { + 
match self { + ExecutionPayloadHeaderRefMut::Merge(mut_ref) => { + *mut_ref = header.try_into()?; + } + ExecutionPayloadHeaderRefMut::Capella(mut_ref) => { + *mut_ref = header.try_into()?; + } + ExecutionPayloadHeaderRefMut::Deneb(mut_ref) => { + *mut_ref = header.try_into()?; + } + ExecutionPayloadHeaderRefMut::Electra(mut_ref) => { + *mut_ref = header.try_into()?; + } + } + Ok(()) + } +} + impl TryFrom> for ExecutionPayloadHeaderElectra { type Error = BeaconStateError; fn try_from(header: ExecutionPayloadHeader) -> Result { diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/historical_batch.rs index 1c565c0092d..7bac9699eb6 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/historical_batch.rs @@ -23,8 +23,10 @@ use tree_hash_derive::TreeHash; )] #[arbitrary(bound = "E: EthSpec")] pub struct HistoricalBatch { - pub block_roots: FixedVector, - pub state_roots: FixedVector, + #[test_random(default)] + pub block_roots: Vector, + #[test_random(default)] + pub state_roots: Vector, } #[cfg(test)] diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index dee55789398..feefbc48946 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -98,7 +98,6 @@ pub mod sync_committee_contribution; pub mod sync_committee_message; pub mod sync_selection_proof; pub mod sync_subnet_id; -mod tree_hash_impls; pub mod validator_registration_data; pub mod withdrawal; @@ -132,7 +131,7 @@ pub use crate::beacon_block_body::{ }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; -pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; +pub use crate::beacon_state::{Error as BeaconStateError, *}; pub use crate::blob_sidecar::{BlobSidecar, BlobSidecarList, BlobsList}; pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; @@ -257,8 +256,7 @@ pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, }; - pub use kzg::{KzgCommitment, KzgProof, VERSIONED_HASH_VERSION_KZG}; - +pub use milhouse::{self, List, Vector}; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; pub use superstruct::superstruct; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 33fbf214c80..4d42d357c19 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -1,8 +1,7 @@ -use super::{BeaconState, EthSpec, FixedVector, Hash256, LightClientHeader, SyncCommittee}; use crate::{ - light_client_update::*, test_utils::TestRandom, ChainSpec, ForkName, ForkVersionDeserialize, - LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, SignedBeaconBlock, - Slot, + light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec, EthSpec, FixedVector, + ForkName, ForkVersionDeserialize, Hash256, LightClientHeader, LightClientHeaderAltair, + LightClientHeaderCapella, LightClientHeaderDeneb, SignedBeaconBlock, Slot, SyncCommittee, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index c5c730e8b83..5f83b0db523 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ 
b/consensus/types/src/light_client_finality_update.rs @@ -58,6 +58,7 @@ pub struct LightClientFinalityUpdate { #[superstruct(only(Deneb), partial_getter(rename = "finalized_header_deneb"))] pub finalized_header: LightClientHeaderDeneb, /// Merkle proof attesting finalized header. + #[test_random(default)] pub finality_branch: FixedVector, /// current sync aggregate pub sync_aggregate: SyncAggregate, diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index af9cbc16610..d5e8cd592df 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -37,6 +37,7 @@ pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), + MilhouseError(milhouse::Error), BeaconStateError(beacon_state::Error), ArithError(ArithError), AltairForkNotActive, @@ -65,6 +66,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: milhouse::Error) -> Error { + Error::MilhouseError(e) + } +} + /// A LightClientUpdate is the update we request solely to either complete the bootstrapping process, /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 0adaf81bd7d..72a7a036ccc 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -87,7 +87,7 @@ where } } -impl TestRandom for FixedVector +impl TestRandom for ssz_types::FixedVector where T: TestRandom, { diff --git a/consensus/types/src/tree_hash_impls.rs b/consensus/types/src/tree_hash_impls.rs deleted file mode 100644 index eb3660d4666..00000000000 --- a/consensus/types/src/tree_hash_impls.rs +++ /dev/null @@ -1,165 +0,0 @@ -//! This module contains custom implementations of `CachedTreeHash` for ETH2-specific types. -//! -//! It makes some assumptions about the layouts and update patterns of other structs in this -//! crate, and should be updated carefully whenever those structs are changed. -use crate::{Epoch, Hash256, PublicKeyBytes, Validator}; -use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache}; -use int_to_bytes::int_to_fixed_bytes32; -use tree_hash::merkle_root; - -/// Number of struct fields on `Validator`. -const NUM_VALIDATOR_FIELDS: usize = 8; - -impl CachedTreeHash for Validator { - fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { - TreeHashCache::new(arena, int_log(NUM_VALIDATOR_FIELDS), NUM_VALIDATOR_FIELDS) - } - - /// Efficiently tree hash a `Validator`, assuming it was updated by a valid state transition. - /// - /// Specifically, we assume that the `pubkey` field is constant. - fn recalculate_tree_hash_root( - &self, - arena: &mut CacheArena, - cache: &mut TreeHashCache, - ) -> Result { - // Otherwise just check the fields which might have changed. - let dirty_indices = cache - .leaves() - .iter_mut(arena)? - .enumerate() - .flat_map(|(i, leaf)| { - // Pubkey field (index 0) is constant. 
- if i == 0 && cache.initialized { - None - } else if process_field_by_index(self, i, leaf, !cache.initialized) { - Some(i) - } else { - None - } - }) - .collect(); - - cache.update_merkle_root(arena, dirty_indices) - } -} - -fn process_field_by_index( - v: &Validator, - field_idx: usize, - leaf: &mut Hash256, - force_update: bool, -) -> bool { - match field_idx { - 0 => process_pubkey_bytes_field(&v.pubkey, leaf, force_update), - 1 => process_slice_field(v.withdrawal_credentials.as_bytes(), leaf, force_update), - 2 => process_u64_field(v.effective_balance, leaf, force_update), - 3 => process_bool_field(v.slashed, leaf, force_update), - 4 => process_epoch_field(v.activation_eligibility_epoch, leaf, force_update), - 5 => process_epoch_field(v.activation_epoch, leaf, force_update), - 6 => process_epoch_field(v.exit_epoch, leaf, force_update), - 7 => process_epoch_field(v.withdrawable_epoch, leaf, force_update), - _ => panic!( - "Validator type only has {} fields, {} out of bounds", - NUM_VALIDATOR_FIELDS, field_idx - ), - } -} - -fn process_pubkey_bytes_field( - val: &PublicKeyBytes, - leaf: &mut Hash256, - force_update: bool, -) -> bool { - let new_tree_hash = merkle_root(val.as_serialized(), 0); - process_slice_field(new_tree_hash.as_bytes(), leaf, force_update) -} - -fn process_slice_field(new_tree_hash: &[u8], leaf: &mut Hash256, force_update: bool) -> bool { - if force_update || leaf.as_bytes() != new_tree_hash { - leaf.assign_from_slice(new_tree_hash); - true - } else { - false - } -} - -fn process_u64_field(val: u64, leaf: &mut Hash256, force_update: bool) -> bool { - let new_tree_hash = int_to_fixed_bytes32(val); - process_slice_field(&new_tree_hash[..], leaf, force_update) -} - -fn process_epoch_field(val: Epoch, leaf: &mut Hash256, force_update: bool) -> bool { - process_u64_field(val.as_u64(), leaf, force_update) -} - -fn process_bool_field(val: bool, leaf: &mut Hash256, force_update: bool) -> bool { - process_u64_field(val as u64, leaf, force_update) -} - -#[cfg(test)] -mod test { - use super::*; - use crate::test_utils::TestRandom; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - use tree_hash::TreeHash; - - fn test_validator_tree_hash(v: &Validator) { - let arena = &mut CacheArena::default(); - - let mut cache = v.new_tree_hash_cache(arena); - // With a fresh cache - assert_eq!( - &v.tree_hash_root()[..], - v.recalculate_tree_hash_root(arena, &mut cache) - .unwrap() - .as_bytes(), - "{:?}", - v - ); - // With a completely up-to-date cache - assert_eq!( - &v.tree_hash_root()[..], - v.recalculate_tree_hash_root(arena, &mut cache) - .unwrap() - .as_bytes(), - "{:?}", - v - ); - } - - #[test] - fn default_validator() { - test_validator_tree_hash(&Validator::default()); - } - - #[test] - fn zeroed_validator() { - let v = Validator { - activation_eligibility_epoch: Epoch::from(0u64), - activation_epoch: Epoch::from(0u64), - ..Default::default() - }; - test_validator_tree_hash(&v); - } - - #[test] - fn random_validators() { - let mut rng = XorShiftRng::from_seed([0xf1; 16]); - let num_validators = 1000; - (0..num_validators) - .map(|_| Validator::random_for_test(&mut rng)) - .for_each(|v| test_validator_tree_hash(&v)); - } - - #[test] - #[allow(clippy::assertions_on_constants)] - pub fn smallvec_size_check() { - // If this test fails we need to go and reassess the length of the `SmallVec` in - // `cached_tree_hash::TreeHashCache`. 
If the size of the `SmallVec` is too slow we're going - // to start doing heap allocations for each validator, this will fragment memory and slow - // us down. - assert!(NUM_VALIDATOR_FIELDS <= 8,); - } -} diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index f9da3d2b3e9..edba4249966 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -264,7 +264,7 @@ fn initialize_state_with_validators( let mut state = BeaconState::new(genesis_time, eth1_data, spec); // Seed RANDAO with Eth1 entropy - state.fill_randao_mixes_with(eth1_block_hash); + state.fill_randao_mixes_with(eth1_block_hash).unwrap(); for keypair in keypairs.iter() { let withdrawal_credentials = |pubkey: &PublicKey| { diff --git a/lcli/src/replace_state_pubkeys.rs b/lcli/src/replace_state_pubkeys.rs index 0f9fac3aff9..e8d012b16ec 100644 --- a/lcli/src/replace_state_pubkeys.rs +++ b/lcli/src/replace_state_pubkeys.rs @@ -42,7 +42,8 @@ pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); let mut deposit_root = Hash256::zero(); - for (index, validator) in state.validators_mut().iter_mut().enumerate() { + let validators = state.validators_mut(); + for index in 0..validators.len() { let (secret, _) = recover_validator_secret_from_mnemonic(seed.as_bytes(), index as u32, KeyType::Voting) .map_err(|e| format!("Unable to generate validator key: {:?}", e))?; @@ -52,11 +53,11 @@ pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), eprintln!("{}: {}", index, keypair.pk); - validator.pubkey = keypair.pk.into(); + validators.get_mut(index).unwrap().pubkey = keypair.pk.into(); // Update the deposit tree. let mut deposit_data = DepositData { - pubkey: validator.pubkey, + pubkey: validators.get(index).unwrap().pubkey, // Set this to a junk value since it's very time consuming to generate the withdrawal // keys and it's not useful for the time being. 
withdrawal_credentials: Hash256::zero(), diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 9e5da7709f1..d421c077d83 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -57,7 +57,7 @@ use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; use std::time::{Duration, Instant}; -use types::{BeaconState, CloneConfig, EthSpec, Hash256}; +use types::{BeaconState, EthSpec, Hash256}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); @@ -128,7 +128,7 @@ pub fn run( }; for i in 0..runs { - let mut state = state.clone_with(CloneConfig::all()); + let mut state = state.clone(); let start = Instant::now(); diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index c72b41b1d44..77fd352829f 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -76,7 +76,7 @@ use ssz::Encode; use state_processing::state_advance::complete_state_advance; use state_processing::{ block_signature_verifier::BlockSignatureVerifier, per_block_processing, AllCaches, - BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, }; use std::borrow::Cow; use std::fs::File; @@ -85,7 +85,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::{Duration, Instant}; use store::HotColdDB; -use types::{BeaconState, ChainSpec, CloneConfig, EthSpec, Hash256, SignedBeaconBlock}; +use types::{BeaconState, ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); @@ -234,7 +234,7 @@ pub fn run( let mut output_post_state = None; let mut saved_ctxt = None; for i in 0..runs { - let pre_state = pre_state.clone_with(CloneConfig::all()); + let pre_state = pre_state.clone(); let block = block.clone(); let start = Instant::now(); @@ -399,7 +399,6 @@ fn do_transition( &mut pre_state, &block, BlockSignatureStrategy::NoVerification, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index ec10ff4429d..9983717cb1e 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -172,26 +172,6 @@ fn shuffling_cache_set() { .with_config(|config| assert_eq!(config.chain.shuffling_cache_size, 500)); } -#[test] -fn snapshot_cache_default() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.chain.snapshot_cache_size, - beacon_node::beacon_chain::snapshot_cache::DEFAULT_SNAPSHOT_CACHE_SIZE - ) - }); -} - -#[test] -fn snapshot_cache_set() { - CommandLineTest::new() - .flag("state-cache-size", Some("500")) - .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.snapshot_cache_size, 500)); -} - #[test] fn fork_choice_before_proposal_timeout_default() { CommandLineTest::new() diff --git a/testing/ef_tests/src/case_result.rs b/testing/ef_tests/src/case_result.rs index 67ab9c51bbf..c511d9a1ca0 100644 --- a/testing/ef_tests/src/case_result.rs +++ b/testing/ef_tests/src/case_result.rs @@ -39,6 +39,9 @@ pub fn compare_beacon_state_results_without_caches( if let (Ok(ref mut result), Some(ref mut expected)) = (result.as_mut(), expected.as_mut()) { result.drop_all_caches().unwrap(); expected.drop_all_caches().unwrap(); + + result.apply_pending_mutations().unwrap(); + expected.apply_pending_mutations().unwrap(); } compare_result_detailed(result, expected) diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs 
b/testing/ef_tests/src/cases/merkle_proof_validity.rs index a2e831ade59..cf0b9f77c8f 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -51,7 +51,7 @@ impl LoadCase for MerkleProofValidity { impl Case for MerkleProofValidity { fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let mut state = self.state.clone(); - state.initialize_tree_hash_cache(); + state.update_tree_hash_cache().unwrap(); let Ok(proof) = state.compute_merkle_proof(self.merkle_proof.leaf_index) else { return Err(Error::FailedToParseTest( "Could not retrieve merkle proof".to_string(), @@ -77,9 +77,6 @@ impl Case for MerkleProofValidity { } } - // Tree hash cache should still be initialized (not dropped). - assert!(state.tree_hash_cache().is_initialized()); - Ok(()) } } diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index b0902cb5b74..91bb995cc43 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -5,7 +5,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, - ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, + ConsensusContext, VerifyBlockRoot, }; use types::{BeaconState, RelativeEpoch, SignedBeaconBlock}; @@ -96,7 +96,6 @@ impl Case for SanityBlocks { &mut indiv_state, signed_block, BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, @@ -107,7 +106,6 @@ impl Case for SanityBlocks { &mut bulk_state, signed_block, BlockSignatureStrategy::VerifyBulk, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index bb2465aae10..8de3e217f00 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -1,14 +1,14 @@ #![allow(non_snake_case)] use super::*; -use crate::cases::common::{TestU128, TestU256}; -use crate::decode::{snappy_decode_file, yaml_decode_file}; -use serde::Deserialize; -use serde::{de::Error as SerdeError, Deserializer}; +use crate::cases::common::{SszStaticType, TestU128, TestU256}; +use crate::cases::ssz_static::{check_serialization, check_tree_hash}; +use crate::decode::{log_file_access, snappy_decode_file, yaml_decode_file}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer}; use ssz_derive::{Decode, Encode}; use tree_hash_derive::TreeHash; use types::typenum::*; -use types::{BitList, BitVector, FixedVector, VariableList}; +use types::{BitList, BitVector, FixedVector, ForkName, VariableList, Vector}; #[derive(Debug, Clone, Deserialize)] struct Metadata { @@ -125,6 +125,20 @@ impl Case for SszGeneric { let elem_ty = parts[1]; let length = parts[2]; + // Skip length 0 tests. Milhouse doesn't have any checks against 0-capacity lists. 
+ if length == "0" { + log_file_access(self.path.join("serialized.ssz_snappy")); + return Ok(()); + } + + type_dispatch!( + ssz_generic_test, + (&self.path), + Vector, + <>, + [elem_ty => primitive_type] + [length => typenum] + )?; type_dispatch!( ssz_generic_test, (&self.path), @@ -263,8 +277,8 @@ struct ComplexTestStruct { #[serde(deserialize_with = "byte_list_from_hex_str")] D: VariableList, E: VarTestStruct, - F: FixedVector, - G: FixedVector, + F: Vector, + G: Vector, } #[derive(Debug, Clone, PartialEq, Decode, Encode, TreeHash, Deserialize)] diff --git a/testing/ef_tests/src/cases/ssz_static.rs b/testing/ef_tests/src/cases/ssz_static.rs index e41c90c6e03..5f0ac3525c4 100644 --- a/testing/ef_tests/src/cases/ssz_static.rs +++ b/testing/ef_tests/src/cases/ssz_static.rs @@ -118,7 +118,6 @@ impl Case for SszStaticTHC> { check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?; let mut state = self.value.clone(); - state.initialize_tree_hash_cache(); let cached_tree_hash_root = state.update_tree_hash_cache().unwrap(); check_tree_hash(&self.roots.root, cached_tree_hash_root.as_bytes())?; diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index 927589948a2..b2c49a96feb 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -4,7 +4,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use state_processing::{ per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, - ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, + ConsensusContext, VerifyBlockRoot, }; use std::str::FromStr; use types::{BeaconState, Epoch, SignedBeaconBlock}; @@ -114,7 +114,6 @@ impl Case for TransitionTest { &mut state, block, BlockSignatureStrategy::VerifyBulk, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index e3cd346da13..61cae6dbe1b 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -1,7 +1,7 @@ use super::*; use state_processing::{ per_block_processing, per_block_processing::errors::ExitInvalid, BlockProcessingError, - BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, }; use types::{BeaconBlock, Epoch}; @@ -68,7 +68,6 @@ impl ExitTest { state, block, BlockSignatureStrategy::VerifyIndividual, - StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &E::default_spec(), @@ -332,7 +331,7 @@ mod custom_tests { fn assert_exited(state: &BeaconState, validator_index: usize) { let spec = E::default_spec(); - let validator = &state.validators()[validator_index]; + let validator = &state.validators().get(validator_index).unwrap(); assert_eq!( validator.exit_epoch, // This is correct until we exceed the churn limit. 
If that happens, we From c4a2bcb9c79828fcbb3c204306c986d5b2e5a246 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Wed, 24 Apr 2024 02:02:10 -0400 Subject: [PATCH 07/16] Yaml rust2 (#5635) * use yaml-rust2 --- Cargo.lock | 424 +++--------------------------- consensus/int_to_bytes/Cargo.toml | 2 +- consensus/int_to_bytes/src/lib.rs | 10 +- 3 files changed, 43 insertions(+), 393 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d63773d9a94..bee644e2d79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,7 +18,7 @@ version = "0.3.5" dependencies = [ "account_utils", "bls", - "clap 2.34.0", + "clap", "clap_utils", "directory", "environment", @@ -292,54 +292,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "anstream" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" - -[[package]] -name = "anstyle-parse" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" -dependencies = [ - "anstyle", - "windows-sys 0.52.0", -] - [[package]] name = "anyhow" version = "1.0.82" @@ -494,6 +446,12 @@ dependencies = [ "rand", ] +[[package]] +name = "arraydeque" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + [[package]] name = "arrayref" version = "0.3.7" @@ -856,7 +814,7 @@ name = "beacon_node" version = "5.1.3" dependencies = [ "beacon_chain", - "clap 2.34.0", + "clap", "clap_utils", "client", "directory", @@ -1069,7 +1027,7 @@ name = "boot_node" version = "5.1.3" dependencies = [ "beacon_node", - "clap 2.34.0", + "clap", "clap_utils", "eth2_network_config", "ethereum_ssz", @@ -1345,38 +1303,11 @@ dependencies = [ "vec_map", ] -[[package]] -name = "clap" -version = "4.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" -dependencies = [ - "clap_builder", -] - -[[package]] -name = "clap_builder" -version = "4.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim 0.11.1", -] - -[[package]] -name = "clap_lex" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" - [[package]] name = "clap_utils" version = "0.1.0" dependencies = [ - "clap 2.34.0", + "clap", "dirs", "eth2_network_config", "ethereum-types 0.14.1", 
@@ -1442,12 +1373,6 @@ dependencies = [ "cc", ] -[[package]] -name = "colorchoice" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" - [[package]] name = "compare_fields" version = "0.2.0" @@ -1473,19 +1398,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "console" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" -dependencies = [ - "encode_unicode", - "lazy_static", - "libc", - "unicode-width", - "windows-sys 0.52.0", -] - [[package]] name = "const-hex" version = "1.11.3" @@ -1568,7 +1480,7 @@ checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", - "clap 2.34.0", + "clap", "criterion-plot", "csv", "itertools", @@ -1783,18 +1695,8 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core 0.14.4", - "darling_macro 0.14.4", + "darling_core", + "darling_macro", ] [[package]] @@ -1811,38 +1713,13 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "darling_core" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core 0.13.4", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core 0.14.4", + "darling_core", "quote", "syn 1.0.109", ] @@ -1905,7 +1782,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "beacon_node", - "clap 2.34.0", + "clap", "clap_utils", "environment", "hex", @@ -2014,37 +1891,6 @@ dependencies = [ "syn 2.0.60", ] -[[package]] -name = "derive_builder" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" -dependencies = [ - "darling 0.14.4", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive_builder_macro" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e" -dependencies = [ - "derive_builder_core", - "syn 1.0.109", -] - [[package]] name = "derive_more" version = "0.99.17" @@ -2058,19 +1904,6 @@ dependencies = [ "syn 
1.0.109", ] -[[package]] -name = "dialoguer" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" -dependencies = [ - "console", - "shell-words", - "tempfile", - "thiserror", - "zeroize", -] - [[package]] name = "diesel" version = "2.1.6" @@ -2142,7 +1975,7 @@ dependencies = [ name = "directory" version = "0.1.0" dependencies = [ - "clap 2.34.0", + "clap", "clap_utils", "eth2_network_config", ] @@ -2236,29 +2069,6 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" -[[package]] -name = "dtt" -version = "0.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b2dd9ee2d76888dc4c17d6da74629fa11b3cb1e8094fdc159b7f8ff259fc88" -dependencies = [ - "regex", - "serde", - "time", -] - -[[package]] -name = "duct" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ab5718d1224b63252cd0c6f74f6480f9ffeb117438a2e0f5cf6d9a4798929c" -dependencies = [ - "libc", - "once_cell", - "os_pipe", - "shared_child", -] - [[package]] name = "dunce" version = "1.0.4" @@ -2396,12 +2206,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - [[package]] name = "encoding_rs" version = "0.8.34" @@ -2442,16 +2246,6 @@ dependencies = [ "syn 2.0.60", ] -[[package]] -name = "env_filter" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" -dependencies = [ - "log", - "regex", -] - [[package]] name = "env_logger" version = "0.8.4" @@ -2475,19 +2269,6 @@ dependencies = [ "termcolor", ] -[[package]] -name = "env_logger" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" -dependencies = [ - "anstream", - "anstyle", - "env_filter", - "humantime", - "log", -] - [[package]] name = "environment" version = "0.1.2" @@ -2675,7 +2456,7 @@ dependencies = [ "sha2 0.9.9", "tempfile", "unicode-normalization", - "uuid 0.8.2", + "uuid", "zeroize", ] @@ -2715,7 +2496,7 @@ dependencies = [ "serde_repr", "tempfile", "tiny-bip39", - "uuid 0.8.2", + "uuid", ] [[package]] @@ -2859,7 +2640,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6085d7fd3cf84bd2b8fec150d54c8467fb491d8db9c460607c5534f653a0ee38" dependencies = [ - "darling 0.13.4", + "darling", "proc-macro2", "quote", "syn 1.0.109", @@ -3172,12 +2953,6 @@ dependencies = [ "rustc_version 0.4.0", ] -[[package]] -name = "figlet-rs" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4742a071cd9694fc86f9fa1a08fa3e53d40cc899d7ee532295da2d085639fbc5" - [[package]] name = "filesystem" version = "0.1.0" @@ -3283,12 +3058,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "fs_extra" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" - [[package]] name = "funty" version = "1.1.0" @@ -4286,7 +4055,7 @@ version = "0.2.0" dependencies = [ "bytes", "hex", - "serde_yml", + "yaml-rust2", ] [[package]] 
@@ -4510,7 +4279,7 @@ dependencies = [ "account_utils", "beacon_chain", "bls", - "clap 2.34.0", + "clap", "clap_utils", "deposit_contract", "directory", @@ -5088,7 +4857,7 @@ dependencies = [ "beacon_processor", "bls", "boot_node", - "clap 2.34.0", + "clap", "clap_utils", "database_manager", "directory", @@ -5436,7 +5205,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37cb4045d5677b7da537f8cb5d0730d5b6414e3cc81c61e4b50e1f0cbdc73909" dependencies = [ - "darling 0.13.4", + "darling", "itertools", "proc-macro2", "quote", @@ -6044,16 +5813,6 @@ dependencies = [ "types", ] -[[package]] -name = "os_pipe" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57119c3b893986491ec9aa85056780d3a0f3cf4da7cc09dd3650dbd6c6738fb9" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - [[package]] name = "overload" version = "0.1.1" @@ -7086,30 +6845,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" -[[package]] -name = "rlg" -version = "0.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6ccf670238310d5c31a52fed1a3314620d037a64f1e5fbdc71b2c50909134dc" -dependencies = [ - "dtt", - "tokio", - "vrd 0.0.4", -] - -[[package]] -name = "rlg" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e02c717e23f67b23032a4acb01cf63534d6259938d592e6d2451c02f09fc368" -dependencies = [ - "dtt", - "hostname", - "serde_json", - "tokio", - "vrd 0.0.5", -] - [[package]] name = "rlp" version = "0.5.2" @@ -7687,7 +7422,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling 0.13.4", + "darling", "proc-macro2", "quote", "syn 1.0.109", @@ -7706,27 +7441,6 @@ dependencies = [ "unsafe-libyaml", ] -[[package]] -name = "serde_yml" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196c3750ff0411738366b0d9534ca55fc74d04a3d4284a450039de950bd11938" -dependencies = [ - "dtt", - "env_logger 0.11.3", - "figlet-rs", - "indexmap 2.2.6", - "itoa", - "log", - "openssl", - "rlg 0.0.3", - "ryu", - "serde", - "unsafe-libyaml", - "uuid 1.8.0", - "xtasks", -] - [[package]] name = "sha1" version = "0.10.6" @@ -7803,22 +7517,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shared_child" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0d94659ad3c2137fef23ae75b03d5241d633f8acded53d672decfa0e6e0caef" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "shell-words" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" - [[package]] name = "shlex" version = "1.3.0" @@ -7870,7 +7568,7 @@ dependencies = [ name = "simulator" version = "0.2.0" dependencies = [ - "clap 2.34.0", + "clap", "env_logger 0.9.3", "eth1", "eth2_network_config", @@ -8282,12 +7980,6 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - [[package]] name = "strum" version = "0.24.1" @@ -8322,7 +8014,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f4e1f478a7728f8855d7e620e9a152cf8932c6614f86564c886f9b8141f3201" dependencies = [ - "darling 0.13.4", + "darling", "itertools", "proc-macro2", "quote", @@ -8691,7 +8383,6 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", "socket2 0.5.6", @@ -9022,7 +8713,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce7bccc538359a213436af7bc95804bdbf1c2a21d80e22953cbe9e096837ff1" dependencies = [ - "darling 0.13.4", + "darling", "quote", "syn 1.0.109", ] @@ -9270,12 +8961,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "utf8parse" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" - [[package]] name = "uuid" version = "0.8.2" @@ -9286,15 +8971,6 @@ dependencies = [ "serde", ] -[[package]] -name = "uuid" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" -dependencies = [ - "getrandom", -] - [[package]] name = "validator_client" version = "0.3.5" @@ -9302,7 +8978,7 @@ dependencies = [ "account_utils", "bincode", "bls", - "clap 2.34.0", + "clap", "clap_utils", "deposit_contract", "directory", @@ -9374,7 +9050,7 @@ version = "0.1.0" dependencies = [ "account_utils", "bls", - "clap 2.34.0", + "clap", "clap_utils", "environment", "eth2", @@ -9423,25 +9099,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -[[package]] -name = "vrd" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81b8b5b404f3d7afa1b8142a6bc980c20cd68556c634c3db517871aa0402521" -dependencies = [ - "rand", -] - -[[package]] -name = "vrd" -version = "0.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee1067b8d17481f5be71b59d11c329e955ffe36348907e0a4a41b619682bb4af" -dependencies = [ - "rand", - "serde", -] - [[package]] name = "wait-timeout" version = "0.2.0" @@ -9634,7 +9291,7 @@ dependencies = [ "beacon_node", "bls", "byteorder", - "clap 2.34.0", + "clap", "diesel", "diesel_migrations", "env_logger 0.9.3", @@ -10099,23 +9756,14 @@ dependencies = [ ] [[package]] -name = "xtasks" -version = "0.0.2" +name = "yaml-rust2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "940db5674e301470e6cd91098b2c68a1fad751a1623575d1133f7456146e6d2f" +checksum = "498f4d102a79ea1c9d4dd27573c0fc96ad74c023e8da38484e47883076da25fb" dependencies = [ - "anyhow", - "clap 4.5.4", - "derive_builder", - "dialoguer", - "dtt", - "duct", - "fs_extra", - "glob", - "rlg 0.0.2", - "serde", - "serde_json", - "vrd 0.0.5", + "arraydeque", + "encoding_rs", + "hashlink", ] [[package]] diff --git a/consensus/int_to_bytes/Cargo.toml b/consensus/int_to_bytes/Cargo.toml index 1958fac6e56..e99d1af8e56 100644 --- a/consensus/int_to_bytes/Cargo.toml +++ b/consensus/int_to_bytes/Cargo.toml @@ -8,5 +8,5 @@ edition = { workspace = true } bytes = { workspace = true } [dev-dependencies] +yaml-rust2 = "0.8" hex = { workspace = true } -serde_yml = "0.0.4" diff --git 
a/consensus/int_to_bytes/src/lib.rs b/consensus/int_to_bytes/src/lib.rs index 076e7154647..7a1fff7ccd3 100644 --- a/consensus/int_to_bytes/src/lib.rs +++ b/consensus/int_to_bytes/src/lib.rs @@ -78,7 +78,8 @@ pub fn int_to_bytes96(int: u64) -> Vec { #[cfg(test)] mod tests { use super::*; - use std::{collections::HashMap, fs::File, io::prelude::*, path::PathBuf}; + use std::{fs::File, io::prelude::*, path::PathBuf}; + use yaml_rust2::yaml; #[test] fn fixed_bytes32() { @@ -110,13 +111,14 @@ mod tests { file.read_to_string(&mut yaml_str).unwrap(); - let docs: HashMap = serde_yml::from_str(&yaml_str).unwrap(); - let test_cases = docs["test_cases"].as_sequence().unwrap(); + let docs = yaml::YamlLoader::load_from_str(&yaml_str).unwrap(); + let doc = &docs[0]; + let test_cases = doc["test_cases"].as_vec().unwrap(); for test_case in test_cases { let byte_length = test_case["byte_length"].as_i64().unwrap() as u64; let int = test_case["int"].as_i64().unwrap() as u64; - let bytes_string = test_case["bytes"].as_str().unwrap(); + let bytes_string = test_case["bytes"].clone().into_string().unwrap(); let bytes = hex::decode(bytes_string.replace("0x", "")).unwrap(); match byte_length { From 1eaaa4a8bd15f505d37a43a4a11333ea2a64c93f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 24 Apr 2024 16:02:42 +1000 Subject: [PATCH 08/16] Bump jobserver and fix non-portable builds (#5641) * Bump jobserver and fix non-portable builds --- Cargo.lock | 5 ++--- Cargo.toml | 4 +++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bee644e2d79..335491adc1c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1133,7 +1133,6 @@ dependencies = [ "glob", "hex", "libc", - "serde", ] [[package]] @@ -4155,9 +4154,9 @@ dependencies = [ [[package]] name = "jobserver" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 296dadc1f66..be2011ba286 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,7 +102,9 @@ bincode = "1" bitvec = "1" byteorder = "1" bytes = "1" -c-kzg = "1" +# Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable +# feature ourselves when desired. 
+c-kzg = { version = "1", default-features = false } clap = "2" compare_fields_derive = { path = "common/compare_fields_derive" } criterion = "0.3" From c38b05d640bfddc311e44e37cb06962fe5c9c79d Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 24 Apr 2024 15:02:45 +0900 Subject: [PATCH 09/16] Drop lookup type trait for a simple arg (#5620) * Drop lookup type trait for a simple arg --- .../network/src/sync/block_lookups/common.rs | 63 +++---- .../network/src/sync/block_lookups/mod.rs | 40 ++-- .../src/sync/block_lookups/parent_lookup.rs | 7 +- .../sync/block_lookups/single_block_lookup.rs | 172 ++---------------- beacon_node/network/src/sync/manager.rs | 21 +-- 5 files changed, 73 insertions(+), 230 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 3bd39301b21..7193dd6e216 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -29,34 +29,12 @@ pub enum LookupType { Parent, } -/// This trait helps differentiate `SingleBlockLookup`s from `ParentLookup`s .This is useful in -/// ensuring requests and responses are handled separately and enables us to use different failure -/// tolerances for each, while re-using the same basic request and retry logic. -pub trait Lookup { - const MAX_ATTEMPTS: u8; - fn lookup_type() -> LookupType; - fn max_attempts() -> u8 { - Self::MAX_ATTEMPTS - } -} - -/// A `Lookup` that is a part of a `ParentLookup`. -pub struct Parent; - -impl Lookup for Parent { - const MAX_ATTEMPTS: u8 = PARENT_FAIL_TOLERANCE; - fn lookup_type() -> LookupType { - LookupType::Parent - } -} - -/// A `Lookup` that part of a single block lookup. -pub struct Current; - -impl Lookup for Current { - const MAX_ATTEMPTS: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; - fn lookup_type() -> LookupType { - LookupType::Current +impl LookupType { + fn max_attempts(&self) -> u8 { + match self { + LookupType::Current => SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, + LookupType::Parent => PARENT_FAIL_TOLERANCE, + } } } @@ -68,7 +46,7 @@ impl Lookup for Current { /// The use of the `ResponseType` associated type gives us a degree of type /// safety when handling a block/blob response ensuring we only mutate the correct corresponding /// state. -pub trait RequestState { +pub trait RequestState { /// The type of the request . type RequestType; @@ -81,9 +59,12 @@ pub trait RequestState { /* Request building methods */ /// Construct a new request. - fn build_request(&mut self) -> Result<(PeerId, Self::RequestType), LookupRequestError> { + fn build_request( + &mut self, + lookup_type: LookupType, + ) -> Result<(PeerId, Self::RequestType), LookupRequestError> { // Verify and construct request. - self.too_many_attempts()?; + self.too_many_attempts(lookup_type)?; let peer = self.get_peer()?; let request = self.new_request(); Ok((peer, request)) @@ -93,6 +74,7 @@ pub trait RequestState { fn build_request_and_send( &mut self, id: Id, + lookup_type: LookupType, cx: &mut SyncNetworkContext, ) -> Result<(), LookupRequestError> { // Check if request is necessary. @@ -101,7 +83,7 @@ pub trait RequestState { } // Construct request. - let (peer_id, request) = self.build_request()?; + let (peer_id, request) = self.build_request(lookup_type)?; // Update request state. 
let req_counter = self.get_state_mut().on_download_start(peer_id); @@ -110,17 +92,16 @@ pub trait RequestState { let id = SingleLookupReqId { id, req_counter, - lookup_type: L::lookup_type(), + lookup_type, }; Self::make_request(id, peer_id, request, cx) } /// Verify the current request has not exceeded the maximum number of attempts. - fn too_many_attempts(&self) -> Result<(), LookupRequestError> { - let max_attempts = L::max_attempts(); + fn too_many_attempts(&self, lookup_type: LookupType) -> Result<(), LookupRequestError> { let request_state = self.get_state(); - if request_state.failed_attempts() >= max_attempts { + if request_state.failed_attempts() >= lookup_type.max_attempts() { let cannot_process = request_state.more_failed_processing_attempts(); Err(LookupRequestError::TooManyAttempts { cannot_process }) } else { @@ -187,7 +168,7 @@ pub trait RequestState { fn response_type() -> ResponseType; /// A getter for the `BlockRequestState` or `BlobRequestState` associated with this trait. - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self; + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self; /// A getter for a reference to the `SingleLookupRequestState` associated with this trait. fn get_state(&self) -> &SingleLookupRequestState; @@ -196,7 +177,7 @@ pub trait RequestState { fn get_state_mut(&mut self) -> &mut SingleLookupRequestState; } -impl RequestState for BlockRequestState { +impl RequestState for BlockRequestState { type RequestType = BlocksByRootSingleRequest; type VerifiedResponseType = Arc>; type ReconstructedResponseType = RpcBlock; @@ -253,7 +234,7 @@ impl RequestState for BlockRequestState fn response_type() -> ResponseType { ResponseType::Block } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { &mut request.block_request_state } fn get_state(&self) -> &SingleLookupRequestState { @@ -264,7 +245,7 @@ impl RequestState for BlockRequestState } } -impl RequestState for BlobRequestState { +impl RequestState for BlobRequestState { type RequestType = BlobsByRootSingleBlockRequest; type VerifiedResponseType = FixedBlobSidecarList; type ReconstructedResponseType = FixedBlobSidecarList; @@ -328,7 +309,7 @@ impl RequestState for BlobRequestState ResponseType { ResponseType::Blob } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { &mut request.blob_request_state } fn get_state(&self) -> &SingleLookupRequestState { diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index fa2683fb0f0..a2909b49dd1 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -16,9 +16,6 @@ use beacon_chain::data_availability_checker::{ }; use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; -pub use common::Current; -pub use common::Lookup; -pub use common::Parent; pub use common::RequestState; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; @@ -55,12 +52,12 @@ pub struct BlockLookups { /// Parent chain lookups being downloaded. parent_lookups: SmallVec<[ParentLookup; 3]>, - processing_parent_lookups: HashMap, SingleBlockLookup)>, + processing_parent_lookups: HashMap, SingleBlockLookup)>, /// A cache of failed chain lookups to prevent duplicate searches. 
failed_chains: LRUTimeCache, - single_block_lookups: FnvHashMap>, + single_block_lookups: FnvHashMap>, pub(crate) da_checker: Arc>, @@ -131,7 +128,7 @@ impl BlockLookups { /// Attempts to trigger the request matching the given `block_root`. pub fn trigger_single_lookup( &mut self, - mut single_block_lookup: SingleBlockLookup, + mut single_block_lookup: SingleBlockLookup, cx: &mut SyncNetworkContext, ) { let block_root = single_block_lookup.block_root(); @@ -147,7 +144,7 @@ impl BlockLookups { } /// Adds a lookup to the `single_block_lookups` map. - pub fn add_single_lookup(&mut self, single_block_lookup: SingleBlockLookup) { + pub fn add_single_lookup(&mut self, single_block_lookup: SingleBlockLookup) { self.single_block_lookups .insert(single_block_lookup.id, single_block_lookup); @@ -212,6 +209,7 @@ impl BlockLookups { peers, self.da_checker.clone(), cx.next_id(), + LookupType::Current, ); debug!( @@ -284,10 +282,10 @@ impl BlockLookups { /// Get a single block lookup by its ID. This method additionally ensures the `req_counter` /// matches the current `req_counter` for the lookup. This ensures any stale responses from requests /// that have been retried are ignored. - fn get_single_lookup>( + fn get_single_lookup>( &mut self, id: SingleLookupReqId, - ) -> Option> { + ) -> Option> { let mut lookup = self.single_block_lookups.remove(&id.id)?; let request_state = R::request_state_mut(&mut lookup); @@ -314,7 +312,7 @@ impl BlockLookups { } /// Process a block or blob response received from a single lookup request. - pub fn single_lookup_response>( + pub fn single_lookup_response>( &mut self, lookup_id: SingleLookupReqId, peer_id: PeerId, @@ -345,7 +343,7 @@ impl BlockLookups { "response_type" => ?response_type, ); - match self.handle_verified_response::( + match self.handle_verified_response::( seen_timestamp, cx, BlockProcessType::SingleBlock { id: lookup.id }, @@ -372,13 +370,13 @@ impl BlockLookups { /// Consolidates error handling for `single_lookup_response`. An `Err` here should always mean /// the lookup is dropped. - fn handle_verified_response>( + fn handle_verified_response>( &self, seen_timestamp: Duration, cx: &mut SyncNetworkContext, process_type: BlockProcessType, verified_response: R::VerifiedResponseType, - lookup: &mut SingleBlockLookup, + lookup: &mut SingleBlockLookup, ) -> Result<(), LookupRequestError> { let id = lookup.id; let block_root = lookup.block_root(); @@ -389,7 +387,7 @@ impl BlockLookups { // If we have an outstanding parent request for this block, delay sending the response until // all parent blocks have been processed, otherwise we will fail validation with an // `UnknownParent`. - let delay_send = match L::lookup_type() { + let delay_send = match lookup.lookup_type { LookupType::Parent => false, LookupType::Current => self.has_pending_parent_request(lookup.block_root()), }; @@ -453,7 +451,7 @@ impl BlockLookups { /// Get a parent block lookup by its ID. This method additionally ensures the `req_counter` /// matches the current `req_counter` for the lookup. This any stale responses from requests /// that have been retried are ignored. - fn get_parent_lookup>( + fn get_parent_lookup>( &mut self, id: SingleLookupReqId, ) -> Option> { @@ -479,7 +477,7 @@ impl BlockLookups { } /// Process a response received from a parent lookup request. - pub fn parent_lookup_response>( + pub fn parent_lookup_response>( &mut self, id: SingleLookupReqId, peer_id: PeerId, @@ -523,7 +521,7 @@ impl BlockLookups { /// Consolidates error handling for `parent_lookup_response`. 
An `Err` here should always mean /// the lookup is dropped. - fn parent_lookup_response_inner>( + fn parent_lookup_response_inner>( &mut self, peer_id: PeerId, response: R::VerifiedResponseType, @@ -554,7 +552,7 @@ impl BlockLookups { } } - self.handle_verified_response::( + self.handle_verified_response::( seen_timestamp, cx, BlockProcessType::ParentLookup { @@ -633,7 +631,7 @@ impl BlockLookups { } /// An RPC error has occurred during a parent lookup. This function handles this case. - pub fn parent_lookup_failed>( + pub fn parent_lookup_failed>( &mut self, id: SingleLookupReqId, peer_id: &PeerId, @@ -669,7 +667,7 @@ impl BlockLookups { } /// An RPC error has occurred during a single lookup. This function handles this case.\ - pub fn single_block_lookup_failed>( + pub fn single_block_lookup_failed>( &mut self, id: SingleLookupReqId, peer_id: &PeerId, @@ -717,7 +715,7 @@ impl BlockLookups { /* Processing responses */ - pub fn single_block_component_processed>( + pub fn single_block_component_processed>( &mut self, target_id: Id, result: BlockProcessingResult, diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index b7a71860bff..11eb908953f 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,6 +1,6 @@ +use super::common::LookupType; use super::single_block_lookup::{LookupRequestError, SingleBlockLookup}; use super::{DownloadedBlock, PeerId}; -use crate::sync::block_lookups::common::Parent; use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::block_verification_types::RpcBlock; @@ -24,7 +24,7 @@ pub(crate) struct ParentLookup { /// The blocks that have currently been downloaded. downloaded_blocks: Vec>, /// Request of the last parent. 
- pub current_parent_request: SingleBlockLookup, + pub current_parent_request: SingleBlockLookup, } #[derive(Debug, PartialEq, Eq)] @@ -55,6 +55,7 @@ impl ParentLookup { &[peer_id], da_checker, cx.next_id(), + LookupType::Parent, ); Self { @@ -132,7 +133,7 @@ impl ParentLookup { Hash256, VecDeque>, Vec, - SingleBlockLookup, + SingleBlockLookup, ) { let ParentLookup { chain_hash, diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 5bb663967d7..077af7c3d19 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,5 +1,6 @@ +use super::common::LookupType; use super::PeerId; -use crate::sync::block_lookups::common::{Lookup, RequestState}; +use crate::sync::block_lookups::common::RequestState; use crate::sync::block_lookups::Id; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::block_verification_types::RpcBlock; @@ -14,7 +15,6 @@ use rand::seq::IteratorRandom; use slog::{debug, Logger}; use std::collections::HashSet; use std::fmt::Debug; -use std::marker::PhantomData; use std::sync::Arc; use store::Hash256; use strum::IntoStaticStr; @@ -33,27 +33,30 @@ pub enum LookupRequestError { BadState(String), } -pub struct SingleBlockLookup { +pub struct SingleBlockLookup { pub id: Id, - pub block_request_state: BlockRequestState, - pub blob_request_state: BlobRequestState, + pub lookup_type: LookupType, + pub block_request_state: BlockRequestState, + pub blob_request_state: BlobRequestState, pub da_checker: Arc>, /// Only necessary for requests triggered by an `UnknownBlockParent` or `UnknownBlockParent` /// because any blocks or blobs without parents won't hit the data availability cache. pub child_components: Option>, } -impl SingleBlockLookup { +impl SingleBlockLookup { pub fn new( requested_block_root: Hash256, child_components: Option>, peers: &[PeerId], da_checker: Arc>, id: Id, + lookup_type: LookupType, ) -> Self { let is_deneb = da_checker.is_deneb(); Self { id, + lookup_type, block_request_state: BlockRequestState::new(requested_block_root, peers), blob_request_state: BlobRequestState::new(requested_block_root, peers, is_deneb), da_checker, @@ -103,11 +106,11 @@ impl SingleBlockLookup { if !block_already_downloaded { self.block_request_state - .build_request_and_send(self.id, cx)?; + .build_request_and_send(self.id, self.lookup_type, cx)?; } if !blobs_already_downloaded { self.blob_request_state - .build_request_and_send(self.id, cx)?; + .build_request_and_send(self.id, self.lookup_type, cx)?; } Ok(()) } @@ -144,7 +147,7 @@ impl SingleBlockLookup { /// Accepts a verified response, and adds it to the child components if required. This method /// returns a `CachedChild` which provides a completed block + blob response if all components have been /// received, or information about whether the child is required and if it has been downloaded. - pub fn add_response>( + pub fn add_response>( &mut self, verified_response: R::VerifiedResponseType, ) -> CachedChild { @@ -301,7 +304,7 @@ impl SingleBlockLookup { } /// The state of the blob request component of a `SingleBlockLookup`. -pub struct BlobRequestState { +pub struct BlobRequestState { /// The latest picture of which blobs still need to be requested. This includes information /// from both block/blobs downloaded in the network layer and any blocks/blobs that exist in /// the data availability checker. 
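The hunks above drop the compile-time `Lookup` trait and its `Parent`/`Current` marker types in favour of a `LookupType` enum carried as a plain field on each lookup. A minimal, self-contained sketch of that pattern follows; the names mirror the diff (`LookupType`, `max_attempts`, `too_many_attempts`), while the struct and the tolerance constants are simplified stand-ins assumed for illustration:

```rust
// Sketch of the enum-based dispatch this patch adopts.
const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; // assumed value
const PARENT_FAIL_TOLERANCE: u8 = 5; // assumed value

#[derive(Clone, Copy, Debug)]
enum LookupType {
    Current,
    Parent,
}

impl LookupType {
    fn max_attempts(&self) -> u8 {
        match self {
            LookupType::Current => SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS,
            LookupType::Parent => PARENT_FAIL_TOLERANCE,
        }
    }
}

struct Lookup {
    failed_attempts: u8,
    lookup_type: LookupType,
}

impl Lookup {
    // With the enum, the attempt check is a runtime match instead of a
    // generic bound, so one compiled copy serves both lookup kinds.
    fn too_many_attempts(&self) -> bool {
        self.failed_attempts >= self.lookup_type.max_attempts()
    }
}

fn main() {
    let current = Lookup { failed_attempts: 3, lookup_type: LookupType::Current };
    let parent = Lookup { failed_attempts: 3, lookup_type: LookupType::Parent };
    assert!(current.too_many_attempts());
    assert!(!parent.too_many_attempts());
}
```

Trading the type parameter for a field also removes the `PhantomData` markers and the per-lookup-kind monomorphized copies of every `RequestState` method, at the cost of one runtime match per attempt check.
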
@@ -310,10 +313,9 @@ pub struct BlobRequestState { /// Where we store blobs until we receive the stream terminator. pub blob_download_queue: FixedBlobSidecarList, pub state: SingleLookupRequestState, - _phantom: PhantomData, } -impl BlobRequestState { +impl BlobRequestState { pub fn new(block_root: Hash256, peer_source: &[PeerId], is_deneb: bool) -> Self { let default_ids = MissingBlobs::new_without_block(block_root, is_deneb); Self { @@ -321,24 +323,21 @@ impl BlobRequestState { requested_ids: default_ids, blob_download_queue: <_>::default(), state: SingleLookupRequestState::new(peer_source), - _phantom: PhantomData, } } } /// The state of the block request component of a `SingleBlockLookup`. -pub struct BlockRequestState { +pub struct BlockRequestState { pub requested_block_root: Hash256, pub state: SingleLookupRequestState, - _phantom: PhantomData, } -impl BlockRequestState { +impl BlockRequestState { pub fn new(block_root: Hash256, peers: &[PeerId]) -> Self { Self { requested_block_root: block_root, state: SingleLookupRequestState::new(peers), - _phantom: PhantomData, } } } @@ -525,7 +524,7 @@ impl SingleLookupRequestState { } } -impl slog::Value for SingleBlockLookup { +impl slog::Value for SingleBlockLookup { fn serialize( &self, _record: &slog::Record, @@ -533,7 +532,7 @@ impl slog::Value for SingleBlockLookup { serializer: &mut dyn slog::Serializer, ) -> slog::Result { serializer.emit_str("request", key)?; - serializer.emit_arguments("lookup_type", &format_args!("{:?}", L::lookup_type()))?; + serializer.emit_arguments("lookup_type", &format_args!("{:?}", self.lookup_type))?; serializer.emit_arguments("hash", &format_args!("{}", self.block_root()))?; serializer.emit_arguments( "blob_ids", @@ -587,138 +586,3 @@ impl std::fmt::Display for State { } } } - -#[cfg(test)] -mod tests { - use super::*; - use crate::sync::block_lookups::common::LookupType; - use beacon_chain::builder::Witness; - use beacon_chain::eth1_chain::CachingEth1Backend; - use sloggers::null::NullLoggerBuilder; - use sloggers::Build; - use slot_clock::{SlotClock, TestingSlotClock}; - use std::time::Duration; - use store::{HotColdDB, MemoryStore, StoreConfig}; - use types::{ - test_utils::{SeedableRng, TestRandom, XorShiftRng}, - ChainSpec, MinimalEthSpec as E, SignedBeaconBlock, Slot, - }; - - fn rand_block() -> SignedBeaconBlock { - let mut rng = XorShiftRng::from_seed([42; 16]); - SignedBeaconBlock::from_block( - types::BeaconBlock::Base(types::BeaconBlockBase { - ..<_>::random_for_test(&mut rng) - }), - types::Signature::random_for_test(&mut rng), - ) - } - type T = Witness, E, MemoryStore, MemoryStore>; - - struct TestLookup1; - - impl Lookup for TestLookup1 { - const MAX_ATTEMPTS: u8 = 3; - - fn lookup_type() -> LookupType { - panic!() - } - } - - struct TestLookup2; - - impl Lookup for TestLookup2 { - const MAX_ATTEMPTS: u8 = 4; - - fn lookup_type() -> LookupType { - panic!() - } - } - - #[test] - fn test_happy_path() { - let peer_id = PeerId::random(); - let block = rand_block(); - let spec = E::default_spec(); - let slot_clock = TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(0), - Duration::from_secs(spec.seconds_per_slot), - ); - let log = NullLoggerBuilder.build().expect("logger should build"); - let store = - HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone()) - .expect("store"); - let da_checker = Arc::new( - DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec.clone()) - .expect("data availability checker"), - ); - let mut sl = 
SingleBlockLookup::::new( - block.canonical_root(), - None, - &[peer_id], - da_checker, - 1, - ); - as RequestState>::build_request( - &mut sl.block_request_state, - ) - .unwrap(); - sl.block_request_state.state.state = State::Downloading { peer_id }; - } - - #[test] - fn test_block_lookup_failures() { - let peer_id = PeerId::random(); - let block = rand_block(); - let spec = E::default_spec(); - let slot_clock = TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(0), - Duration::from_secs(spec.seconds_per_slot), - ); - let log = NullLoggerBuilder.build().expect("logger should build"); - let store = - HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone()) - .expect("store"); - - let da_checker = Arc::new( - DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec.clone()) - .expect("data availability checker"), - ); - - let mut sl = SingleBlockLookup::::new( - block.canonical_root(), - None, - &[peer_id], - da_checker, - 1, - ); - for _ in 1..TestLookup2::MAX_ATTEMPTS { - as RequestState>::build_request( - &mut sl.block_request_state, - ) - .unwrap(); - sl.block_request_state.state.on_download_failure(); - } - - // Now we receive the block and send it for processing - as RequestState>::build_request( - &mut sl.block_request_state, - ) - .unwrap(); - sl.block_request_state.state.state = State::Downloading { peer_id }; - - // One processing failure maxes the available attempts - sl.block_request_state.state.on_processing_failure(); - assert_eq!( - as RequestState>::build_request( - &mut sl.block_request_state, - ) - .unwrap_err(), - LookupRequestError::TooManyAttempts { - cannot_process: false - } - ) - } -} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 23bd1010bfe..9c17c6a1512 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -42,7 +42,6 @@ use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; -use crate::sync::block_lookups::common::{Current, Parent}; use crate::sync::block_lookups::{BlobRequestState, BlockRequestState}; use crate::sync::block_sidecar_coupling::BlocksAndBlobsRequestInfo; use beacon_chain::block_verification_types::AsBlock; @@ -621,14 +620,14 @@ impl SyncManager { } => match process_type { BlockProcessType::SingleBlock { id } => self .block_lookups - .single_block_component_processed::>( + .single_block_component_processed::( id, result, &mut self.network, ), BlockProcessType::SingleBlob { id } => self .block_lookups - .single_block_component_processed::>( + .single_block_component_processed::>( id, result, &mut self.network, @@ -834,7 +833,7 @@ impl SyncManager { Ok((block, seen_timestamp)) => match id.lookup_type { LookupType::Current => self .block_lookups - .single_lookup_response::>( + .single_lookup_response::( id, peer_id, block, @@ -843,7 +842,7 @@ impl SyncManager { ), LookupType::Parent => self .block_lookups - .parent_lookup_response::>( + .parent_lookup_response::( id, peer_id, block, @@ -854,7 +853,7 @@ impl SyncManager { Err(error) => match id.lookup_type { LookupType::Current => self .block_lookups - .single_block_lookup_failed::>( + .single_block_lookup_failed::( id, &peer_id, &mut self.network, @@ -862,7 +861,7 @@ impl SyncManager { ), LookupType::Parent => self .block_lookups - .parent_lookup_failed::>( + 
.parent_lookup_failed::( id, &peer_id, &mut self.network, @@ -909,7 +908,7 @@ impl SyncManager { Ok((blobs, seen_timestamp)) => match id.lookup_type { LookupType::Current => self .block_lookups - .single_lookup_response::>( + .single_lookup_response::>( id, peer_id, blobs, @@ -918,7 +917,7 @@ impl SyncManager { ), LookupType::Parent => self .block_lookups - .parent_lookup_response::>( + .parent_lookup_response::>( id, peer_id, blobs, @@ -930,7 +929,7 @@ impl SyncManager { Err(error) => match id.lookup_type { LookupType::Current => self .block_lookups - .single_block_lookup_failed::>( + .single_block_lookup_failed::>( id, &peer_id, &mut self.network, @@ -938,7 +937,7 @@ impl SyncManager { ), LookupType::Parent => self .block_lookups - .parent_lookup_failed::>( + .parent_lookup_failed::>( id, &peer_id, &mut self.network, From 4a48d7b546c21b668b12050947b3ea33cf973fd5 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Wed, 24 Apr 2024 01:02:48 -0500 Subject: [PATCH 10/16] Encode Execution Engine Client Version In Graffiti (#5290) * Add `engine_clientVersionV1` structs * Implement `engine_clientVersionV1` * Update to latest spec changes * Implement GraffitiCalculator Service * Added Unit Tests for GraffitiCalculator * Address Mac's Comments * Remove need to use clap in beacon chain * Merge remote-tracking branch 'upstream/unstable' into el_client_version_graffiti * Merge branch 'unstable' into el_client_version_graffiti # Conflicts: # beacon_node/beacon_chain/Cargo.toml --- Cargo.lock | 2 + beacon_node/beacon_chain/Cargo.toml | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 17 +- beacon_node/beacon_chain/src/builder.rs | 24 +- .../beacon_chain/src/graffiti_calculator.rs | 377 ++++++++++++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/client/src/builder.rs | 9 +- beacon_node/client/src/config.rs | 8 +- beacon_node/execution_layer/Cargo.toml | 1 + beacon_node/execution_layer/src/engine_api.rs | 145 ++++++- .../execution_layer/src/engine_api/http.rs | 111 +++++- .../src/engine_api/json_structures.rs | 33 ++ beacon_node/execution_layer/src/engines.rs | 57 ++- beacon_node/execution_layer/src/lib.rs | 33 +- .../src/test_utils/handle_rpc.rs | 5 +- .../execution_layer/src/test_utils/mod.rs | 12 + beacon_node/src/config.rs | 27 +- common/lighthouse_version/src/lib.rs | 18 + consensus/types/src/graffiti.rs | 6 + lighthouse/tests/beacon_node.rs | 41 +- 20 files changed, 847 insertions(+), 81 deletions(-) create mode 100644 beacon_node/beacon_chain/src/graffiti_calculator.rs diff --git a/Cargo.lock b/Cargo.lock index 335491adc1c..cc38877fe76 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -774,6 +774,7 @@ dependencies = [ "kzg", "lazy_static", "lighthouse_metrics", + "lighthouse_version", "logging", "lru", "maplit", @@ -2845,6 +2846,7 @@ dependencies = [ "kzg", "lazy_static", "lighthouse_metrics", + "lighthouse_version", "lru", "parking_lot 0.12.1", "pretty_reqwest_error", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 9c7c7febc5f..32a1056c10e 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -39,6 +39,7 @@ itertools = { workspace = true } kzg = { workspace = true } lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } +lighthouse_version = { workspace = true } logging = { workspace = true } lru = { workspace = true } merkle_proof = { workspace = true } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs 
b/beacon_node/beacon_chain/src/beacon_chain.rs index c65dfdb449f..dd1fe59b922 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -30,6 +30,7 @@ use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData use crate::events::ServerSentEventHandler; use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; +use crate::graffiti_calculator::GraffitiCalculator; use crate::head_tracker::{HeadTracker, HeadTrackerReader, SszHeadTracker}; use crate::historical_blocks::HistoricalBlockError; use crate::light_client_finality_update_verification::{ @@ -474,7 +475,7 @@ pub struct BeaconChain { /// Logging to CLI, etc. pub(crate) log: Logger, /// Arbitrary bytes included in the blocks. - pub(crate) graffiti: Graffiti, + pub(crate) graffiti_calculator: GraffitiCalculator, /// Optional slasher. pub slasher: Option>>, /// Provides monitoring of a set of explicitly defined validators. @@ -4654,6 +4655,10 @@ impl BeaconChain { // // Perform the state advance and block-packing functions. let chain = self.clone(); + let graffiti = self + .graffiti_calculator + .get_graffiti(validator_graffiti) + .await; let mut partial_beacon_block = self .task_executor .spawn_blocking_handle( @@ -4663,7 +4668,7 @@ impl BeaconChain { state_root_opt, produce_at_slot, randao_reveal, - validator_graffiti, + graffiti, builder_boost_factor, block_production_version, ) @@ -4761,7 +4766,7 @@ impl BeaconChain { state_root_opt: Option, produce_at_slot: Slot, randao_reveal: Signature, - validator_graffiti: Option, + graffiti: Graffiti, builder_boost_factor: Option, block_production_version: BlockProductionVersion, ) -> Result, BlockProductionError> { @@ -4867,12 +4872,6 @@ impl BeaconChain { } drop(unagg_import_timer); - // Override the beacon node's graffiti with graffiti from the validator, if present. - let graffiti = match validator_graffiti { - Some(graffiti) => graffiti, - None => self.graffiti, - }; - let attestation_packing_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_ATTESTATION_TIMES); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index be6b1f9b2bf..376bc16c035 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -7,6 +7,7 @@ use crate::eth1_chain::{CachingEth1Backend, SszEth1}; use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; +use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; use crate::head_tracker::HeadTracker; use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; @@ -38,8 +39,8 @@ use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use types::{ - BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, - Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, + Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. 
It has no user-facing @@ -95,7 +96,7 @@ pub struct BeaconChainBuilder { spec: ChainSpec, chain_config: ChainConfig, log: Option, - graffiti: Graffiti, + beacon_graffiti: GraffitiOrigin, slasher: Option>>, // Pending I/O batch that is constructed during building and should be executed atomically // alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called. @@ -138,7 +139,7 @@ where spec: E::default_spec(), chain_config: ChainConfig::default(), log: None, - graffiti: Graffiti::default(), + beacon_graffiti: GraffitiOrigin::default(), slasher: None, pending_io_batch: vec![], kzg: None, @@ -655,9 +656,9 @@ where self } - /// Sets the `graffiti` field. - pub fn graffiti(mut self, graffiti: Graffiti) -> Self { - self.graffiti = graffiti; + /// Sets the `beacon_graffiti` field. + pub fn beacon_graffiti(mut self, beacon_graffiti: GraffitiOrigin) -> Self { + self.beacon_graffiti = beacon_graffiti; self } @@ -924,7 +925,7 @@ where observed_attester_slashings: <_>::default(), observed_bls_to_execution_changes: <_>::default(), eth1_chain: self.eth1_chain, - execution_layer: self.execution_layer, + execution_layer: self.execution_layer.clone(), genesis_validators_root, genesis_time, canonical_head, @@ -953,7 +954,12 @@ where .shutdown_sender .ok_or("Cannot build without a shutdown sender.")?, log: log.clone(), - graffiti: self.graffiti, + graffiti_calculator: GraffitiCalculator::new( + self.beacon_graffiti, + self.execution_layer, + slot_clock.slot_duration() * E::slots_per_epoch() as u32, + log.clone(), + ), slasher: self.slasher.clone(), validator_monitor: RwLock::new(validator_monitor), genesis_backfill_slot, diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs new file mode 100644 index 00000000000..599c99dc2da --- /dev/null +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -0,0 +1,377 @@ +use crate::BeaconChain; +use crate::BeaconChainTypes; +use execution_layer::{http::ENGINE_GET_CLIENT_VERSION_V1, CommitPrefix, ExecutionLayer}; +use serde::{Deserialize, Serialize}; +use slog::{crit, debug, error, warn, Logger}; +use slot_clock::SlotClock; +use std::{fmt::Debug, time::Duration}; +use task_executor::TaskExecutor; +use types::{EthSpec, Graffiti, GRAFFITI_BYTES_LEN}; + +const ENGINE_VERSION_AGE_LIMIT_EPOCH_MULTIPLE: u32 = 6; // 6 epochs +const ENGINE_VERSION_CACHE_REFRESH_EPOCH_MULTIPLE: u32 = 2; // 2 epochs +const ENGINE_VERSION_CACHE_PRELOAD_STARTUP_DELAY: Duration = Duration::from_secs(60); + +/// Represents the source and content of graffiti for block production, excluding +/// inputs from the validator client and execution engine. Graffiti is categorized +/// as either user-specified or calculated to facilitate decisions on graffiti +/// selection. 
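+// A hypothetical sketch of the precedence that `GraffitiOrigin` feeds into,
+// implemented for real by `GraffitiCalculator::get_graffiti` below; the
+// `choose` helper and `el_based` parameter are illustrative names only:
+//
+//     fn choose(
+//         validator: Option<Graffiti>,
+//         origin: GraffitiOrigin,
+//         el_based: Option<Graffiti>,
+//     ) -> Graffiti {
+//         match (validator, origin) {
+//             (Some(g), _) => g,                             // 1. validator client
+//             (None, GraffitiOrigin::UserSpecified(g)) => g, // 2. beacon node CLI
+//             // 3. EL-derived version string, else 4. Lighthouse default
+//             (None, GraffitiOrigin::Calculated(d)) => el_based.unwrap_or(d),
+//         }
+//     }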
+#[derive(Clone, Copy, Serialize, Deserialize)]
+pub enum GraffitiOrigin {
+    UserSpecified(Graffiti),
+    Calculated(Graffiti),
+}
+
+impl GraffitiOrigin {
+    pub fn graffiti(&self) -> Graffiti {
+        match self {
+            GraffitiOrigin::UserSpecified(graffiti) => *graffiti,
+            GraffitiOrigin::Calculated(graffiti) => *graffiti,
+        }
+    }
+}
+
+impl Default for GraffitiOrigin {
+    fn default() -> Self {
+        let version_bytes = lighthouse_version::VERSION.as_bytes();
+        let trimmed_len = std::cmp::min(version_bytes.len(), GRAFFITI_BYTES_LEN);
+        let mut bytes = [0u8; GRAFFITI_BYTES_LEN];
+        bytes[..trimmed_len].copy_from_slice(&version_bytes[..trimmed_len]);
+        Self::Calculated(Graffiti::from(bytes))
+    }
+}
+
+impl Debug for GraffitiOrigin {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        self.graffiti().fmt(f)
+    }
+}
+
+pub struct GraffitiCalculator<T: BeaconChainTypes> {
+    pub beacon_graffiti: GraffitiOrigin,
+    execution_layer: Option<ExecutionLayer<T::EthSpec>>,
+    pub epoch_duration: Duration,
+    log: Logger,
+}
+
+impl<T: BeaconChainTypes> GraffitiCalculator<T> {
+    pub fn new(
+        beacon_graffiti: GraffitiOrigin,
+        execution_layer: Option<ExecutionLayer<T::EthSpec>>,
+        epoch_duration: Duration,
+        log: Logger,
+    ) -> Self {
+        Self {
+            beacon_graffiti,
+            execution_layer,
+            epoch_duration,
+            log,
+        }
+    }
+
+    /// Returns the appropriate graffiti to use for block production, prioritizing
+    /// sources in the following order:
+    /// 1. Graffiti specified by the validator client.
+    /// 2. Graffiti specified by the user via beacon node CLI options.
+    /// 3. The EL & CL client version string, applicable when the EL supports version specification.
+    /// 4. The default Lighthouse version string, used if the EL lacks version specification support.
+    pub async fn get_graffiti(&self, validator_graffiti: Option<Graffiti>) -> Graffiti {
+        if let Some(graffiti) = validator_graffiti {
+            return graffiti;
+        }
+
+        match self.beacon_graffiti {
+            GraffitiOrigin::UserSpecified(graffiti) => graffiti,
+            GraffitiOrigin::Calculated(default_graffiti) => {
+                let Some(execution_layer) = self.execution_layer.as_ref() else {
+                    // Return the default graffiti if there is no execution layer. This
+                    // shouldn't occur if we're actually producing blocks.
+                    crit!(self.log, "No execution layer available for graffiti calculation during block production!");
+                    return default_graffiti;
+                };
+
+                // The engine version cache refresh service ensures that this data is almost
+                // always retrieved from the cache rather than via a request to the execution
+                // engine. A cache miss should only occur if Lighthouse has recently started
+                // or the EL recently went offline.
+                let engine_versions = match execution_layer
+                    .get_engine_version(Some(
+                        self.epoch_duration * ENGINE_VERSION_AGE_LIMIT_EPOCH_MULTIPLE,
+                    ))
+                    .await
+                {
+                    Ok(engine_versions) => engine_versions,
+                    Err(el_error) => {
+                        warn!(self.log, "Failed to determine execution engine version for graffiti"; "error" => ?el_error);
+                        return default_graffiti;
+                    }
+                };
+
+                let Some(engine_version) = engine_versions.first() else {
+                    // An empty array indicates that the EL doesn't support the method.
+                    debug!(
+                        self.log,
+                        "Using default lighthouse graffiti: EL does not support {} method",
+                        ENGINE_GET_CLIENT_VERSION_V1;
+                    );
+                    return default_graffiti;
+                };
+                if engine_versions.len() != 1 {
+                    // More than one version implies Lighthouse is connected to
+                    // an EL multiplexer. We don't support modifying the graffiti
+                    // with these configurations.
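+                    // (A multiplexer fronts several ELs at once, so this call
+                    // returns one entry per upstream client and there is no
+                    // single version that can honestly be attributed to the
+                    // produced block.)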
+                    warn!(
+                        self.log,
+                        "Execution Engine multiplexer detected, using default graffiti"
+                    );
+                    return default_graffiti;
+                }
+
+                let lighthouse_commit_prefix = CommitPrefix::try_from(lighthouse_version::COMMIT_PREFIX.to_string())
+                    .unwrap_or_else(|error_message| {
+                        // This really shouldn't happen, but we definitely want to log it if it does.
+                        crit!(self.log, "Failed to parse lighthouse commit prefix"; "error" => error_message);
+                        CommitPrefix("00000000".to_string())
+                    });
+
+                engine_version.calculate_graffiti(lighthouse_commit_prefix)
+            }
+        }
+    }
+}
+
+pub fn start_engine_version_cache_refresh_service<T: BeaconChainTypes>(
+    chain: &BeaconChain<T>,
+    executor: TaskExecutor,
+) {
+    let Some(el_ref) = chain.execution_layer.as_ref() else {
+        debug!(
+            chain.log,
+            "No execution layer configured, not starting engine version cache refresh service"
+        );
+        return;
+    };
+    if matches!(
+        chain.graffiti_calculator.beacon_graffiti,
+        GraffitiOrigin::UserSpecified(_)
+    ) {
+        debug!(
+            chain.log,
+            "Graffiti is user-specified, not starting engine version cache refresh service"
+        );
+        return;
+    }
+
+    let execution_layer = el_ref.clone();
+    let log = chain.log.clone();
+    let slot_clock = chain.slot_clock.clone();
+    let epoch_duration = chain.graffiti_calculator.epoch_duration;
+    executor.spawn(
+        async move {
+            engine_version_cache_refresh_service::<T>(
+                execution_layer,
+                slot_clock,
+                epoch_duration,
+                log,
+            )
+            .await
+        },
+        "engine_version_cache_refresh_service",
+    );
+}
+
+async fn engine_version_cache_refresh_service<T: BeaconChainTypes>(
+    execution_layer: ExecutionLayer<T::EthSpec>,
+    slot_clock: T::SlotClock,
+    epoch_duration: Duration,
+    log: Logger,
+) {
+    // Preload the engine version cache after a brief delay to allow for EL initialization.
+    // This initial priming ensures cache readiness before the service's regular update cycle begins.
+    tokio::time::sleep(ENGINE_VERSION_CACHE_PRELOAD_STARTUP_DELAY).await;
+    if let Err(e) = execution_layer.get_engine_version(None).await {
+        debug!(log, "Failed to preload engine version cache"; "error" => format!("{:?}", e));
+    }
+
+    // This service should fire 3/8 of the way through each epoch.
+    let epoch_delay = (epoch_duration * 3) / 8;
+    // The total duration between firings of this service, less one epoch; the
+    // remaining epoch is supplied by `duration_to_next_epoch` below.
+    let partial_firing_delay =
+        epoch_duration * ENGINE_VERSION_CACHE_REFRESH_EPOCH_MULTIPLE.saturating_sub(1);
+    loop {
+        match slot_clock.duration_to_next_epoch(T::EthSpec::slots_per_epoch()) {
+            Some(duration_to_next_epoch) => {
+                let firing_delay = partial_firing_delay + duration_to_next_epoch + epoch_delay;
+                tokio::time::sleep(firing_delay).await;
+
+                debug!(
+                    log,
+                    "Engine version cache refresh service firing";
+                );
+
+                match execution_layer.get_engine_version(None).await {
+                    Err(e) => warn!(log, "Failed to populate engine version cache"; "error" => ?e),
+                    Ok(versions) => {
+                        if versions.is_empty() {
+                            // An empty array indicates that the EL doesn't support the method.
+                            debug!(
+                                log,
+                                "EL does not support {} method. Sleeping twice as long before retry",
+                                ENGINE_GET_CLIENT_VERSION_V1
+                            );
+                            tokio::time::sleep(
+                                epoch_duration * ENGINE_VERSION_CACHE_REFRESH_EPOCH_MULTIPLE,
+                            )
+                            .await;
+                        }
+                    }
+                }
+            }
+            None => {
+                error!(log, "Failed to read slot clock");
+                // If we can't read the slot clock, just wait another slot.
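+                // (Retrying after a single slot keeps the service alive
+                // without busy-looping on a broken clock.)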
+                tokio::time::sleep(slot_clock.slot_duration()).await;
+            }
+        };
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType};
+    use crate::ChainConfig;
+    use execution_layer::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_ENGINE_CAPABILITIES};
+    use execution_layer::EngineCapabilities;
+    use lazy_static::lazy_static;
+    use slog::info;
+    use std::time::Duration;
+    use types::{ChainSpec, Graffiti, Keypair, MinimalEthSpec, GRAFFITI_BYTES_LEN};
+
+    const VALIDATOR_COUNT: usize = 48;
+    lazy_static! {
+        /// A cached set of keys.
+        static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
+    }
+
+    fn get_harness(
+        validator_count: usize,
+        spec: ChainSpec,
+        chain_config: Option<ChainConfig>,
+    ) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> {
+        let harness = BeaconChainHarness::builder(MinimalEthSpec)
+            .spec(spec)
+            .chain_config(chain_config.unwrap_or_default())
+            .keypairs(KEYPAIRS[0..validator_count].to_vec())
+            .logger(logging::test_logger())
+            .fresh_ephemeral_store()
+            .mock_execution_layer()
+            .build();
+
+        harness.advance_slot();
+
+        harness
+    }
+
+    #[tokio::test]
+    async fn check_graffiti_without_el_version_support() {
+        let spec = test_spec::<MinimalEthSpec>();
+        let harness = get_harness(VALIDATOR_COUNT, spec, None);
+        // modify execution engine so it doesn't support engine_getClientVersionV1 method
+        let mock_execution_layer = harness.mock_execution_layer.as_ref().unwrap();
+        mock_execution_layer
+            .server
+            .set_engine_capabilities(EngineCapabilities {
+                get_client_version_v1: false,
+                ..DEFAULT_ENGINE_CAPABILITIES
+            });
+        // refresh capabilities cache
+        harness
+            .chain
+            .execution_layer
+            .as_ref()
+            .unwrap()
+            .get_engine_capabilities(Some(Duration::ZERO))
+            .await
+            .unwrap();
+
+        let version_bytes = std::cmp::min(
+            lighthouse_version::VERSION.as_bytes().len(),
+            GRAFFITI_BYTES_LEN,
+        );
+        // grab the slice of the graffiti that corresponds to the lighthouse version
+        let graffiti_slice =
+            &harness.chain.graffiti_calculator.get_graffiti(None).await.0[..version_bytes];
+
+        // convert graffiti bytes slice to ascii for easy debugging if this test should fail
+        let graffiti_str =
+            std::str::from_utf8(graffiti_slice).expect("bytes should convert nicely to ascii");
+
+        info!(harness.chain.log, "results"; "lighthouse_version" => lighthouse_version::VERSION, "graffiti_str" => graffiti_str);
+        println!("lighthouse_version: '{}'", lighthouse_version::VERSION);
+        println!("graffiti_str: '{}'", graffiti_str);
+
+        assert!(lighthouse_version::VERSION.starts_with(graffiti_str));
+    }
+
+    #[tokio::test]
+    async fn check_graffiti_with_el_version_support() {
+        let spec = test_spec::<MinimalEthSpec>();
+        let harness = get_harness(VALIDATOR_COUNT, spec, None);
+
+        let found_graffiti_bytes = harness.chain.graffiti_calculator.get_graffiti(None).await.0;
+
+        let mock_commit = DEFAULT_CLIENT_VERSION.commit.clone();
+        let expected_graffiti_string = format!(
+            "{}{}{}{}",
+            DEFAULT_CLIENT_VERSION.code,
+            mock_commit
+                .strip_prefix("0x")
+                .unwrap_or(&mock_commit)
+                .get(0..4)
+                .expect("should get first 2 bytes in hex"),
+            "LH",
+            lighthouse_version::COMMIT_PREFIX
+                .get(0..4)
+                .expect("should get first 2 bytes in hex")
+        );
+
+        let expected_graffiti_prefix_bytes = expected_graffiti_string.as_bytes();
+        let expected_graffiti_prefix_len =
+            std::cmp::min(expected_graffiti_prefix_bytes.len(), GRAFFITI_BYTES_LEN);
+
+        let found_graffiti_string =
+            std::str::from_utf8(&found_graffiti_bytes[..expected_graffiti_prefix_len])
+                .expect("bytes should convert nicely to ascii");
+
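+        // Expected layout, per `calculate_graffiti` and the mock client in
+        // test_utils: the 2-char EL code ("MC"), the first 4 hex chars of the
+        // EL commit ("abcd"), the literal "LH", then the first 4 hex chars of
+        // the Lighthouse commit prefix, zero-padded to GRAFFITI_BYTES_LEN.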
info!(harness.chain.log, "results"; "expected_graffiti_string" => &expected_graffiti_string, "found_graffiti_string" => &found_graffiti_string); + println!("expected_graffiti_string: '{}'", expected_graffiti_string); + println!("found_graffiti_string: '{}'", found_graffiti_string); + + assert_eq!(expected_graffiti_string, found_graffiti_string); + + let mut expected_graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN]; + expected_graffiti_bytes[..expected_graffiti_prefix_len] + .copy_from_slice(expected_graffiti_string.as_bytes()); + assert_eq!(found_graffiti_bytes, expected_graffiti_bytes); + } + + #[tokio::test] + async fn check_graffiti_with_validator_specified_value() { + let spec = test_spec::(); + let harness = get_harness(VALIDATOR_COUNT, spec, None); + + let graffiti_str = "nice graffiti bro"; + let mut graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN]; + graffiti_bytes[..graffiti_str.as_bytes().len()].copy_from_slice(graffiti_str.as_bytes()); + + let found_graffiti = harness + .chain + .graffiti_calculator + .get_graffiti(Some(Graffiti::from(graffiti_bytes))) + .await; + + assert_eq!( + found_graffiti.to_string(), + "0x6e6963652067726166666974692062726f000000000000000000000000000000" + ); + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index c1df9ede87a..5ee7803436c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -28,6 +28,7 @@ pub mod events; pub mod execution_payload; pub mod fork_choice_signal; pub mod fork_revert; +pub mod graffiti_calculator; mod head_tracker; pub mod historical_blocks; pub mod kzg_utils; diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index e7f201b8521..2af4e74c224 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -6,6 +6,7 @@ use crate::notifier::spawn_notifier; use crate::Client; use beacon_chain::attestation_simulator::start_attestation_simulator_service; use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; +use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_service; use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; @@ -164,7 +165,7 @@ where let runtime_context = self.runtime_context.clone(); let eth_spec_instance = self.eth_spec_instance.clone(); let chain_config = config.chain.clone(); - let graffiti = config.graffiti; + let beacon_graffiti = config.beacon_graffiti; let store = store.ok_or("beacon_chain_start_method requires a store")?; let runtime_context = @@ -203,7 +204,7 @@ where MigratorConfig::default().epochs_per_migration(chain_config.epochs_per_migration), ) .chain_config(chain_config) - .graffiti(graffiti) + .beacon_graffiti(beacon_graffiti) .event_handler(event_handler) .execution_layer(execution_layer) .validator_monitor_config(config.validator_monitor.clone()); @@ -967,6 +968,10 @@ where runtime_context.executor.clone(), beacon_chain.clone(), ); + start_engine_version_cache_refresh_service( + beacon_chain.as_ref(), + runtime_context.executor.clone(), + ); start_attestation_simulator_service( beacon_chain.task_executor.clone(), beacon_chain.clone(), diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index a441e2c186c..16000374b22 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,3 +1,4 @@ +use 
beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_chain::validator_monitor::ValidatorMonitorConfig; use beacon_chain::TrustedSetup; use beacon_processor::BeaconProcessorConfig; @@ -9,7 +10,6 @@ use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; use std::time::Duration; -use types::Graffiti; /// Default directory name for the freezer database under the top-level data dir. const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; @@ -63,8 +63,8 @@ pub struct Config { /// This is the method used for the 2019 client interop in Canada. pub dummy_eth1_backend: bool, pub sync_eth1_chain: bool, - /// Graffiti to be inserted everytime we create a block. - pub graffiti: Graffiti, + /// Graffiti to be inserted everytime we create a block if the validator doesn't specify. + pub beacon_graffiti: GraffitiOrigin, pub validator_monitor: ValidatorMonitorConfig, #[serde(skip)] /// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined @@ -104,7 +104,7 @@ impl Default for Config { eth1: <_>::default(), execution_layer: None, trusted_setup: None, - graffiti: Graffiti::default(), + beacon_graffiti: GraffitiOrigin::default(), http_api: <_>::default(), http_metrics: <_>::default(), monitoring_api: None, diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index ace8e24a8e4..28cd16e4ef9 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -52,3 +52,4 @@ arc-swap = "1.6.0" eth2_network_config = { workspace = true } alloy-rlp = "0.3" alloy-consensus = { git = "https://github.com/alloy-rs/alloy.git", rev = "974d488bab5e21e9f17452a39a4bfa56677367b2" } +lighthouse_version = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index a91f5d6a442..d422994c595 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,9 +1,9 @@ use crate::engines::ForkchoiceState; use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1, - ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, + ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, }; use eth2::types::{ BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, @@ -24,11 +24,11 @@ pub use types::{ ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList, Withdrawal, Withdrawals, }; - use types::{ ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadMerge, KzgProofs, }; +use types::{Graffiti, GRAFFITI_BYTES_LEN}; pub mod auth; pub mod http; @@ -61,13 +61,13 @@ pub enum Error { ParentHashEqualsBlockHash(ExecutionBlockHash), PayloadIdUnavailable, TransitionConfigurationMismatch, - PayloadConversionLogicFlaw, SszError(ssz_types::Error), DeserializeWithdrawals(ssz_types::Error), BuilderApi(builder_client::Error), IncorrectStateVariant, RequiredMethodUnsupported(&'static str), UnsupportedForkVariant(String), + InvalidClientVersion(String), RlpDecoderError(rlp::DecoderError), } @@ -652,6 +652,7 @@ pub struct 
EngineCapabilities { pub get_payload_v1: bool, pub get_payload_v2: bool, pub get_payload_v3: bool, + pub get_client_version_v1: bool, } impl EngineCapabilities { @@ -690,7 +691,141 @@ impl EngineCapabilities { if self.get_payload_v3 { response.push(ENGINE_GET_PAYLOAD_V3); } + if self.get_client_version_v1 { + response.push(ENGINE_GET_CLIENT_VERSION_V1); + } response } } + +#[derive(Clone, Debug, PartialEq)] +pub enum ClientCode { + Besu, + EtherumJS, + Erigon, + GoEthereum, + Grandine, + Lighthouse, + Lodestar, + Nethermind, + Nimbus, + Teku, + Prysm, + Reth, + Unknown(String), +} + +impl std::fmt::Display for ClientCode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match self { + ClientCode::Besu => "BU", + ClientCode::EtherumJS => "EJ", + ClientCode::Erigon => "EG", + ClientCode::GoEthereum => "GE", + ClientCode::Grandine => "GR", + ClientCode::Lighthouse => "LH", + ClientCode::Lodestar => "LS", + ClientCode::Nethermind => "NM", + ClientCode::Nimbus => "NB", + ClientCode::Teku => "TK", + ClientCode::Prysm => "PM", + ClientCode::Reth => "RH", + ClientCode::Unknown(code) => code, + }; + write!(f, "{}", s) + } +} + +impl TryFrom for ClientCode { + type Error = String; + + fn try_from(code: String) -> Result { + match code.as_str() { + "BU" => Ok(Self::Besu), + "EJ" => Ok(Self::EtherumJS), + "EG" => Ok(Self::Erigon), + "GE" => Ok(Self::GoEthereum), + "GR" => Ok(Self::Grandine), + "LH" => Ok(Self::Lighthouse), + "LS" => Ok(Self::Lodestar), + "NM" => Ok(Self::Nethermind), + "NB" => Ok(Self::Nimbus), + "TK" => Ok(Self::Teku), + "PM" => Ok(Self::Prysm), + "RH" => Ok(Self::Reth), + string => { + if string.len() == 2 { + Ok(Self::Unknown(code)) + } else { + Err(format!("Invalid client code: {}", code)) + } + } + } + } +} + +#[derive(Clone, Debug)] +pub struct CommitPrefix(pub String); + +impl TryFrom for CommitPrefix { + type Error = String; + + fn try_from(value: String) -> Result { + // Check if the input starts with '0x' and strip it if it does + let commit_prefix = value.strip_prefix("0x").unwrap_or(&value); + + // Ensure length is exactly 8 characters after '0x' removal + if commit_prefix.len() != 8 { + return Err( + "Input must be exactly 8 characters long (excluding any '0x' prefix)".to_string(), + ); + } + + // Ensure all characters are valid hex digits + if commit_prefix.chars().all(|c| c.is_ascii_hexdigit()) { + Ok(CommitPrefix(commit_prefix.to_lowercase())) + } else { + Err("Input must contain only hexadecimal characters".to_string()) + } + } +} + +impl std::fmt::Display for CommitPrefix { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +#[derive(Clone, Debug)] +pub struct ClientVersionV1 { + pub code: ClientCode, + pub name: String, + pub version: String, + pub commit: CommitPrefix, +} + +impl ClientVersionV1 { + pub fn calculate_graffiti(&self, lighthouse_commit_prefix: CommitPrefix) -> Graffiti { + let graffiti_string = format!( + "{}{}LH{}", + self.code, + self.commit + .0 + .get(..4) + .map_or_else(|| self.commit.0.as_str(), |s| s) + .to_lowercase(), + lighthouse_commit_prefix + .0 + .get(..4) + .unwrap_or("0000") + .to_lowercase(), + ); + let mut graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN]; + let bytes_to_copy = std::cmp::min(graffiti_string.len(), GRAFFITI_BYTES_LEN); + graffiti_bytes[..bytes_to_copy] + .copy_from_slice(&graffiti_string.as_bytes()[..bytes_to_copy]); + + Graffiti::from(graffiti_bytes) + } +} diff --git a/beacon_node/execution_layer/src/engine_api/http.rs 
b/beacon_node/execution_layer/src/engine_api/http.rs index ebd6ebeba2a..405416e4f9d 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -3,6 +3,8 @@ use super::*; use crate::auth::Auth; use crate::json_structures::*; +use lazy_static::lazy_static; +use lighthouse_version::{COMMIT_PREFIX, VERSION}; use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; @@ -51,6 +53,9 @@ pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); +pub const ENGINE_GET_CLIENT_VERSION_V1: &str = "engine_getClientVersionV1"; +pub const ENGINE_GET_CLIENT_VERSION_TIMEOUT: Duration = Duration::from_secs(1); + /// This error is returned during a `chainId` call by Geth. pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; /// This code is returned by all clients when a method is not supported @@ -69,8 +74,22 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, + ENGINE_GET_CLIENT_VERSION_V1, ]; +lazy_static! { + /// We opt to initialize the JsonClientVersionV1 rather than the ClientVersionV1 + /// for two reasons: + /// 1. This saves the overhead of converting into Json for every engine call + /// 2. The Json version lacks error checking so we can avoid calling `unwrap()` + pub static ref LIGHTHOUSE_JSON_CLIENT_VERSION: JsonClientVersionV1 = JsonClientVersionV1 { + code: ClientCode::Lighthouse.to_string(), + name: "Lighthouse".to_string(), + version: VERSION.replace("Lighthouse/", ""), + commit: COMMIT_PREFIX.to_string(), + }; +} + /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. 
pub mod deposit_log { use ssz::Decode; @@ -546,22 +565,21 @@ pub mod deposit_methods { } } -#[derive(Clone, Debug)] -pub struct CapabilitiesCacheEntry { - engine_capabilities: EngineCapabilities, - fetch_time: Instant, +pub struct CachedResponse { + pub data: T, + pub fetch_time: Instant, } -impl CapabilitiesCacheEntry { - pub fn new(engine_capabilities: EngineCapabilities) -> Self { +impl CachedResponse { + pub fn new(data: T) -> Self { Self { - engine_capabilities, + data, fetch_time: Instant::now(), } } - pub fn engine_capabilities(&self) -> EngineCapabilities { - self.engine_capabilities + pub fn data(&self) -> T { + self.data.clone() } pub fn age(&self) -> Duration { @@ -578,7 +596,8 @@ pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, pub execution_timeout_multiplier: u32, - pub engine_capabilities_cache: Mutex>, + pub engine_capabilities_cache: Mutex>>, + pub engine_version_cache: Mutex>>>, auth: Option, } @@ -592,6 +611,7 @@ impl HttpJsonRpc { url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), engine_capabilities_cache: Mutex::new(None), + engine_version_cache: Mutex::new(None), auth: None, }) } @@ -606,6 +626,7 @@ impl HttpJsonRpc { url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), engine_capabilities_cache: Mutex::new(None), + engine_version_cache: Mutex::new(None), auth: Some(auth), }) } @@ -1056,6 +1077,7 @@ impl HttpJsonRpc { get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), + get_client_version_v1: capabilities.contains(ENGINE_GET_CLIENT_VERSION_V1), }) } @@ -1078,15 +1100,78 @@ impl HttpJsonRpc { ) -> Result { let mut lock = self.engine_capabilities_cache.lock().await; - if let Some(lock) = lock.as_ref().filter(|entry| !entry.older_than(age_limit)) { - Ok(lock.engine_capabilities()) + if let Some(lock) = lock + .as_ref() + .filter(|cached_response| !cached_response.older_than(age_limit)) + { + Ok(lock.data()) } else { let engine_capabilities = self.exchange_capabilities().await?; - *lock = Some(CapabilitiesCacheEntry::new(engine_capabilities)); + *lock = Some(CachedResponse::new(engine_capabilities)); Ok(engine_capabilities) } } + /// This method fetches the response from the engine without checking + /// any caches or storing the result in the cache. It is better to use + /// `get_engine_version(Some(Duration::ZERO))` if you want to force + /// fetching from the EE as this will cache the result. + pub async fn get_client_version_v1(&self) -> Result, Error> { + let params = json!([*LIGHTHOUSE_JSON_CLIENT_VERSION]); + + let response: Vec = self + .rpc_request( + ENGINE_GET_CLIENT_VERSION_V1, + params, + ENGINE_GET_CLIENT_VERSION_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + response + .into_iter() + .map(TryInto::try_into) + .collect::, _>>() + .map_err(Error::InvalidClientVersion) + } + + pub async fn clear_engine_version_cache(&self) { + *self.engine_version_cache.lock().await = None; + } + + /// Returns the execution engine version resulting from a call to + /// engine_getClientVersionV1. If the version cache is not populated, or if it + /// is populated with a cached result of age >= `age_limit`, this method will + /// fetch the result from the execution engine and populate the cache before + /// returning it. Otherwise it will return the cached result from an earlier + /// call. 
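+    /// (For example, the graffiti calculator calls this with a six-epoch
+    /// staleness bound, `Some(epoch_duration * ENGINE_VERSION_AGE_LIMIT_EPOCH_MULTIPLE)`,
+    /// while its background refresh service passes `None` and relies on the
+    /// cache being cleared whenever the engine goes offline.)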
+ /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_version( + &self, + age_limit: Option, + ) -> Result, Error> { + // check engine capabilities first (avoids holding two locks at once) + let engine_capabilities = self.get_engine_capabilities(None).await?; + if !engine_capabilities.get_client_version_v1 { + // We choose an empty vec to denote that this method is not + // supported instead of an error since this method is optional + // & we don't want to log a warning and concern the user + return Ok(vec![]); + } + let mut lock = self.engine_version_cache.lock().await; + if let Some(lock) = lock + .as_ref() + .filter(|cached_response| !cached_response.older_than(age_limit)) + { + Ok(lock.data()) + } else { + let engine_version = self.get_client_version_v1().await?; + *lock = Some(CachedResponse::new(engine_version.clone())); + Ok(engine_version) + } + } + // automatically selects the latest version of // new_payload that the execution engine supports pub async fn new_payload( diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 9f2387ae314..d784aa4cd9c 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -747,3 +747,36 @@ pub mod serde_logs_bloom { .map_err(|e| serde::de::Error::custom(format!("invalid logs bloom: {:?}", e))) } } + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonClientVersionV1 { + pub code: String, + pub name: String, + pub version: String, + pub commit: String, +} + +impl From for JsonClientVersionV1 { + fn from(client_version: ClientVersionV1) -> Self { + Self { + code: client_version.code.to_string(), + name: client_version.name, + version: client_version.version, + commit: client_version.commit.to_string(), + } + } +} + +impl TryFrom for ClientVersionV1 { + type Error = String; + + fn try_from(json: JsonClientVersionV1) -> Result { + Ok(Self { + code: json.code.try_into()?, + name: json.name, + version: json.version, + commit: json.commit.try_into()?, + }) + } +} diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index bc8e4e31404..75d0b872cef 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -4,7 +4,7 @@ use crate::engine_api::{ EngineCapabilities, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, }; -use crate::HttpJsonRpc; +use crate::{ClientVersionV1, HttpJsonRpc}; use lru::LruCache; use slog::{debug, error, info, warn, Logger}; use std::future::Future; @@ -21,7 +21,7 @@ use types::ExecutionBlockHash; /// /// Since the size of each value is small (~800 bytes) a large number is used for safety. const PAYLOAD_ID_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(512); -const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes +const CACHED_RESPONSE_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes /// Stores the remembered state of a engine. 
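/// (Per the cache-action handling below: a `Synced` or `Syncing` result
/// refreshes the cached engine responses, while auth failures and other
/// errors clear them.)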
#[derive(Copy, Clone, PartialEq, Debug, Eq, Default)] @@ -34,11 +34,11 @@ enum EngineStateInternal { } #[derive(Copy, Clone, Debug, Default, Eq, PartialEq)] -enum CapabilitiesCacheAction { +enum ResponseCacheAction { #[default] None, - Update, - Clear, + Update, // Update cached responses + Clear, // Clear cached responses } /// A subset of the engine state to inform other services if the engine is online or offline. @@ -266,12 +266,12 @@ impl Engine { ); } state.update(EngineStateInternal::Synced); - (**state, CapabilitiesCacheAction::Update) + (**state, ResponseCacheAction::Update) } Err(EngineApiError::IsSyncing) => { let mut state = self.state.write().await; state.update(EngineStateInternal::Syncing); - (**state, CapabilitiesCacheAction::Update) + (**state, ResponseCacheAction::Update) } Err(EngineApiError::Auth(err)) => { error!( @@ -282,7 +282,7 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::AuthFailed); - (**state, CapabilitiesCacheAction::Clear) + (**state, ResponseCacheAction::Clear) } Err(e) => { error!( @@ -293,28 +293,37 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::Offline); - // need to clear the engine capabilities cache if we detect the - // execution engine is offline as it is likely the engine is being - // updated to a newer version with new capabilities - (**state, CapabilitiesCacheAction::Clear) + // need to clear cached responses if we detect the execution engine + // is offline as it is likely the engine is being updated to a newer + // version which might also have new capabilities + (**state, ResponseCacheAction::Clear) } }; // do this after dropping state lock guard to avoid holding two locks at once match cache_action { - CapabilitiesCacheAction::None => {} - CapabilitiesCacheAction::Update => { + ResponseCacheAction::None => {} + ResponseCacheAction::Update => { if let Err(e) = self - .get_engine_capabilities(Some(CACHED_ENGINE_CAPABILITIES_AGE_LIMIT)) + .get_engine_capabilities(Some(CACHED_RESPONSE_AGE_LIMIT)) .await { warn!(self.log, "Error during exchange capabilities"; "error" => ?e, ) + } else { + // no point in running this if there was an error fetching the capabilities + // as it will just result in an error again + let _ = self + .get_engine_version(Some(CACHED_RESPONSE_AGE_LIMIT)) + .await; } } - CapabilitiesCacheAction::Clear => self.api.clear_exchange_capabilties_cache().await, + ResponseCacheAction::Clear => { + self.api.clear_exchange_capabilties_cache().await; + self.api.clear_engine_version_cache().await; + } } debug!( @@ -340,6 +349,22 @@ impl Engine { self.api.get_engine_capabilities(age_limit).await } + /// Returns the execution engine version resulting from a call to + /// engine_clientVersionV1. If the version cache is not populated, or if it + /// is populated with a cached result of age >= `age_limit`, this method will + /// fetch the result from the execution engine and populate the cache before + /// returning it. Otherwise it will return the cached result from an earlier + /// call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_version( + &self, + age_limit: Option, + ) -> Result, EngineApiError> { + self.api.get_engine_version(age_limit).await + } + /// Run `func` on the node regardless of the node's current state. 
/// /// ## Note diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 3e7bf7f561d..60f450a39de 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -165,6 +165,17 @@ impl From for Error { } } +impl From for Error { + fn from(e: EngineError) -> Self { + match e { + // This removes an unnecessary layer of indirection. + // TODO (mark): consider refactoring these error enums + EngineError::Api { error } => Error::ApiError(error), + _ => Error::EngineError(Box::new(e)), + } + } +} + pub enum BlockProposalContentsType { Full(BlockProposalContents>), Blinded(BlockProposalContents>), @@ -1526,8 +1537,26 @@ impl ExecutionLayer { self.engine() .request(|engine| engine.get_engine_capabilities(age_limit)) .await - .map_err(Box::new) - .map_err(Error::EngineError) + .map_err(Into::into) + } + + /// Returns the execution engine version resulting from a call to + /// engine_clientVersionV1. If the version cache is not populated, or if it + /// is populated with a cached result of age >= `age_limit`, this method will + /// fetch the result from the execution engine and populate the cache before + /// returning it. Otherwise it will return the cached result from an earlier + /// call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_version( + &self, + age_limit: Option, + ) -> Result, Error> { + self.engine() + .request(|engine| engine.get_engine_version(age_limit)) + .await + .map_err(Into::into) } /// Used during block production to determine if the merge has been triggered. diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index e0ca07dcc6e..c16bd43c82c 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -1,7 +1,7 @@ use super::Context; use crate::engine_api::{http::*, *}; use crate::json_structures::*; -use crate::test_utils::DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI; +use crate::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI}; use serde::{de::DeserializeOwned, Deserialize}; use serde_json::Value as JsonValue; use std::sync::Arc; @@ -528,6 +528,9 @@ pub async fn handle_rpc( let engine_capabilities = ctx.engine_capabilities.read(); Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) } + ENGINE_GET_CLIENT_VERSION_V1 => { + Ok(serde_json::to_value([DEFAULT_CLIENT_VERSION.clone()]).unwrap()) + } ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1 => { #[derive(Deserialize)] #[serde(transparent)] diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 43a6ee8ac22..a6d47995af8 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -4,11 +4,13 @@ use crate::engine_api::auth::JwtKey; use crate::engine_api::{ auth::Auth, http::JSONRPC_VERSION, ExecutionBlock, PayloadStatusV1, PayloadStatusV1Status, }; +use crate::json_structures::JsonClientVersionV1; use bytes::Bytes; use environment::null_logger; use execution_block_generator::PoWBlock; use handle_rpc::handle_rpc; use kzg::Kzg; +use lazy_static::lazy_static; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -49,8 +51,18 @@ pub const DEFAULT_ENGINE_CAPABILITIES: 
EngineCapabilities = EngineCapabilities { get_payload_v1: true, get_payload_v2: true, get_payload_v3: true, + get_client_version_v1: true, }; +lazy_static! { + pub static ref DEFAULT_CLIENT_VERSION: JsonClientVersionV1 = JsonClientVersionV1 { + code: "MC".to_string(), // "mock client" + name: "Mock Execution Client".to_string(), + version: "0.1.0".to_string(), + commit: "0xabcdef01".to_string(), + }; +} + mod execution_block_generator; mod handle_rpc; mod hook; diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index d31b576989c..9a1d7df124c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -3,6 +3,7 @@ use beacon_chain::chain_config::{ DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_PARENT_THRESHOLD, }; +use beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_chain::TrustedSetup; use clap::ArgMatches; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; @@ -17,7 +18,6 @@ use lighthouse_network::ListenAddress; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; use slog::{info, warn, Logger}; -use std::cmp; use std::cmp::max; use std::fmt::Debug; use std::fs; @@ -27,7 +27,8 @@ use std::num::NonZeroU16; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; -use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAFFITI_BYTES_LEN}; +use types::graffiti::GraffitiString; +use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes}; /// Gets the fully-initialized global client. /// @@ -576,24 +577,16 @@ pub fn get_config( client_config.chain.genesis_backfill = true; } - let raw_graffiti = if let Some(graffiti) = cli_args.value_of("graffiti") { - if graffiti.len() > GRAFFITI_BYTES_LEN { - return Err(format!( - "Your graffiti is too long! {} bytes maximum!", - GRAFFITI_BYTES_LEN - )); - } - - graffiti.as_bytes() + let beacon_graffiti = if let Some(graffiti) = cli_args.value_of("graffiti") { + GraffitiOrigin::UserSpecified(GraffitiString::from_str(graffiti)?.into()) } else if cli_args.is_present("private") { - b"" + // When 'private' flag is present, use a zero-initialized bytes array. + GraffitiOrigin::UserSpecified(GraffitiString::empty().into()) } else { - lighthouse_version::VERSION.as_bytes() + // Use the default lighthouse graffiti if no user-specified graffiti flags are present + GraffitiOrigin::default() }; - - let trimmed_graffiti_len = cmp::min(raw_graffiti.len(), GRAFFITI_BYTES_LEN); - client_config.graffiti.0[..trimmed_graffiti_len] - .copy_from_slice(&raw_graffiti[..trimmed_graffiti_len]); + client_config.beacon_graffiti = beacon_graffiti; if let Some(wss_checkpoint) = cli_args.value_of("wss-checkpoint") { let mut split = wss_checkpoint.split(':'); diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 5387d322e96..985eaff1b59 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -21,6 +21,24 @@ pub const VERSION: &str = git_version!( fallback = "Lighthouse/v5.1.3" ); +/// Returns the first eight characters of the latest commit hash for this build. +/// +/// No indication is given if the tree is dirty. This is part of the standard +/// for reporting the client version to the execution engine. 
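+///
+/// (With `--always` and `--abbrev=8`, plus a `--match` pattern that can never
+/// match any tag, `git describe` degrades to the bare abbreviated commit hash,
+/// e.g. "abcdef01"; the "00000000" fallback covers builds without git metadata.)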
+pub const COMMIT_PREFIX: &str = git_version!( + args = [ + "--always", + "--abbrev=8", + // NOTE: using --match instead of --exclude for compatibility with old Git + "--match=thiswillnevermatchlol" + ], + prefix = "", + suffix = "", + cargo_prefix = "", + cargo_suffix = "", + fallback = "00000000" +); + /// Returns `VERSION`, but with platform information appended to the end. /// /// ## Example diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index bd4abe37d8f..c7a0081c9f7 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -47,6 +47,12 @@ impl Into<[u8; GRAFFITI_BYTES_LEN]> for Graffiti { #[serde(transparent)] pub struct GraffitiString(String); +impl GraffitiString { + pub fn empty() -> Self { + Self(String::new()) + } +} + impl FromStr for GraffitiString { type Err = String; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 9983717cb1e..9f1e3d235d9 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -5,9 +5,11 @@ use beacon_node::beacon_chain::chain_config::{ DisallowedReOrgOffsets, DEFAULT_RE_ORG_CUTOFF_DENOMINATOR, DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, }; +use beacon_node::beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_processor::BeaconProcessorConfig; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; +use lighthouse_version; use std::fs::File; use std::io::{Read, Write}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; @@ -296,13 +298,36 @@ fn graffiti_flag() { .flag("graffiti", Some("nice-graffiti")) .run_with_zero_port() .with_config(|config| { + assert!(matches!( + config.beacon_graffiti, + GraffitiOrigin::UserSpecified(_) + )); assert_eq!( - config.graffiti.to_string(), - "0x6e6963652d677261666669746900000000000000000000000000000000000000" + config.beacon_graffiti.graffiti().to_string(), + "0x6e6963652d677261666669746900000000000000000000000000000000000000", ); }); } +#[test] +fn default_graffiti() { + use types::GRAFFITI_BYTES_LEN; + // test default graffiti when no graffiti flags are provided + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(matches!( + config.beacon_graffiti, + GraffitiOrigin::Calculated(_) + )); + let version_bytes = lighthouse_version::VERSION.as_bytes(); + let trimmed_len = std::cmp::min(version_bytes.len(), GRAFFITI_BYTES_LEN); + let mut bytes = [0u8; GRAFFITI_BYTES_LEN]; + bytes[..trimmed_len].copy_from_slice(&version_bytes[..trimmed_len]); + assert_eq!(config.beacon_graffiti.graffiti().0, bytes); + }); +} + #[test] fn trusted_peers_flag() { let peers = vec![PeerId::random(), PeerId::random()]; @@ -1201,7 +1226,17 @@ fn private_flag() { CommandLineTest::new() .flag("private", None) .run_with_zero_port() - .with_config(|config| assert!(config.network.private)); + .with_config(|config| { + assert!(config.network.private); + assert!(matches!( + config.beacon_graffiti, + GraffitiOrigin::UserSpecified(_) + )); + assert_eq!( + config.beacon_graffiti.graffiti().to_string(), + "0x0000000000000000000000000000000000000000000000000000000000000000".to_string(), + ); + }); } #[test] fn zero_ports_flag() { From dd340eebdc304d79bc5c391d293d5e773fc5089e Mon Sep 17 00:00:00 2001 From: realbigsean Date: Thu, 25 Apr 2024 13:11:01 -0400 Subject: [PATCH 11/16] Fix execution integration tests (#5647) * update waiting status * revert to old nethermind version --- testing/execution_engine_integration/src/test_rig.rs | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 0103f7074b5..c7d5e704524 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -180,7 +180,7 @@ impl TestRig { // Run the routine to check for online nodes. pair.execution_layer.watchdog_task().await; - if pair.execution_layer.is_synced().await { + if !pair.execution_layer.is_offline_or_erroring().await { break; } else if start_instant + EXECUTION_ENGINE_START_TIMEOUT > Instant::now() { sleep(Duration::from_millis(500)).await; From 320345695d3a227571d5c1a2a976aa0feaf7ecd1 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Thu, 25 Apr 2024 14:33:03 -0400 Subject: [PATCH 12/16] Add electra presets to beacon API (#5630) * add presets to API * add extra fields to config spec in beacon API * remove unused * add mainnet presets for gnosis and fix minimal preset default values --- consensus/types/presets/gnosis/electra.yaml | 46 +++++++++++++++++++- consensus/types/presets/mainnet/electra.yaml | 44 ++++++++++++++++++- consensus/types/presets/minimal/electra.yaml | 44 ++++++++++++++++++- consensus/types/src/chain_spec.rs | 2 + consensus/types/src/config_and_preset.rs | 12 ++++- consensus/types/src/consts.rs | 3 ++ consensus/types/src/eth_spec.rs | 10 ++--- consensus/types/src/preset.rs | 43 ++++++++++++++++-- 8 files changed, 190 insertions(+), 14 deletions(-) diff --git a/consensus/types/presets/gnosis/electra.yaml b/consensus/types/presets/gnosis/electra.yaml index cafdcbbf8d3..72c626ded2f 100644 --- a/consensus/types/presets/gnosis/electra.yaml +++ b/consensus/types/presets/gnosis/electra.yaml @@ -1,3 +1,45 @@ -# Gnosis preset - Electra +# Mainnet preset - Electra -ELECTRA_PLACEHOLDER: 0 +# Gwei values +# --------------------------------------------------------------- +# 2**5 * 10**9 (= 32,000,000,000) Gwei +MIN_ACTIVATION_BALANCE: 32000000000 +# 2**11 * 10**9 (= 2,048,000,000,000) Gwei +MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 + +# State list lengths +# --------------------------------------------------------------- +# `uint64(2**27)` (= 134,217,728) +PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +# `uint64(2**27)` (= 134,217,728) +PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 +# `uint64(2**18)` (= 262,144) +PENDING_CONSOLIDATIONS_LIMIT: 262144 + +# Reward and penalty quotients +# --------------------------------------------------------------- +# `uint64(2**12)` (= 4,096) +MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA: 4096 +# `uint64(2**12)` (= 4,096) +WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 + +# # Max operations per block +# --------------------------------------------------------------- +# `uint64(2**0)` (= 1) +MAX_ATTESTER_SLASHINGS_ELECTRA: 1 +# `uint64(2**3)` (= 8) +MAX_ATTESTATIONS_ELECTRA: 8 +# `uint64(2**0)` (= 1) +MAX_CONSOLIDATIONS: 1 + +# Execution +# --------------------------------------------------------------- +# 2**13 (= 8192) receipts +MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 8192 +# 2**4 (= 16) withdrawal requests +MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**3 ( = 8) pending withdrawals +MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 diff --git a/consensus/types/presets/mainnet/electra.yaml b/consensus/types/presets/mainnet/electra.yaml index 64d8b97b631..72c626ded2f 100644 --- a/consensus/types/presets/mainnet/electra.yaml +++ b/consensus/types/presets/mainnet/electra.yaml @@ -1,3 
+1,45 @@ # Mainnet preset - Electra -ELECTRA_PLACEHOLDER: 0 +# Gwei values +# --------------------------------------------------------------- +# 2**5 * 10**9 (= 32,000,000,000) Gwei +MIN_ACTIVATION_BALANCE: 32000000000 +# 2**11 * 10**9 (= 2,048,000,000,000) Gwei +MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 + +# State list lengths +# --------------------------------------------------------------- +# `uint64(2**27)` (= 134,217,728) +PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +# `uint64(2**27)` (= 134,217,728) +PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 +# `uint64(2**18)` (= 262,144) +PENDING_CONSOLIDATIONS_LIMIT: 262144 + +# Reward and penalty quotients +# --------------------------------------------------------------- +# `uint64(2**12)` (= 4,096) +MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA: 4096 +# `uint64(2**12)` (= 4,096) +WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 + +# # Max operations per block +# --------------------------------------------------------------- +# `uint64(2**0)` (= 1) +MAX_ATTESTER_SLASHINGS_ELECTRA: 1 +# `uint64(2**3)` (= 8) +MAX_ATTESTATIONS_ELECTRA: 8 +# `uint64(2**0)` (= 1) +MAX_CONSOLIDATIONS: 1 + +# Execution +# --------------------------------------------------------------- +# 2**13 (= 8192) receipts +MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 8192 +# 2**4 (= 16) withdrawal requests +MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**3 ( = 8) pending withdrawals +MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 diff --git a/consensus/types/presets/minimal/electra.yaml b/consensus/types/presets/minimal/electra.yaml index 3baa7fa8161..11aa5e1f50e 100644 --- a/consensus/types/presets/minimal/electra.yaml +++ b/consensus/types/presets/minimal/electra.yaml @@ -1,3 +1,45 @@ # Minimal preset - Electra -ELECTRA_PLACEHOLDER: 0 +# Gwei values +# --------------------------------------------------------------- +# 2**5 * 10**9 (= 32,000,000,000) Gwei +MIN_ACTIVATION_BALANCE: 32000000000 +# 2**11 * 10**9 (= 2,048,000,000,000) Gwei +MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 + +# State list lengths +# --------------------------------------------------------------- +# `uint64(2**27)` (= 134,217,728) +PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +# [customized] `uint64(2**6)` (= 64) +PENDING_PARTIAL_WITHDRAWALS_LIMIT: 64 +# [customized] `uint64(2**6)` (= 64) +PENDING_CONSOLIDATIONS_LIMIT: 64 + +# Reward and penalty quotients +# --------------------------------------------------------------- +# `uint64(2**12)` (= 4,096) +MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA: 4096 +# `uint64(2**12)` (= 4,096) +WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 + +# # Max operations per block +# --------------------------------------------------------------- +# `uint64(2**0)` (= 1) +MAX_ATTESTER_SLASHINGS_ELECTRA: 1 +# `uint64(2**3)` (= 8) +MAX_ATTESTATIONS_ELECTRA: 8 +# `uint64(2**0)` (= 1) +MAX_CONSOLIDATIONS: 1 + +# Execution +# --------------------------------------------------------------- +# [customized] +MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 4 +# [customized] 2**1 (= 2) withdrawal requests +MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 2 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**0 ( = 1) pending withdrawals +MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 1 diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index e4f27d6873c..c55fe77a1b9 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -850,6 +850,8 @@ impl 
ChainSpec { // Electra electra_fork_version: [0x05, 0x00, 0x00, 0x01], electra_fork_epoch: None, + max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 0) + .expect("pow does not overflow"), // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index f2a6e616daa..6fc6e0642ea 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,6 +1,6 @@ use crate::{ - consts::altair, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, - DenebPreset, ElectraPreset, EthSpec, ForkName, + consts::altair, consts::deneb, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, + ChainSpec, Config, DenebPreset, ElectraPreset, EthSpec, ForkName, }; use maplit::hashmap; use serde::{Deserialize, Serialize}; @@ -100,6 +100,7 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { let u8_hex = |v: u8| hex_string(&v.to_le_bytes()); hashmap! { "bls_withdrawal_prefix".to_uppercase() => u8_hex(spec.bls_withdrawal_prefix_byte), + "eth1_address_withdrawal_prefix".to_uppercase() => u8_hex(spec.eth1_address_withdrawal_prefix_byte), "domain_beacon_proposer".to_uppercase() => u32_hex(spec.domain_beacon_proposer), "domain_beacon_attester".to_uppercase() => u32_hex(spec.domain_beacon_attester), "domain_randao".to_uppercase()=> u32_hex(spec.domain_randao), @@ -119,6 +120,13 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), "target_aggregators_per_sync_subcommittee".to_uppercase() => altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE.to_string().into(), + // Deneb + "versioned_hash_version_kzg".to_uppercase() => deneb::VERSIONED_HASH_VERSION_KZG.to_string().into(), + // Electra + "compounding_withdrawal_prefix".to_uppercase() => u8_hex(spec.compounding_withdrawal_prefix_byte), + "unset_deposit_receipts_start_index".to_uppercase() => spec.unset_deposit_receipts_start_index.to_string().into(), + "full_exit_request_amount".to_uppercase() => spec.full_exit_request_amount.to_string().into(), + "domain_consolidation".to_uppercase()=> u32_hex(spec.domain_consolidation), } } diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs index a9377bc3e00..31d4c5eac59 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/consts.rs @@ -22,3 +22,6 @@ pub mod altair { pub mod merge { pub const INTERVALS_PER_SLOT: u64 = 3; } +pub mod deneb { + pub use crate::VERSIONED_HASH_VERSION_KZG; +} diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index a700c5e9abb..6749a3f3a2b 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -418,6 +418,10 @@ impl EthSpec for MinimalEthSpec { type BytesPerBlob = U131072; type MaxBlobCommitmentsPerBlock = U16; type KzgCommitmentInclusionProofDepth = U9; + type PendingPartialWithdrawalsLimit = U64; + type PendingConsolidationsLimit = U64; + type MaxDepositReceiptsPerPayload = U4; + type MaxWithdrawalRequestsPerPayload = U2; params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -442,13 +446,9 @@ impl EthSpec for MinimalEthSpec { MaxBlobsPerBlock, BytesPerFieldElement, PendingBalanceDepositsLimit, - PendingPartialWithdrawalsLimit, - PendingConsolidationsLimit, MaxConsolidations, - MaxDepositReceiptsPerPayload, MaxAttesterSlashingsElectra, - MaxAttestationsElectra, - MaxWithdrawalRequestsPerPayload + MaxAttestationsElectra }); fn default_spec() -> 
ChainSpec { diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 626a45d971d..f4008d62e1d 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -230,13 +230,50 @@ impl DenebPreset { #[serde(rename_all = "UPPERCASE")] pub struct ElectraPreset { #[serde(with = "serde_utils::quoted_u64")] - pub electra_placeholder: u64, + pub min_activation_balance: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_effective_balance_electra: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub min_slashing_penalty_quotient_electra: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub whistleblower_reward_quotient_electra: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_pending_partials_per_withdrawals_sweep: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub pending_balance_deposits_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub pending_partial_withdrawals_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub pending_consolidations_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_consolidations: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_deposit_receipts_per_payload: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_attester_slashings_electra: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_attestations_electra: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_withdrawal_requests_per_payload: u64, } impl ElectraPreset { - pub fn from_chain_spec(_spec: &ChainSpec) -> Self { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { Self { - electra_placeholder: 0, + min_activation_balance: spec.min_activation_balance, + max_effective_balance_electra: spec.max_effective_balance_electra, + min_slashing_penalty_quotient_electra: spec.min_slashing_penalty_quotient_electra, + whistleblower_reward_quotient_electra: spec.whistleblower_reward_quotient_electra, + max_pending_partials_per_withdrawals_sweep: spec + .max_pending_partials_per_withdrawals_sweep, + pending_balance_deposits_limit: E::pending_balance_deposits_limit() as u64, + pending_partial_withdrawals_limit: E::pending_partial_withdrawals_limit() as u64, + pending_consolidations_limit: E::pending_consolidations_limit() as u64, + max_consolidations: E::max_consolidations() as u64, + max_deposit_receipts_per_payload: E::max_deposit_receipts_per_payload() as u64, + max_attester_slashings_electra: E::max_attester_slashings_electra() as u64, + max_attestations_electra: E::max_attestations_electra() as u64, + max_withdrawal_requests_per_payload: E::max_withdrawal_requests_per_payload() as u64, } } } From 13f94ef0f324d98d313c6e13e1833f1825f245e1 Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 26 Apr 2024 06:19:41 +1000 Subject: [PATCH 13/16] Rename `Merge` to `Bellatrix` (#5601) * Rename Merge to Bellatrix * Remove tree-hash-cache which got readded from the rebase --- Makefile | 2 +- .../beacon_chain/src/attestation_rewards.rs | 2 +- .../src/attestation_verification.rs | 4 +- .../beacon_chain/src/beacon_block_streamer.rs | 6 +- beacon_node/beacon_chain/src/beacon_chain.rs | 16 +-- ...ge_readiness.rs => bellatrix_readiness.rs} | 34 +++--- .../beacon_chain/src/capella_readiness.rs | 2 +- .../overflow_lru_cache.rs | 10 +- .../beacon_chain/src/deneb_readiness.rs | 2 +- .../beacon_chain/src/electra_readiness.rs | 2 +- .../beacon_chain/src/execution_payload.rs | 14 +-- beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 4 +- .../tests/attestation_verification.rs 
| 20 ++-- .../tests/{merge.rs => bellatrix.rs} | 42 ++++---- beacon_node/beacon_chain/tests/capella.rs | 40 +++---- beacon_node/beacon_chain/tests/main.rs | 2 +- .../beacon_chain/tests/validator_monitor.rs | 4 +- beacon_node/client/src/notifier.rs | 24 ++--- beacon_node/execution_layer/src/block_hash.rs | 2 +- beacon_node/execution_layer/src/engine_api.rs | 69 ++++++------ .../execution_layer/src/engine_api/http.rs | 26 ++--- .../src/engine_api/json_structures.rs | 14 +-- .../src/engine_api/new_payload_request.rs | 40 ++++--- beacon_node/execution_layer/src/lib.rs | 44 ++++---- .../test_utils/execution_block_generator.rs | 22 ++-- .../src/test_utils/handle_rpc.rs | 6 +- .../src/test_utils/mock_builder.rs | 30 +++--- .../src/test_utils/mock_execution_layer.rs | 4 +- .../http_api/src/build_block_contents.rs | 2 +- beacon_node/http_api/src/builder_states.rs | 2 +- beacon_node/http_api/src/lib.rs | 2 +- beacon_node/http_api/src/publish_blocks.rs | 10 +- beacon_node/lighthouse_network/src/config.rs | 6 +- .../lighthouse_network/src/rpc/codec/base.rs | 6 +- .../src/rpc/codec/ssz_snappy.rs | 86 ++++++++------- .../lighthouse_network/src/rpc/protocol.rs | 40 ++++--- .../lighthouse_network/src/types/pubsub.rs | 10 +- .../lighthouse_network/src/types/topics.rs | 2 +- .../lighthouse_network/tests/common.rs | 6 +- .../lighthouse_network/tests/rpc_tests.rs | 59 +++++----- .../network_beacon_processor/rpc_methods.rs | 7 +- .../store/src/impls/execution_payload.rs | 9 +- beacon_node/store/src/partial_beacon_state.rs | 30 +++--- common/eth2/src/types.rs | 12 +-- .../gnosis/config.yaml | 2 +- .../holesky/config.yaml | 2 +- .../sepolia/config.yaml | 2 +- common/slot_clock/src/lib.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 4 +- .../common/get_attestation_participation.rs | 2 +- .../src/common/slash_validator.rs | 2 +- consensus/state_processing/src/genesis.rs | 6 +- .../src/per_block_processing.rs | 10 +- .../process_operations.rs | 2 +- .../per_block_processing/signature_sets.rs | 2 +- .../verify_attestation.rs | 2 +- .../src/per_epoch_processing.rs | 2 +- .../src/per_slot_processing.rs | 2 +- consensus/state_processing/src/upgrade.rs | 4 +- .../src/upgrade/{merge.rs => bellatrix.rs} | 10 +- .../state_processing/src/upgrade/capella.rs | 4 +- consensus/types/src/beacon_block.rs | 24 ++--- consensus/types/src/beacon_block_body.rs | 45 ++++---- consensus/types/src/beacon_state.rs | 50 ++++----- .../progressive_balances_cache.rs | 2 +- consensus/types/src/beacon_state/tests.rs | 6 +- consensus/types/src/builder_bid.rs | 14 +-- consensus/types/src/chain_spec.rs | 22 ++-- consensus/types/src/consts.rs | 2 +- consensus/types/src/eth_spec.rs | 2 +- consensus/types/src/execution_payload.rs | 16 +-- .../types/src/execution_payload_header.rs | 28 +++-- consensus/types/src/fork_context.rs | 6 +- consensus/types/src/fork_name.rs | 28 ++--- .../types/src/fork_versioned_response.rs | 9 +- consensus/types/src/lib.rs | 28 ++--- consensus/types/src/light_client_bootstrap.rs | 6 +- .../types/src/light_client_finality_update.rs | 6 +- consensus/types/src/light_client_header.rs | 8 +- .../src/light_client_optimistic_update.rs | 20 ++-- consensus/types/src/light_client_update.rs | 4 +- consensus/types/src/payload.rs | 102 ++++++++++-------- consensus/types/src/signed_beacon_block.rs | 49 +++++---- consensus/types/src/voluntary_exit.rs | 2 +- lcli/src/create_payload_header.rs | 11 +- lcli/src/main.rs | 2 +- lcli/src/new_testnet.rs | 18 ++-- lcli/src/parse_ssz.rs | 12 ++- .../environment/tests/testnet_dir/config.yaml | 2 
+- lighthouse/tests/beacon_node.rs | 14 +-- testing/ef_tests/src/cases/common.rs | 4 +- .../ef_tests/src/cases/epoch_processing.rs | 16 +-- testing/ef_tests/src/cases/fork.rs | 4 +- .../src/cases/merkle_proof_validity.rs | 2 +- testing/ef_tests/src/cases/operations.rs | 23 ++-- testing/ef_tests/src/cases/transition.rs | 2 +- testing/ef_tests/src/handler.rs | 8 +- testing/ef_tests/src/type_name.rs | 6 +- testing/ef_tests/tests/tests.rs | 34 +++--- testing/web3signer_tests/src/lib.rs | 41 ++++--- .../src/signing_method/web3signer.rs | 2 +- validator_client/src/validator_store.rs | 2 +- watch/src/database/mod.rs | 2 +- 104 files changed, 808 insertions(+), 714 deletions(-) rename beacon_node/beacon_chain/src/{merge_readiness.rs => bellatrix_readiness.rs} (91%) rename beacon_node/beacon_chain/tests/{merge.rs => bellatrix.rs} (81%) rename consensus/state_processing/src/upgrade/{merge.rs => bellatrix.rs} (90%) diff --git a/Makefile b/Makefile index 4072ab1e6d8..12d33cc3a8b 100644 --- a/Makefile +++ b/Makefile @@ -39,7 +39,7 @@ PROFILE ?= release # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair merge capella deneb electra +FORKS=phase0 altair bellatrix capella deneb electra # Extra flags for Cargo CARGO_INSTALL_EXTRA_FLAGS?= diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 491b7ef7da9..d48a83130e6 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -54,7 +54,7 @@ impl BeaconChain { match state { BeaconState::Base(_) => self.compute_attestation_rewards_base(state, validators), BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => self.compute_attestation_rewards_altair(state, validators), diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index f3bde8678e1..471c43d94f8 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -1063,7 +1063,9 @@ pub fn verify_propagation_slot_range( let current_fork = spec.fork_name_at_slot::(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?); let earliest_permissible_slot = match current_fork { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => one_epoch_prior, + ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { + one_epoch_prior + } // EIP-7045 ForkName::Deneb | ForkName::Electra => one_epoch_prior .epoch(E::slots_per_epoch()) diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index 4f413ce2a86..0c92b7c1f62 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -14,8 +14,8 @@ use types::{ SignedBlindedBeaconBlock, Slot, }; use types::{ - ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadElectra, ExecutionPayloadHeader, - ExecutionPayloadMerge, + ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadElectra, + ExecutionPayloadHeader, }; #[derive(PartialEq)] @@ -95,7 +95,7 @@ fn reconstruct_default_header_block( .map_err(BeaconChainError::InconsistentFork)?; let payload: ExecutionPayload = match fork { - ForkName::Merge 
=> ExecutionPayloadMerge::default().into(), + ForkName::Bellatrix => ExecutionPayloadBellatrix::default().into(), ForkName::Capella => ExecutionPayloadCapella::default().into(), ForkName::Deneb => ExecutionPayloadDeneb::default().into(), ForkName::Electra => ExecutionPayloadElectra::default().into(), diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index dd1fe59b922..9c7ded313b6 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2521,7 +2521,7 @@ impl BeaconChain { /// Check if the current slot is greater than or equal to the Capella fork epoch. pub fn current_slot_is_post_capella(&self) -> Result { let current_fork = self.spec.fork_name_at_slot::(self.slot()?); - if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { + if let ForkName::Base | ForkName::Altair | ForkName::Bellatrix = current_fork { Ok(false) } else { Ok(true) @@ -4823,7 +4823,7 @@ impl BeaconChain { // allows it to run concurrently with things like attestation packing. let prepare_payload_handle = match &state { BeaconState::Base(_) | BeaconState::Altair(_) => None, - BeaconState::Merge(_) + BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => { @@ -5083,17 +5083,17 @@ impl BeaconChain { None, Uint256::zero(), ), - BeaconState::Merge(_) => { + BeaconState::Bellatrix(_) => { let block_proposal_contents = block_contents.ok_or(BlockProductionError::MissingExecutionPayload)?; let execution_payload_value = block_proposal_contents.block_value().to_owned(); ( - BeaconBlock::Merge(BeaconBlockMerge { + BeaconBlock::Bellatrix(BeaconBlockBellatrix { slot, proposer_index, parent_root, state_root: Hash256::zero(), - body: BeaconBlockBodyMerge { + body: BeaconBlockBodyBellatrix { randao_reveal, eth1_data, graffiti, @@ -5541,7 +5541,7 @@ impl BeaconChain { } else { let prepare_slot_fork = self.spec.fork_name_at_slot::(prepare_slot); let withdrawals = match prepare_slot_fork { - ForkName::Base | ForkName::Altair | ForkName::Merge => None, + ForkName::Base | ForkName::Altair | ForkName::Bellatrix => None, ForkName::Capella | ForkName::Deneb | ForkName::Electra => { let chain = self.clone(); self.spawn_blocking_handle( @@ -5556,7 +5556,7 @@ impl BeaconChain { }; let parent_beacon_block_root = match prepare_slot_fork { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => None, + ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => None, ForkName::Deneb | ForkName::Electra => { Some(pre_payload_attributes.parent_beacon_block_root) } @@ -6599,7 +6599,7 @@ impl BeaconChain { match fork_name { ForkName::Altair - | ForkName::Merge + | ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/bellatrix_readiness.rs similarity index 91% rename from beacon_node/beacon_chain/src/merge_readiness.rs rename to beacon_node/beacon_chain/src/bellatrix_readiness.rs index 52a5ea912e0..bf9e8481261 100644 --- a/beacon_node/beacon_chain/src/merge_readiness.rs +++ b/beacon_node/beacon_chain/src/bellatrix_readiness.rs @@ -11,7 +11,7 @@ use types::*; /// The time before the Bellatrix fork when we will start issuing warnings about preparation. 
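The hunks above illustrate the pattern this rename leans on throughout the codebase: fork-gated behaviour is selected via exhaustive matches over the fork enum, so renaming the `Merge` variant turns every stale call site into a compile error. A minimal sketch of that pattern, using simplified stand-ins rather than Lighthouse's actual types:

// Simplified stand-in for Lighthouse's `ForkName`; the exhaustive matches
// below mean renaming a variant forces every match arm to be revisited.
#[derive(Clone, Copy, Debug, PartialEq)]
enum ForkName {
    Base,
    Altair,
    Bellatrix,
    Capella,
    Deneb,
    Electra,
}

// Withdrawals only exist in payloads from Capella onwards.
fn expects_withdrawals(fork: ForkName) -> bool {
    match fork {
        ForkName::Base | ForkName::Altair | ForkName::Bellatrix => false,
        ForkName::Capella | ForkName::Deneb | ForkName::Electra => true,
    }
}

// `parent_beacon_block_root` only enters payload attributes from Deneb onwards.
fn expects_parent_beacon_block_root(fork: ForkName) -> bool {
    match fork {
        ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => false,
        ForkName::Deneb | ForkName::Electra => true,
    }
}

fn main() {
    assert!(!expects_withdrawals(ForkName::Bellatrix));
    assert!(expects_withdrawals(ForkName::Capella));
    assert!(expects_parent_beacon_block_root(ForkName::Deneb));
}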
pub const SECONDS_IN_A_WEEK: u64 = 604800;
-pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2;
+pub const BELLATRIX_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2;

 #[derive(Default, Debug, Serialize, Deserialize)]
 pub struct MergeConfig {
@@ -81,7 +81,7 @@ impl MergeConfig {
 #[derive(Debug, Serialize, Deserialize)]
 #[serde(rename_all = "snake_case")]
 #[serde(tag = "type")]
-pub enum MergeReadiness {
+pub enum BellatrixReadiness {
     /// The node is ready, as far as we can tell.
     Ready {
         config: MergeConfig,
@@ -94,29 +94,29 @@ pub enum MergeReadiness {
     NoExecutionEndpoint,
 }

-impl fmt::Display for MergeReadiness {
+impl fmt::Display for BellatrixReadiness {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
-            MergeReadiness::Ready {
+            BellatrixReadiness::Ready {
                 config: params,
                 current_difficulty,
             } => {
                 write!(
                     f,
-                    "This node appears ready for the merge. \
+                    "This node appears ready for Bellatrix. \
                     Params: {}, current_difficulty: {:?}",
                     params, current_difficulty
                 )
             }
-            MergeReadiness::NotSynced => write!(
+            BellatrixReadiness::NotSynced => write!(
                 f,
                 "The execution endpoint is connected and configured, \
                 however it is not yet synced"
             ),
-            MergeReadiness::NoExecutionEndpoint => write!(
+            BellatrixReadiness::NoExecutionEndpoint => write!(
                 f,
                 "The --execution-endpoint flag is not specified, this is a \
-                requirement for the merge"
+                requirement for Bellatrix"
             ),
         }
     }
@@ -143,12 +143,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Returns `true` if user has an EL configured, or if the Bellatrix fork has occurred or will
-    /// occur within `MERGE_READINESS_PREPARATION_SECONDS`.
+    /// occur within `BELLATRIX_READINESS_PREPARATION_SECONDS`.
     pub fn is_time_to_prepare_for_bellatrix(&self, current_slot: Slot) -> bool {
         if let Some(bellatrix_epoch) = self.spec.bellatrix_fork_epoch {
             let bellatrix_slot = bellatrix_epoch.start_slot(T::EthSpec::slots_per_epoch());
-            let merge_readiness_preparation_slots =
-                MERGE_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot;
+            let bellatrix_readiness_preparation_slots =
+                BELLATRIX_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot;

             if self.execution_layer.is_some() {
                 // The user has already configured an execution layer, start checking for readiness
                 true
             } else {
                 // Return `true` if Bellatrix has happened or is within the preparation time.
-                current_slot + merge_readiness_preparation_slots > bellatrix_slot
+                current_slot + bellatrix_readiness_preparation_slots > bellatrix_slot
             }
         } else {
             // The Bellatrix fork epoch has not been defined yet, no need to prepare.
             false
         }
     }

-    /// Attempts to connect to the EL and confirm that it is ready for the merge.
-    pub async fn check_merge_readiness(&self, current_slot: Slot) -> MergeReadiness {
+    /// Attempts to connect to the EL and confirm that it is ready for Bellatrix.
+    pub async fn check_bellatrix_readiness(&self, current_slot: Slot) -> BellatrixReadiness {
         if let Some(el) = self.execution_layer.as_ref() {
             if !el.is_synced_for_notifier(current_slot).await {
                 // The EL is not synced.
-                return MergeReadiness::NotSynced;
+                return BellatrixReadiness::NotSynced;
             }
             let params = MergeConfig::from_chainspec(&self.spec);
             let current_difficulty = el.get_current_difficulty().await.ok();
-            MergeReadiness::Ready {
+            BellatrixReadiness::Ready {
                 config: params,
                 current_difficulty,
             }
         } else {
             // There is no EL configured.
- MergeReadiness::NoExecutionEndpoint + BellatrixReadiness::NoExecutionEndpoint } } diff --git a/beacon_node/beacon_chain/src/capella_readiness.rs b/beacon_node/beacon_chain/src/capella_readiness.rs index cde71d462d7..88af7db0aaa 100644 --- a/beacon_node/beacon_chain/src/capella_readiness.rs +++ b/beacon_node/beacon_chain/src/capella_readiness.rs @@ -10,7 +10,7 @@ use std::time::Duration; use types::*; /// The time before the Capella fork when we will start issuing warnings about preparation. -use super::merge_readiness::SECONDS_IN_A_WEEK; +use super::bellatrix_readiness::SECONDS_IN_A_WEEK; pub const CAPELLA_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 3a8bccbad5c..91c776adc10 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -989,17 +989,17 @@ mod test { // go to bellatrix slot harness.extend_to_slot(bellatrix_fork_slot).await; - let merge_head = &harness.chain.head_snapshot().beacon_block; - assert!(merge_head.as_merge().is_ok()); - assert_eq!(merge_head.slot(), bellatrix_fork_slot); + let bellatrix_head = &harness.chain.head_snapshot().beacon_block; + assert!(bellatrix_head.as_bellatrix().is_ok()); + assert_eq!(bellatrix_head.slot(), bellatrix_fork_slot); assert!( - merge_head + bellatrix_head .message() .body() .execution_payload() .unwrap() .is_default_with_empty_roots(), - "Merge head is default payload" + "Bellatrix head is default payload" ); // Trigger the terminal PoW block. harness diff --git a/beacon_node/beacon_chain/src/deneb_readiness.rs b/beacon_node/beacon_chain/src/deneb_readiness.rs index 1ba6fe3ea6c..e11070a1f4f 100644 --- a/beacon_node/beacon_chain/src/deneb_readiness.rs +++ b/beacon_node/beacon_chain/src/deneb_readiness.rs @@ -10,7 +10,7 @@ use std::time::Duration; use types::*; /// The time before the Deneb fork when we will start issuing warnings about preparation. -use super::merge_readiness::SECONDS_IN_A_WEEK; +use super::bellatrix_readiness::SECONDS_IN_A_WEEK; pub const DENEB_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; diff --git a/beacon_node/beacon_chain/src/electra_readiness.rs b/beacon_node/beacon_chain/src/electra_readiness.rs index 0e911bf37ec..42ee743fe6b 100644 --- a/beacon_node/beacon_chain/src/electra_readiness.rs +++ b/beacon_node/beacon_chain/src/electra_readiness.rs @@ -11,7 +11,7 @@ use std::time::Duration; use types::*; /// The time before the Electra fork when we will start issuing warnings about preparation. 
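All of the `*_readiness.rs` modules share the same two-week warning window derived from `SECONDS_IN_A_WEEK`. A back-of-the-envelope sketch of the slot arithmetic in `is_time_to_prepare_for_bellatrix` above, assuming mainnet's 12-second slots and a deliberately simplified signature:

const SECONDS_IN_A_WEEK: u64 = 604_800;
const BELLATRIX_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2;

// Start preparing once the fork slot is within the preparation window (or past).
fn is_time_to_prepare(current_slot: u64, fork_slot: u64, seconds_per_slot: u64) -> bool {
    let preparation_slots = BELLATRIX_READINESS_PREPARATION_SECONDS / seconds_per_slot;
    current_slot + preparation_slots > fork_slot
}

fn main() {
    // Two weeks of 12-second slots is 100,800 slots.
    assert_eq!(BELLATRIX_READINESS_PREPARATION_SECONDS / 12, 100_800);
    assert!(is_time_to_prepare(0, 100_000, 12));
    assert!(!is_time_to_prepare(0, 200_000, 12));
}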
-use super::merge_readiness::SECONDS_IN_A_WEEK; +use super::bellatrix_readiness::SECONDS_IN_A_WEEK; pub const ELECTRA_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index b3804f0d23a..cbffe363422 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -336,7 +336,7 @@ pub fn validate_execution_payload_for_gossip( block: BeaconBlockRef<'_, T::EthSpec>, chain: &BeaconChain, ) -> Result<(), BlockError> { - // Only apply this validation if this is a merge beacon block. + // Only apply this validation if this is a Bellatrix beacon block. if let Ok(execution_payload) = block.body().execution_payload() { // This logic should match `is_execution_enabled`. We use only the execution block hash of // the parent here in order to avoid loading the parent state during gossip verification. @@ -385,7 +385,7 @@ pub fn validate_execution_payload_for_gossip( /// ## Errors /// /// Will return an error when using a pre-merge fork `state`. Ensure to only run this function -/// after the merge fork. +/// after the Bellatrix fork. /// /// ## Specification /// @@ -415,13 +415,13 @@ pub fn get_execution_payload( &BeaconState::Capella(_) | &BeaconState::Deneb(_) | &BeaconState::Electra(_) => { Some(get_expected_withdrawals(state, spec)?.into()) } - &BeaconState::Merge(_) => None, + &BeaconState::Bellatrix(_) => None, // These shouldn't happen but they're here to make the pattern irrefutable &BeaconState::Base(_) | &BeaconState::Altair(_) => None, }; let parent_beacon_block_root = match state { BeaconState::Deneb(_) | BeaconState::Electra(_) => Some(parent_block_root), - BeaconState::Merge(_) | BeaconState::Capella(_) => None, + BeaconState::Bellatrix(_) | BeaconState::Capella(_) => None, // These shouldn't happen but they're here to make the pattern irrefutable BeaconState::Base(_) | BeaconState::Altair(_) => None, }; @@ -457,12 +457,12 @@ pub fn get_execution_payload( /// Prepares an execution payload for inclusion in a block. /// -/// Will return `Ok(None)` if the merge fork has occurred, but a terminal block has not been found. +/// Will return `Ok(None)` if the Bellatrix fork has occurred, but a terminal block has not been found. /// /// ## Errors /// -/// Will return an error when using a pre-merge fork `state`. Ensure to only run this function -/// after the merge fork. +/// Will return an error when using a pre-Bellatrix fork `state`. Ensure to only run this function +/// after the Bellatrix fork. 
/// /// ## Specification /// diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 5ee7803436c..221bb8b2922 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -8,6 +8,7 @@ mod beacon_chain; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; mod beacon_snapshot; +pub mod bellatrix_readiness; pub mod blob_verification; pub mod block_reward; mod block_times_cache; @@ -35,7 +36,6 @@ pub mod kzg_utils; pub mod light_client_finality_update_verification; pub mod light_client_optimistic_update_verification; mod light_client_server_cache; -pub mod merge_readiness; pub mod metrics; pub mod migrate; mod naive_aggregation_pool; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d3eeff8a7d9..8fbd5d575f9 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -880,7 +880,7 @@ where let block_contents: SignedBlockContentsTuple = match *signed_block { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) - | SignedBeaconBlock::Merge(_) + | SignedBeaconBlock::Bellatrix(_) | SignedBeaconBlock::Capella(_) => (signed_block, None), SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => { (signed_block, block_response.blob_items) @@ -944,7 +944,7 @@ where let block_contents: SignedBlockContentsTuple = match *signed_block { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) - | SignedBeaconBlock::Merge(_) + | SignedBeaconBlock::Bellatrix(_) | SignedBeaconBlock::Capella(_) => (signed_block, None), SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => { (signed_block, block_response.blob_items) diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 3432604cc93..1463d1c5c15 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -336,7 +336,7 @@ impl GossipTester { pub fn earliest_valid_attestation_slot(&self) -> Slot { let offset = match self.harness.spec.fork_name_at_epoch(self.epoch()) { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { // Subtract an additional slot since the harness will be exactly on the start of the // slot and the propagation tolerance will allow an extra slot. E::slots_per_epoch() + 1 @@ -1382,7 +1382,10 @@ async fn attestation_verification_use_head_state_fork() { .block_at_slot(pre_capella_slot, WhenSlotSkipped::Prev) .expect("should not error getting block at slot") .expect("should find block at slot"); - assert_eq!(pre_capella_block.fork_name(&spec).unwrap(), ForkName::Merge); + assert_eq!( + pre_capella_block.fork_name(&spec).unwrap(), + ForkName::Bellatrix + ); // Advance slot clock to Capella fork. 
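The `earliest_valid_attestation_slot` helper above encodes the EIP-7045 rule that this series keeps intact while renaming the fork: before Deneb an attestation may propagate for roughly one epoch, while from Deneb onwards the window extends back to the start of the previous epoch. A toy version of that slot arithmetic (plain integers instead of Lighthouse's `Slot`/`Epoch` types):

const SLOTS_PER_EPOCH: u64 = 32;

fn earliest_permissible_slot(current_slot: u64, is_post_deneb: bool) -> u64 {
    let one_epoch_prior = current_slot.saturating_sub(SLOTS_PER_EPOCH);
    if is_post_deneb {
        // EIP-7045: widen the window to the first slot of that epoch.
        (one_epoch_prior / SLOTS_PER_EPOCH) * SLOTS_PER_EPOCH
    } else {
        one_epoch_prior
    }
}

fn main() {
    // At slot 100, pre-Deneb forks accept attestations back to slot 68,
    // while Deneb and later accept back to slot 64 (start of epoch 2).
    assert_eq!(earliest_permissible_slot(100, false), 68);
    assert_eq!(earliest_permissible_slot(100, true), 64);
}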
harness.advance_slot(); @@ -1427,7 +1430,7 @@ async fn attestation_verification_use_head_state_fork() { // Scenario 2: other node forgot to update their node and signed attestations using bellatrix fork { let attesters = (VALIDATOR_COUNT / 2..VALIDATOR_COUNT).collect::>(); - let merge_fork = spec.fork_for_name(ForkName::Merge).unwrap(); + let bellatrix_fork = spec.fork_for_name(ForkName::Bellatrix).unwrap(); let committee_attestations = harness .make_unaggregated_attestations_with_opts( attesters.as_slice(), @@ -1436,7 +1439,7 @@ async fn attestation_verification_use_head_state_fork() { pre_capella_block.canonical_root().into(), first_capella_slot, MakeAttestationOptions { - fork: merge_fork, + fork: bellatrix_fork, limit: None, }, ) @@ -1483,7 +1486,10 @@ async fn aggregated_attestation_verification_use_head_state_fork() { .block_at_slot(pre_capella_slot, WhenSlotSkipped::Prev) .expect("should not error getting block at slot") .expect("should find block at slot"); - assert_eq!(pre_capella_block.fork_name(&spec).unwrap(), ForkName::Merge); + assert_eq!( + pre_capella_block.fork_name(&spec).unwrap(), + ForkName::Bellatrix + ); // Advance slot clock to Capella fork. harness.advance_slot(); @@ -1525,7 +1531,7 @@ async fn aggregated_attestation_verification_use_head_state_fork() { // Scenario 2: other node forgot to update their node and signed attestations using bellatrix fork { let attesters = (VALIDATOR_COUNT / 2..VALIDATOR_COUNT).collect::>(); - let merge_fork = spec.fork_for_name(ForkName::Merge).unwrap(); + let bellatrix_fork = spec.fork_for_name(ForkName::Bellatrix).unwrap(); let aggregates = harness .make_attestations_with_opts( attesters.as_slice(), @@ -1534,7 +1540,7 @@ async fn aggregated_attestation_verification_use_head_state_fork() { pre_capella_block.canonical_root().into(), first_capella_slot, MakeAttestationOptions { - fork: merge_fork, + fork: bellatrix_fork, limit: None, }, ) diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/bellatrix.rs similarity index 81% rename from beacon_node/beacon_chain/tests/merge.rs rename to beacon_node/beacon_chain/tests/bellatrix.rs index bff5c4523d1..027082c11c9 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/bellatrix.rs @@ -71,9 +71,9 @@ async fn merge_with_terminal_block_hash_override() { .chain .head_snapshot() .beacon_block - .as_merge() + .as_bellatrix() .is_ok(), - "genesis block should be a merge block" + "genesis block should be a bellatrix block" ); let mut execution_payloads = vec![]; @@ -93,11 +93,11 @@ async fn merge_with_terminal_block_hash_override() { } #[tokio::test] -async fn base_altair_merge_with_terminal_block_after_fork() { +async fn base_altair_bellatrix_with_terminal_block_after_fork() { let altair_fork_epoch = Epoch::new(4); let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); let bellatrix_fork_epoch = Epoch::new(8); - let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); + let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); let mut spec = E::default_spec(); spec.altair_fork_epoch = Some(altair_fork_epoch); @@ -130,41 +130,41 @@ async fn base_altair_merge_with_terminal_block_after_fork() { assert_eq!(altair_head.slot(), altair_fork_slot); /* - * Do the merge fork, without a terminal PoW block. + * Do the Bellatrix fork, without a terminal PoW block. 
*/ - harness.extend_to_slot(merge_fork_slot).await; + harness.extend_to_slot(bellatrix_fork_slot).await; - let merge_head = &harness.chain.head_snapshot().beacon_block; - assert!(merge_head.as_merge().is_ok()); - assert_eq!(merge_head.slot(), merge_fork_slot); + let bellatrix_head = &harness.chain.head_snapshot().beacon_block; + assert!(bellatrix_head.as_bellatrix().is_ok()); + assert_eq!(bellatrix_head.slot(), bellatrix_fork_slot); assert!( - merge_head + bellatrix_head .message() .body() .execution_payload() .unwrap() .is_default_with_empty_roots(), - "Merge head is default payload" + "Bellatrix head is default payload" ); /* - * Next merge block shouldn't include an exec payload. + * Next Bellatrix block shouldn't include an exec payload. */ harness.extend_slots(1).await; - let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; + let one_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( - one_after_merge_head + one_after_bellatrix_head .message() .body() .execution_payload() .unwrap() .is_default_with_empty_roots(), - "One after merge head is default payload" + "One after bellatrix head is default payload" ); - assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); + assert_eq!(one_after_bellatrix_head.slot(), bellatrix_fork_slot + 1); /* * Trigger the terminal PoW block. @@ -188,20 +188,20 @@ async fn base_altair_merge_with_terminal_block_after_fork() { harness.extend_slots(1).await; - let two_after_merge_head = &harness.chain.head_snapshot().beacon_block; + let two_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( - two_after_merge_head + two_after_bellatrix_head .message() .body() .execution_payload() .unwrap() .is_default_with_empty_roots(), - "Two after merge head is default payload" + "Two after bellatrix head is default payload" ); - assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2); + assert_eq!(two_after_bellatrix_head.slot(), bellatrix_fork_slot + 2); /* - * Next merge block should include an exec payload. + * Next Bellatrix block should include an exec payload. */ for _ in 0..4 { harness.extend_slots(1).await; diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index dc40280f530..c8fd2637f0c 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -25,11 +25,11 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { } #[tokio::test] -async fn base_altair_merge_capella() { +async fn base_altair_bellatrix_capella() { let altair_fork_epoch = Epoch::new(4); let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); let bellatrix_fork_epoch = Epoch::new(8); - let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); + let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); let capella_fork_epoch = Epoch::new(12); let capella_fork_slot = capella_fork_epoch.start_slot(E::slots_per_epoch()); @@ -61,39 +61,39 @@ async fn base_altair_merge_capella() { assert_eq!(altair_head.slot(), altair_fork_slot); /* - * Do the merge fork, without a terminal PoW block. + * Do the Bellatrix fork, without a terminal PoW block. 
*/ - harness.extend_to_slot(merge_fork_slot).await; + harness.extend_to_slot(bellatrix_fork_slot).await; - let merge_head = &harness.chain.head_snapshot().beacon_block; - assert!(merge_head.as_merge().is_ok()); - assert_eq!(merge_head.slot(), merge_fork_slot); + let bellatrix_head = &harness.chain.head_snapshot().beacon_block; + assert!(bellatrix_head.as_bellatrix().is_ok()); + assert_eq!(bellatrix_head.slot(), bellatrix_fork_slot); assert!( - merge_head + bellatrix_head .message() .body() .execution_payload() .unwrap() .is_default_with_empty_roots(), - "Merge head is default payload" + "Bellatrix head is default payload" ); /* - * Next merge block shouldn't include an exec payload. + * Next Bellatrix block shouldn't include an exec payload. */ harness.extend_slots(1).await; - let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; + let one_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( - one_after_merge_head + one_after_bellatrix_head .message() .body() .execution_payload() .unwrap() .is_default_with_empty_roots(), - "One after merge head is default payload" + "One after bellatrix head is default payload" ); - assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); + assert_eq!(one_after_bellatrix_head.slot(), bellatrix_fork_slot + 1); /* * Trigger the terminal PoW block. @@ -114,23 +114,23 @@ async fn base_altair_merge_capella() { }); harness.extend_slots(1).await; - let two_after_merge_head = &harness.chain.head_snapshot().beacon_block; + let two_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( - two_after_merge_head + two_after_bellatrix_head .message() .body() .execution_payload() .unwrap() .is_default_with_empty_roots(), - "Two after merge head is default payload" + "Two after bellatrix head is default payload" ); - assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2); + assert_eq!(two_after_bellatrix_head.slot(), bellatrix_fork_slot + 2); /* - * Next merge block should include an exec payload. + * Next Bellatrix block should include an exec payload. */ let mut execution_payloads = vec![]; - for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { + for _ in (bellatrix_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; let full_payload: FullPayload = diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index e0564e1510b..942ce816846 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -1,9 +1,9 @@ mod attestation_production; mod attestation_verification; +mod bellatrix; mod block_verification; mod capella; mod events; -mod merge; mod op_verification; mod payload_invalidation; mod rewards; diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index f595e5037e2..ea9ef73575f 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -211,7 +211,7 @@ async fn produces_missed_blocks() { // `validator_indexes[slot_in_epoch.as_usize()]` and add it below. 
let validator_index_to_monitor_altair = 2; // Same as above but for the merge upgrade - let validator_index_to_monitor_merge = 4; + let validator_index_to_monitor_bellatrix = 4; // Same as above but for the capella upgrade let validator_index_to_monitor_capella = 11; // Same as above but for the deneb upgrade @@ -224,7 +224,7 @@ async fn produces_missed_blocks() { vec![ validator_index_to_monitor, validator_index_to_monitor_altair, - validator_index_to_monitor_merge, + validator_index_to_monitor_bellatrix, validator_index_to_monitor_capella, validator_index_to_monitor_deneb, validator_index_to_monitor_electra, diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 912babdae31..a6fd07789d8 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,9 +1,9 @@ use crate::metrics; use beacon_chain::{ + bellatrix_readiness::{BellatrixReadiness, GenesisExecutionPayloadStatus, MergeConfig}, capella_readiness::CapellaReadiness, deneb_readiness::DenebReadiness, electra_readiness::ElectraReadiness, - merge_readiness::{GenesisExecutionPayloadStatus, MergeConfig, MergeReadiness}, BeaconChain, BeaconChainTypes, ExecutionStatus, }; use lighthouse_network::{types::SyncState, NetworkGlobals}; @@ -64,7 +64,7 @@ pub fn spawn_notifier( "wait_time" => estimated_time_pretty(Some(next_slot.as_secs() as f64)), ); eth1_logging(&beacon_chain, &log); - merge_readiness_logging(Slot::new(0), &beacon_chain, &log).await; + bellatrix_readiness_logging(Slot::new(0), &beacon_chain, &log).await; capella_readiness_logging(Slot::new(0), &beacon_chain, &log).await; genesis_execution_payload_logging(&beacon_chain, &log).await; sleep(slot_duration).await; @@ -319,7 +319,7 @@ pub fn spawn_notifier( } eth1_logging(&beacon_chain, &log); - merge_readiness_logging(current_slot, &beacon_chain, &log).await; + bellatrix_readiness_logging(current_slot, &beacon_chain, &log).await; capella_readiness_logging(current_slot, &beacon_chain, &log).await; deneb_readiness_logging(current_slot, &beacon_chain, &log).await; electra_readiness_logging(current_slot, &beacon_chain, &log).await; @@ -334,7 +334,7 @@ pub fn spawn_notifier( /// Provides some helpful logging to users to indicate if their node is ready for the Bellatrix /// fork and subsequent merge transition. 
-async fn merge_readiness_logging(
+async fn bellatrix_readiness_logging(
     current_slot: Slot,
     beacon_chain: &BeaconChain<T>,
     log: &Logger,
@@ -372,8 +372,8 @@ async fn merge_readiness_logging(
         return;
     }

-    match beacon_chain.check_merge_readiness(current_slot).await {
-        MergeReadiness::Ready {
+    match beacon_chain.check_bellatrix_readiness(current_slot).await {
+        BellatrixReadiness::Ready {
             config,
             current_difficulty,
         } => match config {
@@ -384,7 +384,7 @@ async fn merge_readiness_logging(
             } => {
                 info!(
                     log,
-                    "Ready for the merge";
+                    "Ready for Bellatrix";
                     "terminal_total_difficulty" => %ttd,
                     "current_difficulty" => current_difficulty
                         .map(|d| d.to_string())
@@ -398,7 +398,7 @@ async fn merge_readiness_logging(
             } => {
                 info!(
                     log,
-                    "Ready for the merge";
+                    "Ready for Bellatrix";
                     "info" => "you are using override parameters, please ensure that you \
                         understand these parameters and their implications.",
                     "terminal_block_hash" => ?terminal_block_hash,
@@ -411,14 +411,14 @@ async fn merge_readiness_logging(
                 "config" => ?other
             ),
         },
-        readiness @ MergeReadiness::NotSynced => warn!(
+        readiness @ BellatrixReadiness::NotSynced => warn!(
             log,
-            "Not ready for merge";
+            "Not ready for Bellatrix";
             "info" => %readiness,
         ),
-        readiness @ MergeReadiness::NoExecutionEndpoint => warn!(
+        readiness @ BellatrixReadiness::NoExecutionEndpoint => warn!(
             log,
-            "Not ready for merge";
+            "Not ready for Bellatrix";
             "info" => %readiness,
         ),
     }
diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs
index 1f8c29f6b25..0d0cfaf352c 100644
--- a/beacon_node/execution_layer/src/block_hash.rs
+++ b/beacon_node/execution_layer/src/block_hash.rs
@@ -146,7 +146,7 @@ mod test {
     }

     #[test]
-    fn test_rlp_encode_merge_block() {
+    fn test_rlp_encode_bellatrix_block() {
         let header = ExecutionBlockHeader {
             parent_hash: Hash256::from_str("927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbe").unwrap(),
             ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(),
diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs
index d422994c595..f3f059a4355 100644
--- a/beacon_node/execution_layer/src/engine_api.rs
+++ b/beacon_node/execution_layer/src/engine_api.rs
@@ -25,8 +25,8 @@ pub use types::{
     Withdrawal, Withdrawals,
 };
 use types::{
-    ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadMerge,
-    KzgProofs,
+    ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb,
+    ExecutionPayloadElectra, KzgProofs,
 };
 use types::{Graffiti, GRAFFITI_BYTES_LEN};

@@ -36,8 +36,8 @@ pub mod json_structures;
 mod new_payload_request;

 pub use new_payload_request::{
-    NewPayloadRequest, NewPayloadRequestCapella, NewPayloadRequestDeneb, NewPayloadRequestElectra,
-    NewPayloadRequestMerge,
+    NewPayloadRequest, NewPayloadRequestBellatrix, NewPayloadRequestCapella,
+    NewPayloadRequestDeneb, NewPayloadRequestElectra,
 };

 pub const LATEST_TAG: &str = "latest";

@@ -155,7 +155,7 @@ pub struct ExecutionBlock {
 /// Representation of an execution block with enough detail to reconstruct a payload.
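The containers below are `superstruct` types: the macro expands one struct per listed fork variant plus an enum with partial getters, which is why renaming the `Merge` variant also renames generated items such as `GetPayloadResponseMerge` and `ExecutionBlockWithTransactionsMerge`. Roughly, and much simplified, what such a container desugars to (a hand-rolled stand-in, not the macro's real output):

// Illustrative stand-ins for the per-fork payload types.
#[derive(Clone, Debug, Default)]
struct ExecutionPayloadBellatrix;
#[derive(Clone, Debug, Default)]
struct ExecutionPayloadCapella;

// One enum over forks, with per-variant "partial getters" that fail
// when called on the wrong variant.
#[derive(Clone, Debug)]
enum GetPayloadResponse {
    Bellatrix(ExecutionPayloadBellatrix),
    Capella(ExecutionPayloadCapella),
}

impl GetPayloadResponse {
    fn execution_payload_bellatrix(&self) -> Result<&ExecutionPayloadBellatrix, &'static str> {
        match self {
            GetPayloadResponse::Bellatrix(payload) => Ok(payload),
            _ => Err("incorrect variant"),
        }
    }
}

fn main() {
    let response = GetPayloadResponse::Bellatrix(ExecutionPayloadBellatrix);
    assert!(response.execution_payload_bellatrix().is_ok());
}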
#[superstruct( - variants(Merge, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra), variant_attributes( derive(Clone, Debug, PartialEq, Serialize, Deserialize,), serde(bound = "E: EthSpec", rename_all = "camelCase"), @@ -204,26 +204,28 @@ impl TryFrom> for ExecutionBlockWithTransactions fn try_from(payload: ExecutionPayload) -> Result { let json_payload = match payload { - ExecutionPayload::Merge(block) => Self::Merge(ExecutionBlockWithTransactionsMerge { - parent_hash: block.parent_hash, - fee_recipient: block.fee_recipient, - state_root: block.state_root, - receipts_root: block.receipts_root, - logs_bloom: block.logs_bloom, - prev_randao: block.prev_randao, - block_number: block.block_number, - gas_limit: block.gas_limit, - gas_used: block.gas_used, - timestamp: block.timestamp, - extra_data: block.extra_data, - base_fee_per_gas: block.base_fee_per_gas, - block_hash: block.block_hash, - transactions: block - .transactions - .iter() - .map(|tx| Transaction::decode(&Rlp::new(tx))) - .collect::, _>>()?, - }), + ExecutionPayload::Bellatrix(block) => { + Self::Bellatrix(ExecutionBlockWithTransactionsBellatrix { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + block_hash: block.block_hash, + transactions: block + .transactions + .iter() + .map(|tx| Transaction::decode(&Rlp::new(tx))) + .collect::, _>>()?, + }) + } ExecutionPayload::Capella(block) => { Self::Capella(ExecutionBlockWithTransactionsCapella { parent_hash: block.parent_hash, @@ -423,7 +425,7 @@ pub struct ProposeBlindedBlockResponse { } #[superstruct( - variants(Merge, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra), variant_attributes(derive(Clone, Debug, PartialEq),), map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), @@ -432,8 +434,11 @@ pub struct ProposeBlindedBlockResponse { )] #[derive(Clone, Debug, PartialEq)] pub struct GetPayloadResponse { - #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] - pub execution_payload: ExecutionPayloadMerge, + #[superstruct( + only(Bellatrix), + partial_getter(rename = "execution_payload_bellatrix") + )] + pub execution_payload: ExecutionPayloadBellatrix, #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] pub execution_payload: ExecutionPayloadCapella, #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] @@ -482,8 +487,8 @@ impl From> { fn from(response: GetPayloadResponse) -> Self { match response { - GetPayloadResponse::Merge(inner) => ( - ExecutionPayload::Merge(inner.execution_payload), + GetPayloadResponse::Bellatrix(inner) => ( + ExecutionPayload::Bellatrix(inner.execution_payload), inner.block_value, None, ), @@ -529,14 +534,14 @@ impl ExecutionPayloadBodyV1 { header: ExecutionPayloadHeader, ) -> Result, String> { match header { - ExecutionPayloadHeader::Merge(header) => { + ExecutionPayloadHeader::Bellatrix(header) => { if self.withdrawals.is_some() { return Err(format!( "block {} is merge but payload body has withdrawals", header.block_hash )); } - Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { + Ok(ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { parent_hash: 
header.parent_hash, fee_recipient: header.fee_recipient, state_root: header.state_root, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 405416e4f9d..93705a16925 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -737,7 +737,7 @@ impl HttpJsonRpc { ) -> Result>, Error> { let params = json!([block_hash, true]); Ok(Some(match fork { - ForkName::Merge => ExecutionBlockWithTransactions::Merge( + ForkName::Bellatrix => ExecutionBlockWithTransactions::Bellatrix( self.rpc_request( ETH_GET_BLOCK_BY_HASH, params, @@ -868,7 +868,7 @@ impl HttpJsonRpc { ) .await?; - Ok(GetPayloadResponse::Merge(GetPayloadResponseMerge { + Ok(GetPayloadResponse::Bellatrix(GetPayloadResponseBellatrix { execution_payload: payload_v1.into(), // Set the V1 payload values from the EE to be zero. This simulates // the pre-block-value functionality of always choosing the builder @@ -885,7 +885,7 @@ impl HttpJsonRpc { let params = json!([JsonPayloadIdRequest::from(payload_id)]); match fork_name { - ForkName::Merge => { + ForkName::Bellatrix => { let response: JsonGetPayloadResponseV1 = self .rpc_request( ENGINE_GET_PAYLOAD_V2, @@ -939,7 +939,7 @@ impl HttpJsonRpc { .await?; Ok(JsonGetPayloadResponse::V4(response).into()) } - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => Err( + ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => Err( Error::UnsupportedForkVariant(format!("called get_payload_v3 with {}", fork_name)), ), } @@ -1180,7 +1180,7 @@ impl HttpJsonRpc { ) -> Result { let engine_capabilities = self.get_engine_capabilities(None).await?; match new_payload_request { - NewPayloadRequest::Merge(_) | NewPayloadRequest::Capella(_) => { + NewPayloadRequest::Bellatrix(_) | NewPayloadRequest::Capella(_) => { if engine_capabilities.new_payload_v2 { self.new_payload_v2(new_payload_request.into_execution_payload()) .await @@ -1218,7 +1218,7 @@ impl HttpJsonRpc { ) -> Result, Error> { let engine_capabilities = self.get_engine_capabilities(None).await?; match fork_name { - ForkName::Merge | ForkName::Capella => { + ForkName::Bellatrix | ForkName::Capella => { if engine_capabilities.get_payload_v2 { self.get_payload_v2(fork_name, payload_id).await } else if engine_capabilities.new_payload_v1 { @@ -1659,8 +1659,8 @@ mod test { .assert_request_equals( |client| async move { let _ = client - .new_payload_v1::(ExecutionPayload::Merge( - ExecutionPayloadMerge { + .new_payload_v1::(ExecutionPayload::Bellatrix( + ExecutionPayloadBellatrix { parent_hash: ExecutionBlockHash::repeat_byte(0), fee_recipient: Address::repeat_byte(1), state_root: Hash256::repeat_byte(1), @@ -1706,8 +1706,8 @@ mod test { Tester::new(false) .assert_auth_failure(|client| async move { client - .new_payload_v1::(ExecutionPayload::Merge( - ExecutionPayloadMerge { + .new_payload_v1::(ExecutionPayload::Bellatrix( + ExecutionPayloadBellatrix { parent_hash: ExecutionBlockHash::repeat_byte(0), fee_recipient: Address::repeat_byte(1), state_root: Hash256::repeat_byte(1), @@ -1917,7 +1917,7 @@ mod test { .unwrap() .into(); - let expected = ExecutionPayload::Merge(ExecutionPayloadMerge { + let expected = ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: 
Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), @@ -1942,7 +1942,7 @@ mod test { // engine_newPayloadV1 REQUEST validation |client| async move { let _ = client - .new_payload_v1::(ExecutionPayload::Merge(ExecutionPayloadMerge{ + .new_payload_v1::(ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix{ parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), @@ -1996,7 +1996,7 @@ mod test { })], |client| async move { let response = client - .new_payload_v1::(ExecutionPayload::Merge(ExecutionPayloadMerge::default())) + .new_payload_v1::(ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix::default())) .await .unwrap(); diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index d784aa4cd9c..5c4d6ab1ac3 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -103,8 +103,8 @@ pub struct JsonExecutionPayload { pub excess_blob_gas: u64, } -impl From> for JsonExecutionPayloadV1 { - fn from(payload: ExecutionPayloadMerge) -> Self { +impl From> for JsonExecutionPayloadV1 { + fn from(payload: ExecutionPayloadBellatrix) -> Self { JsonExecutionPayloadV1 { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, @@ -210,7 +210,7 @@ impl From> for JsonExecutionPayloadV4 impl From> for JsonExecutionPayload { fn from(execution_payload: ExecutionPayload) -> Self { match execution_payload { - ExecutionPayload::Merge(payload) => JsonExecutionPayload::V1(payload.into()), + ExecutionPayload::Bellatrix(payload) => JsonExecutionPayload::V1(payload.into()), ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()), ExecutionPayload::Deneb(payload) => JsonExecutionPayload::V3(payload.into()), ExecutionPayload::Electra(payload) => JsonExecutionPayload::V4(payload.into()), @@ -218,9 +218,9 @@ impl From> for JsonExecutionPayload { } } -impl From> for ExecutionPayloadMerge { +impl From> for ExecutionPayloadBellatrix { fn from(payload: JsonExecutionPayloadV1) -> Self { - ExecutionPayloadMerge { + ExecutionPayloadBellatrix { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -326,7 +326,7 @@ impl From> for ExecutionPayloadElectra impl From> for ExecutionPayload { fn from(json_execution_payload: JsonExecutionPayload) -> Self { match json_execution_payload { - JsonExecutionPayload::V1(payload) => ExecutionPayload::Merge(payload.into()), + JsonExecutionPayload::V1(payload) => ExecutionPayload::Bellatrix(payload.into()), JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()), JsonExecutionPayload::V3(payload) => ExecutionPayload::Deneb(payload.into()), JsonExecutionPayload::V4(payload) => ExecutionPayload::Electra(payload.into()), @@ -366,7 +366,7 @@ impl From> for GetPayloadResponse { fn from(json_get_payload_response: JsonGetPayloadResponse) -> Self { match json_get_payload_response { JsonGetPayloadResponse::V1(response) => { - GetPayloadResponse::Merge(GetPayloadResponseMerge { + GetPayloadResponse::Bellatrix(GetPayloadResponseBellatrix { execution_payload: response.execution_payload.into(), block_value: response.block_value, 
}) diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index 6b6df13b704..8d2e3d5ad06 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -8,11 +8,12 @@ use types::{ ExecutionPayloadRef, Hash256, VersionedHash, }; use types::{ - ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadMerge, + ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadElectra, }; #[superstruct( - variants(Merge, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra), variant_attributes(derive(Clone, Debug, PartialEq),), map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), @@ -27,8 +28,11 @@ use types::{ )] #[derive(Clone, Debug, PartialEq)] pub struct NewPayloadRequest<'block, E: EthSpec> { - #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] - pub execution_payload: &'block ExecutionPayloadMerge, + #[superstruct( + only(Bellatrix), + partial_getter(rename = "execution_payload_bellatrix") + )] + pub execution_payload: &'block ExecutionPayloadBellatrix, #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] pub execution_payload: &'block ExecutionPayloadCapella, #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] @@ -44,7 +48,7 @@ pub struct NewPayloadRequest<'block, E: EthSpec> { impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { pub fn parent_hash(&self) -> ExecutionBlockHash { match self { - Self::Merge(payload) => payload.execution_payload.parent_hash, + Self::Bellatrix(payload) => payload.execution_payload.parent_hash, Self::Capella(payload) => payload.execution_payload.parent_hash, Self::Deneb(payload) => payload.execution_payload.parent_hash, Self::Electra(payload) => payload.execution_payload.parent_hash, @@ -53,7 +57,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { pub fn block_hash(&self) -> ExecutionBlockHash { match self { - Self::Merge(payload) => payload.execution_payload.block_hash, + Self::Bellatrix(payload) => payload.execution_payload.block_hash, Self::Capella(payload) => payload.execution_payload.block_hash, Self::Deneb(payload) => payload.execution_payload.block_hash, Self::Electra(payload) => payload.execution_payload.block_hash, @@ -62,7 +66,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { pub fn block_number(&self) -> u64 { match self { - Self::Merge(payload) => payload.execution_payload.block_number, + Self::Bellatrix(payload) => payload.execution_payload.block_number, Self::Capella(payload) => payload.execution_payload.block_number, Self::Deneb(payload) => payload.execution_payload.block_number, Self::Electra(payload) => payload.execution_payload.block_number, @@ -71,7 +75,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { pub fn execution_payload_ref(&self) -> ExecutionPayloadRef<'block, E> { match self { - Self::Merge(request) => ExecutionPayloadRef::Merge(request.execution_payload), + Self::Bellatrix(request) => ExecutionPayloadRef::Bellatrix(request.execution_payload), Self::Capella(request) => ExecutionPayloadRef::Capella(request.execution_payload), Self::Deneb(request) => ExecutionPayloadRef::Deneb(request.execution_payload), Self::Electra(request) => ExecutionPayloadRef::Electra(request.execution_payload), @@ -80,7 +84,9 @@ impl<'block, E: EthSpec> 
NewPayloadRequest<'block, E> { pub fn into_execution_payload(self) -> ExecutionPayload { match self { - Self::Merge(request) => ExecutionPayload::Merge(request.execution_payload.clone()), + Self::Bellatrix(request) => { + ExecutionPayload::Bellatrix(request.execution_payload.clone()) + } Self::Capella(request) => ExecutionPayload::Capella(request.execution_payload.clone()), Self::Deneb(request) => ExecutionPayload::Deneb(request.execution_payload.clone()), Self::Electra(request) => ExecutionPayload::Electra(request.execution_payload.clone()), @@ -150,9 +156,11 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> BeaconBlockRef::Base(_) | BeaconBlockRef::Altair(_) => { Err(Self::Error::IncorrectStateVariant) } - BeaconBlockRef::Merge(block_ref) => Ok(Self::Merge(NewPayloadRequestMerge { - execution_payload: &block_ref.body.execution_payload.execution_payload, - })), + BeaconBlockRef::Bellatrix(block_ref) => { + Ok(Self::Bellatrix(NewPayloadRequestBellatrix { + execution_payload: &block_ref.body.execution_payload.execution_payload, + })) + } BeaconBlockRef::Capella(block_ref) => Ok(Self::Capella(NewPayloadRequestCapella { execution_payload: &block_ref.body.execution_payload.execution_payload, })), @@ -185,9 +193,11 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<' fn try_from(payload: ExecutionPayloadRef<'a, E>) -> Result { match payload { - ExecutionPayloadRef::Merge(payload) => Ok(Self::Merge(NewPayloadRequestMerge { - execution_payload: payload, - })), + ExecutionPayloadRef::Bellatrix(payload) => { + Ok(Self::Bellatrix(NewPayloadRequestBellatrix { + execution_payload: payload, + })) + } ExecutionPayloadRef::Capella(payload) => Ok(Self::Capella(NewPayloadRequestCapella { execution_payload: payload, })), diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 60f450a39de..668a5ce84b2 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -50,8 +50,8 @@ use types::{ AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, KzgProofs, SignedBlindedBeaconBlock, }; use types::{ - BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadCapella, - ExecutionPayloadElectra, ExecutionPayloadMerge, FullPayload, ProposerPreparationData, + BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadBellatrix, + ExecutionPayloadCapella, ExecutionPayloadElectra, FullPayload, ProposerPreparationData, PublicKeyBytes, Signature, Slot, }; @@ -98,8 +98,8 @@ impl TryFrom> for ProvenancedPayload) -> Result { let block_proposal_contents = match value { - BuilderBid::Merge(builder_bid) => BlockProposalContents::Payload { - payload: ExecutionPayloadHeader::Merge(builder_bid.header).into(), + BuilderBid::Bellatrix(builder_bid) => BlockProposalContents::Payload { + payload: ExecutionPayloadHeader::Bellatrix(builder_bid.header).into(), block_value: builder_bid.value, }, BuilderBid::Capella(builder_bid) => BlockProposalContents::Payload { @@ -1804,7 +1804,7 @@ impl ExecutionLayer { // Handle default payload body. 
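The `json_structures.rs` hunks above also pin down the correspondence between consensus forks and engine-API payload structure versions (V1 for Bellatrix, V2 for Capella, V3 for Deneb, V4 for Electra). A minimal sketch of that mapping, using a simplified enum rather than Lighthouse's `ForkName`:

#[derive(Clone, Copy, Debug, PartialEq)]
enum ForkName {
    Bellatrix,
    Capella,
    Deneb,
    Electra,
}

// Engine-API JSON structure version used for a fork's payloads.
fn json_payload_version(fork: ForkName) -> u8 {
    match fork {
        ForkName::Bellatrix => 1,
        ForkName::Capella => 2,
        ForkName::Deneb => 3,
        ForkName::Electra => 4,
    }
}

fn main() {
    assert_eq!(json_payload_version(ForkName::Bellatrix), 1);
    assert_eq!(json_payload_version(ForkName::Electra), 4);
}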
if header.block_hash() == ExecutionBlockHash::zero() { let payload = match fork { - ForkName::Merge => ExecutionPayloadMerge::default().into(), + ForkName::Bellatrix => ExecutionPayloadBellatrix::default().into(), ForkName::Capella => ExecutionPayloadCapella::default().into(), ForkName::Deneb => ExecutionPayloadDeneb::default().into(), ForkName::Electra => ExecutionPayloadElectra::default().into(), @@ -1873,7 +1873,7 @@ impl ExecutionLayer { if hash == ExecutionBlockHash::zero() { return match fork { - ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())), + ForkName::Bellatrix => Ok(Some(ExecutionPayloadBellatrix::default().into())), ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())), ForkName::Deneb => Ok(Some(ExecutionPayloadDeneb::default().into())), ForkName::Electra => Ok(Some(ExecutionPayloadElectra::default().into())), @@ -1902,22 +1902,22 @@ impl ExecutionLayer { }; let payload = match block { - ExecutionBlockWithTransactions::Merge(merge_block) => { - ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: merge_block.parent_hash, - fee_recipient: merge_block.fee_recipient, - state_root: merge_block.state_root, - receipts_root: merge_block.receipts_root, - logs_bloom: merge_block.logs_bloom, - prev_randao: merge_block.prev_randao, - block_number: merge_block.block_number, - gas_limit: merge_block.gas_limit, - gas_used: merge_block.gas_used, - timestamp: merge_block.timestamp, - extra_data: merge_block.extra_data, - base_fee_per_gas: merge_block.base_fee_per_gas, - block_hash: merge_block.block_hash, - transactions: convert_transactions(merge_block.transactions)?, + ExecutionBlockWithTransactions::Bellatrix(bellatrix_block) => { + ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { + parent_hash: bellatrix_block.parent_hash, + fee_recipient: bellatrix_block.fee_recipient, + state_root: bellatrix_block.state_root, + receipts_root: bellatrix_block.receipts_root, + logs_bloom: bellatrix_block.logs_bloom, + prev_randao: bellatrix_block.prev_randao, + block_number: bellatrix_block.block_number, + gas_limit: bellatrix_block.gas_limit, + gas_used: bellatrix_block.gas_used, + timestamp: bellatrix_block.timestamp, + extra_data: bellatrix_block.extra_data, + base_fee_per_gas: bellatrix_block.base_fee_per_gas, + block_hash: bellatrix_block.block_hash, + transactions: convert_transactions(bellatrix_block.transactions)?, }) } ExecutionBlockWithTransactions::Capella(capella_block) => { diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index bac2304fa85..77e12bcef66 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -21,9 +21,9 @@ use std::sync::Arc; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ - Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, - ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadHeader, ExecutionPayloadMerge, - ForkName, Hash256, Transaction, Transactions, Uint256, + Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, + ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, + ExecutionPayloadHeader, ForkName, Hash256, Transaction, Transactions, Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; @@ -92,7 +92,7 @@ impl Block { match self { Block::PoS(payload) => 
Some(payload.clone().try_into().unwrap()), Block::PoW(block) => Some( - ExecutionPayload::Merge(ExecutionPayloadMerge { + ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { block_hash: block.block_hash, ..Default::default() }) @@ -232,7 +232,7 @@ impl ExecutionBlockGenerator { Some(fork_time) if timestamp >= fork_time => ForkName::Deneb, _ => match self.shanghai_time { Some(fork_time) if timestamp >= fork_time => ForkName::Capella, - _ => ForkName::Merge, + _ => ForkName::Bellatrix, }, }, } @@ -569,7 +569,7 @@ impl ExecutionBlockGenerator { attributes: &PayloadAttributes, ) -> Result, String> { let mut execution_payload = match attributes { - PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge { + PayloadAttributes::V1(pa) => ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { parent_hash: head_block_hash, fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), @@ -586,7 +586,7 @@ impl ExecutionBlockGenerator { transactions: vec![].into(), }), PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) { - ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge { + ForkName::Bellatrix => ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { parent_hash: head_block_hash, fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), @@ -665,7 +665,7 @@ impl ExecutionBlockGenerator { }; match execution_payload.fork_name() { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {} + ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => {} ForkName::Deneb | ForkName::Electra => { // get random number between 0 and Max Blobs let mut rng = self.rng.lock(); @@ -782,14 +782,14 @@ pub fn generate_genesis_header( let empty_transactions_root = Transactions::::empty().tree_hash_root(); match genesis_fork { ForkName::Base | ForkName::Altair => None, - ForkName::Merge => { + ForkName::Bellatrix => { if post_transition_merge { - let mut header = ExecutionPayloadHeader::Merge(<_>::default()); + let mut header = ExecutionPayloadHeader::Bellatrix(<_>::default()); *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); *header.transactions_root_mut() = empty_transactions_root; Some(header) } else { - Some(ExecutionPayloadHeader::::Merge(<_>::default())) + Some(ExecutionPayloadHeader::::Bellatrix(<_>::default())) } } ForkName::Capella => { diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index c16bd43c82c..1dc8f0ab83e 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -135,7 +135,7 @@ pub async fn handle_rpc( .get_fork_at_timestamp(*request.timestamp()); // validate method called correctly according to fork time match fork { - ForkName::Merge => { + ForkName::Bellatrix => { if matches!(request, JsonExecutionPayload::V2(_)) { return Err(( format!( @@ -395,7 +395,7 @@ pub async fn handle_rpc( .read() .get_fork_at_timestamp(*pa.timestamp()) { - ForkName::Merge => { + ForkName::Bellatrix => { get_param::>(params, 1) .map(|opt| opt.map(JsonPayloadAttributes::V1)) .transpose() @@ -427,7 +427,7 @@ pub async fn handle_rpc( .read() .get_fork_at_timestamp(*pa.timestamp()) { - ForkName::Merge => { + ForkName::Bellatrix => { if matches!(pa, JsonPayloadAttributes::V2(_)) { return Err(( format!( diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs 
b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 756e0b793f8..c9ae1e60cdc 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -15,7 +15,7 @@ use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; use types::builder_bid::{ - BuilderBid, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, BuilderBidMerge, + BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, SignedBuilderBid, }; use types::{ @@ -77,7 +77,7 @@ pub trait BidStuff { impl BidStuff for BuilderBid { fn set_fee_recipient(&mut self, fee_recipient: Address) { match self.to_mut().header_mut() { - ExecutionPayloadHeaderRefMut::Merge(header) => { + ExecutionPayloadHeaderRefMut::Bellatrix(header) => { header.fee_recipient = fee_recipient; } ExecutionPayloadHeaderRefMut::Capella(header) => { @@ -94,7 +94,7 @@ impl BidStuff for BuilderBid { fn set_gas_limit(&mut self, gas_limit: u64) { match self.to_mut().header_mut() { - ExecutionPayloadHeaderRefMut::Merge(header) => { + ExecutionPayloadHeaderRefMut::Bellatrix(header) => { header.gas_limit = gas_limit; } ExecutionPayloadHeaderRefMut::Capella(header) => { @@ -115,7 +115,7 @@ impl BidStuff for BuilderBid { fn set_parent_hash(&mut self, parent_hash: Hash256) { match self.to_mut().header_mut() { - ExecutionPayloadHeaderRefMut::Merge(header) => { + ExecutionPayloadHeaderRefMut::Bellatrix(header) => { header.parent_hash = ExecutionBlockHash::from_root(parent_hash); } ExecutionPayloadHeaderRefMut::Capella(header) => { @@ -132,7 +132,7 @@ impl BidStuff for BuilderBid { fn set_prev_randao(&mut self, prev_randao: Hash256) { match self.to_mut().header_mut() { - ExecutionPayloadHeaderRefMut::Merge(header) => { + ExecutionPayloadHeaderRefMut::Bellatrix(header) => { header.prev_randao = prev_randao; } ExecutionPayloadHeaderRefMut::Capella(header) => { @@ -149,7 +149,7 @@ impl BidStuff for BuilderBid { fn set_block_number(&mut self, block_number: u64) { match self.to_mut().header_mut() { - ExecutionPayloadHeaderRefMut::Merge(header) => { + ExecutionPayloadHeaderRefMut::Bellatrix(header) => { header.block_number = block_number; } ExecutionPayloadHeaderRefMut::Capella(header) => { @@ -166,7 +166,7 @@ impl BidStuff for BuilderBid { fn set_timestamp(&mut self, timestamp: u64) { match self.to_mut().header_mut() { - ExecutionPayloadHeaderRefMut::Merge(header) => { + ExecutionPayloadHeaderRefMut::Bellatrix(header) => { header.timestamp = timestamp; } ExecutionPayloadHeaderRefMut::Capella(header) => { @@ -183,7 +183,7 @@ impl BidStuff for BuilderBid { fn set_withdrawals_root(&mut self, withdrawals_root: Hash256) { match self.to_mut().header_mut() { - ExecutionPayloadHeaderRefMut::Merge(_) => { + ExecutionPayloadHeaderRefMut::Bellatrix(_) => { panic!("no withdrawals before capella") } ExecutionPayloadHeaderRefMut::Capella(header) => { @@ -336,7 +336,7 @@ pub fn serve( SignedBlindedBeaconBlock::Base(_) | types::SignedBeaconBlock::Altair(_) => { return Err(reject("invalid fork")); } - SignedBlindedBeaconBlock::Merge(block) => { + SignedBlindedBeaconBlock::Bellatrix(block) => { block.message.body.execution_payload.tree_hash_root() } SignedBlindedBeaconBlock::Capella(block) => { @@ -480,7 +480,7 @@ pub fn serve( .get_randao_mix(head_state.current_epoch()) .map_err(|_| reject("couldn't get prev randao"))?; let expected_withdrawals = match fork { - ForkName::Base | ForkName::Altair | ForkName::Merge => None, + ForkName::Base | ForkName::Altair 
| ForkName::Bellatrix => None, ForkName::Capella | ForkName::Deneb | ForkName::Electra => Some( builder .beacon_client @@ -496,7 +496,7 @@ pub fn serve( // first to avoid polluting the execution block generator with invalid payload attributes // NOTE: this was part of an effort to add payload attribute uniqueness checks, // which was abandoned because it broke too many tests in subtle ways. - ForkName::Merge | ForkName::Capella => PayloadAttributes::new( + ForkName::Bellatrix | ForkName::Capella => PayloadAttributes::new( timestamp, *prev_randao, fee_recipient, @@ -577,9 +577,9 @@ pub fn serve( value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), pubkey: builder.builder_sk.public_key().compress(), }), - ForkName::Merge => BuilderBid::Merge(BuilderBidMerge { + ForkName::Bellatrix => BuilderBid::Bellatrix(BuilderBidBellatrix { header: payload - .as_merge() + .as_bellatrix() .map_err(|_| reject("incorrect payload variant"))? .into(), value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), @@ -627,9 +627,9 @@ pub fn serve( value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), pubkey: builder.builder_sk.public_key().compress(), }), - ForkName::Merge => BuilderBid::Merge(BuilderBidMerge { + ForkName::Bellatrix => BuilderBid::Bellatrix(BuilderBidBellatrix { header: payload - .as_merge() + .as_bellatrix() .map_err(|_| reject("incorrect payload variant"))? .into(), value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 6717bbc2ab3..da9b2817f69 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -138,7 +138,7 @@ impl MockExecutionLayer { &payload_attributes, forkchoice_update_params, builder_params, - ForkName::Merge, + ForkName::Bellatrix, &self.spec, None, BlockProductionVersion::FullV2, @@ -178,7 +178,7 @@ impl MockExecutionLayer { &payload_attributes, forkchoice_update_params, builder_params, - ForkName::Merge, + ForkName::Bellatrix, &self.spec, None, BlockProductionVersion::BlindedV2, diff --git a/beacon_node/http_api/src/build_block_contents.rs b/beacon_node/http_api/src/build_block_contents.rs index 7e3778b3fbb..05a6735b327 100644 --- a/beacon_node/http_api/src/build_block_contents.rs +++ b/beacon_node/http_api/src/build_block_contents.rs @@ -12,7 +12,7 @@ pub fn build_block_contents( Ok(ProduceBlockV3Response::Blinded(block.block)) } BeaconBlockResponseWrapper::Full(block) => match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => Ok( + ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => Ok( ProduceBlockV3Response::Full(FullBlockContents::Block(block.block)), ), ForkName::Deneb | ForkName::Electra => { diff --git a/beacon_node/http_api/src/builder_states.rs b/beacon_node/http_api/src/builder_states.rs index 90203f2d60c..a540113ab43 100644 --- a/beacon_node/http_api/src/builder_states.rs +++ b/beacon_node/http_api/src/builder_states.rs @@ -53,7 +53,7 @@ fn get_next_withdrawals_sanity_checks( } let fork = chain.spec.fork_name_at_slot::(proposal_slot); - if let ForkName::Base | ForkName::Altair | ForkName::Merge = fork { + if let ForkName::Base | ForkName::Altair | ForkName::Bellatrix = fork { return Err(warp_utils::reject::custom_bad_request( "the specified state is a pre-capella state.".to_string(), )); diff --git a/beacon_node/http_api/src/lib.rs 
b/beacon_node/http_api/src/lib.rs index 46d3ad7569b..5f4620589eb 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -4296,7 +4296,7 @@ pub fn serve( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P1, async move { let current_slot = chain.slot_clock.now_or_genesis().unwrap_or(Slot::new(0)); - let merge_readiness = chain.check_merge_readiness(current_slot).await; + let merge_readiness = chain.check_bellatrix_readiness(current_slot).await; Ok::<_, warp::reject::Rejection>( warp::reply::json(&api_types::GenericResponse::from(merge_readiness)) .into_response(), diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 52e77753ce4..0d176e6a53a 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -20,8 +20,8 @@ use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ AbstractExecPayload, BeaconBlockRef, BlobSidecarList, EthSpec, ExecPayload, ExecutionBlockHash, - ForkName, FullPayload, FullPayloadMerge, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, - VariableList, + ForkName, FullPayload, FullPayloadBellatrix, Hash256, SignedBeaconBlock, + SignedBlindedBeaconBlock, VariableList, }; use warp::http::StatusCode; use warp::{reply::Response, Rejection, Reply}; @@ -80,7 +80,7 @@ pub async fn publish_block { crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block)) .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; @@ -331,8 +331,8 @@ pub async fn reconstruct_block( let fork_name = chain .spec .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())); - if fork_name == ForkName::Merge { - let payload: FullPayload = FullPayloadMerge::default().into(); + if fork_name == ForkName::Bellatrix { + let payload: FullPayload = FullPayloadBellatrix::default().into(); ProvenancedPayload::Local(FullPayloadContents::Payload(payload.into())) } else { Err(warp_utils::reject::custom_server_error( diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 03f530db4d1..91c5b62d0b2 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -438,7 +438,7 @@ pub fn gossipsub_config( let topic_bytes = message.topic.as_str().as_bytes(); match fork_context.current_fork() { ForkName::Altair - | ForkName::Merge + | ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { @@ -461,7 +461,7 @@ pub fn gossipsub_config( } } let message_domain_valid_snappy = gossipsub_config_params.message_domain_valid_snappy; - let is_merge_enabled = fork_context.fork_exists(ForkName::Merge); + let is_bellatrix_enabled = fork_context.fork_exists(ForkName::Bellatrix); let gossip_message_id = move |message: &gossipsub::Message| { gossipsub::MessageId::from( &Sha256::digest( @@ -481,7 +481,7 @@ pub fn gossipsub_config( gossipsub::ConfigBuilder::default() .max_transmit_size(gossip_max_size( - is_merge_enabled, + is_bellatrix_enabled, gossipsub_config_params.gossip_max_size, )) .heartbeat_interval(load.heartbeat_interval) diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index 287f0a3f5fd..42a31d3480a 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -184,13 +184,13 @@ mod tests { fn fork_context(fork_name: 
ForkName) -> ForkContext { let mut chain_spec = Spec::default_spec(); let altair_fork_epoch = Epoch::new(1); - let merge_fork_epoch = Epoch::new(2); + let bellatrix_fork_epoch = Epoch::new(2); let capella_fork_epoch = Epoch::new(3); let deneb_fork_epoch = Epoch::new(4); let electra_fork_epoch = Epoch::new(5); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); - chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); chain_spec.electra_fork_epoch = Some(electra_fork_epoch); @@ -198,7 +198,7 @@ mod tests { let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Bellatrix => bellatrix_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Electra => electra_fork_epoch.start_slot(Spec::slots_per_epoch()), diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 4cbb6582583..482d1d96b4a 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -18,8 +18,8 @@ use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobSidecar, ChainSpec, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockMerge, + SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, }; use unsigned_varint::codec::Uvi; @@ -403,8 +403,8 @@ fn context_bytes( SignedBeaconBlock::Capella { .. } => { fork_context.to_context_bytes(ForkName::Capella) } - SignedBeaconBlock::Merge { .. } => { - fork_context.to_context_bytes(ForkName::Merge) + SignedBeaconBlock::Bellatrix { .. } => { + fork_context.to_context_bytes(ForkName::Bellatrix) } SignedBeaconBlock::Altair { .. 
} => { fork_context.to_context_bytes(ForkName::Altair) @@ -658,8 +658,10 @@ fn handle_rpc_response( Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Merge) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( - SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes(decoded_buffer)?), + Some(ForkName::Bellatrix) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Bellatrix(SignedBeaconBlockBellatrix::from_ssz_bytes( + decoded_buffer, + )?), )))), Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( @@ -689,8 +691,10 @@ fn handle_rpc_response( Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Merge) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( - SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes(decoded_buffer)?), + Some(ForkName::Bellatrix) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Bellatrix(SignedBeaconBlockBellatrix::from_ssz_bytes( + decoded_buffer, + )?), )))), Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( @@ -743,7 +747,7 @@ mod tests { use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use types::{ blob_sidecar::BlobIdentifier, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, - BeaconBlockMerge, EmptyBlock, Epoch, FullPayload, Signature, Slot, + BeaconBlockBellatrix, EmptyBlock, Epoch, FullPayload, Signature, Slot, }; type Spec = types::MainnetEthSpec; @@ -751,13 +755,13 @@ mod tests { fn fork_context(fork_name: ForkName) -> ForkContext { let mut chain_spec = Spec::default_spec(); let altair_fork_epoch = Epoch::new(1); - let merge_fork_epoch = Epoch::new(2); + let bellatrix_fork_epoch = Epoch::new(2); let capella_fork_epoch = Epoch::new(3); let deneb_fork_epoch = Epoch::new(4); let electra_fork_epoch = Epoch::new(5); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); - chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); chain_spec.electra_fork_epoch = Some(electra_fork_epoch); @@ -765,7 +769,7 @@ mod tests { let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Bellatrix => bellatrix_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Electra => electra_fork_epoch.start_slot(Spec::slots_per_epoch()), @@ -790,32 +794,38 @@ mod tests { Arc::new(BlobSidecar::empty()) } - /// Merge block with length < max_rpc_size. - fn merge_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> SignedBeaconBlock { - let mut block: BeaconBlockMerge<_, FullPayload> = - BeaconBlockMerge::empty(&Spec::default_spec()); + /// Bellatrix block with length < max_rpc_size. 
+ fn bellatrix_block_small( + fork_context: &ForkContext, + spec: &ChainSpec, + ) -> SignedBeaconBlock { + let mut block: BeaconBlockBellatrix<_, FullPayload> = + BeaconBlockBellatrix::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; - let block = BeaconBlock::Merge(block); + let block = BeaconBlock::Bellatrix(block); assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); SignedBeaconBlock::from_block(block, Signature::empty()) } - /// Merge block with length > MAX_RPC_SIZE. - /// The max limit for a merge block is in the order of ~16GiB which wouldn't fit in memory. - /// Hence, we generate a merge block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. - fn merge_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> SignedBeaconBlock { - let mut block: BeaconBlockMerge<_, FullPayload> = - BeaconBlockMerge::empty(&Spec::default_spec()); + /// Bellatrix block with length > MAX_RPC_SIZE. + /// The max limit for a Bellatrix block is in the order of ~16GiB which wouldn't fit in memory. + /// Hence, we generate a Bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. + fn bellatrix_block_large( + fork_context: &ForkContext, + spec: &ChainSpec, + ) -> SignedBeaconBlock { + let mut block: BeaconBlockBellatrix<_, FullPayload> = + BeaconBlockBellatrix::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; - let block = BeaconBlock::Merge(block); + let block = BeaconBlock::Bellatrix(block); assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); SignedBeaconBlock::from_block(block, Signature::empty()) } @@ -1172,25 +1182,27 @@ mod tests { Ok(Some(RPCResponse::BlocksByRange(Arc::new(altair_block())))) ); - let merge_block_small = merge_block_small(&fork_context(ForkName::Merge), &chain_spec); - let merge_block_large = merge_block_large(&fork_context(ForkName::Merge), &chain_spec); + let bellatrix_block_small = + bellatrix_block_small(&fork_context(ForkName::Bellatrix), &chain_spec); + let bellatrix_block_large = + bellatrix_block_large(&fork_context(ForkName::Bellatrix), &chain_spec); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new( - merge_block_small.clone() + bellatrix_block_small.clone() ))), - ForkName::Merge, + ForkName::Bellatrix, &chain_spec, ), Ok(Some(RPCResponse::BlocksByRange(Arc::new( - merge_block_small.clone() + bellatrix_block_small.clone() )))) ); let mut encoded = - encode_without_length_checks(merge_block_large.as_ssz_bytes(), ForkName::Merge) + encode_without_length_checks(bellatrix_block_large.as_ssz_bytes(), ForkName::Bellatrix) .unwrap(); assert!( @@ -1198,7 +1210,7 @@ mod tests { decode_response( SupportedProtocol::BlocksByRangeV2, &mut encoded, - ForkName::Merge, + ForkName::Bellatrix, &chain_spec, ) .unwrap_err(), @@ -1248,16 +1260,18 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new( - merge_block_small.clone() + bellatrix_block_small.clone() ))), - ForkName::Merge, + ForkName::Bellatrix, &chain_spec, ), - 
Ok(Some(RPCResponse::BlocksByRoot(Arc::new(merge_block_small)))) + Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + bellatrix_block_small + )))) ); let mut encoded = - encode_without_length_checks(merge_block_large.as_ssz_bytes(), ForkName::Merge) + encode_without_length_checks(bellatrix_block_large.as_ssz_bytes(), ForkName::Bellatrix) .unwrap(); assert!( @@ -1265,7 +1279,7 @@ mod tests { decode_response( SupportedProtocol::BlocksByRootV2, &mut encoded, - ForkName::Merge, + ForkName::Bellatrix, &chain_spec, ) .unwrap_err(), diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index f65586087c2..12a7f09338e 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -17,8 +17,8 @@ use tokio_util::{ compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockElectra, - BeaconBlockMerge, BlobSidecar, ChainSpec, EmptyBlock, EthSpec, ForkContext, ForkName, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, + BeaconBlockElectra, BlobSidecar, ChainSpec, EmptyBlock, EthSpec, ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, SignedBeaconBlock, @@ -53,8 +53,8 @@ lazy_static! { .as_ssz_bytes() .len(); - pub static ref SIGNED_BEACON_BLOCK_MERGE_MIN: usize = SignedBeaconBlock::::from_block( - BeaconBlock::Merge(BeaconBlockMerge::::empty(&MainnetEthSpec::default_spec())), + pub static ref SIGNED_BEACON_BLOCK_BELLATRIX_MIN: usize = SignedBeaconBlock::::from_block( + BeaconBlock::Bellatrix(BeaconBlockBellatrix::::empty(&MainnetEthSpec::default_spec())), Signature::empty(), ) .as_ssz_bytes() @@ -74,14 +74,14 @@ lazy_static! { .as_ssz_bytes() .len(); - /// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. + /// The `BeaconBlockBellatrix` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network /// with `max_chunk_size`. 
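// (Aside, not part of the patch: a sketch of how the per-fork size limits
// defined below are meant to be consumed when bounding an inbound chunk.
// `is_out_of_bounds` and the error value are assumptions here, not
// necessarily this crate's exact API.)
//
//     let limits = rpc_block_limits_by_fork(fork_context.current_fork());
//     if limits.is_out_of_bounds(length, max_rpc_size(&fork_context, max_chunk_size)) {
//         return Err(RPCError::InvalidData("block chunk outside size bounds".to_string()));
//     }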
- pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize = + pub static ref SIGNED_BEACON_BLOCK_BELLATRIX_MAX: usize = // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX - + types::ExecutionPayload::::max_execution_payload_merge_size() // adding max size of execution payload (~16gb) + + types::ExecutionPayload::::max_execution_payload_bellatrix_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD @@ -134,7 +134,7 @@ const REQUEST_TIMEOUT: u64 = 15; pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize { match fork_context.current_fork() { ForkName::Altair | ForkName::Base => max_chunk_size / 10, - ForkName::Merge => max_chunk_size, + ForkName::Bellatrix => max_chunk_size, ForkName::Capella => max_chunk_size, ForkName::Deneb => max_chunk_size, ForkName::Electra => max_chunk_size, @@ -154,20 +154,20 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair blocks *SIGNED_BEACON_BLOCK_ALTAIR_MAX, // Altair block is larger than base blocks ), - ForkName::Merge => RpcLimits::new( - *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks - *SIGNED_BEACON_BLOCK_MERGE_MAX, // Merge block is larger than base and altair blocks + ForkName::Bellatrix => RpcLimits::new( + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks + *SIGNED_BEACON_BLOCK_BELLATRIX_MAX, // Bellatrix block is larger than base and altair blocks ), ForkName::Capella => RpcLimits::new( - *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks *SIGNED_BEACON_BLOCK_CAPELLA_MAX, // Capella block is larger than base, altair and merge blocks ), ForkName::Deneb => RpcLimits::new( - *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks *SIGNED_BEACON_BLOCK_DENEB_MAX, // Deneb block is larger than all prior fork blocks ), ForkName::Electra => RpcLimits::new( - *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks *SIGNED_BEACON_BLOCK_ELECTRA_MAX, // Electra block is larger than Deneb block ), } @@ -178,7 +178,9 @@ fn rpc_light_client_finality_update_limits_by_fork(current_fork: ForkName) -> Rp match ¤t_fork { ForkName::Base => RpcLimits::new(0, 0), - ForkName::Altair | ForkName::Merge => RpcLimits::new(altair_fixed_len, altair_fixed_len), + ForkName::Altair | ForkName::Bellatrix => { + RpcLimits::new(altair_fixed_len, altair_fixed_len) + } ForkName::Capella => { RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_FINALITY_UPDATE_CAPELLA_MAX) } @@ -196,7 +198,9 @@ fn rpc_light_client_optimistic_update_limits_by_fork(current_fork: ForkName) -> match ¤t_fork { ForkName::Base => RpcLimits::new(0, 0), - ForkName::Altair | ForkName::Merge => RpcLimits::new(altair_fixed_len, altair_fixed_len), + ForkName::Altair | ForkName::Bellatrix => { + RpcLimits::new(altair_fixed_len, altair_fixed_len) + } ForkName::Capella => RpcLimits::new( altair_fixed_len, *LIGHT_CLIENT_OPTIMISTIC_UPDATE_CAPELLA_MAX, @@ -216,7 +220,9 
@@ fn rpc_light_client_bootstrap_limits_by_fork(current_fork: ForkName) -> RpcLimit match ¤t_fork { ForkName::Base => RpcLimits::new(0, 0), - ForkName::Altair | ForkName::Merge => RpcLimits::new(altair_fixed_len, altair_fixed_len), + ForkName::Altair | ForkName::Bellatrix => { + RpcLimits::new(altair_fixed_len, altair_fixed_len) + } ForkName::Capella => RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_BOOTSTRAP_CAPELLA_MAX), ForkName::Deneb => RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_BOOTSTRAP_DENEB_MAX), ForkName::Electra => RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_BOOTSTRAP_ELECTRA_MAX), diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index a5cf03412d5..ed63ad014c9 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -10,8 +10,8 @@ use types::{ Attestation, AttesterSlashing, BlobSidecar, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockMerge, SignedBlsToExecutionChange, SignedContributionAndProof, + SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, + SignedBeaconBlockElectra, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; @@ -179,8 +179,8 @@ impl PubsubMessage { SignedBeaconBlockAltair::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), - Some(ForkName::Merge) => SignedBeaconBlock::::Merge( - SignedBeaconBlockMerge::from_ssz_bytes(data) + Some(ForkName::Bellatrix) => SignedBeaconBlock::::Bellatrix( + SignedBeaconBlockBellatrix::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), Some(ForkName::Capella) => SignedBeaconBlock::::Capella( @@ -219,7 +219,7 @@ impl PubsubMessage { Some( ForkName::Base | ForkName::Altair - | ForkName::Merge + | ForkName::Bellatrix | ForkName::Capella, ) | None => Err(format!( diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 7cbb5bac57f..c5f4b0c9ebb 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -47,7 +47,7 @@ pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> V match fork_name { ForkName::Base => BASE_CORE_TOPICS.to_vec(), ForkName::Altair => ALTAIR_CORE_TOPICS.to_vec(), - ForkName::Merge => vec![], + ForkName::Bellatrix => vec![], ForkName::Capella => CAPELLA_CORE_TOPICS.to_vec(), ForkName::Deneb => { // All of deneb blob topics are core topics diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 40ae341bd26..32e3a034666 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -21,13 +21,13 @@ use tempfile::Builder as TempBuilder; pub fn fork_context(fork_name: ForkName) -> ForkContext { let mut chain_spec = E::default_spec(); let altair_fork_epoch = Epoch::new(1); - let merge_fork_epoch = Epoch::new(2); + let bellatrix_fork_epoch = Epoch::new(2); let capella_fork_epoch = Epoch::new(3); let deneb_fork_epoch = Epoch::new(4); let electra_fork_epoch = Epoch::new(5); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); - chain_spec.bellatrix_fork_epoch = 
Some(merge_fork_epoch); + chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); chain_spec.electra_fork_epoch = Some(electra_fork_epoch); @@ -35,7 +35,7 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()), - ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()), + ForkName::Bellatrix => bellatrix_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Deneb => deneb_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Electra => electra_fork_epoch.start_slot(E::slots_per_epoch()), diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 5a21b462d43..a60af4db3db 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -13,37 +13,37 @@ use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BlobSidecar, ChainSpec, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BlobSidecar, ChainSpec, EmptyBlock, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, }; type E = MinimalEthSpec; -/// Merge block with length < max_rpc_size. -fn merge_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { - let mut block = BeaconBlockMerge::::empty(spec); +/// Bellatrix block with length < max_rpc_size. +fn bellatrix_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { + let mut block = BeaconBlockBellatrix::::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; - let block = BeaconBlock::Merge(block); + let block = BeaconBlock::Bellatrix(block); assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); block } -/// Merge block with length > MAX_RPC_SIZE. -/// The max limit for a merge block is in the order of ~16GiB which wouldn't fit in memory. -/// Hence, we generate a merge block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. -fn merge_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { - let mut block = BeaconBlockMerge::::empty(spec); +/// Bellatrix block with length > MAX_RPC_SIZE. +/// The max limit for a bellatrix block is in the order of ~16GiB which wouldn't fit in memory. +/// Hence, we generate a bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. 
+fn bellatrix_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { + let mut block = BeaconBlockBellatrix::::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; - let block = BeaconBlock::Merge(block); + let block = BeaconBlock::Bellatrix(block); assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); block } @@ -167,7 +167,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), &log, - ForkName::Merge, + ForkName::Bellatrix, &spec, Protocol::Tcp, ) @@ -187,9 +187,10 @@ fn test_tcp_blocks_by_range_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRange(Some(Arc::new(signed_full_block))); - let full_block = merge_block_small(&common::fork_context(ForkName::Merge), &spec); + let full_block = bellatrix_block_small(&common::fork_context(ForkName::Bellatrix), &spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_small = Response::BlocksByRange(Some(Arc::new(signed_full_block))); + let rpc_response_bellatrix_small = + Response::BlocksByRange(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -216,7 +217,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { } else if messages_received < 4 { assert_eq!(response, rpc_response_altair.clone()); } else { - assert_eq!(response, rpc_response_merge_small.clone()); + assert_eq!(response, rpc_response_bellatrix_small.clone()); } messages_received += 1; warn!(log, "Chunk received"); @@ -249,13 +250,13 @@ fn test_tcp_blocks_by_range_chunked_rpc() { warn!(log, "Receiver got request"); for i in 0..messages_to_send { // Send first third of responses as base blocks, - // second as altair and third as merge. + // second as altair and third as bellatrix. let rpc_response = if i < 2 { rpc_response_base.clone() } else if i < 4 { rpc_response_altair.clone() } else { - rpc_response_merge_small.clone() + rpc_response_bellatrix_small.clone() }; receiver.send_response(peer_id, id, rpc_response.clone()); } @@ -368,7 +369,7 @@ fn test_blobs_by_range_chunked_rpc() { warn!(log, "Receiver got request"); for _ in 0..messages_to_send { // Send first third of responses as base blocks, - // second as altair and third as merge. + // second as altair and third as bellatrix. 
receiver.send_response(peer_id, id, rpc_response.clone()); } // send the stream termination @@ -411,7 +412,7 @@ fn test_tcp_blocks_by_range_over_limit() { let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), &log, - ForkName::Merge, + ForkName::Bellatrix, &spec, Protocol::Tcp, ) @@ -421,9 +422,10 @@ fn test_tcp_blocks_by_range_over_limit() { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); // BlocksByRange Response - let full_block = merge_block_large(&common::fork_context(ForkName::Merge), &spec); + let full_block = bellatrix_block_large(&common::fork_context(ForkName::Bellatrix), &spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_large = Response::BlocksByRange(Some(Arc::new(signed_full_block))); + let rpc_response_bellatrix_large = + Response::BlocksByRange(Some(Arc::new(signed_full_block))); let request_id = messages_to_send as usize; // build the sender future @@ -458,7 +460,7 @@ fn test_tcp_blocks_by_range_over_limit() { // send the response warn!(log, "Receiver got request"); for _ in 0..messages_to_send { - let rpc_response = rpc_response_merge_large.clone(); + let rpc_response = rpc_response_bellatrix_large.clone(); receiver.send_response(peer_id, id, rpc_response.clone()); } // send the stream termination @@ -736,7 +738,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), &log, - ForkName::Merge, + ForkName::Bellatrix, &spec, Protocol::Tcp, ) @@ -764,9 +766,10 @@ fn test_tcp_blocks_by_root_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); - let full_block = merge_block_small(&common::fork_context(ForkName::Merge), &spec); + let full_block = bellatrix_block_small(&common::fork_context(ForkName::Bellatrix), &spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_small = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); + let rpc_response_bellatrix_small = + Response::BlocksByRoot(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -790,7 +793,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { } else if messages_received < 4 { assert_eq!(response, rpc_response_altair.clone()); } else { - assert_eq!(response, rpc_response_merge_small.clone()); + assert_eq!(response, rpc_response_bellatrix_small.clone()); } messages_received += 1; debug!(log, "Chunk received"); @@ -822,13 +825,13 @@ fn test_tcp_blocks_by_root_chunked_rpc() { debug!(log, "Receiver got request"); for i in 0..messages_to_send { - // Send equal base, altair and merge blocks + // Send equal base, altair and bellatrix blocks let rpc_response = if i < 2 { rpc_response_base.clone() } else if i < 4 { rpc_response_altair.clone() } else { - rpc_response_merge_small.clone() + rpc_response_bellatrix_small.clone() }; receiver.send_response(peer_id, id, rpc_response); debug!(log, "Sending message"); diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 1e72dc42578..2a0c7ea089b 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -433,9 +433,10 @@ impl 
NetworkBeaconProcessor { ForkName::Deneb | ForkName::Electra => { self.chain.spec.max_request_blocks_deneb } - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - self.chain.spec.max_request_blocks - } + ForkName::Base + | ForkName::Altair + | ForkName::Bellatrix + | ForkName::Capella => self.chain.spec.max_request_blocks, } }); if *req.count() > max_request_size { diff --git a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs index a874031ca27..14fc10ad6de 100644 --- a/beacon_node/store/src/impls/execution_payload.rs +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -1,8 +1,8 @@ use crate::{DBColumn, Error, StoreItem}; use ssz::{Decode, Encode}; use types::{ - BlobSidecarList, EthSpec, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, ExecutionPayloadMerge, + BlobSidecarList, EthSpec, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, }; macro_rules! impl_store_item { @@ -22,7 +22,7 @@ macro_rules! impl_store_item { } }; } -impl_store_item!(ExecutionPayloadMerge); +impl_store_item!(ExecutionPayloadBellatrix); impl_store_item!(ExecutionPayloadCapella); impl_store_item!(ExecutionPayloadDeneb); impl_store_item!(ExecutionPayloadElectra); @@ -51,7 +51,8 @@ impl StoreItem for ExecutionPayload { ExecutionPayloadCapella::from_ssz_bytes(bytes) .map(Self::Capella) .or_else(|_| { - ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge) + ExecutionPayloadBellatrix::from_ssz_bytes(bytes) + .map(Self::Bellatrix) }) }) }) diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 25438fc7e0a..c2a15c0266f 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -14,7 +14,7 @@ use types::*; /// /// Utilises lazy-loading from separate storage for its vector fields. 
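// (Aside, not part of the patch: a rough sketch of the lazy-loading flow the
// doc comment above describes; `load_partial_state` is a hypothetical helper,
// while the `TryInto<BeaconState<E>>` conversion is the one implemented below.)
//
//     let partial: PartialBeaconState<E> = load_partial_state(&store, state_root)?;
//     let full: BeaconState<E> = partial.try_into()?; // pulls in the stored vector fields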
#[superstruct( - variants(Base, Altair, Merge, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] @@ -66,9 +66,9 @@ where pub current_epoch_attestations: List, E::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] pub previous_epoch_participation: List, - #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] pub current_epoch_participation: List, // Finality @@ -78,21 +78,21 @@ where pub finalized_checkpoint: Checkpoint, // Inactivity - #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] pub inactivity_scores: List, // Light-client sync committees - #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] pub next_sync_committee: Arc>, // Execution #[superstruct( - only(Merge), - partial_getter(rename = "latest_execution_payload_header_merge") + only(Bellatrix), + partial_getter(rename = "latest_execution_payload_header_bellatrix") )] - pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, + pub latest_execution_payload_header: ExecutionPayloadHeaderBellatrix, #[superstruct( only(Capella), partial_getter(rename = "latest_execution_payload_header_capella") @@ -199,11 +199,11 @@ impl PartialBeaconState { ], [] ), - BeaconState::Merge(s) => impl_from_state_forgetful!( + BeaconState::Bellatrix(s) => impl_from_state_forgetful!( s, outer, - Merge, - PartialBeaconStateMerge, + Bellatrix, + PartialBeaconStateBellatrix, [ previous_epoch_participation, current_epoch_participation, @@ -467,10 +467,10 @@ impl TryInto> for PartialBeaconState { ], [] ), - PartialBeaconState::Merge(inner) => impl_try_into_beacon_state!( + PartialBeaconState::Bellatrix(inner) => impl_try_into_beacon_state!( inner, - Merge, - BeaconStateMerge, + Bellatrix, + BeaconStateBellatrix, [ previous_epoch_participation, current_epoch_participation, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index feff1d391a9..838be4beffb 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1023,7 +1023,7 @@ impl ForkVersionDeserialize for SsePayloadAttributes { fork_name: ForkName, ) -> Result { match fork_name { - ForkName::Merge => serde_json::from_value(value) + ForkName::Bellatrix => serde_json::from_value(value) .map(Self::V1) .map_err(serde::de::Error::custom), ForkName::Capella => serde_json::from_value(value) @@ -1598,7 +1598,7 @@ impl FullBlockContents { fork_name: ForkName, ) -> Result { match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) .map(|block| FullBlockContents::Block(block)) } @@ -1658,7 +1658,7 @@ impl ForkVersionDeserialize for FullBlockContents { fork_name: ForkName, ) -> Result { match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + ForkName::Base | ForkName::Altair | 
ForkName::Bellatrix | ForkName::Capella => { Ok(FullBlockContents::Block( BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?, )) @@ -1758,7 +1758,7 @@ impl PublishBlockRequest { /// SSZ decode with fork variant determined by `fork_name`. pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) .map(|block| PublishBlockRequest::Block(Arc::new(block))) } @@ -1844,7 +1844,7 @@ impl TryFrom>> for PublishBlockRequest { match *block { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) - | SignedBeaconBlock::Merge(_) + | SignedBeaconBlock::Bellatrix(_) | SignedBeaconBlock::Capella(_) => Ok(PublishBlockRequest::Block(block)), SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => Err( "post-Deneb block contents cannot be fully constructed from just the signed block", @@ -1953,7 +1953,7 @@ impl ForkVersionDeserialize for FullPayloadContents { fork_name: ForkName, ) -> Result { match fork_name { - ForkName::Merge | ForkName::Capella => serde_json::from_value(value) + ForkName::Bellatrix | ForkName::Capella => serde_json::from_value(value) .map(Self::Payload) .map_err(serde::de::Error::custom), ForkName::Deneb | ForkName::Electra => serde_json::from_value(value) diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 27fb81a5139..50a5fcc3a50 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -33,7 +33,7 @@ GENESIS_DELAY: 6000 # Altair ALTAIR_FORK_VERSION: 0x01000064 ALTAIR_FORK_EPOCH: 512 -# Merge +# Bellatrix BELLATRIX_FORK_VERSION: 0x02000064 BELLATRIX_FORK_EPOCH: 385536 # Capella diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index bd384cfe497..6a399b957d2 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -22,7 +22,7 @@ GENESIS_DELAY: 300 # Altair ALTAIR_FORK_VERSION: 0x02017000 ALTAIR_FORK_EPOCH: 0 -# Merge +# Bellatrix BELLATRIX_FORK_VERSION: 0x03017000 BELLATRIX_FORK_EPOCH: 0 TERMINAL_TOTAL_DIFFICULTY: 0 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 2df40798c11..72a48679118 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -21,7 +21,7 @@ GENESIS_DELAY: 86400 ALTAIR_FORK_VERSION: 0x90000070 ALTAIR_FORK_EPOCH: 50 -# Merge +# Bellatrix BELLATRIX_FORK_VERSION: 0x90000071 BELLATRIX_FORK_EPOCH: 100 TERMINAL_TOTAL_DIFFICULTY: 17000000000000000 diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index 6bf74645000..4f54b2ee76b 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -11,7 +11,7 @@ pub use crate::manual_slot_clock::ManualSlotClock as TestingSlotClock; pub use crate::manual_slot_clock::ManualSlotClock; pub use crate::system_time_slot_clock::SystemTimeSlotClock; pub use metrics::scrape_for_metrics; -use 
types::consts::merge::INTERVALS_PER_SLOT; +use types::consts::bellatrix::INTERVALS_PER_SLOT; pub use types::Slot; /// A clock that reports the current slot. diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 6e3f6717ede..2846a0112cd 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -13,7 +13,7 @@ use std::collections::BTreeSet; use std::marker::PhantomData; use std::time::Duration; use types::{ - consts::merge::INTERVALS_PER_SLOT, AbstractExecPayload, AttestationShufflingId, + consts::bellatrix::INTERVALS_PER_SLOT, AbstractExecPayload, AttestationShufflingId, AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, @@ -751,7 +751,7 @@ where BeaconBlockRef::Electra(_) | BeaconBlockRef::Deneb(_) | BeaconBlockRef::Capella(_) - | BeaconBlockRef::Merge(_) + | BeaconBlockRef::Bellatrix(_) | BeaconBlockRef::Altair(_) => { // NOTE: Processing justification & finalization requires the progressive // balances cache, but we cannot initialize it here as we only have an diff --git a/consensus/state_processing/src/common/get_attestation_participation.rs b/consensus/state_processing/src/common/get_attestation_participation.rs index d27a00c3826..fc09dad1f4e 100644 --- a/consensus/state_processing/src/common/get_attestation_participation.rs +++ b/consensus/state_processing/src/common/get_attestation_participation.rs @@ -47,7 +47,7 @@ pub fn get_attestation_participation_flag_indices( match state { &BeaconState::Base(_) | &BeaconState::Altair(_) - | &BeaconState::Merge(_) + | &BeaconState::Bellatrix(_) | &BeaconState::Capella(_) => { if is_matching_target && inclusion_delay <= E::slots_per_epoch() { participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 16b4e74ece9..520b58a8af3 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -58,7 +58,7 @@ pub fn slash_validator( let proposer_reward = match state { BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => whistleblower_reward diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 5ab9a99a3b1..a84f359389c 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -63,7 +63,7 @@ pub fn initialize_beacon_state_from_eth1( .bellatrix_fork_epoch .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) { - // this will set state.latest_execution_payload_header = ExecutionPayloadHeaderMerge::default() + // this will set state.latest_execution_payload_header = ExecutionPayloadHeaderBellatrix::default() upgrade_to_bellatrix(&mut state, spec)?; // Remove intermediate Altair fork from `state.fork`. @@ -71,8 +71,8 @@ pub fn initialize_beacon_state_from_eth1( // Override latest execution payload header. 
// See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing - if let Some(ExecutionPayloadHeader::Merge(ref header)) = execution_payload_header { - *state.latest_execution_payload_header_merge_mut()? = header.clone(); + if let Some(ExecutionPayloadHeader::Bellatrix(ref header)) = execution_payload_header { + *state.latest_execution_payload_header_bellatrix_mut()? = header.clone(); } } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index a8447b7714a..2efa1218829 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -418,9 +418,9 @@ pub fn process_execution_payload>( partially_verify_execution_payload::(state, state.slot(), body, spec)?; let payload = body.execution_payload()?; match state.latest_execution_payload_header_mut()? { - ExecutionPayloadHeaderRefMut::Merge(header_mut) => { + ExecutionPayloadHeaderRefMut::Bellatrix(header_mut) => { match payload.to_execution_payload_header() { - ExecutionPayloadHeader::Merge(header) => *header_mut = header, + ExecutionPayloadHeader::Bellatrix(header) => *header_mut = header, _ => return Err(BlockProcessingError::IncorrectStateType), } } @@ -449,14 +449,14 @@ pub fn process_execution_payload>( /// These functions will definitely be called before the merge. Their entire purpose is to check if /// the merge has happened or if we're on the transition block. Thus we don't want to propagate -/// errors from the `BeaconState` being an earlier variant than `BeaconStateMerge` as we'd have to +/// errors from the `BeaconState` being an earlier variant than `BeaconStateBellatrix` as we'd have to /// repeatedly write code to treat these errors as false. /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_complete pub fn is_merge_transition_complete(state: &BeaconState) -> bool { match state { // We must check defaultness against the payload header with 0x0 roots, as that's what's meant // by `ExecutionPayloadHeader()` in the spec. 
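[Editor's note: a self-contained sketch, not part of the patch, of the defaultness check the doc comment above describes; the hunk that follows implements it on the real types. `State` and `PayloadHeader` are stand-ins for `BeaconState` and `ExecutionPayloadHeader`.]

#[derive(Default, PartialEq)]
struct PayloadHeader {
    block_hash: [u8; 32], // all zeroes in the "default with zero roots" case
}

enum State {
    Altair,
    Bellatrix { latest_execution_payload_header: PayloadHeader },
}

fn is_merge_transition_complete(state: &State) -> bool {
    match state {
        // The merge is complete once the header differs from the all-zero default.
        State::Bellatrix { latest_execution_payload_header } => {
            *latest_execution_payload_header != PayloadHeader::default()
        }
        // Earlier variants simply report "not complete" instead of an error.
        _ => false,
    }
}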
- BeaconState::Merge(_) => state + BeaconState::Bellatrix(_) => state .latest_execution_payload_header() .map(|header| !header.is_default_with_zero_roots()) .unwrap_or(false), @@ -557,7 +557,7 @@ pub fn process_withdrawals>( spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { match state { - BeaconState::Merge(_) => Ok(()), + BeaconState::Bellatrix(_) => Ok(()), BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => { let expected_withdrawals = get_expected_withdrawals(state, spec)?; let expected_root = expected_withdrawals.tree_hash_root(); diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index d812306a0f7..3aefcf8a9c5 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -273,7 +273,7 @@ pub fn process_attestations>( )?; } BeaconBlockBodyRef::Altair(_) - | BeaconBlockBodyRef::Merge(_) + | BeaconBlockBodyRef::Bellatrix(_) | BeaconBlockBodyRef::Capella(_) | BeaconBlockBodyRef::Deneb(_) | BeaconBlockBodyRef::Electra(_) => { diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 163b2cff7a9..9468893f762 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -390,7 +390,7 @@ where let domain = match state { BeaconState::Base(_) | BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) => spec.get_domain( exit.epoch, Domain::VoluntaryExit, diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 73454559dfd..c904ba55f0a 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -35,7 +35,7 @@ pub fn verify_attestation_for_block_inclusion<'ctxt, E: EthSpec>( match state { BeaconState::Base(_) | BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) => { verify!( state.slot() <= data.slot.safe_add(E::slots_per_epoch())?, diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index b51aa23f370..55e8853f3f8 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -44,7 +44,7 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => altair::process_epoch(state, spec), diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index cc28340962a..6554423199f 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -54,7 +54,7 @@ pub fn per_slot_processing( if spec.altair_fork_epoch == Some(state.current_epoch()) { upgrade_to_altair(state, spec)?; } - // If the Merge fork epoch is reached, perform an irregular state upgrade. 
+ // If the Bellatrix fork epoch is reached, perform an irregular state upgrade. if spec.bellatrix_fork_epoch == Some(state.current_epoch()) { upgrade_to_bellatrix(state, spec)?; } diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs index 98602c66ba3..93cafa73d03 100644 --- a/consensus/state_processing/src/upgrade.rs +++ b/consensus/state_processing/src/upgrade.rs @@ -1,11 +1,11 @@ pub mod altair; +pub mod bellatrix; pub mod capella; pub mod deneb; pub mod electra; -pub mod merge; pub use altair::upgrade_to_altair; +pub use bellatrix::upgrade_to_bellatrix; pub use capella::upgrade_to_capella; pub use deneb::upgrade_to_deneb; pub use electra::upgrade_to_electra; -pub use merge::upgrade_to_bellatrix; diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/bellatrix.rs similarity index 90% rename from consensus/state_processing/src/upgrade/merge.rs rename to consensus/state_processing/src/upgrade/bellatrix.rs index 02705743ceb..f23e571cd12 100644 --- a/consensus/state_processing/src/upgrade/merge.rs +++ b/consensus/state_processing/src/upgrade/bellatrix.rs @@ -1,10 +1,10 @@ use std::mem; use types::{ - BeaconState, BeaconStateError as Error, BeaconStateMerge, ChainSpec, EpochCache, EthSpec, - ExecutionPayloadHeaderMerge, Fork, + BeaconState, BeaconStateBellatrix, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, + ExecutionPayloadHeaderBellatrix, Fork, }; -/// Transform a `Altair` state into an `Merge` state. +/// Transform an `Altair` state into a `Bellatrix` state. pub fn upgrade_to_bellatrix( pre_state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), Error> { @@ -17,7 +17,7 @@ pub fn upgrade_to_bellatrix( // // Fixed size vectors get cloned because replacing them would require the same size // allocation as cloning. - let post = BeaconState::Merge(BeaconStateMerge { + let post = BeaconState::Bellatrix(BeaconStateBellatrix { // Versioning genesis_time: pre.genesis_time, genesis_validators_root: pre.genesis_validators_root, @@ -57,7 +57,7 @@ pub fn upgrade_to_bellatrix( current_sync_committee: pre.current_sync_committee.clone(), next_sync_committee: pre.next_sync_committee.clone(), // Execution - latest_execution_payload_header: <ExecutionPayloadHeaderMerge<E>>::default(), + latest_execution_payload_header: <ExecutionPayloadHeaderBellatrix<E>>::default(), // Caches total_active_balance: pre.total_active_balance, progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index 51e29d10f3c..ae0dbde7678 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -4,13 +4,13 @@ use types::{ Fork, List, }; -/// Transform a `Merge` state into an `Capella` state. +/// Transform a `Bellatrix` state into a `Capella` state. pub fn upgrade_to_capella( pre_state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), Error> { let epoch = pre_state.current_epoch(); - let pre = pre_state.as_merge_mut()?; + let pre = pre_state.as_bellatrix_mut()?; // Where possible, use something like `mem::take` to move fields from behind the &mut // reference. For other fields that don't have a good default value, use `clone`. diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 14874f0204f..5af53b3fa17 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -12,7 +12,7 @@ use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`.
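[Editor's note: a minimal sketch, not part of the patch, of the `mem::take`-versus-`clone` pattern the upgrade comments above describe; `Pre` and `Post` are simplified stand-ins for the Altair and Bellatrix state variants.]

use std::mem;

#[derive(Default)]
struct Balances(Vec<u64>);

struct Pre { balances: Balances, genesis_time: u64 }
struct Post { balances: Balances, genesis_time: u64 }

fn upgrade(pre: &mut Pre) -> Post {
    Post {
        // Variable-length lists are moved out from behind the &mut reference,
        // leaving a cheap default behind and avoiding a large reallocation.
        balances: mem::take(&mut pre.balances),
        // Copy fields, and fields without a useful default, are copied or cloned.
        genesis_time: pre.genesis_time,
    }
}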
#[superstruct( - variants(Base, Altair, Merge, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), variant_attributes( derive( Debug, @@ -63,8 +63,8 @@ pub struct BeaconBlock = FullPayload pub body: BeaconBlockBodyBase, #[superstruct(only(Altair), partial_getter(rename = "body_altair"))] pub body: BeaconBlockBodyAltair, - #[superstruct(only(Merge), partial_getter(rename = "body_merge"))] - pub body: BeaconBlockBodyMerge, + #[superstruct(only(Bellatrix), partial_getter(rename = "body_bellatrix"))] + pub body: BeaconBlockBodyBellatrix, #[superstruct(only(Capella), partial_getter(rename = "body_capella"))] pub body: BeaconBlockBodyCapella, #[superstruct(only(Deneb), partial_getter(rename = "body_deneb"))] @@ -130,7 +130,7 @@ impl> BeaconBlock { .map(BeaconBlock::Electra) .or_else(|_| BeaconBlockDeneb::from_ssz_bytes(bytes).map(BeaconBlock::Deneb)) .or_else(|_| BeaconBlockCapella::from_ssz_bytes(bytes).map(BeaconBlock::Capella)) - .or_else(|_| BeaconBlockMerge::from_ssz_bytes(bytes).map(BeaconBlock::Merge)) + .or_else(|_| BeaconBlockBellatrix::from_ssz_bytes(bytes).map(BeaconBlock::Bellatrix)) .or_else(|_| BeaconBlockAltair::from_ssz_bytes(bytes).map(BeaconBlock::Altair)) .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) } @@ -221,7 +221,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, E, Payl match self { BeaconBlockRef::Base { .. } => ForkName::Base, BeaconBlockRef::Altair { .. } => ForkName::Altair, - BeaconBlockRef::Merge { .. } => ForkName::Merge, + BeaconBlockRef::Bellatrix { .. } => ForkName::Bellatrix, BeaconBlockRef::Capella { .. } => ForkName::Capella, BeaconBlockRef::Deneb { .. } => ForkName::Deneb, BeaconBlockRef::Electra { .. } => ForkName::Electra, @@ -466,15 +466,15 @@ impl> BeaconBlockAltair } } -impl> EmptyBlock for BeaconBlockMerge { - /// Returns an empty Merge block to be used during genesis. +impl> EmptyBlock for BeaconBlockBellatrix { + /// Returns an empty Bellatrix block to be used during genesis. fn empty(spec: &ChainSpec) -> Self { - BeaconBlockMerge { + BeaconBlockBellatrix { slot: spec.genesis_slot, proposer_index: 0, parent_root: Hash256::zero(), state_root: Hash256::zero(), - body: BeaconBlockBodyMerge { + body: BeaconBlockBodyBellatrix { randao_reveal: Signature::empty(), eth1_data: Eth1Data { deposit_root: Hash256::zero(), @@ -488,7 +488,7 @@ impl> EmptyBlock for BeaconBlockMerg deposits: VariableList::empty(), voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), - execution_payload: Payload::Merge::default(), + execution_payload: Payload::Bellatrix::default(), }, } } @@ -753,7 +753,7 @@ macro_rules! impl_from { impl_from!(BeaconBlockBase, >, >, |body: BeaconBlockBodyBase<_, _>| body.into()); impl_from!(BeaconBlockAltair, >, >, |body: BeaconBlockBodyAltair<_, _>| body.into()); -impl_from!(BeaconBlockMerge, >, >, |body: BeaconBlockBodyMerge<_, _>| body.into()); +impl_from!(BeaconBlockBellatrix, >, >, |body: BeaconBlockBodyBellatrix<_, _>| body.into()); impl_from!(BeaconBlockCapella, >, >, |body: BeaconBlockBodyCapella<_, _>| body.into()); impl_from!(BeaconBlockDeneb, >, >, |body: BeaconBlockBodyDeneb<_, _>| body.into()); impl_from!(BeaconBlockElectra, >, >, |body: BeaconBlockBodyElectra<_, _>| body.into()); @@ -786,7 +786,7 @@ macro_rules! 
impl_clone_as_blinded { impl_clone_as_blinded!(BeaconBlockBase, >, >); impl_clone_as_blinded!(BeaconBlockAltair, >, >); -impl_clone_as_blinded!(BeaconBlockMerge, >, >); +impl_clone_as_blinded!(BeaconBlockBellatrix, >, >); impl_clone_as_blinded!(BeaconBlockCapella, >, >); impl_clone_as_blinded!(BeaconBlockDeneb, >, >); impl_clone_as_blinded!(BeaconBlockElectra, >, >); diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index a55c16b80d5..c3077c4ab68 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -29,7 +29,7 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; /// /// This *superstruct* abstracts over the hard-fork. #[superstruct( - variants(Base, Altair, Merge, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), variant_attributes( derive( Debug, @@ -67,14 +67,17 @@ pub struct BeaconBlockBody = FullPay pub attestations: VariableList, E::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] pub sync_aggregate: SyncAggregate, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded // payloads. - #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + #[superstruct( + only(Bellatrix), + partial_getter(rename = "execution_payload_bellatrix") + )] #[serde(flatten)] - pub execution_payload: Payload::Merge, + pub execution_payload: Payload::Bellatrix, #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] #[serde(flatten)] pub execution_payload: Payload::Capella, @@ -107,7 +110,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, pub fn execution_payload(&self) -> Result, Error> { match self { Self::Base(_) | Self::Altair(_) => Err(Error::IncorrectStateVariant), - Self::Merge(body) => Ok(Payload::Ref::from(&body.execution_payload)), + Self::Bellatrix(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Electra(body) => Ok(Payload::Ref::from(&body.execution_payload)), @@ -121,7 +124,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, index: usize, ) -> Result, Error> { match self { - Self::Base(_) | Self::Altair(_) | Self::Merge(_) | Self::Capella(_) => { + Self::Base(_) | Self::Altair(_) | Self::Bellatrix(_) | Self::Capella(_) => { Err(Error::IncorrectStateVariant) } Self::Deneb(body) => { @@ -261,7 +264,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, match self { BeaconBlockBodyRef::Base { .. } => ForkName::Base, BeaconBlockBodyRef::Altair { .. } => ForkName::Altair, - BeaconBlockBodyRef::Merge { .. } => ForkName::Merge, + BeaconBlockBodyRef::Bellatrix { .. } => ForkName::Bellatrix, BeaconBlockBodyRef::Capella { .. } => ForkName::Capella, BeaconBlockBodyRef::Deneb { .. } => ForkName::Deneb, BeaconBlockBodyRef::Electra { .. 
} => ForkName::Electra, @@ -407,14 +410,14 @@ impl From>> } } -impl From>> +impl From>> for ( - BeaconBlockBodyMerge>, - Option>, + BeaconBlockBodyBellatrix>, + Option>, ) { - fn from(body: BeaconBlockBodyMerge>) -> Self { - let BeaconBlockBodyMerge { + fn from(body: BeaconBlockBodyBellatrix>) -> Self { + let BeaconBlockBodyBellatrix { randao_reveal, eth1_data, graffiti, @@ -424,11 +427,11 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayloadMerge { execution_payload }, + execution_payload: FullPayloadBellatrix { execution_payload }, } = body; ( - BeaconBlockBodyMerge { + BeaconBlockBodyBellatrix { randao_reveal, eth1_data, graffiti, @@ -438,7 +441,7 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: BlindedPayloadMerge { + execution_payload: BlindedPayloadBellatrix { execution_payload_header: From::from(&execution_payload), }, }, @@ -592,9 +595,9 @@ impl BeaconBlockBodyAltair> { } } -impl BeaconBlockBodyMerge> { - pub fn clone_as_blinded(&self) -> BeaconBlockBodyMerge> { - let BeaconBlockBodyMerge { +impl BeaconBlockBodyBellatrix> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyBellatrix> { + let BeaconBlockBodyBellatrix { randao_reveal, eth1_data, graffiti, @@ -604,10 +607,10 @@ impl BeaconBlockBodyMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayloadMerge { execution_payload }, + execution_payload: FullPayloadBellatrix { execution_payload }, } = self; - BeaconBlockBodyMerge { + BeaconBlockBodyBellatrix { randao_reveal: randao_reveal.clone(), eth1_data: eth1_data.clone(), graffiti: *graffiti, @@ -617,7 +620,7 @@ impl BeaconBlockBodyMerge> { deposits: deposits.clone(), voluntary_exits: voluntary_exits.clone(), sync_aggregate: sync_aggregate.clone(), - execution_payload: BlindedPayloadMerge { + execution_payload: BlindedPayloadBellatrix { execution_payload_header: execution_payload.into(), }, } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 5da81f6a752..f6bae53505b 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -206,7 +206,7 @@ impl From for Hash256 { /// The state of the `BeaconChain` at some slot. 
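[Editor's note: a sketch, not part of the patch, of the blinding step that the renamed `From` impl and `clone_as_blinded` above perform: the full Bellatrix payload is swapped for a commitment to it, so the block can travel without transaction bodies. The hasher is a stand-in for the real SSZ `tree_hash_root` commitment.]

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(Hash)]
struct FullPayloadBellatrix {
    transactions: Vec<Vec<u8>>,
}

struct BlindedPayloadBellatrix {
    payload_commitment: u64, // the real header commits via a 32-byte tree hash
}

fn clone_as_blinded(full: &FullPayloadBellatrix) -> BlindedPayloadBellatrix {
    let mut hasher = DefaultHasher::new();
    full.hash(&mut hasher);
    BlindedPayloadBellatrix { payload_commitment: hasher.finish() }
}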
#[superstruct( - variants(Base, Altair, Merge, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), variant_attributes( derive( Derivative, @@ -254,14 +254,14 @@ impl From for Hash256 { )), num_fields(all()), )), - Merge(metastruct( + Bellatrix(metastruct( mappings( map_beacon_state_bellatrix_fields(), map_beacon_state_bellatrix_tree_list_fields(mutable, fallible, groups(tree_lists)), map_beacon_state_bellatrix_tree_list_fields_immutable(groups(tree_lists)), ), bimappings(bimap_beacon_state_bellatrix_tree_list_fields( - other_type = "BeaconStateMerge", + other_type = "BeaconStateBellatrix", self_mutable, fallible, groups(tree_lists) @@ -392,10 +392,10 @@ where pub current_epoch_attestations: List, E::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] #[test_random(default)] pub previous_epoch_participation: List, - #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] #[test_random(default)] pub current_epoch_participation: List, @@ -415,25 +415,25 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] #[test_random(default)] pub inactivity_scores: List, // Light-client sync committees - #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] #[metastruct(exclude_from(tree_lists))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] #[metastruct(exclude_from(tree_lists))] pub next_sync_committee: Arc>, // Execution #[superstruct( - only(Merge), - partial_getter(rename = "latest_execution_payload_header_merge") + only(Bellatrix), + partial_getter(rename = "latest_execution_payload_header_bellatrix") )] #[metastruct(exclude_from(tree_lists))] - pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, + pub latest_execution_payload_header: ExecutionPayloadHeaderBellatrix, #[superstruct( only(Capella), partial_getter(rename = "latest_execution_payload_header_capella") @@ -601,7 +601,7 @@ impl BeaconState { match self { BeaconState::Base { .. } => ForkName::Base, BeaconState::Altair { .. } => ForkName::Altair, - BeaconState::Merge { .. } => ForkName::Merge, + BeaconState::Bellatrix { .. } => ForkName::Bellatrix, BeaconState::Capella { .. } => ForkName::Capella, BeaconState::Deneb { .. } => ForkName::Deneb, BeaconState::Electra { .. 
} => ForkName::Electra, @@ -884,7 +884,7 @@ impl BeaconState { pub fn latest_execution_payload_header(&self) -> Result, Error> { match self { BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), - BeaconState::Merge(state) => Ok(ExecutionPayloadHeaderRef::Merge( + BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRef::Bellatrix( &state.latest_execution_payload_header, )), BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRef::Capella( @@ -904,7 +904,7 @@ impl BeaconState { ) -> Result, Error> { match self { BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), - BeaconState::Merge(state) => Ok(ExecutionPayloadHeaderRefMut::Merge( + BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRefMut::Bellatrix( &mut state.latest_execution_payload_header, )), BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRefMut::Capella( @@ -1390,7 +1390,7 @@ impl BeaconState { &mut state.exit_cache, &mut state.epoch_cache, )), - BeaconState::Merge(state) => Ok(( + BeaconState::Bellatrix(state) => Ok(( &mut state.validators, &mut state.balances, &state.previous_epoch_participation, @@ -1555,7 +1555,7 @@ impl BeaconState { Ok(match self { BeaconState::Base(_) | BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) => self.get_validator_churn_limit(spec)?, BeaconState::Deneb(_) | BeaconState::Electra(_) => std::cmp::min( spec.max_per_epoch_activation_churn_limit, @@ -1674,7 +1674,7 @@ impl BeaconState { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation), - BeaconState::Merge(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Bellatrix(state) => Ok(&mut state.current_epoch_participation), BeaconState::Capella(state) => Ok(&mut state.current_epoch_participation), BeaconState::Deneb(state) => Ok(&mut state.current_epoch_participation), BeaconState::Electra(state) => Ok(&mut state.current_epoch_participation), @@ -1683,7 +1683,7 @@ impl BeaconState { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation), - BeaconState::Merge(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Bellatrix(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Capella(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Deneb(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Electra(state) => Ok(&mut state.previous_epoch_participation), @@ -1924,7 +1924,7 @@ impl BeaconState { any_pending_mutations |= self_field.has_pending_updates(); }); } - Self::Merge(self_inner) => { + Self::Bellatrix(self_inner) => { map_beacon_state_bellatrix_tree_list_fields_immutable!( self_inner, |_, self_field| { @@ -2052,14 +2052,14 @@ impl BeaconState { ); } (Self::Altair(_), _) => (), - (Self::Merge(self_inner), Self::Merge(base_inner)) => { + (Self::Bellatrix(self_inner), Self::Bellatrix(base_inner)) => { bimap_beacon_state_bellatrix_tree_list_fields!( self_inner, base_inner, |_, self_field, base_field| { self_field.rebase_on(base_field) } ); } - (Self::Merge(_), _) => (), + (Self::Bellatrix(_), _) => (), (Self::Capella(self_inner), Self::Capella(base_inner)) => { bimap_beacon_state_capella_tree_list_fields!( self_inner, @@ -2150,7 +2150,7 @@ impl BeaconState { /// /// We assume this value is stable across forks. 
This assumption is checked in the /// `check_num_fields_pow2` test. - pub const NUM_FIELDS_POW2: usize = BeaconStateMerge::::NUM_FIELDS.next_power_of_two(); + pub const NUM_FIELDS_POW2: usize = BeaconStateBellatrix::::NUM_FIELDS.next_power_of_two(); /// Specialised deserialisation method that uses the `ChainSpec` as context. #[allow(clippy::arithmetic_side_effects)] @@ -2185,7 +2185,7 @@ impl BeaconState { Self::Altair(inner) => { map_beacon_state_altair_tree_list_fields!(inner, |_, x| { x.apply_updates() }) } - Self::Merge(inner) => { + Self::Bellatrix(inner) => { map_beacon_state_bellatrix_tree_list_fields!(inner, |_, x| { x.apply_updates() }) } Self::Capella(inner) => { @@ -2241,7 +2241,7 @@ impl BeaconState { leaves.push(field.tree_hash_root()); }); } - BeaconState::Merge(state) => { + BeaconState::Bellatrix(state) => { map_beacon_state_bellatrix_fields!(state, |_, field| { leaves.push(field.tree_hash_root()); }); @@ -2331,7 +2331,7 @@ impl CompareFields for BeaconState { match (self, other) { (BeaconState::Base(x), BeaconState::Base(y)) => x.compare_fields(y), (BeaconState::Altair(x), BeaconState::Altair(y)) => x.compare_fields(y), - (BeaconState::Merge(x), BeaconState::Merge(y)) => x.compare_fields(y), + (BeaconState::Bellatrix(x), BeaconState::Bellatrix(y)) => x.compare_fields(y), (BeaconState::Capella(x), BeaconState::Capella(y)) => x.compare_fields(y), (BeaconState::Deneb(x), BeaconState::Deneb(y)) => x.compare_fields(y), (BeaconState::Electra(x), BeaconState::Electra(y)) => x.compare_fields(y), diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs index 523c94cf57e..fd5e51313f7 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -288,7 +288,7 @@ pub fn is_progressive_balances_enabled(state: &BeaconState) -> bo match state { BeaconState::Base(_) => false, BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => true, diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 012c063afef..35afe314957 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -2,8 +2,8 @@ use crate::{test_utils::*, ForkName}; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_chain::types::{ - test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateCapella, - BeaconStateDeneb, BeaconStateElectra, BeaconStateError, BeaconStateMerge, ChainSpec, Domain, + test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateBellatrix, + BeaconStateCapella, BeaconStateDeneb, BeaconStateElectra, BeaconStateError, ChainSpec, Domain, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, Slot, Vector, }; use ssz::Encode; @@ -413,7 +413,7 @@ fn check_num_fields_pow2() { let num_fields = match fork_name { ForkName::Base => BeaconStateBase::::NUM_FIELDS, ForkName::Altair => BeaconStateAltair::::NUM_FIELDS, - ForkName::Merge => BeaconStateMerge::::NUM_FIELDS, + ForkName::Bellatrix => BeaconStateBellatrix::::NUM_FIELDS, ForkName::Capella => BeaconStateCapella::::NUM_FIELDS, ForkName::Deneb => BeaconStateDeneb::::NUM_FIELDS, ForkName::Electra => BeaconStateElectra::::NUM_FIELDS, diff --git a/consensus/types/src/builder_bid.rs 
b/consensus/types/src/builder_bid.rs index 121d3f84277..9885f78474f 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,7 +1,7 @@ use crate::beacon_block_body::KzgCommitments; use crate::{ - ChainSpec, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, + ChainSpec, EthSpec, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName, ForkVersionDeserialize, SignedRoot, Uint256, }; use bls::PublicKeyBytes; @@ -11,7 +11,7 @@ use superstruct::superstruct; use tree_hash_derive::TreeHash; #[superstruct( - variants(Merge, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra), variant_attributes( derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone), serde(bound = "E: EthSpec", deny_unknown_fields) @@ -23,8 +23,8 @@ use tree_hash_derive::TreeHash; #[serde(bound = "E: EthSpec", deny_unknown_fields, untagged)] #[tree_hash(enum_behaviour = "transparent")] pub struct BuilderBid { - #[superstruct(only(Merge), partial_getter(rename = "header_merge"))] - pub header: ExecutionPayloadHeaderMerge, + #[superstruct(only(Bellatrix), partial_getter(rename = "header_bellatrix"))] + pub header: ExecutionPayloadHeaderBellatrix, #[superstruct(only(Capella), partial_getter(rename = "header_capella"))] pub header: ExecutionPayloadHeaderCapella, #[superstruct(only(Deneb), partial_getter(rename = "header_deneb"))] @@ -79,7 +79,9 @@ impl ForkVersionDeserialize for BuilderBid { |e| serde::de::Error::custom(format!("BuilderBid failed to deserialize: {:?}", e)); Ok(match fork_name { - ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Bellatrix => { + Self::Bellatrix(serde_json::from_value(value).map_err(convert_err)?) + } ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index c55fe77a1b9..b0346a14ef8 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -147,13 +147,13 @@ pub struct ChainSpec { pub altair_fork_epoch: Option, /* - * Merge hard fork params + * Bellatrix hard fork params */ pub inactivity_penalty_quotient_bellatrix: u64, pub min_slashing_penalty_quotient_bellatrix: u64, pub proportional_slashing_multiplier_bellatrix: u64, pub bellatrix_fork_version: [u8; 4], - /// The Merge fork epoch is optional, with `None` representing "Merge never happens". + /// The Bellatrix fork epoch is optional, with `None` representing "Bellatrix never happens". 
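[Editor's note: a sketch, not part of the patch, of the fork-versioned JSON dispatch that `ForkVersionDeserialize` above implements for `BuilderBid`: the fork name arrives out of band and selects which variant to decode. The types are illustrative stand-ins; `serde_json::from_value` is used as in the real code.]

use serde::Deserialize;
use serde_json::Value;

#[derive(Deserialize)]
struct BidBellatrix { value: u64 }

#[derive(Deserialize)]
struct BidCapella { value: u64 }

enum Bid {
    Bellatrix(BidBellatrix),
    Capella(BidCapella),
}

enum ForkName { Bellatrix, Capella }

fn deserialize_by_fork(value: Value, fork_name: ForkName) -> Result<Bid, serde_json::Error> {
    // The variant is chosen by the supplied fork name, not sniffed from the JSON.
    Ok(match fork_name {
        ForkName::Bellatrix => Bid::Bellatrix(serde_json::from_value(value)?),
        ForkName::Capella => Bid::Capella(serde_json::from_value(value)?),
    })
}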
pub bellatrix_fork_epoch: Option, pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, @@ -310,7 +310,7 @@ impl ChainSpec { _ => match self.capella_fork_epoch { Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, _ => match self.bellatrix_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Bellatrix, _ => match self.altair_fork_epoch { Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, _ => ForkName::Base, @@ -326,7 +326,7 @@ impl ChainSpec { match fork_name { ForkName::Base => self.genesis_fork_version, ForkName::Altair => self.altair_fork_version, - ForkName::Merge => self.bellatrix_fork_version, + ForkName::Bellatrix => self.bellatrix_fork_version, ForkName::Capella => self.capella_fork_version, ForkName::Deneb => self.deneb_fork_version, ForkName::Electra => self.electra_fork_version, @@ -338,7 +338,7 @@ impl ChainSpec { match fork_name { ForkName::Base => Some(Epoch::new(0)), ForkName::Altair => self.altair_fork_epoch, - ForkName::Merge => self.bellatrix_fork_epoch, + ForkName::Bellatrix => self.bellatrix_fork_epoch, ForkName::Capella => self.capella_fork_epoch, ForkName::Deneb => self.deneb_fork_epoch, ForkName::Electra => self.electra_fork_epoch, @@ -346,7 +346,7 @@ impl ChainSpec { } pub fn inactivity_penalty_quotient_for_fork(&self, fork_name: ForkName) -> u64 { - if fork_name >= ForkName::Merge { + if fork_name >= ForkName::Bellatrix { self.inactivity_penalty_quotient_bellatrix } else if fork_name >= ForkName::Altair { self.inactivity_penalty_quotient_altair @@ -361,7 +361,7 @@ impl ChainSpec { state: &BeaconState, ) -> u64 { let fork_name = state.fork_name_unchecked(); - if fork_name >= ForkName::Merge { + if fork_name >= ForkName::Bellatrix { self.proportional_slashing_multiplier_bellatrix } else if fork_name >= ForkName::Altair { self.proportional_slashing_multiplier_altair @@ -378,7 +378,7 @@ impl ChainSpec { let fork_name = state.fork_name_unchecked(); if fork_name >= ForkName::Electra { self.min_slashing_penalty_quotient_electra - } else if fork_name >= ForkName::Merge { + } else if fork_name >= ForkName::Bellatrix { self.min_slashing_penalty_quotient_bellatrix } else if fork_name >= ForkName::Altair { self.min_slashing_penalty_quotient_altair @@ -693,7 +693,7 @@ impl ChainSpec { altair_fork_epoch: Some(Epoch::new(74240)), /* - * Merge hard fork params + * Bellatrix hard fork params */ inactivity_penalty_quotient_bellatrix: u64::checked_pow(2, 24) .expect("pow does not overflow"), @@ -830,7 +830,7 @@ impl ChainSpec { epochs_per_sync_committee_period: Epoch::new(8), altair_fork_version: [0x01, 0x00, 0x00, 0x01], altair_fork_epoch: None, - // Merge + // Bellatrix bellatrix_fork_version: [0x02, 0x00, 0x00, 0x01], bellatrix_fork_epoch: None, terminal_total_difficulty: Uint256::MAX @@ -993,7 +993,7 @@ impl ChainSpec { altair_fork_epoch: Some(Epoch::new(512)), /* - * Merge hard fork params + * Bellatrix hard fork params */ inactivity_penalty_quotient_bellatrix: u64::checked_pow(2, 24) .expect("pow does not overflow"), diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs index 31d4c5eac59..c20d5fe8f33 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/consts.rs @@ -19,7 +19,7 @@ pub mod altair { pub const NUM_FLAG_INDICES: usize = 3; } -pub mod merge { +pub mod bellatrix { pub const INTERVALS_PER_SLOT: u64 = 3; } pub mod deneb { diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 
6749a3f3a2b..62f7f1b8698 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -91,7 +91,7 @@ pub trait EthSpec: /// The number of `sync_committee` subnets. type SyncCommitteeSubnetCount: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* - * New in Merge + * New in Bellatrix */ type MaxBytesPerTransaction: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxTransactionsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 27dc8cab0a4..a46e7c29fff 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -15,7 +15,7 @@ pub type Transactions = VariableList< pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; #[superstruct( - variants(Merge, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra), variant_attributes( derive( Default, @@ -107,7 +107,9 @@ impl ExecutionPayload { ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( "unsupported fork for ExecutionPayload: {fork_name}", ))), - ForkName::Merge => ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge), + ForkName::Bellatrix => { + ExecutionPayloadBellatrix::from_ssz_bytes(bytes).map(Self::Bellatrix) + } ForkName::Capella => ExecutionPayloadCapella::from_ssz_bytes(bytes).map(Self::Capella), ForkName::Deneb => ExecutionPayloadDeneb::from_ssz_bytes(bytes).map(Self::Deneb), ForkName::Electra => ExecutionPayloadElectra::from_ssz_bytes(bytes).map(Self::Electra), @@ -116,9 +118,9 @@ impl ExecutionPayload { #[allow(clippy::arithmetic_side_effects)] /// Returns the maximum size of an execution payload. - pub fn max_execution_payload_merge_size() -> usize { + pub fn max_execution_payload_bellatrix_size() -> usize { // Fixed part - ExecutionPayloadMerge::::default().as_ssz_bytes().len() + ExecutionPayloadBellatrix::::default().as_ssz_bytes().len() // Max size of variable length `extra_data` field + (E::max_extra_data_bytes() * ::ssz_fixed_len()) // Max size of variable length `transactions` field @@ -175,7 +177,9 @@ impl ForkVersionDeserialize for ExecutionPayload { }; Ok(match fork_name { - ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Bellatrix => { + Self::Bellatrix(serde_json::from_value(value).map_err(convert_err)?) 
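[Editor's note: a worked sketch, not part of the patch, of the SSZ max-size arithmetic in the renamed `max_execution_payload_bellatrix_size` above: the encoded default payload gives the fixed part, and each variable-length field adds its maximum byte length, with each transaction also needing a 4-byte length offset. Parameter names are illustrative.]

fn max_payload_size(
    fixed_part: usize, // SSZ length of the default (empty) payload
    max_extra_data_bytes: usize,
    max_transactions: usize,
    max_bytes_per_transaction: usize,
) -> usize {
    const BYTES_PER_LENGTH_OFFSET: usize = 4;
    fixed_part
        // `extra_data` is a byte list: one byte per element, up to its max length
        + max_extra_data_bytes
        // each transaction contributes an offset plus its own maximum size
        + max_transactions * (BYTES_PER_LENGTH_OFFSET + max_bytes_per_transaction)
}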
+ } ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), @@ -192,7 +196,7 @@ impl ForkVersionDeserialize for ExecutionPayload { impl ExecutionPayload { pub fn fork_name(&self) -> ForkName { match self { - ExecutionPayload::Merge(_) => ForkName::Merge, + ExecutionPayload::Bellatrix(_) => ForkName::Bellatrix, ExecutionPayload::Capella(_) => ForkName::Capella, ExecutionPayload::Deneb(_) => ForkName::Deneb, ExecutionPayload::Electra(_) => ForkName::Electra, diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 02850304f1d..8515e386c22 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -8,7 +8,7 @@ use tree_hash::TreeHash; use tree_hash_derive::TreeHash; #[superstruct( - variants(Merge, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra), variant_attributes( derive( Default, @@ -100,7 +100,9 @@ impl ExecutionPayloadHeader { ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( "unsupported fork for ExecutionPayloadHeader: {fork_name}", ))), - ForkName::Merge => ExecutionPayloadHeaderMerge::from_ssz_bytes(bytes).map(Self::Merge), + ForkName::Bellatrix => { + ExecutionPayloadHeaderBellatrix::from_ssz_bytes(bytes).map(Self::Bellatrix) + } ForkName::Capella => { ExecutionPayloadHeaderCapella::from_ssz_bytes(bytes).map(Self::Capella) } @@ -118,7 +120,7 @@ impl ExecutionPayloadHeader { match fork_name { ForkName::Base | ForkName::Altair - | ForkName::Merge + | ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { @@ -138,7 +140,7 @@ impl<'a, E: EthSpec> ExecutionPayloadHeaderRef<'a, E> { } } -impl ExecutionPayloadHeaderMerge { +impl ExecutionPayloadHeaderBellatrix { pub fn upgrade_to_capella(&self) -> ExecutionPayloadHeaderCapella { ExecutionPayloadHeaderCapella { parent_hash: self.parent_hash, @@ -208,8 +210,8 @@ impl ExecutionPayloadHeaderDeneb { } } -impl<'a, E: EthSpec> From<&'a ExecutionPayloadMerge> for ExecutionPayloadHeaderMerge { - fn from(payload: &'a ExecutionPayloadMerge) -> Self { +impl<'a, E: EthSpec> From<&'a ExecutionPayloadBellatrix> for ExecutionPayloadHeaderBellatrix { + fn from(payload: &'a ExecutionPayloadBellatrix) -> Self { Self { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, @@ -301,7 +303,7 @@ impl<'a, E: EthSpec> From<&'a ExecutionPayloadElectra> for ExecutionPayloadHe // These impls are required to work around an inelegance in `to_execution_payload_header`. // They only clone headers so they should be relatively cheap. 
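[Editor's note: a sketch, not part of the patch, of the fork-gated SSZ decoding pattern used by `ExecutionPayloadHeader::from_ssz_bytes` above: pre-Bellatrix forks carry no payload header, so decoding is rejected up front. The byte-level decoders are reduced to stubs.]

enum ForkName { Base, Altair, Bellatrix, Capella }

enum Header {
    Bellatrix(Vec<u8>),
    Capella(Vec<u8>),
}

fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result<Header, String> {
    match fork_name {
        // No execution payload exists before Bellatrix; fail fast.
        ForkName::Base | ForkName::Altair => {
            Err("unsupported fork for ExecutionPayloadHeader".to_string())
        }
        // Stubs: the real code calls each variant's SSZ decoder here.
        ForkName::Bellatrix => Ok(Header::Bellatrix(bytes.to_vec())),
        ForkName::Capella => Ok(Header::Capella(bytes.to_vec())),
    }
}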
-impl<'a, E: EthSpec> From<&'a Self> for ExecutionPayloadHeaderMerge { +impl<'a, E: EthSpec> From<&'a Self> for ExecutionPayloadHeaderBellatrix { fn from(payload: &'a Self) -> Self { payload.clone() } @@ -335,11 +337,13 @@ impl<'a, E: EthSpec> From> for ExecutionPayloadHeader } } -impl TryFrom> for ExecutionPayloadHeaderMerge { +impl TryFrom> for ExecutionPayloadHeaderBellatrix { type Error = BeaconStateError; fn try_from(header: ExecutionPayloadHeader) -> Result { match header { - ExecutionPayloadHeader::Merge(execution_payload_header) => Ok(execution_payload_header), + ExecutionPayloadHeader::Bellatrix(execution_payload_header) => { + Ok(execution_payload_header) + } _ => Err(BeaconStateError::IncorrectStateVariant), } } @@ -369,7 +373,7 @@ impl<'a, E: EthSpec> ExecutionPayloadHeaderRefMut<'a, E> { /// Mutate through pub fn replace(self, header: ExecutionPayloadHeader) -> Result<(), BeaconStateError> { match self { - ExecutionPayloadHeaderRefMut::Merge(mut_ref) => { + ExecutionPayloadHeaderRefMut::Bellatrix(mut_ref) => { *mut_ref = header.try_into()?; } ExecutionPayloadHeaderRefMut::Capella(mut_ref) => { @@ -411,7 +415,9 @@ impl ForkVersionDeserialize for ExecutionPayloadHeader { }; Ok(match fork_name { - ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Bellatrix => { + Self::Bellatrix(serde_json::from_value(value).map_err(convert_err)?) + } ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 6b052d83976..0f7f0eb769e 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -36,11 +36,11 @@ impl ForkContext { )); } - // Only add Merge to list of forks if it's enabled - // Note: `bellatrix_fork_epoch == None` implies merge hasn't been activated yet on the config. + // Only add Bellatrix to the list of forks if it's enabled + // Note: `bellatrix_fork_epoch == None` implies Bellatrix hasn't been activated yet in the config.
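[Editor's note: a sketch, not part of the patch, of the gating in the hunk that follows: a fork digest is advertised for Bellatrix only when `bellatrix_fork_epoch` is set. The digest computation here is a stand-in; the real code tree-hashes the fork version together with the 32-byte genesis validators root.]

fn compute_fork_digest(fork_version: [u8; 4], genesis_validators_root: [u8; 4]) -> [u8; 4] {
    // Stand-in combinator; the real digest is the first four bytes of
    // hash_tree_root(ForkData { current_version, genesis_validators_root }).
    let mut digest = [0u8; 4];
    for i in 0..4 {
        digest[i] = fork_version[i] ^ genesis_validators_root[i];
    }
    digest
}

fn fork_digests(bellatrix_fork_epoch: Option<u64>, root: [u8; 4]) -> Vec<(&'static str, [u8; 4])> {
    let mut forks = vec![("altair", compute_fork_digest([1, 0, 0, 0], root))];
    // `None` means Bellatrix is not scheduled, so no digest is advertised for it.
    if bellatrix_fork_epoch.is_some() {
        forks.push(("bellatrix", compute_fork_digest([2, 0, 0, 0], root)));
    }
    forks
}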
if spec.bellatrix_fork_epoch.is_some() { fork_to_digest.push(( - ForkName::Merge, + ForkName::Bellatrix, ChainSpec::compute_fork_digest( spec.bellatrix_fork_version, genesis_validators_root, diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index f6bd5cbadf9..5cc66214733 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -13,7 +13,7 @@ use std::str::FromStr; pub enum ForkName { Base, Altair, - Merge, + Bellatrix, Capella, Deneb, Electra, @@ -24,7 +24,7 @@ impl ForkName { vec![ ForkName::Base, ForkName::Altair, - ForkName::Merge, + ForkName::Bellatrix, ForkName::Capella, ForkName::Deneb, ForkName::Electra, @@ -57,7 +57,7 @@ impl ForkName { spec.electra_fork_epoch = None; spec } - ForkName::Merge => { + ForkName::Bellatrix => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = None; @@ -99,8 +99,8 @@ impl ForkName { match self { ForkName::Base => None, ForkName::Altair => Some(ForkName::Base), - ForkName::Merge => Some(ForkName::Altair), - ForkName::Capella => Some(ForkName::Merge), + ForkName::Bellatrix => Some(ForkName::Altair), + ForkName::Capella => Some(ForkName::Bellatrix), ForkName::Deneb => Some(ForkName::Capella), ForkName::Electra => Some(ForkName::Deneb), } @@ -112,8 +112,8 @@ impl ForkName { pub fn next_fork(self) -> Option { match self { ForkName::Base => Some(ForkName::Altair), - ForkName::Altair => Some(ForkName::Merge), - ForkName::Merge => Some(ForkName::Capella), + ForkName::Altair => Some(ForkName::Bellatrix), + ForkName::Bellatrix => Some(ForkName::Capella), ForkName::Capella => Some(ForkName::Deneb), ForkName::Deneb => Some(ForkName::Electra), ForkName::Electra => None, @@ -154,9 +154,9 @@ macro_rules! 
map_fork_name_with { let (value, extra_data) = $body; ($t::Altair(value), extra_data) } - ForkName::Merge => { + ForkName::Bellatrix => { let (value, extra_data) = $body; - ($t::Merge(value), extra_data) + ($t::Bellatrix(value), extra_data) } ForkName::Capella => { let (value, extra_data) = $body; @@ -181,7 +181,7 @@ impl FromStr for ForkName { Ok(match fork_name.to_lowercase().as_ref() { "phase0" | "base" => ForkName::Base, "altair" => ForkName::Altair, - "bellatrix" | "merge" => ForkName::Merge, + "bellatrix" | "merge" => ForkName::Bellatrix, "capella" => ForkName::Capella, "deneb" => ForkName::Deneb, "electra" => ForkName::Electra, @@ -195,7 +195,7 @@ impl Display for ForkName { match self { ForkName::Base => "phase0".fmt(f), ForkName::Altair => "altair".fmt(f), - ForkName::Merge => "bellatrix".fmt(f), + ForkName::Bellatrix => "bellatrix".fmt(f), ForkName::Capella => "capella".fmt(f), ForkName::Deneb => "deneb".fmt(f), ForkName::Electra => "electra".fmt(f), @@ -259,9 +259,9 @@ mod test { #[test] fn fork_name_bellatrix_or_merge() { - assert_eq!(ForkName::from_str("bellatrix"), Ok(ForkName::Merge)); - assert_eq!(ForkName::from_str("merge"), Ok(ForkName::Merge)); - assert_eq!(ForkName::Merge.to_string(), "bellatrix"); + assert_eq!(ForkName::from_str("bellatrix"), Ok(ForkName::Bellatrix)); + assert_eq!(ForkName::from_str("merge"), Ok(ForkName::Bellatrix)); + assert_eq!(ForkName::Bellatrix.to_string(), "bellatrix"); } #[test] diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs index 195c083e295..cd78b5b3ca0 100644 --- a/consensus/types/src/fork_versioned_response.rs +++ b/consensus/types/src/fork_versioned_response.rs @@ -104,7 +104,8 @@ impl ForkVersionedResponse { #[cfg(test)] mod fork_version_response_tests { use crate::{ - ExecutionPayload, ExecutionPayloadMerge, ForkName, ForkVersionedResponse, MainnetEthSpec, + ExecutionPayload, ExecutionPayloadBellatrix, ForkName, ForkVersionedResponse, + MainnetEthSpec, }; use serde_json::json; @@ -114,9 +115,9 @@ mod fork_version_response_tests { let response_json = serde_json::to_string(&json!(ForkVersionedResponse::> { - version: Some(ForkName::Merge), + version: Some(ForkName::Bellatrix), metadata: Default::default(), - data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), + data: ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix::default()), })) .unwrap(); @@ -134,7 +135,7 @@ mod fork_version_response_tests { serde_json::to_string(&json!(ForkVersionedResponse::> { version: Some(ForkName::Capella), metadata: Default::default(), - data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), + data: ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix::default()), })) .unwrap(); diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index feefbc48946..5c521d98af9 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -120,13 +120,13 @@ pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockDeneb, - BeaconBlockElectra, BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, + BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, EmptyBlock, }; pub use crate::beacon_block_body::{ - 
BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyCapella, - BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyMerge, BeaconBlockBodyRef, + BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, + BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; @@ -154,12 +154,12 @@ pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::ExecutionBlockHeader; pub use crate::execution_layer_withdrawal_request::ExecutionLayerWithdrawalRequest; pub use crate::execution_payload::{ - ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, - ExecutionPayloadMerge, ExecutionPayloadRef, Transaction, Transactions, Withdrawals, + ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadElectra, ExecutionPayloadRef, Transaction, Transactions, Withdrawals, }; pub use crate::execution_payload_header::{ - ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, + ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; pub use crate::fork::Fork; @@ -192,9 +192,9 @@ pub use crate::light_client_update::{ pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; pub use crate::payload::{ - AbstractExecPayload, BlindedPayload, BlindedPayloadCapella, BlindedPayloadDeneb, - BlindedPayloadElectra, BlindedPayloadMerge, BlindedPayloadRef, BlockType, ExecPayload, - FullPayload, FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, FullPayloadMerge, + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadRef, BlockType, ExecPayload, + FullPayload, FullPayloadBellatrix, FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; @@ -213,9 +213,9 @@ pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockHash, - SignedBeaconBlockMerge, SignedBlindedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBeaconBlockHash, SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 4d42d357c19..61da0e1b117 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -78,7 +78,7 @@ impl LightClientBootstrap { pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { let bootstrap = match 
fork_name { - ForkName::Altair | ForkName::Merge => { + ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientBootstrapAltair::from_ssz_bytes(bytes)?) } ForkName::Capella => Self::Capella(LightClientBootstrapCapella::from_ssz_bytes(bytes)?), @@ -101,7 +101,7 @@ impl LightClientBootstrap { match fork_name { ForkName::Base => 0, ForkName::Altair - | ForkName::Merge + | ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { @@ -128,7 +128,7 @@ impl LightClientBootstrap { .map_err(|_| Error::InconsistentFork)? { ForkName::Base => return Err(Error::AltairForkNotActive), - ForkName::Altair | ForkName::Merge => Self::Altair(LightClientBootstrapAltair { + ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch, diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 5f83b0db523..29c526e2916 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -79,7 +79,7 @@ impl LightClientFinalityUpdate { .fork_name(chain_spec) .map_err(|_| Error::InconsistentFork)? { - ForkName::Altair | ForkName::Merge => { + ForkName::Altair | ForkName::Bellatrix => { let finality_update = LightClientFinalityUpdateAltair { attested_header: LightClientHeaderAltair::block_to_light_client_header( attested_block, @@ -147,7 +147,7 @@ impl LightClientFinalityUpdate { pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { let finality_update = match fork_name { - ForkName::Altair | ForkName::Merge => { + ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientFinalityUpdateAltair::from_ssz_bytes(bytes)?) } ForkName::Capella => { @@ -172,7 +172,7 @@ impl LightClientFinalityUpdate { match fork_name { ForkName::Base => 0, ForkName::Altair - | ForkName::Merge + | ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index 647ece99499..213ec90f955 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -75,7 +75,7 @@ impl LightClientHeader { .map_err(|_| Error::InconsistentFork)? { ForkName::Base => return Err(Error::AltairForkNotActive), - ForkName::Altair | ForkName::Merge => LightClientHeader::Altair( + ForkName::Altair | ForkName::Bellatrix => LightClientHeader::Altair( LightClientHeaderAltair::block_to_light_client_header(block)?, ), ForkName::Capella => LightClientHeader::Capella( @@ -90,7 +90,7 @@ impl LightClientHeader { pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { let header = match fork_name { - ForkName::Altair | ForkName::Merge => { + ForkName::Altair | ForkName::Bellatrix => { LightClientHeader::Altair(LightClientHeaderAltair::from_ssz_bytes(bytes)?) 
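[Editor's note: a sketch, not part of the patch, of why the light-client matches above pair `Altair | Bellatrix`: Bellatrix changed consensus but not the light-client format, so both forks decode into the Altair representation, while Capella onward get their own variants. Decoding is stubbed.]

enum ForkName { Base, Altair, Bellatrix, Capella }

enum LightClientHeader {
    Altair(Vec<u8>),  // shared by Altair and Bellatrix
    Capella(Vec<u8>), // adds execution payload header fields
}

fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result<LightClientHeader, String> {
    match fork_name {
        // Light-client data only exists from Altair onwards.
        ForkName::Base => Err("altair fork not active".to_string()),
        // Bellatrix reuses the Altair light-client representation unchanged.
        ForkName::Altair | ForkName::Bellatrix => Ok(LightClientHeader::Altair(bytes.to_vec())),
        ForkName::Capella => Ok(LightClientHeader::Capella(bytes.to_vec())),
    }
}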
} ForkName::Capella => { @@ -119,7 +119,7 @@ impl LightClientHeader { pub fn ssz_max_var_len_for_fork(fork_name: ForkName) -> usize { match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge => 0, + ForkName::Base | ForkName::Altair | ForkName::Bellatrix => 0, ForkName::Capella | ForkName::Deneb | ForkName::Electra => { ExecutionPayloadHeader::::ssz_max_var_len_for_fork(fork_name) } @@ -198,7 +198,7 @@ impl ForkVersionDeserialize for LightClientHeader { fork_name: ForkName, ) -> Result { match fork_name { - ForkName::Altair | ForkName::Merge => serde_json::from_value(value) + ForkName::Altair | ForkName::Bellatrix => serde_json::from_value(value) .map(|light_client_header| Self::Altair(light_client_header)) .map_err(serde::de::Error::custom), ForkName::Capella => serde_json::from_value(value) diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index d22c4535f1e..4727673f6c0 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -70,13 +70,15 @@ impl LightClientOptimisticUpdate { .fork_name(chain_spec) .map_err(|_| Error::InconsistentFork)? { - ForkName::Altair | ForkName::Merge => Self::Altair(LightClientOptimisticUpdateAltair { - attested_header: LightClientHeaderAltair::block_to_light_client_header( - attested_block, - )?, - sync_aggregate, - signature_slot, - }), + ForkName::Altair | ForkName::Bellatrix => { + Self::Altair(LightClientOptimisticUpdateAltair { + attested_header: LightClientHeaderAltair::block_to_light_client_header( + attested_block, + )?, + sync_aggregate, + signature_slot, + }) + } ForkName::Capella => Self::Capella(LightClientOptimisticUpdateCapella { attested_header: LightClientHeaderCapella::block_to_light_client_header( attested_block, @@ -131,7 +133,7 @@ impl LightClientOptimisticUpdate { pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { let optimistic_update = match fork_name { - ForkName::Altair | ForkName::Merge => { + ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientOptimisticUpdateAltair::from_ssz_bytes(bytes)?) } ForkName::Capella => { @@ -156,7 +158,7 @@ impl LightClientOptimisticUpdate { match fork_name { ForkName::Base => 0, ForkName::Altair - | ForkName::Merge + | ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index d5e8cd592df..002fbea2d37 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -191,7 +191,7 @@ impl LightClientUpdate { .map_err(|_| Error::InconsistentFork)? { ForkName::Base => return Err(Error::AltairForkNotActive), - ForkName::Altair | ForkName::Merge => { + ForkName::Altair | ForkName::Bellatrix => { let attested_header = LightClientHeaderAltair::block_to_light_client_header(attested_block)?; let finalized_header = @@ -243,7 +243,7 @@ impl LightClientUpdate { pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { let update = match fork_name { - ForkName::Altair | ForkName::Merge => { + ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientUpdateAltair::from_ssz_bytes(bytes)?) 
} ForkName::Capella => Self::Capella(LightClientUpdateCapella::from_ssz_bytes(bytes)?), diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 18b3199bd35..80a70c171f5 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -79,22 +79,22 @@ pub trait AbstractExecPayload: + Sized + From> + TryFrom> - + TryInto + + TryInto + TryInto + TryInto + TryInto { type Ref<'a>: ExecPayload + Copy - + From<&'a Self::Merge> + + From<&'a Self::Bellatrix> + From<&'a Self::Capella> + From<&'a Self::Deneb> + From<&'a Self::Electra>; - type Merge: OwnedExecPayload + type Bellatrix: OwnedExecPayload + Into - + for<'a> From>> - + TryFrom>; + + for<'a> From>> + + TryFrom>; type Capella: OwnedExecPayload + Into + for<'a> From>> @@ -110,7 +110,7 @@ pub trait AbstractExecPayload: } #[superstruct( - variants(Merge, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra), variant_attributes( derive( Debug, @@ -145,8 +145,11 @@ pub trait AbstractExecPayload: #[arbitrary(bound = "E: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] pub struct FullPayload { - #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] - pub execution_payload: ExecutionPayloadMerge, + #[superstruct( + only(Bellatrix), + partial_getter(rename = "execution_payload_bellatrix") + )] + pub execution_payload: ExecutionPayloadBellatrix, #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] pub execution_payload: ExecutionPayloadCapella, #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] @@ -252,7 +255,7 @@ impl ExecPayload for FullPayload { fn withdrawals_root(&self) -> Result { match self { - FullPayload::Merge(_) => Err(Error::IncorrectStateVariant), + FullPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), FullPayload::Capella(ref inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } @@ -267,7 +270,9 @@ impl ExecPayload for FullPayload { fn blob_gas_used(&self) -> Result { match self { - FullPayload::Merge(_) | FullPayload::Capella(_) => Err(Error::IncorrectStateVariant), + FullPayload::Bellatrix(_) | FullPayload::Capella(_) => { + Err(Error::IncorrectStateVariant) + } FullPayload::Deneb(ref inner) => Ok(inner.execution_payload.blob_gas_used), FullPayload::Electra(ref inner) => Ok(inner.execution_payload.blob_gas_used), } @@ -296,7 +301,7 @@ impl FullPayload { pub fn default_at_fork(fork_name: ForkName) -> Result { match fork_name { ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), - ForkName::Merge => Ok(FullPayloadMerge::default().into()), + ForkName::Bellatrix => Ok(FullPayloadBellatrix::default().into()), ForkName::Capella => Ok(FullPayloadCapella::default().into()), ForkName::Deneb => Ok(FullPayloadDeneb::default().into()), ForkName::Electra => Ok(FullPayloadElectra::default().into()), @@ -382,7 +387,7 @@ impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { fn withdrawals_root(&self) -> Result { match self { - FullPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Bellatrix(_) => Err(Error::IncorrectStateVariant), FullPayloadRef::Capella(inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } @@ -397,7 +402,7 @@ impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { fn blob_gas_used(&self) -> Result { match self { - FullPayloadRef::Merge(_) | FullPayloadRef::Capella(_) => { + FullPayloadRef::Bellatrix(_) | FullPayloadRef::Capella(_) => { Err(Error::IncorrectStateVariant) } 
FullPayloadRef::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), @@ -420,7 +425,7 @@ impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { impl AbstractExecPayload for FullPayload { type Ref<'a> = FullPayloadRef<'a, E>; - type Merge = FullPayloadMerge; + type Bellatrix = FullPayloadBellatrix; type Capella = FullPayloadCapella; type Deneb = FullPayloadDeneb; type Electra = FullPayloadElectra; @@ -442,7 +447,7 @@ impl TryFrom> for FullPayload { } #[superstruct( - variants(Merge, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra), variant_attributes( derive( Debug, @@ -476,8 +481,11 @@ impl TryFrom> for FullPayload { #[arbitrary(bound = "E: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] pub struct BlindedPayload { - #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] - pub execution_payload_header: ExecutionPayloadHeaderMerge, + #[superstruct( + only(Bellatrix), + partial_getter(rename = "execution_payload_bellatrix") + )] + pub execution_payload_header: ExecutionPayloadHeaderBellatrix, #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] pub execution_payload_header: ExecutionPayloadHeaderCapella, #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] @@ -561,7 +569,7 @@ impl ExecPayload for BlindedPayload { fn withdrawals_root(&self) -> Result { match self { - BlindedPayload::Merge(_) => Err(Error::IncorrectStateVariant), + BlindedPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), BlindedPayload::Capella(ref inner) => { Ok(inner.execution_payload_header.withdrawals_root) } @@ -574,7 +582,7 @@ impl ExecPayload for BlindedPayload { fn blob_gas_used(&self) -> Result { match self { - BlindedPayload::Merge(_) | BlindedPayload::Capella(_) => { + BlindedPayload::Bellatrix(_) | BlindedPayload::Capella(_) => { Err(Error::IncorrectStateVariant) } BlindedPayload::Deneb(ref inner) => Ok(inner.execution_payload_header.blob_gas_used), @@ -662,7 +670,7 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { fn withdrawals_root(&self) -> Result { match self { - BlindedPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), + BlindedPayloadRef::Bellatrix(_) => Err(Error::IncorrectStateVariant), BlindedPayloadRef::Capella(inner) => { Ok(inner.execution_payload_header.withdrawals_root) } @@ -675,7 +683,7 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { fn blob_gas_used(&self) -> Result { match self { - BlindedPayloadRef::Merge(_) | BlindedPayloadRef::Capella(_) => { + BlindedPayloadRef::Bellatrix(_) | BlindedPayloadRef::Capella(_) => { Err(Error::IncorrectStateVariant) } BlindedPayloadRef::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), @@ -699,12 +707,12 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { } macro_rules! 
impl_exec_payload_common { - ($wrapper_type:ident, // BlindedPayloadMerge | FullPayloadMerge - $wrapped_type:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadMerge - $wrapped_type_full:ident, // ExecutionPayloadMerge | ExecutionPayloadMerge - $wrapped_type_header:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadHeaderMerge + ($wrapper_type:ident, // BlindedPayloadBellatrix | FullPayloadBellatrix + $wrapped_type:ident, // ExecutionPayloadHeaderBellatrix | ExecutionPayloadBellatrix + $wrapped_type_full:ident, // ExecutionPayloadBellatrix | ExecutionPayloadBellatrix + $wrapped_type_header:ident, // ExecutionPayloadHeaderBellatrix | ExecutionPayloadHeaderBellatrix $wrapped_field:ident, // execution_payload_header | execution_payload - $fork_variant:ident, // Merge | Merge + $fork_variant:ident, // Bellatrix | Bellatrix $block_type_variant:ident, // Blinded | Full $is_default_with_empty_roots:block, $f:block, @@ -783,17 +791,17 @@ macro_rules! impl_exec_payload_common { } macro_rules! impl_exec_payload_for_fork { - // BlindedPayloadMerge, FullPayloadMerge, ExecutionPayloadHeaderMerge, ExecutionPayloadMerge, Merge + // BlindedPayloadBellatrix, FullPayloadBellatrix, ExecutionPayloadHeaderBellatrix, ExecutionPayloadBellatrix, Bellatrix ($wrapper_type_header:ident, $wrapper_type_full:ident, $wrapped_type_header:ident, $wrapped_type_full:ident, $fork_variant:ident) => { //*************** Blinded payload implementations ******************// impl_exec_payload_common!( - $wrapper_type_header, // BlindedPayloadMerge - $wrapped_type_header, // ExecutionPayloadHeaderMerge - $wrapped_type_full, // ExecutionPayloadMerge - $wrapped_type_header, // ExecutionPayloadHeaderMerge + $wrapper_type_header, // BlindedPayloadBellatrix + $wrapped_type_header, // ExecutionPayloadHeaderBellatrix + $wrapped_type_full, // ExecutionPayloadBellatrix + $wrapped_type_header, // ExecutionPayloadHeaderBellatrix execution_payload_header, - $fork_variant, // Merge + $fork_variant, // Bellatrix Blinded, { |wrapper: &$wrapper_type_header| { @@ -872,12 +880,12 @@ macro_rules! impl_exec_payload_for_fork { //*************** Full payload implementations ******************// impl_exec_payload_common!( - $wrapper_type_full, // FullPayloadMerge - $wrapped_type_full, // ExecutionPayloadMerge - $wrapped_type_full, // ExecutionPayloadMerge - $wrapped_type_header, // ExecutionPayloadHeaderMerge + $wrapper_type_full, // FullPayloadBellatrix + $wrapped_type_full, // ExecutionPayloadBellatrix + $wrapped_type_full, // ExecutionPayloadBellatrix + $wrapped_type_header, // ExecutionPayloadHeaderBellatrix execution_payload, - $fork_variant, // Merge + $fork_variant, // Bellatrix Full, { |wrapper: &$wrapper_type_full| { @@ -952,11 +960,11 @@ macro_rules! 
impl_exec_payload_for_fork { } impl_exec_payload_for_fork!( - BlindedPayloadMerge, - FullPayloadMerge, - ExecutionPayloadHeaderMerge, - ExecutionPayloadMerge, - Merge + BlindedPayloadBellatrix, + FullPayloadBellatrix, + ExecutionPayloadHeaderBellatrix, + ExecutionPayloadBellatrix, + Bellatrix ); impl_exec_payload_for_fork!( BlindedPayloadCapella, @@ -982,7 +990,7 @@ impl_exec_payload_for_fork!( impl AbstractExecPayload for BlindedPayload { type Ref<'a> = BlindedPayloadRef<'a, E>; - type Merge = BlindedPayloadMerge; + type Bellatrix = BlindedPayloadBellatrix; type Capella = BlindedPayloadCapella; type Deneb = BlindedPayloadDeneb; type Electra = BlindedPayloadElectra; @@ -1002,8 +1010,8 @@ impl From> for BlindedPayload { impl From> for BlindedPayload { fn from(execution_payload_header: ExecutionPayloadHeader) -> Self { match execution_payload_header { - ExecutionPayloadHeader::Merge(execution_payload_header) => { - Self::Merge(BlindedPayloadMerge { + ExecutionPayloadHeader::Bellatrix(execution_payload_header) => { + Self::Bellatrix(BlindedPayloadBellatrix { execution_payload_header, }) } @@ -1029,8 +1037,8 @@ impl From> for BlindedPayload { impl From> for ExecutionPayloadHeader { fn from(blinded: BlindedPayload) -> Self { match blinded { - BlindedPayload::Merge(blinded_payload) => { - ExecutionPayloadHeader::Merge(blinded_payload.execution_payload_header) + BlindedPayload::Bellatrix(blinded_payload) => { + ExecutionPayloadHeader::Bellatrix(blinded_payload.execution_payload_header) } BlindedPayload::Capella(blinded_payload) => { ExecutionPayloadHeader::Capella(blinded_payload.execution_payload_header) diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index da4ac392362..4d3279a7f77 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -37,7 +37,7 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its proposer. #[superstruct( - variants(Base, Altair, Merge, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), variant_attributes( derive( Debug, @@ -72,8 +72,8 @@ pub struct SignedBeaconBlock = FullP pub message: BeaconBlockBase, #[superstruct(only(Altair), partial_getter(rename = "message_altair"))] pub message: BeaconBlockAltair, - #[superstruct(only(Merge), partial_getter(rename = "message_merge"))] - pub message: BeaconBlockMerge, + #[superstruct(only(Bellatrix), partial_getter(rename = "message_bellatrix"))] + pub message: BeaconBlockBellatrix, #[superstruct(only(Capella), partial_getter(rename = "message_capella"))] pub message: BeaconBlockCapella, #[superstruct(only(Deneb), partial_getter(rename = "message_deneb"))] @@ -150,8 +150,8 @@ impl> SignedBeaconBlock BeaconBlock::Altair(message) => { SignedBeaconBlock::Altair(SignedBeaconBlockAltair { message, signature }) } - BeaconBlock::Merge(message) => { - SignedBeaconBlock::Merge(SignedBeaconBlockMerge { message, signature }) + BeaconBlock::Bellatrix(message) => { + SignedBeaconBlock::Bellatrix(SignedBeaconBlockBellatrix { message, signature }) } BeaconBlock::Capella(message) => { SignedBeaconBlock::Capella(SignedBeaconBlockCapella { message, signature }) @@ -310,20 +310,20 @@ impl From>> // Post-Bellatrix blocks can be "unblinded" by adding the full payload. // NOTE: It might be nice to come up with a `superstruct` pattern to abstract over this before // the first fork after Bellatrix. 
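As a rough illustration of the "unblinding" relationship described in the comment above (all types here are stand-ins, not the actual Lighthouse definitions): a blinded payload retains only a commitment to the execution payload, and converting back to a full block means supplying a payload consistent with that commitment. A minimal, self-contained sketch:

    #[derive(Debug, Clone, PartialEq)]
    struct FullPayloadToy {
        body: Vec<u8>,
    }

    #[derive(Debug, Clone, PartialEq)]
    struct BlindedPayloadToy {
        body_len: usize,
    }

    impl From<&FullPayloadToy> for BlindedPayloadToy {
        fn from(full: &FullPayloadToy) -> Self {
            // "Blinding" keeps only a commitment to the body (its length
            // here; a hash root in the real types).
            BlindedPayloadToy { body_len: full.body.len() }
        }
    }

    impl BlindedPayloadToy {
        // "Unblinding" succeeds only if the supplied body matches the
        // commitment, mirroring `into_full_block` below.
        fn into_full(self, body: Vec<u8>) -> Option<FullPayloadToy> {
            (body.len() == self.body_len).then(|| FullPayloadToy { body })
        }
    }

    fn main() {
        let full = FullPayloadToy { body: vec![1, 2, 3] };
        let blinded = BlindedPayloadToy::from(&full);
        assert_eq!(blinded.into_full(vec![1, 2, 3]), Some(full));
    }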
-impl SignedBeaconBlockMerge> { +impl SignedBeaconBlockBellatrix> { pub fn into_full_block( self, - execution_payload: ExecutionPayloadMerge, - ) -> SignedBeaconBlockMerge> { - let SignedBeaconBlockMerge { + execution_payload: ExecutionPayloadBellatrix, + ) -> SignedBeaconBlockBellatrix> { + let SignedBeaconBlockBellatrix { message: - BeaconBlockMerge { + BeaconBlockBellatrix { slot, proposer_index, parent_root, state_root, body: - BeaconBlockBodyMerge { + BeaconBlockBodyBellatrix { randao_reveal, eth1_data, graffiti, @@ -333,18 +333,18 @@ impl SignedBeaconBlockMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: BlindedPayloadMerge { .. }, + execution_payload: BlindedPayloadBellatrix { .. }, }, }, signature, } = self; - SignedBeaconBlockMerge { - message: BeaconBlockMerge { + SignedBeaconBlockBellatrix { + message: BeaconBlockBellatrix { slot, proposer_index, parent_root, state_root, - body: BeaconBlockBodyMerge { + body: BeaconBlockBodyBellatrix { randao_reveal, eth1_data, graffiti, @@ -354,7 +354,7 @@ impl SignedBeaconBlockMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayloadMerge { execution_payload }, + execution_payload: FullPayloadBellatrix { execution_payload }, }, }, signature, @@ -536,8 +536,8 @@ impl SignedBeaconBlock> { let full_block = match (self, execution_payload) { (SignedBeaconBlock::Base(block), _) => SignedBeaconBlock::Base(block.into()), (SignedBeaconBlock::Altair(block), _) => SignedBeaconBlock::Altair(block.into()), - (SignedBeaconBlock::Merge(block), Some(ExecutionPayload::Merge(payload))) => { - SignedBeaconBlock::Merge(block.into_full_block(payload)) + (SignedBeaconBlock::Bellatrix(block), Some(ExecutionPayload::Bellatrix(payload))) => { + SignedBeaconBlock::Bellatrix(block.into_full_block(payload)) } (SignedBeaconBlock::Capella(block), Some(ExecutionPayload::Capella(payload))) => { SignedBeaconBlock::Capella(block.into_full_block(payload)) @@ -550,7 +550,7 @@ impl SignedBeaconBlock> { } // avoid wildcard matching forks so that compiler will // direct us here when a new fork has been added - (SignedBeaconBlock::Merge(_), _) => return None, + (SignedBeaconBlock::Bellatrix(_), _) => return None, (SignedBeaconBlock::Capella(_), _) => return None, (SignedBeaconBlock::Deneb(_), _) => return None, (SignedBeaconBlock::Electra(_), _) => return None, @@ -687,8 +687,8 @@ pub mod ssz_tagged_signed_beacon_block { ForkName::Altair => Ok(SignedBeaconBlock::Altair( SignedBeaconBlockAltair::from_ssz_bytes(body)?, )), - ForkName::Merge => Ok(SignedBeaconBlock::Merge( - SignedBeaconBlockMerge::from_ssz_bytes(body)?, + ForkName::Bellatrix => Ok(SignedBeaconBlock::Bellatrix( + SignedBeaconBlockBellatrix::from_ssz_bytes(body)?, )), ForkName::Capella => Ok(SignedBeaconBlock::Capella( SignedBeaconBlockCapella::from_ssz_bytes(body)?, @@ -744,7 +744,10 @@ mod test { BeaconBlock::Altair(BeaconBlockAltair::empty(spec)), sig.clone(), ), - SignedBeaconBlock::from_block(BeaconBlock::Merge(BeaconBlockMerge::empty(spec)), sig), + SignedBeaconBlock::from_block( + BeaconBlock::Bellatrix(BeaconBlockBellatrix::empty(spec)), + sig, + ), ]; for block in blocks { @@ -783,7 +786,7 @@ mod test { sig.clone(), ), SignedBeaconBlock::from_block( - BeaconBlock::Merge(BeaconBlockMerge::empty(spec)), + BeaconBlock::Bellatrix(BeaconBlockBellatrix::empty(spec)), sig.clone(), ), SignedBeaconBlock::from_block( diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 74175423e34..4c7c16757ed 100644 --- 
a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -42,7 +42,7 @@ impl VoluntaryExit { ) -> SignedVoluntaryExit { let fork_name = spec.fork_name_at_epoch(self.epoch); let fork_version = match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { spec.fork_version_for_name(fork_name) } // EIP-7044 diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index 7aa1ef70089..974a34591f0 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -5,8 +5,9 @@ use std::fs::File; use std::io::Write; use std::time::{SystemTime, UNIX_EPOCH}; use types::{ - EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderMerge, ForkName, + EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, + ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, + ForkName, }; pub fn run(matches: &ArgMatches) -> Result<(), String> { @@ -20,17 +21,17 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { let base_fee_per_gas = parse_required(matches, "base-fee-per-gas")?; let gas_limit = parse_required(matches, "gas-limit")?; let file_name = matches.value_of("file").ok_or("No file supplied")?; - let fork_name: ForkName = parse_optional(matches, "fork")?.unwrap_or(ForkName::Merge); + let fork_name: ForkName = parse_optional(matches, "fork")?.unwrap_or(ForkName::Bellatrix); let execution_payload_header: ExecutionPayloadHeader = match fork_name { ForkName::Base | ForkName::Altair => return Err("invalid fork name".to_string()), - ForkName::Merge => ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { + ForkName::Bellatrix => ExecutionPayloadHeader::Bellatrix(ExecutionPayloadHeaderBellatrix { gas_limit, base_fee_per_gas, timestamp: genesis_time, block_hash: eth1_block_hash, prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeaderMerge::default() + ..ExecutionPayloadHeaderBellatrix::default() }), ForkName::Capella => ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella { gas_limit, diff --git a/lcli/src/main.rs b/lcli/src/main.rs index c374a8f4b37..7b5c1598c9e 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -433,7 +433,7 @@ fn main() { .takes_value(true) .default_value("bellatrix") .help("The fork for which the execution payload header should be created.") - .possible_values(&["merge", "bellatrix", "capella", "deneb", "electra"]) + .possible_values(&["bellatrix", "capella", "deneb", "electra"]) ) ) .subcommand( diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index edba4249966..f6bfb2ac013 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -21,8 +21,8 @@ use std::time::{SystemTime, UNIX_EPOCH}; use types::ExecutionBlockHash; use types::{ test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Epoch, - Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderMerge, + Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, + ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ForkName, Hash256, Keypair, PublicKey, Validator, }; @@ -114,9 +114,9 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul 
ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( "genesis fork must be post-merge".to_string(), )), - ForkName::Merge => { - ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Merge) + ForkName::Bellatrix => { + ExecutionPayloadHeaderBellatrix::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Bellatrix) } ForkName::Capella => { ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) @@ -246,10 +246,10 @@ fn initialize_state_with_validators( ) -> Result, String> { // If no header is provided, then start from a Bellatrix state by default let default_header: ExecutionPayloadHeader = - ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { + ExecutionPayloadHeader::Bellatrix(ExecutionPayloadHeaderBellatrix { block_hash: ExecutionBlockHash::from_root(eth1_block_hash), parent_hash: ExecutionBlockHash::zero(), - ..ExecutionPayloadHeaderMerge::default() + ..ExecutionPayloadHeaderBellatrix::default() }); let execution_payload_header = execution_payload_header.unwrap_or(default_header); // Empty eth1 data @@ -314,9 +314,9 @@ fn initialize_state_with_validators( // Override latest execution payload header. // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing - if let ExecutionPayloadHeader::Merge(ref header) = execution_payload_header { + if let ExecutionPayloadHeader::Bellatrix(ref header) = execution_payload_header { *state - .latest_execution_payload_header_merge_mut() + .latest_execution_payload_header_bellatrix_mut() .or(Err("mismatched fork".to_string()))? = header.clone(); } } diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index 5b34fedfb9e..e86ffb73dc2 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -70,9 +70,11 @@ pub fn run_parse_ssz( "SignedBeaconBlockAltair" => { decode_and_print(&bytes, SignedBeaconBlockAltair::::from_ssz_bytes, format)? } - "SignedBeaconBlockMerge" | "SignedBeaconBlockBellatrix" => { - decode_and_print(&bytes, SignedBeaconBlockMerge::::from_ssz_bytes, format)? - } + "SignedBeaconBlockBellatrix" => decode_and_print( + &bytes, + SignedBeaconBlockBellatrix::::from_ssz_bytes, + format, + )?, "SignedBeaconBlockCapella" => decode_and_print( &bytes, SignedBeaconBlockCapella::::from_ssz_bytes, @@ -97,8 +99,8 @@ pub fn run_parse_ssz( "BeaconStateAltair" => { decode_and_print(&bytes, BeaconStateAltair::::from_ssz_bytes, format)? } - "BeaconStateMerge" | "BeaconStateBellatrix" => { - decode_and_print(&bytes, BeaconStateMerge::::from_ssz_bytes, format)? + "BeaconStateBellatrix" => { + decode_and_print(&bytes, BeaconStateBellatrix::::from_ssz_bytes, format)? } "BeaconStateCapella" => { decode_and_print(&bytes, BeaconStateCapella::::from_ssz_bytes, format)? 
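The `// EIP-7044` branch in the voluntary_exit.rs hunk earlier encodes a rule worth spelling out: from Deneb onwards, voluntary exits are signed with the Capella fork version, so an exit signed once stays valid across later forks. A minimal sketch of that selection, using a local enum rather than the real `ForkName`:

    #[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
    enum Fork {
        Base,
        Altair,
        Bellatrix,
        Capella,
        Deneb,
        Electra,
    }

    // Pick the fork whose version is used to sign a voluntary exit created
    // while `fork` is active.
    fn exit_signing_fork(fork: Fork) -> Fork {
        if fork >= Fork::Deneb {
            // EIP-7044: pinned to Capella so the exit remains valid forever.
            Fork::Capella
        } else {
            fork
        }
    }

    fn main() {
        assert_eq!(exit_signing_fork(Fork::Electra), Fork::Capella);
        assert_eq!(exit_signing_fork(Fork::Deneb), Fork::Capella);
        assert_eq!(exit_signing_fork(Fork::Bellatrix), Fork::Bellatrix);
    }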
diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 86f4dce239b..c71feaa7dc5 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -32,7 +32,7 @@ GENESIS_DELAY: 604800 # Altair ALTAIR_FORK_VERSION: 0x01000000 ALTAIR_FORK_EPOCH: 18446744073709551615 -# Merge +# Bellatrix BELLATRIX_FORK_VERSION: 0x02000000 BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 9f1e3d235d9..7dfde69d3a9 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -416,7 +416,7 @@ fn eth1_cache_follow_distance_manual() { } // Tests for Bellatrix flags. -fn run_merge_execution_endpoints_flag_test(flag: &str) { +fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { use sensitive_url::SensitiveUrl; let urls = vec!["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; // we don't support redundancy for execution-endpoints @@ -500,15 +500,15 @@ fn execution_timeout_multiplier_flag() { }); } #[test] -fn merge_execution_endpoints_flag() { - run_merge_execution_endpoints_flag_test("execution-endpoints") +fn bellatrix_execution_endpoints_flag() { + run_bellatrix_execution_endpoints_flag_test("execution-endpoints") } #[test] -fn merge_execution_endpoint_flag() { - run_merge_execution_endpoints_flag_test("execution-endpoint") +fn bellatrix_execution_endpoint_flag() { + run_bellatrix_execution_endpoints_flag_test("execution-endpoint") } #[test] -fn merge_jwt_secrets_flag() { +fn bellatrix_jwt_secrets_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); let mut file = File::create(dir.path().join("jwtsecrets")).expect("Unable to create file"); file.write_all(b"0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33") @@ -533,7 +533,7 @@ fn merge_jwt_secrets_flag() { }); } #[test] -fn merge_fee_recipient_flag() { +fn bellatrix_fee_recipient_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() .flag("execution-endpoint", Some("http://meow.cats")) diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index 342a48ba46c..6763edbe22b 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -63,8 +63,8 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { match fork_name { ForkName::Base => ForkName::Base, ForkName::Altair => ForkName::Base, - ForkName::Merge => ForkName::Altair, - ForkName::Capella => ForkName::Merge, + ForkName::Bellatrix => ForkName::Altair, + ForkName::Capella => ForkName::Bellatrix, ForkName::Deneb => ForkName::Capella, ForkName::Electra => ForkName::Deneb, } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index a9c77c53c52..c4c592e4cf2 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -105,7 +105,7 @@ impl EpochTransition for JustificationAndFinalization { Ok(()) } BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => { @@ -128,7 +128,7 @@ impl EpochTransition for RewardsAndPenalties { base::process_rewards_and_penalties(state, &validator_statuses, spec) } BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | 
BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => altair::process_rewards_and_penalties_slow(state, spec), @@ -161,7 +161,7 @@ impl EpochTransition for Slashings { )?; } BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => { @@ -203,7 +203,7 @@ impl EpochTransition for RandaoMixesReset { impl EpochTransition for HistoricalRootsUpdate { fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { - BeaconState::Base(_) | BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Base(_) | BeaconState::Altair(_) | BeaconState::Bellatrix(_) => { process_historical_roots_update(state) } _ => Ok(()), @@ -237,7 +237,7 @@ impl EpochTransition for SyncCommitteeUpdates { match state { BeaconState::Base(_) => Ok(()), BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => altair::process_sync_committee_updates(state, spec), @@ -250,7 +250,7 @@ impl EpochTransition for InactivityUpdates { match state { BeaconState::Base(_) => Ok(()), BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => altair::process_inactivity_updates_slow(state, spec), @@ -263,7 +263,7 @@ impl EpochTransition for ParticipationFlagUpdates { match state { BeaconState::Base(_) => Ok(()), BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => altair::process_participation_flag_updates(state), @@ -313,7 +313,7 @@ impl> Case for EpochProcessing { && T::name() != "historical_summaries_update" } // No phase0 tests for Altair and later. 
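The epoch-processing changes above all follow one pattern: phase 0 gets its own branch, and every fork from Altair onwards shares a single arm, so renaming or adding a fork variant (here Merge becoming Bellatrix) only extends the arm list. A toy sketch of that dispatch shape, with stand-in types in place of the real `BeaconState` and transitions:

    #[derive(Debug)]
    enum State {
        Base(u64),
        Altair(u64),
        Bellatrix(u64),
    }

    trait EpochTransition {
        fn run(state: &mut State) -> Result<(), String>;
    }

    struct InactivityUpdates;

    impl EpochTransition for InactivityUpdates {
        fn run(state: &mut State) -> Result<(), String> {
            match state {
                // Phase 0 has no inactivity scores, so this is a no-op.
                State::Base(_) => Ok(()),
                // Every later fork shares the Altair-era logic, so new fork
                // variants are listed alongside Altair, not special-cased.
                State::Altair(score) | State::Bellatrix(score) => {
                    *score += 1;
                    Ok(())
                }
            }
        }
    }

    fn main() {
        let mut state = State::Altair(0);
        InactivityUpdates::run(&mut state).unwrap();
        assert!(matches!(state, State::Altair(1)));
    }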
- ForkName::Altair | ForkName::Merge => { + ForkName::Altair | ForkName::Bellatrix => { T::name() != "participation_record_updates" && T::name() != "historical_summaries_update" } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index be8a344a35a..132cfb4c0ae 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -63,7 +63,9 @@ impl Case for ForkTest { let mut result = match fork_name { ForkName::Base => panic!("phase0 not supported"), ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), - ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), + ForkName::Bellatrix => { + upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state) + } ForkName::Capella => upgrade_to_capella(&mut result_state, spec).map(|_| result_state), ForkName::Deneb => upgrade_to_deneb(&mut result_state, spec).map(|_| result_state), ForkName::Electra => upgrade_to_electra(&mut result_state, spec).map(|_| result_state), diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index cf0b9f77c8f..8d5c0687753 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -92,7 +92,7 @@ pub struct KzgInclusionMerkleProofValidity { impl LoadCase for KzgInclusionMerkleProofValidity { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let block: BeaconBlockBody> = match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { return Err(Error::InternalError(format!( "KZG inclusion merkle proof validity test skipped for {:?}", fork_name diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index a2f50896a57..158f2334dc3 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -20,8 +20,8 @@ use state_processing::{ }; use std::fmt::Debug; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockBodyCapella, - BeaconBlockBodyDeneb, BeaconBlockBodyMerge, BeaconState, BlindedPayload, Deposit, + Attestation, AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockBodyBellatrix, + BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconState, BlindedPayload, Deposit, ExecutionPayload, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, SyncAggregate, }; @@ -99,7 +99,7 @@ impl Operation for Attestation { spec, ), BeaconState::Altair(_) - | BeaconState::Merge(_) + | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => { @@ -289,7 +289,7 @@ impl Operation for BeaconBlockBody> { fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file_with(path, |bytes| { Ok(match fork_name { - ForkName::Merge => BeaconBlockBody::Merge(<_>::from_ssz_bytes(bytes)?), + ForkName::Bellatrix => BeaconBlockBody::Bellatrix(<_>::from_ssz_bytes(bytes)?), ForkName::Capella => BeaconBlockBody::Capella(<_>::from_ssz_bytes(bytes)?), ForkName::Deneb => BeaconBlockBody::Deneb(<_>::from_ssz_bytes(bytes)?), _ => panic!(), @@ -330,9 +330,10 @@ impl Operation for BeaconBlockBody> { fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file_with(path, |bytes| { Ok(match fork_name { - ForkName::Merge => { - let inner = 
>>::from_ssz_bytes(bytes)?; - BeaconBlockBody::Merge(inner.clone_as_blinded()) + ForkName::Bellatrix => { + let inner = + >>::from_ssz_bytes(bytes)?; + BeaconBlockBody::Bellatrix(inner.clone_as_blinded()) } ForkName::Capella => { let inner = >>::from_ssz_bytes(bytes)?; @@ -375,7 +376,9 @@ impl Operation for WithdrawalsPayload { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name != ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge + fork_name != ForkName::Base + && fork_name != ForkName::Altair + && fork_name != ForkName::Bellatrix } fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { @@ -407,7 +410,9 @@ impl Operation for SignedBlsToExecutionChange { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name != ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge + fork_name != ForkName::Base + && fork_name != ForkName::Altair + && fork_name != ForkName::Bellatrix } fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index b2c49a96feb..dc5029d53e7 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -38,7 +38,7 @@ impl LoadCase for TransitionTest { ForkName::Altair => { spec.altair_fork_epoch = Some(metadata.fork_epoch); } - ForkName::Merge => { + ForkName::Bellatrix => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); } diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 59b5cb6ba74..2d5ea4149ef 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -217,8 +217,8 @@ impl SszStaticHandler { Self::for_forks(vec![ForkName::Altair]) } - pub fn merge_only() -> Self { - Self::for_forks(vec![ForkName::Merge]) + pub fn bellatrix_only() -> Self { + Self::for_forks(vec![ForkName::Bellatrix]) } pub fn capella_only() -> Self { @@ -558,7 +558,7 @@ impl Handler for ForkChoiceHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Merge block tests are only enabled for Bellatrix. 
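The comment above points at the per-fork gating used throughout the ef_tests handlers: a handler opts out of forks where its cases do not apply. A small sketch of that check with illustrative names (the real handler carries more state):

    #[derive(Debug, Clone, Copy, PartialEq)]
    enum Fork {
        Base,
        Altair,
        Bellatrix,
        Capella,
    }

    struct Handler {
        name: &'static str,
    }

    impl Handler {
        fn is_enabled_for_fork(&self, fork: Fork) -> bool {
            // Merge-transition cases only make sense at the fork where the
            // merge happens, mirroring the `on_merge_block` check below.
            if self.name == "on_merge_block" && fork != Fork::Bellatrix {
                return false;
            }
            true
        }
    }

    fn main() {
        let handler = Handler { name: "on_merge_block" };
        assert!(handler.is_enabled_for_fork(Fork::Bellatrix));
        assert!(!handler.is_enabled_for_fork(Fork::Capella));
    }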
- if self.handler_name == "on_merge_block" && fork_name != ForkName::Merge { + if self.handler_name == "on_merge_block" && fork_name != ForkName::Bellatrix { return false; } @@ -823,7 +823,7 @@ impl Handler for KzgInclusionMerkleProofValidityHandler, MainnetEthSpec>::altair_only() .run(); - SszStaticHandler::, MinimalEthSpec>::merge_only() + SszStaticHandler::, MinimalEthSpec>::bellatrix_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_only() + SszStaticHandler::, MainnetEthSpec>::bellatrix_only() .run(); SszStaticHandler::, MinimalEthSpec>::capella_only() .run(); @@ -290,10 +290,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::altair_only() .run(); - SszStaticHandler::, MinimalEthSpec>::merge_only( + SszStaticHandler::, MinimalEthSpec>::bellatrix_only( ) .run(); - SszStaticHandler::, MainnetEthSpec>::merge_only( + SszStaticHandler::, MainnetEthSpec>::bellatrix_only( ) .run(); SszStaticHandler::, MinimalEthSpec>::capella_only() @@ -313,9 +313,9 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::altair_only() .run(); - SszStaticHandler::, MinimalEthSpec>::merge_only() + SszStaticHandler::, MinimalEthSpec>::bellatrix_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_only() + SszStaticHandler::, MainnetEthSpec>::bellatrix_only() .run(); SszStaticHandler::, MinimalEthSpec>::capella_only( @@ -340,10 +340,10 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::altair_only( ) .run(); - SszStaticHandler::, MinimalEthSpec>::merge_only( + SszStaticHandler::, MinimalEthSpec>::bellatrix_only( ) .run(); - SszStaticHandler::, MainnetEthSpec>::merge_only( + SszStaticHandler::, MainnetEthSpec>::bellatrix_only( ) .run(); SszStaticHandler::, MinimalEthSpec>::capella_only( @@ -369,10 +369,10 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::altair_only( ) .run(); - SszStaticHandler::, MinimalEthSpec>::merge_only( + SszStaticHandler::, MinimalEthSpec>::bellatrix_only( ) .run(); - SszStaticHandler::, MainnetEthSpec>::merge_only( + SszStaticHandler::, MainnetEthSpec>::bellatrix_only( ) .run(); SszStaticHandler::, MinimalEthSpec>::capella_only( @@ -396,9 +396,9 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::altair_only() .run(); - SszStaticHandler::, MinimalEthSpec>::merge_only() + SszStaticHandler::, MinimalEthSpec>::bellatrix_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_only() + SszStaticHandler::, MainnetEthSpec>::bellatrix_only() .run(); SszStaticHandler::, MinimalEthSpec>::capella_only( ) @@ -448,12 +448,12 @@ mod ssz_static { SszStaticHandler::::altair_and_later().run(); } - // Merge and later + // Bellatrix and later #[test] fn execution_payload() { - SszStaticHandler::, MinimalEthSpec>::merge_only() + SszStaticHandler::, MinimalEthSpec>::bellatrix_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_only() + SszStaticHandler::, MainnetEthSpec>::bellatrix_only() .run(); SszStaticHandler::, MinimalEthSpec>::capella_only() .run(); @@ -467,9 +467,9 @@ mod ssz_static { #[test] fn execution_payload_header() { - SszStaticHandler::, MinimalEthSpec>::merge_only() + SszStaticHandler::, MinimalEthSpec>::bellatrix_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_only() + SszStaticHandler::, MainnetEthSpec>::bellatrix_only() .run(); SszStaticHandler::, MinimalEthSpec> ::capella_only().run(); diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 3090b4da556..292e10d0542 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ 
-724,11 +724,11 @@ mod tests { .await; } - /// Test all the Merge types. - async fn test_merge_types(network: &str, listen_port: u16) { + /// Test all the Bellatrix types. + async fn test_bellatrix_types(network: &str, listen_port: u16) { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); let spec = &network_config.chain_spec::().unwrap(); - let merge_fork_slot = spec + let bellatrix_fork_slot = spec .bellatrix_fork_epoch .unwrap() .start_slot(E::slots_per_epoch()); @@ -740,14 +740,21 @@ mod tests { listen_port, ) .await - .assert_signatures_match("beacon_block_merge", |pubkey, validator_store| async move { - let mut merge_block = BeaconBlockMerge::empty(spec); - merge_block.slot = merge_fork_slot; - validator_store - .sign_block(pubkey, BeaconBlock::Merge(merge_block), merge_fork_slot) - .await - .unwrap() - }) + .assert_signatures_match( + "beacon_block_bellatrix", + |pubkey, validator_store| async move { + let mut bellatrix_block = BeaconBlockBellatrix::empty(spec); + bellatrix_block.slot = bellatrix_fork_slot; + validator_store + .sign_block( + pubkey, + BeaconBlock::Bellatrix(bellatrix_block), + bellatrix_fork_slot, + ) + .await + .unwrap() + }, + ) .await; } @@ -760,7 +767,7 @@ mod tests { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); let spec = &network_config.chain_spec::().unwrap(); - let merge_fork_slot = spec + let bellatrix_fork_slot = spec .bellatrix_fork_epoch .unwrap() .start_slot(E::slots_per_epoch()); @@ -797,9 +804,9 @@ mod tests { }; let first_block = || { - let mut merge_block = BeaconBlockMerge::empty(spec); - merge_block.slot = merge_fork_slot; - BeaconBlock::Merge(merge_block) + let mut bellatrix_block = BeaconBlockBellatrix::empty(spec); + bellatrix_block.slot = bellatrix_fork_slot; + BeaconBlock::Bellatrix(bellatrix_block) }; let double_vote_block = || { @@ -914,8 +921,8 @@ mod tests { } #[tokio::test] - async fn sepolia_merge_types() { - test_merge_types("sepolia", 4252).await + async fn sepolia_bellatrix_types() { + test_bellatrix_types("sepolia", 4252).await } #[tokio::test] diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 7973cab2c1e..8ad37a1620a 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -87,7 +87,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, E, Pa block: Some(block), block_header: None, }), - BeaconBlock::Merge(_) => Ok(Web3SignerObject::BeaconBlock { + BeaconBlock::Bellatrix(_) => Ok(Web3SignerObject::BeaconBlock { version: ForkName::Bellatrix, block: None, block_header: Some(block.block_header()), diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 0a00dad9beb..f3bdc2c0f69 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -360,7 +360,7 @@ impl ValidatorStore { fn signing_context(&self, domain: Domain, signing_epoch: Epoch) -> SigningContext { if domain == Domain::VoluntaryExit { match self.spec.fork_name_at_epoch(signing_epoch) { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { SigningContext { domain, epoch: signing_epoch, diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs index 59547c303ab..315fcbc8358 100644 --- a/watch/src/database/mod.rs +++ b/watch/src/database/mod.rs @@ -146,7 +146,7 @@ pub 
fn insert_beacon_block( let full_payload = block_message.execution_payload().ok(); let transaction_count: Option = if let Some(bellatrix_payload) = - full_payload.and_then(|payload| payload.execution_payload_merge().ok()) + full_payload.and_then(|payload| payload.execution_payload_bellatrix().ok()) { Some(bellatrix_payload.transactions.len() as i32) } else { From a1141ea1ef88dab78a85edd6440586c54b69475f Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 26 Apr 2024 22:25:45 +0900 Subject: [PATCH 14/16] Deterministic block generation for tests (#5654) * Deterministic block generation for tests --- .../types/src/test_utils/test_random/secret_key.rs | 2 ++ .../types/src/test_utils/test_random/signature.rs | 11 +++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/consensus/types/src/test_utils/test_random/secret_key.rs b/consensus/types/src/test_utils/test_random/secret_key.rs index 3f3f6ed5184..da1614aa24e 100644 --- a/consensus/types/src/test_utils/test_random/secret_key.rs +++ b/consensus/types/src/test_utils/test_random/secret_key.rs @@ -2,6 +2,8 @@ use super::*; impl TestRandom for SecretKey { fn random_for_test(_rng: &mut impl RngCore) -> Self { + // TODO: Generation here is not deterministic. Using `SecretKey::deserialize` results in + // `BlstError(BLST_BAD_ENCODING)`; we need to debug against the blst source to see what encoding it expects. SecretKey::random() } } diff --git a/consensus/types/src/test_utils/test_random/signature.rs b/consensus/types/src/test_utils/test_random/signature.rs index 5b952296b61..8bc0d711103 100644 --- a/consensus/types/src/test_utils/test_random/signature.rs +++ b/consensus/types/src/test_utils/test_random/signature.rs @@ -1,11 +1,10 @@ use super::*; impl TestRandom for Signature { - fn random_for_test(rng: &mut impl RngCore) -> Self { - let secret_key = SecretKey::random_for_test(rng); - let mut message = vec![0; 32]; - rng.fill_bytes(&mut message); - - secret_key.sign(Hash256::from_slice(&message)) + fn random_for_test(_rng: &mut impl RngCore) -> Self { + // TODO: `SecretKey::random_for_test` does not return a deterministic signature. Since this + // signature will not pass verification we could just return the generator point or the + // generator point multiplied by a random scalar if we want distinct signatures. + Signature::infinity().expect("infinity signature is valid") } } From 000a4fdf4db62fdc6b1cc3fc4c30301633d7d0d5 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 26 Apr 2024 16:17:23 -0400 Subject: [PATCH 15/16] Electra other containers (#5652) * add new fields to execution payload and header * beacon state changes * partial beacon state * safe arith in upgrade to electra * initialize balances cache in interop genesis state * Revert "initialize balances cache in interop genesis state" This reverts commit c60b522865177fe7f53c6b3be4e2e12567b27c53.
* always initialize balances cache if necessary in electra upgrade * build cache earlier * fix block test * per fork NUM_FIELDS_POW2 * Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra-other-containers * fix lints * get fields based on post state, as is spec'd * fix type and move cache build --- beacon_node/execution_layer/src/engine_api.rs | 3 + .../src/engine_api/json_structures.rs | 3 + beacon_node/execution_layer/src/lib.rs | 5 + .../test_utils/execution_block_generator.rs | 2 + beacon_node/store/src/partial_beacon_state.rs | 45 +++++- .../state_processing/src/upgrade/electra.rs | 62 ++++++++- consensus/types/src/beacon_block.rs | 5 +- consensus/types/src/beacon_state.rs | 130 +++++++++++++++++- consensus/types/src/beacon_state/tests.rs | 29 +--- consensus/types/src/execution_payload.rs | 5 + .../types/src/execution_payload_header.rs | 8 ++ consensus/types/src/validator.rs | 16 +++ 12 files changed, 275 insertions(+), 38 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index f3f059a4355..ce1e0fec5dd 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -632,6 +632,9 @@ impl ExecutionPayloadBodyV1 { withdrawals, blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, + // TODO(electra) + deposit_receipts: <_>::default(), + withdrawal_requests: <_>::default(), })) } else { Err(format!( diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 5c4d6ab1ac3..50d3519e129 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -319,6 +319,9 @@ impl From> for ExecutionPayloadElectra .into(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, + // TODO(electra) + deposit_receipts: Default::default(), + withdrawal_requests: Default::default(), } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 668a5ce84b2..d441596edda 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -2003,6 +2003,11 @@ impl ExecutionLayer { withdrawals, blob_gas_used: electra_block.blob_gas_used, excess_blob_gas: electra_block.excess_blob_gas, + // TODO(electra) + // deposit_receipts: electra_block.deposit_receipts, + // withdrawal_requests: electra_block.withdrawal_requests, + deposit_receipts: <_>::default(), + withdrawal_requests: <_>::default(), }) } }; diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 77e12bcef66..e80c6b23705 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -659,6 +659,8 @@ impl ExecutionBlockGenerator { withdrawals: pa.withdrawals.clone().into(), blob_gas_used: 0, excess_blob_gas: 0, + deposit_receipts: vec![].into(), + withdrawal_requests: vec![].into(), }), _ => unreachable!(), }, diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index c2a15c0266f..e56d0580ac2 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -118,6 +118,29 @@ where #[ssz(skip_serializing, skip_deserializing)] 
#[superstruct(only(Capella, Deneb, Electra))] pub historical_summaries: Option>, + + // Electra + #[superstruct(only(Electra))] + pub deposit_receipts_start_index: u64, + #[superstruct(only(Electra))] + pub deposit_balance_to_consume: u64, + #[superstruct(only(Electra))] + pub exit_balance_to_consume: u64, + #[superstruct(only(Electra))] + pub earliest_exit_epoch: Epoch, + #[superstruct(only(Electra))] + pub consolidation_balance_to_consume: u64, + #[superstruct(only(Electra))] + pub earliest_consolidation_epoch: Epoch, + + // TODO(electra) should these be optional? + #[superstruct(only(Electra))] + pub pending_balance_deposits: List, + #[superstruct(only(Electra))] + pub pending_partial_withdrawals: + List, + #[superstruct(only(Electra))] + pub pending_consolidations: List, } /// Implement the conversion function from BeaconState -> PartialBeaconState. @@ -261,7 +284,16 @@ impl PartialBeaconState { inactivity_scores, latest_execution_payload_header, next_withdrawal_index, - next_withdrawal_validator_index + next_withdrawal_validator_index, + deposit_receipts_start_index, + deposit_balance_to_consume, + exit_balance_to_consume, + earliest_exit_epoch, + consolidation_balance_to_consume, + earliest_consolidation_epoch, + pending_balance_deposits, + pending_partial_withdrawals, + pending_consolidations ], [historical_summaries] ), @@ -525,7 +557,16 @@ impl TryInto> for PartialBeaconState { inactivity_scores, latest_execution_payload_header, next_withdrawal_index, - next_withdrawal_validator_index + next_withdrawal_validator_index, + deposit_receipts_start_index, + deposit_balance_to_consume, + exit_balance_to_consume, + earliest_exit_epoch, + consolidation_balance_to_consume, + earliest_consolidation_epoch, + pending_balance_deposits, + pending_partial_withdrawals, + pending_consolidations ], [historical_summaries] ), diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index f64228f050b..1e60bf488db 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -1,3 +1,4 @@ +use safe_arith::SafeArith; use std::mem; use types::{ BeaconState, BeaconStateElectra, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, @@ -10,14 +11,28 @@ pub fn upgrade_to_electra( spec: &ChainSpec, ) -> Result<(), Error> { let epoch = pre_state.current_epoch(); - let pre = pre_state.as_deneb_mut()?; + let earliest_exit_epoch = pre_state + .validators() + .iter() + .filter(|v| v.exit_epoch != spec.far_future_epoch) + .map(|v| v.exit_epoch) + .max() + .unwrap_or(epoch) + .safe_add(1)?; + + // The total active balance cache must be built before the consolidation churn limit + // is calculated. + pre_state.build_total_active_balance_cache(spec)?; + let earliest_consolidation_epoch = spec.compute_activation_exit_epoch(epoch)?; + + let pre = pre_state.as_deneb_mut()?; // Where possible, use something like `mem::take` to move fields from behind the &mut // reference. For other fields that don't have a good default value, use `clone`. // // Fixed size vectors get cloned because replacing them would require the same size // allocation as cloning. 
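The comment above describes the move-vs-clone strategy for the upgrade. A small self-contained sketch of the same `mem::take` pattern, with toy state types standing in for the real `BeaconState` variants:

    use std::mem;

    struct OldState {
        validators: Vec<u64>,
        roots: [u8; 32],
    }

    struct NewState {
        validators: Vec<u64>,
        roots: [u8; 32],
        new_field: u64,
    }

    fn upgrade(pre: &mut OldState) -> NewState {
        NewState {
            // `mem::take` moves the heap allocation out from behind the
            // &mut, leaving an empty Vec in its place; no clone of contents.
            validators: mem::take(&mut pre.validators),
            // Fixed-size data is copied; as the comment notes, replacing it
            // would require the same size allocation as cloning anyway.
            roots: pre.roots,
            new_field: 0,
        }
    }

    fn main() {
        let mut pre = OldState { validators: vec![1, 2, 3], roots: [0; 32] };
        let post = upgrade(&mut pre);
        assert!(pre.validators.is_empty());
        assert_eq!(post.validators, vec![1, 2, 3]);
        assert_eq!(post.roots, [0; 32]);
    }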
- let post = BeaconState::Electra(BeaconStateElectra { + let mut post = BeaconState::Electra(BeaconStateElectra { // Versioning genesis_time: pre.genesis_time, genesis_validators_root: pre.genesis_validators_root, @@ -62,6 +77,16 @@ pub fn upgrade_to_electra( next_withdrawal_index: pre.next_withdrawal_index, next_withdrawal_validator_index: pre.next_withdrawal_validator_index, historical_summaries: pre.historical_summaries.clone(), + // Electra + deposit_receipts_start_index: spec.unset_deposit_receipts_start_index, + deposit_balance_to_consume: 0, + exit_balance_to_consume: 0, + earliest_exit_epoch, + consolidation_balance_to_consume: 0, + earliest_consolidation_epoch, + pending_balance_deposits: Default::default(), + pending_partial_withdrawals: Default::default(), + pending_consolidations: Default::default(), // Caches total_active_balance: pre.total_active_balance, progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), @@ -71,6 +96,39 @@ pub fn upgrade_to_electra( slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), }); + *post.exit_balance_to_consume_mut()? = post.get_activation_exit_churn_limit(spec)?; + *post.consolidation_balance_to_consume_mut()? = post.get_consolidation_churn_limit(spec)?; + + // Add validators that are not yet active to pending balance deposits + let validators = post.validators().clone(); + let mut pre_activation = validators + .iter() + .enumerate() + .filter(|(_, validator)| validator.activation_epoch == spec.far_future_epoch) + .collect::>(); + + // Sort the indices by activation_eligibility_epoch and then by index + pre_activation.sort_by(|(index_a, val_a), (index_b, val_b)| { + if val_a.activation_eligibility_epoch == val_b.activation_eligibility_epoch { + index_a.cmp(index_b) + } else { + val_a + .activation_eligibility_epoch + .cmp(&val_b.activation_eligibility_epoch) + } + }); + + // Process validators to queue entire balance and reset them + for (index, _) in pre_activation { + post.queue_entire_balance_and_reset_validator(index, spec)?; + } + + // Ensure early adopters of compounding credentials go through the activation churn + for (index, validator) in validators.iter().enumerate() { + if validator.has_compounding_withdrawal_credential(spec) { + post.queue_excess_active_balance(index, spec)?; + } + } *pre_state = post; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 5af53b3fa17..81491d65056 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1074,9 +1074,8 @@ mod tests { .expect("good electra block can be decoded"), good_block ); - // TODO(electra): once the Electra block is changed from Deneb, update this to match - // the other forks. 
- assert!(BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec).is_ok()); + BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + .expect_err("bad electra block cannot be decoded"); } } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index f6bae53505b..ef5cd23646b 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -467,6 +467,40 @@ where #[test_random(default)] pub historical_summaries: List, + // Electra + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + #[serde(with = "serde_utils::quoted_u64")] + pub deposit_receipts_start_index: u64, + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + #[serde(with = "serde_utils::quoted_u64")] + pub deposit_balance_to_consume: u64, + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + #[serde(with = "serde_utils::quoted_u64")] + pub exit_balance_to_consume: u64, + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + pub earliest_exit_epoch: Epoch, + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + #[serde(with = "serde_utils::quoted_u64")] + pub consolidation_balance_to_consume: u64, + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + pub earliest_consolidation_epoch: Epoch, + #[test_random(default)] + #[superstruct(only(Electra))] + pub pending_balance_deposits: List, + #[test_random(default)] + #[superstruct(only(Electra))] + pub pending_partial_withdrawals: + List, + #[test_random(default)] + #[superstruct(only(Electra))] + pub pending_consolidations: List, + // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] @@ -2031,6 +2065,83 @@ impl BeaconState { self.epoch_cache().get_base_reward(validator_index) } + // ******* Electra accessors ******* + + /// Return the churn limit for the current epoch. + pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { + let total_active_balance = self.get_total_active_balance()?; + let churn = std::cmp::max( + spec.min_per_epoch_churn_limit_electra, + total_active_balance.safe_div(spec.churn_limit_quotient)?, + ); + + Ok(churn.safe_sub(churn.safe_rem(spec.effective_balance_increment)?)?) + } + + /// Return the churn limit for the current epoch dedicated to activations and exits. + pub fn get_activation_exit_churn_limit(&self, spec: &ChainSpec) -> Result { + Ok(std::cmp::min( + spec.max_per_epoch_activation_exit_churn_limit, + self.get_balance_churn_limit(spec)?, + )) + } + + pub fn get_consolidation_churn_limit(&self, spec: &ChainSpec) -> Result { + self.get_balance_churn_limit(spec)? + .safe_sub(self.get_activation_exit_churn_limit(spec)?) + .map_err(Into::into) + } + + // ******* Electra mutators ******* + + pub fn queue_excess_active_balance( + &mut self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result<(), Error> { + let balance = self + .balances_mut() + .get_mut(validator_index) + .ok_or(Error::UnknownValidator(validator_index))?; + if *balance > spec.min_activation_balance { + let excess_balance = balance.safe_sub(spec.min_activation_balance)?; + *balance = spec.min_activation_balance; + self.pending_balance_deposits_mut()? 
+ .push(PendingBalanceDeposit { + index: validator_index as u64, + amount: excess_balance, + })?; + } + Ok(()) + } + + pub fn queue_entire_balance_and_reset_validator( + &mut self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result<(), Error> { + let balance = self + .balances_mut() + .get_mut(validator_index) + .ok_or(Error::UnknownValidator(validator_index))?; + let balance_copy = *balance; + *balance = 0_u64; + + let validator = self + .validators_mut() + .get_mut(validator_index) + .ok_or(Error::UnknownValidator(validator_index))?; + validator.effective_balance = 0; + validator.activation_eligibility_epoch = spec.far_future_epoch; + + self.pending_balance_deposits_mut()? + .push(PendingBalanceDeposit { + index: validator_index as u64, + amount: balance_copy, + }) + .map_err(Into::into) + } + #[allow(clippy::arithmetic_side_effects)] pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { // Required for macros (which use type-hints internally). @@ -2147,10 +2258,17 @@ impl BeaconState { /// The number of fields of the `BeaconState` rounded up to the nearest power of two. /// /// This is relevant to tree-hashing of the `BeaconState`. - /// - /// We assume this value is stable across forks. This assumption is checked in the - /// `check_num_fields_pow2` test. - pub const NUM_FIELDS_POW2: usize = BeaconStateBellatrix::::NUM_FIELDS.next_power_of_two(); + pub fn num_fields_pow2(&self) -> usize { + let fork_name = self.fork_name_unchecked(); + match fork_name { + ForkName::Base => BeaconStateBase::::NUM_FIELDS.next_power_of_two(), + ForkName::Altair => BeaconStateAltair::::NUM_FIELDS.next_power_of_two(), + ForkName::Bellatrix => BeaconStateBellatrix::::NUM_FIELDS.next_power_of_two(), + ForkName::Capella => BeaconStateCapella::::NUM_FIELDS.next_power_of_two(), + ForkName::Deneb => BeaconStateDeneb::::NUM_FIELDS.next_power_of_two(), + ForkName::Electra => BeaconStateElectra::::NUM_FIELDS.next_power_of_two(), + } + } /// Specialised deserialisation method that uses the `ChainSpec` as context. #[allow(clippy::arithmetic_side_effects)] @@ -2211,7 +2329,7 @@ impl BeaconState { // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate generalized_index - .checked_sub(Self::NUM_FIELDS_POW2) + .checked_sub(self.num_fields_pow2()) .ok_or(Error::IndexNotSupported(generalized_index))? } light_client_update::FINALIZED_ROOT_INDEX => { @@ -2221,7 +2339,7 @@ impl BeaconState { // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches // position of `finalized_checkpoint` in `BeaconState`. finalized_checkpoint_generalized_index - .checked_sub(Self::NUM_FIELDS_POW2) + .checked_sub(self.num_fields_pow2()) .ok_or(Error::IndexNotSupported(generalized_index))? 
diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs
index 35afe314957..38a76e44c50 100644
--- a/consensus/types/src/beacon_state/tests.rs
+++ b/consensus/types/src/beacon_state/tests.rs
@@ -1,10 +1,10 @@
 #![cfg(test)]
-use crate::{test_utils::*, ForkName};
+use crate::test_utils::*;
 use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
 use beacon_chain::types::{
-    test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateBellatrix,
-    BeaconStateCapella, BeaconStateDeneb, BeaconStateElectra, BeaconStateError, ChainSpec, Domain,
-    Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, Slot, Vector,
+    test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError,
+    ChainSpec, Domain, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec,
+    RelativeEpoch, Slot, Vector,
 };
 use ssz::Encode;
 use std::ops::Mul;
@@ -403,24 +403,3 @@ fn decode_base_and_altair() {
         .expect_err("bad altair state cannot be decoded");
     }
 }
-
-#[test]
-fn check_num_fields_pow2() {
-    use metastruct::NumFields;
-    pub type E = MainnetEthSpec;
-
-    for fork_name in ForkName::list_all() {
-        let num_fields = match fork_name {
-            ForkName::Base => BeaconStateBase::<E>::NUM_FIELDS,
-            ForkName::Altair => BeaconStateAltair::<E>::NUM_FIELDS,
-            ForkName::Bellatrix => BeaconStateBellatrix::<E>::NUM_FIELDS,
-            ForkName::Capella => BeaconStateCapella::<E>::NUM_FIELDS,
-            ForkName::Deneb => BeaconStateDeneb::<E>::NUM_FIELDS,
-            ForkName::Electra => BeaconStateElectra::<E>::NUM_FIELDS,
-        };
-        assert_eq!(
-            num_fields.next_power_of_two(),
-            BeaconState::<E>::NUM_FIELDS_POW2
-        );
-    }
-}
diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs
index a46e7c29fff..0946b9ecffa 100644
--- a/consensus/types/src/execution_payload.rs
+++ b/consensus/types/src/execution_payload.rs
@@ -89,6 +89,11 @@ pub struct ExecutionPayload<E: EthSpec> {
     #[superstruct(only(Deneb, Electra), partial_getter(copy))]
     #[serde(with = "serde_utils::quoted_u64")]
     pub excess_blob_gas: u64,
+    #[superstruct(only(Electra))]
+    pub deposit_receipts: VariableList<DepositReceipt, E::MaxDepositReceiptsPerPayload>,
+    #[superstruct(only(Electra))]
+    pub withdrawal_requests:
+        VariableList<ExecutionLayerWithdrawalRequest, E::MaxWithdrawalRequestsPerPayload>,
 }
 
 impl<'a, E: EthSpec> ExecutionPayloadRef<'a, E> {
diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs
index 8515e386c22..324d7b97472 100644
--- a/consensus/types/src/execution_payload_header.rs
+++ b/consensus/types/src/execution_payload_header.rs
@@ -88,6 +88,10 @@ pub struct ExecutionPayloadHeader<E: EthSpec> {
     #[serde(with = "serde_utils::quoted_u64")]
     #[superstruct(getter(copy))]
     pub excess_blob_gas: u64,
+    #[superstruct(only(Electra), partial_getter(copy))]
+    pub deposit_receipts_root: Hash256,
+    #[superstruct(only(Electra), partial_getter(copy))]
+    pub withdrawal_requests_root: Hash256,
 }
 
 impl<E: EthSpec> ExecutionPayloadHeader<E> {
@@ -206,6 +210,8 @@ impl<E: EthSpec> ExecutionPayloadHeaderDeneb<E> {
             withdrawals_root: self.withdrawals_root,
             blob_gas_used: self.blob_gas_used,
             excess_blob_gas: self.excess_blob_gas,
+            deposit_receipts_root: Hash256::zero(),
+            withdrawal_requests_root: Hash256::zero(),
         }
     }
 }
@@ -297,6 +303,8 @@ impl<'a, E: EthSpec> From<&'a ExecutionPayloadElectra<E>> for ExecutionPayloadHeaderElectra<E> {
             withdrawals_root: payload.withdrawals.tree_hash_root(),
             blob_gas_used: payload.blob_gas_used,
             excess_blob_gas: payload.excess_blob_gas,
+            deposit_receipts_root: payload.deposit_receipts.tree_hash_root(),
+            withdrawal_requests_root: payload.withdrawal_requests.tree_hash_root(),
         }
     }
 }
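The Electra header commits to the two new request lists by their tree hash roots, and the Deneb-to-Electra conversion above zero-fills those roots because no deposit receipts or withdrawal requests exist at the upgrade boundary. A much-simplified standalone sketch of that pattern, using stand-in types rather than the crate's own:

    // Stand-in types only: sketches how a pre-Electra header is carried forward
    // with the new commitment fields zeroed until the first Electra payload.
    #[derive(Debug, PartialEq, Clone, Copy, Default)]
    struct Root([u8; 32]);

    struct HeaderDeneb { excess_blob_gas: u64 }
    struct HeaderElectra {
        excess_blob_gas: u64,
        deposit_receipts_root: Root,
        withdrawal_requests_root: Root,
    }

    impl HeaderDeneb {
        fn upgrade_to_electra(&self) -> HeaderElectra {
            HeaderElectra {
                excess_blob_gas: self.excess_blob_gas,
                // Nothing to commit to yet at the fork boundary.
                deposit_receipts_root: Root::default(),
                withdrawal_requests_root: Root::default(),
            }
        }
    }

    fn main() {
        let electra = HeaderDeneb { excess_blob_gas: 7 }.upgrade_to_electra();
        assert_eq!(electra.deposit_receipts_root, Root::default());
        assert_eq!(electra.excess_blob_gas, 7);
    }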
diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs
index 98567cd1e6c..5fd18552f6b 100644
--- a/consensus/types/src/validator.rs
+++ b/consensus/types/src/validator.rs
@@ -102,6 +102,11 @@ impl Validator {
             .unwrap_or(false)
     }
 
+    /// Check if ``validator`` has a 0x02 prefixed "compounding" withdrawal credential.
+    pub fn has_compounding_withdrawal_credential(&self, spec: &ChainSpec) -> bool {
+        is_compounding_withdrawal_credential(self.withdrawal_credentials, spec)
+    }
+
     /// Get the eth1 withdrawal address if this validator has one initialized.
     pub fn get_eth1_withdrawal_address(&self, spec: &ChainSpec) -> Option<Address> {
         self.has_eth1_withdrawal_credential(spec)
@@ -153,6 +158,17 @@ impl Default for Validator {
     }
 }
 
+pub fn is_compounding_withdrawal_credential(
+    withdrawal_credentials: Hash256,
+    spec: &ChainSpec,
+) -> bool {
+    withdrawal_credentials
+        .as_bytes()
+        .first()
+        .map(|prefix_byte| *prefix_byte == spec.compounding_withdrawal_prefix_byte)
+        .unwrap_or(false)
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
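The free function added above inspects only the first byte of the 32-byte credentials. A standalone sketch of the same check against a plain byte array; the 0x02 constant mirrors the spec's `COMPOUNDING_WITHDRAWAL_PREFIX`, hard-coded here instead of read from the `ChainSpec`:

    const COMPOUNDING_WITHDRAWAL_PREFIX: u8 = 0x02;

    // Same shape as the helper above: look at byte 0 and compare the prefix.
    fn is_compounding_withdrawal_credential(withdrawal_credentials: &[u8; 32]) -> bool {
        withdrawal_credentials
            .first()
            .map(|prefix_byte| *prefix_byte == COMPOUNDING_WITHDRAWAL_PREFIX)
            .unwrap_or(false)
    }

    fn main() {
        let mut credentials = [0u8; 32];
        assert!(!is_compounding_withdrawal_credential(&credentials)); // 0x00: BLS
        credentials[0] = 0x02;
        assert!(is_compounding_withdrawal_credential(&credentials)); // compounding
    }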
From 8b24880df78659ab1a58f37859aeabbad838dae7 Mon Sep 17 00:00:00 2001
From: Pawan Dhananjay
Date: Sat, 27 Apr 2024 06:39:29 +0530
Subject: [PATCH 16/16] Add more electra helpers (#5653)

* Add new helpers

* Fix some stuff

* Fix compilation errors

* lint

* Address review

---
 consensus/types/src/beacon_state.rs | 127 ++++++++++++++++++++++++++++
 consensus/types/src/validator.rs    |  36 ++++++--
 2 files changed, 157 insertions(+), 6 deletions(-)

diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs
index ef5cd23646b..577f282a556 100644
--- a/consensus/types/src/beacon_state.rs
+++ b/consensus/types/src/beacon_state.rs
@@ -2092,6 +2092,38 @@ impl<E: EthSpec> BeaconState<E> {
             .map_err(Into::into)
     }
 
+    /// Get active balance for the given `validator_index`.
+    pub fn get_active_balance(
+        &self,
+        validator_index: usize,
+        spec: &ChainSpec,
+    ) -> Result<u64, Error> {
+        let max_effective_balance = self
+            .validators()
+            .get(validator_index)
+            .map(|validator| validator.get_validator_max_effective_balance(spec))
+            .ok_or(Error::UnknownValidator(validator_index))?;
+        Ok(std::cmp::min(
+            *self
+                .balances()
+                .get(validator_index)
+                .ok_or(Error::UnknownValidator(validator_index))?,
+            max_effective_balance,
+        ))
+    }
+
+    pub fn get_pending_balance_to_withdraw(&self, validator_index: usize) -> Result<u64, Error> {
+        let mut pending_balance = 0;
+        for withdrawal in self
+            .pending_partial_withdrawals()?
+            .iter()
+            .filter(|withdrawal| withdrawal.index as usize == validator_index)
+        {
+            pending_balance.safe_add_assign(withdrawal.amount)?;
+        }
+        Ok(pending_balance)
+    }
+
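`get_pending_balance_to_withdraw` above is a linear scan over the `pending_partial_withdrawals` queue, summing the amounts queued for a single validator. A simplified standalone sketch with a plain slice in place of the SSZ `List` and unchecked addition in place of `safe_add_assign`:

    struct PendingPartialWithdrawal { index: u64, amount: u64 }

    fn pending_balance_to_withdraw(queue: &[PendingPartialWithdrawal], validator: u64) -> u64 {
        queue
            .iter()
            .filter(|withdrawal| withdrawal.index == validator)
            .map(|withdrawal| withdrawal.amount)
            .sum()
    }

    fn main() {
        let queue = vec![
            PendingPartialWithdrawal { index: 7, amount: 1_000_000_000 },
            PendingPartialWithdrawal { index: 3, amount: 5_000_000_000 },
            PendingPartialWithdrawal { index: 7, amount: 2_000_000_000 },
        ];
        // Validator 7 has two entries totalling 3 ETH in gwei.
        assert_eq!(pending_balance_to_withdraw(&queue, 7), 3_000_000_000);
    }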
     // ******* Electra mutators *******
 
     pub fn queue_excess_active_balance(
@@ -2142,6 +2174,101 @@ impl<E: EthSpec> BeaconState<E> {
             .map_err(Into::into)
     }
 
+    /// Change the withdrawal prefix of the given `validator_index` to the compounding withdrawal validator prefix.
+    pub fn switch_to_compounding_validator(
+        &mut self,
+        validator_index: usize,
+        spec: &ChainSpec,
+    ) -> Result<(), Error> {
+        let validator = self
+            .validators_mut()
+            .get_mut(validator_index)
+            .ok_or(Error::UnknownValidator(validator_index))?;
+        if validator.has_eth1_withdrawal_credential(spec) {
+            validator.withdrawal_credentials.as_fixed_bytes_mut()[0] =
+                spec.compounding_withdrawal_prefix_byte;
+            self.queue_excess_active_balance(validator_index, spec)?;
+        }
+        Ok(())
+    }
+
+    pub fn compute_exit_epoch_and_update_churn(
+        &mut self,
+        exit_balance: u64,
+        spec: &ChainSpec,
+    ) -> Result<Epoch, Error> {
+        let mut earliest_exit_epoch = std::cmp::max(
+            self.earliest_exit_epoch()?,
+            self.compute_activation_exit_epoch(self.current_epoch(), spec)?,
+        );
+
+        let per_epoch_churn = self.get_activation_exit_churn_limit(spec)?;
+        // New epoch for exits
+        let mut exit_balance_to_consume = if self.earliest_exit_epoch()? < earliest_exit_epoch {
+            per_epoch_churn
+        } else {
+            self.exit_balance_to_consume()?
+        };
+
+        // Exit doesn't fit in the current earliest epoch
+        if exit_balance > exit_balance_to_consume {
+            let balance_to_process = exit_balance.safe_sub(exit_balance_to_consume)?;
+            let additional_epochs = balance_to_process
+                .safe_sub(1)?
+                .safe_div(per_epoch_churn)?
+                .safe_add(1)?;
+            earliest_exit_epoch.safe_add_assign(additional_epochs)?;
+            exit_balance_to_consume
+                .safe_add_assign(additional_epochs.safe_mul(per_epoch_churn)?)?;
+        }
+        let state = self.as_electra_mut()?;
+        // Consume the balance and update state variables
+        state.exit_balance_to_consume = exit_balance_to_consume.safe_sub(exit_balance)?;
+        state.earliest_exit_epoch = earliest_exit_epoch;
+
+        Ok(state.earliest_exit_epoch)
+    }
+
+    pub fn compute_consolidation_epoch_and_update_churn(
+        &mut self,
+        consolidation_balance: u64,
+        spec: &ChainSpec,
+    ) -> Result<Epoch, Error> {
+        let mut earliest_consolidation_epoch = std::cmp::max(
+            self.earliest_consolidation_epoch()?,
+            self.compute_activation_exit_epoch(self.current_epoch(), spec)?,
+        );
+
+        let per_epoch_consolidation_churn = self.get_consolidation_churn_limit(spec)?;
+
+        // New epoch for consolidations
+        let mut consolidation_balance_to_consume =
+            if self.earliest_consolidation_epoch()? < earliest_consolidation_epoch {
+                per_epoch_consolidation_churn
+            } else {
+                self.consolidation_balance_to_consume()?
+            };
+        // Consolidation doesn't fit in the current earliest epoch
+        if consolidation_balance > consolidation_balance_to_consume {
+            let balance_to_process =
+                consolidation_balance.safe_sub(consolidation_balance_to_consume)?;
+            let additional_epochs = balance_to_process
+                .safe_sub(1)?
+                .safe_div(per_epoch_consolidation_churn)?
+                .safe_add(1)?;
+            earliest_consolidation_epoch.safe_add_assign(additional_epochs)?;
+            consolidation_balance_to_consume
+                .safe_add_assign(additional_epochs.safe_mul(per_epoch_consolidation_churn)?)?;
+        }
+        // Consume the balance and update state variables
+        let state = self.as_electra_mut()?;
+        state.consolidation_balance_to_consume =
+            consolidation_balance_to_consume.safe_sub(consolidation_balance)?;
+        state.earliest_consolidation_epoch = earliest_consolidation_epoch;
+
+        Ok(state.earliest_consolidation_epoch)
+    }
+
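Both `compute_*_and_update_churn` helpers above schedule a balance that overflows the earliest epoch's remaining churn by pushing it back a whole number of epochs; the `.safe_sub(1)?.safe_div(...)?.safe_add(1)?` chain is a ceiling division. A worked standalone sketch with plain `u64` arithmetic and illustrative numbers:

    // Standalone sketch of the epoch-deferral arithmetic (plain u64 in place of
    // the checked safe_* helpers; values are illustrative).
    fn extra_epochs_needed(exit_balance: u64, balance_to_consume: u64, per_epoch_churn: u64) -> u64 {
        if exit_balance <= balance_to_consume {
            return 0; // Fits in the earliest epoch; no deferral.
        }
        let balance_to_process = exit_balance - balance_to_consume;
        // Ceiling division: (x - 1) / churn + 1 == ceil(x / churn) for x > 0.
        (balance_to_process - 1) / per_epoch_churn + 1
    }

    fn main() {
        let gwei: u64 = 1_000_000_000;
        // 100 ETH of churn left this epoch, 64 ETH churn per epoch, a 300 ETH
        // exit: the 200 ETH overflow needs ceil(200 / 64) = 4 extra epochs.
        assert_eq!(extra_epochs_needed(300 * gwei, 100 * gwei, 64 * gwei), 4);
    }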
     #[allow(clippy::arithmetic_side_effects)]
     pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> {
         // Required for macros (which use type-hints internally).
diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs
index 5fd18552f6b..8ed449ec8a7 100644
--- a/consensus/types/src/validator.rs
+++ b/consensus/types/src/validator.rs
@@ -57,10 +57,10 @@ impl Validator {
     /// Returns `true` if the validator is eligible to join the activation queue.
     ///
-    /// Spec v0.12.1
+    /// Modified in electra
     pub fn is_eligible_for_activation_queue(&self, spec: &ChainSpec) -> bool {
         self.activation_eligibility_epoch == spec.far_future_epoch
-            && self.effective_balance == spec.max_effective_balance
+            && self.effective_balance >= spec.min_activation_balance
     }
 
     /// Returns `true` if the validator is eligible to be activated.
@@ -130,15 +130,39 @@ impl Validator {
     }
 
     /// Returns `true` if the validator is fully withdrawable at some epoch.
+    ///
+    /// Note: Modified in electra.
     pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool {
-        self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0
+        self.has_execution_withdrawal_credential(spec)
+            && self.withdrawable_epoch <= epoch
+            && balance > 0
     }
 
     /// Returns `true` if the validator is partially withdrawable.
+    ///
+    /// Note: Modified in electra.
     pub fn is_partially_withdrawable_validator(&self, balance: u64, spec: &ChainSpec) -> bool {
-        self.has_eth1_withdrawal_credential(spec)
-            && self.effective_balance == spec.max_effective_balance
-            && balance > spec.max_effective_balance
+        let max_effective_balance = self.get_validator_max_effective_balance(spec);
+        let has_max_effective_balance = self.effective_balance == max_effective_balance;
+        let has_excess_balance = balance > max_effective_balance;
+        self.has_execution_withdrawal_credential(spec)
+            && has_max_effective_balance
+            && has_excess_balance
+    }
+
+    /// Returns `true` if the validator has a 0x01 or 0x02 prefixed withdrawal credential.
+    pub fn has_execution_withdrawal_credential(&self, spec: &ChainSpec) -> bool {
+        self.has_compounding_withdrawal_credential(spec)
+            || self.has_eth1_withdrawal_credential(spec)
+    }
+
+    /// Returns the max effective balance for a validator in gwei.
+    pub fn get_validator_max_effective_balance(&self, spec: &ChainSpec) -> u64 {
+        if self.has_compounding_withdrawal_credential(spec) {
+            spec.max_effective_balance_electra
+        } else {
+            spec.min_activation_balance
+        }
     }
 }
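The rewritten predicate above keys the sweep threshold off the credential type: 0x01 validators still top out at the 32 ETH minimum activation balance, while 0x02 compounding validators top out at the Electra maximum. A standalone sketch, with the spec's mainnet values restated as illustrative constants:

    // Illustrative stand-ins for the ChainSpec fields referenced above.
    const MIN_ACTIVATION_BALANCE: u64 = 32_000_000_000; // 32 ETH, 0x01 credentials
    const MAX_EFFECTIVE_BALANCE_ELECTRA: u64 = 2_048_000_000_000; // 2048 ETH, 0x02

    fn is_partially_withdrawable(compounding: bool, effective_balance: u64, balance: u64) -> bool {
        let max_effective_balance = if compounding {
            MAX_EFFECTIVE_BALANCE_ELECTRA
        } else {
            MIN_ACTIVATION_BALANCE
        };
        effective_balance == max_effective_balance && balance > max_effective_balance
    }

    fn main() {
        // A 0x01 validator at 32 ETH effective with 33 ETH actual is sweepable...
        assert!(is_partially_withdrawable(false, 32_000_000_000, 33_000_000_000));
        // ...but identical balances on a compounding validator are not.
        assert!(!is_partially_withdrawable(true, 32_000_000_000, 33_000_000_000));
    }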