From 18c9be595dbeca3ba9528a907939d2d00b81a9d4 Mon Sep 17 00:00:00 2001
From: Mac L
Date: Thu, 1 Dec 2022 06:03:53 +0000
Subject: [PATCH 01/17] Add API endpoint to count statuses of all validators
 (#3756)

## Issue Addressed

#3724

## Proposed Changes

Adds an endpoint to quickly count the number of occurrences of each status in the validator set.

## Usage

```bash
curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: application/json" | jq
```

```json
{
  "data": {
    "active_ongoing":479508,
    "active_exiting":0,
    "active_slashed":0,
    "pending_initialized":28,
    "pending_queued":0,
    "withdrawal_possible":933,
    "withdrawal_done":0,
    "exited_unslashed":0,
    "exited_slashed":3
  }
}
```
---
 beacon_node/http_api/src/lib.rs | 14 +++++++
 beacon_node/http_api/src/ui.rs  | 71 +++++++++++++++++++++++++++++++++
 book/src/api-lighthouse.md      | 22 ++++++++++
 3 files changed, 107 insertions(+)
 create mode 100644 beacon_node/http_api/src/ui.rs

diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index 7f6852f364b..645c4ccfaba 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -17,6 +17,7 @@ mod proposer_duties;
 mod publish_blocks;
 mod state_id;
 mod sync_committees;
+mod ui;
 mod validator_inclusion;
 mod version;
@@ -2886,6 +2887,18 @@ pub fn serve(
         },
     );
 
+    // GET lighthouse/ui/validator_count
+    let get_lighthouse_ui_validator_count = warp::path("lighthouse")
+        .and(warp::path("ui"))
+        .and(warp::path("validator_count"))
+        .and(warp::path::end())
+        .and(chain_filter.clone())
+        .and_then(|chain: Arc<BeaconChain<T>>| {
+            blocking_json_task(move || {
+                ui::get_validator_count(chain).map(api_types::GenericResponse::from)
+            })
+        });
+
     // GET lighthouse/syncing
     let get_lighthouse_syncing = warp::path("lighthouse")
         .and(warp::path("syncing"))
@@ -3353,6 +3366,7 @@
             .or(get_lighthouse_attestation_performance.boxed())
             .or(get_lighthouse_block_packing_efficiency.boxed())
             .or(get_lighthouse_merge_readiness.boxed())
+            .or(get_lighthouse_ui_validator_count.boxed())
             .or(get_events.boxed()),
     )
     .boxed()
diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs
new file mode 100644
index 00000000000..8f9400dbbd0
--- /dev/null
+++ b/beacon_node/http_api/src/ui.rs
@@ -0,0 +1,71 @@
+use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
+use eth2::types::ValidatorStatus;
+use serde::{Deserialize, Serialize};
+use std::sync::Arc;
+use warp_utils::reject::beacon_chain_error;
+
+#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)]
+pub struct ValidatorCountResponse {
+    pub active_ongoing: u64,
+    pub active_exiting: u64,
+    pub active_slashed: u64,
+    pub pending_initialized: u64,
+    pub pending_queued: u64,
+    pub withdrawal_possible: u64,
+    pub withdrawal_done: u64,
+    pub exited_unslashed: u64,
+    pub exited_slashed: u64,
+}
+
+pub fn get_validator_count<T: BeaconChainTypes>(
+    chain: Arc<BeaconChain<T>>,
+) -> Result<ValidatorCountResponse, warp::Rejection> {
+    let spec = &chain.spec;
+    let mut active_ongoing = 0;
+    let mut active_exiting = 0;
+    let mut active_slashed = 0;
+    let mut pending_initialized = 0;
+    let mut pending_queued = 0;
+    let mut withdrawal_possible = 0;
+    let mut withdrawal_done = 0;
+    let mut exited_unslashed = 0;
+    let mut exited_slashed = 0;
+
+    chain
+        .with_head(|head| {
+            let state = &head.beacon_state;
+            let epoch = state.current_epoch();
+            for validator in state.validators() {
+                let status =
+                    ValidatorStatus::from_validator(validator, epoch, spec.far_future_epoch);
+
+                match status {
+                    ValidatorStatus::ActiveOngoing => active_ongoing += 1,
+                    ValidatorStatus::ActiveExiting =>
active_exiting += 1, + ValidatorStatus::ActiveSlashed => active_slashed += 1, + ValidatorStatus::PendingInitialized => pending_initialized += 1, + ValidatorStatus::PendingQueued => pending_queued += 1, + ValidatorStatus::WithdrawalPossible => withdrawal_possible += 1, + ValidatorStatus::WithdrawalDone => withdrawal_done += 1, + ValidatorStatus::ExitedUnslashed => exited_unslashed += 1, + ValidatorStatus::ExitedSlashed => exited_slashed += 1, + // Since we are not invoking `superset`, all other variants will be 0. + _ => (), + } + } + Ok::<(), BeaconChainError>(()) + }) + .map_err(beacon_chain_error)?; + + Ok(ValidatorCountResponse { + active_ongoing, + active_exiting, + active_slashed, + pending_initialized, + pending_queued, + withdrawal_possible, + withdrawal_done, + exited_unslashed, + exited_slashed, + }) +} diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index c1ba6a2dcc6..763372692ee 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -99,6 +99,28 @@ curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: applicatio } ``` +### `/lighthouse/ui/validator_count` + +```bash +curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: application/json" | jq +``` + +```json +{ + "data": { + "active_ongoing":479508, + "active_exiting":0, + "active_slashed":0, + "pending_initialized":28, + "pending_queued":0, + "withdrawal_possible":933, + "withdrawal_done":0, + "exited_unslashed":0, + "exited_slashed":3 + } +} +``` + ### `/lighthouse/syncing` ```bash From 84392d63fa8a30dcb8b410fa70468830cf72999a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 2 Dec 2022 00:07:43 +0000 Subject: [PATCH 02/17] Delete DB schema migrations for v11 and earlier (#3761) ## Proposed Changes Now that the Gnosis merge is scheduled, all users should have upgraded beyond Lighthouse v3.0.0. Accordingly we can delete schema migrations for versions prior to v3.0.0. ## Additional Info I also deleted the state cache stuff I added in #3714 as it turned out to be useless for the light client proofs due to the one-slot offset. 
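
For operators who want to check where their database stands before upgrading, the `lighthouse db` subcommand covered in `book/src/database-migrations.md` can report and migrate the on-disk schema version. A usage sketch (assuming the `lighthouse db version` and `lighthouse db migrate --to` invocations from that guide; stop the beacon node first and adjust `--network`/`--datadir` to your setup):

```bash
# Report the current on-disk schema version (assumes the `lighthouse db`
# subcommand documented in the book).
lighthouse db version --network mainnet

# Downgrade (or upgrade) to a specific schema version, e.g. v11.
lighthouse db migrate --to 11 --network mainnet
```

Databases still below schema v11 will need to pass through an older Lighthouse release first, since this patch removes the migrations that handled those versions.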
--- beacon_node/beacon_chain/src/beacon_chain.rs | 40 -- .../src/beacon_fork_choice_store.rs | 22 +- .../beacon_chain/src/persisted_fork_choice.rs | 23 +- beacon_node/beacon_chain/src/schema_change.rs | 163 +-------- .../src/schema_change/migration_schema_v10.rs | 97 ----- .../src/schema_change/migration_schema_v11.rs | 77 ---- .../src/schema_change/migration_schema_v6.rs | 28 -- .../src/schema_change/migration_schema_v7.rs | 341 ------------------ .../src/schema_change/migration_schema_v8.rs | 50 --- .../src/schema_change/migration_schema_v9.rs | 176 --------- .../beacon_chain/src/schema_change/types.rs | 315 ---------------- .../beacon_chain/src/snapshot_cache.rs | 21 -- beacon_node/store/src/hot_cold_store.rs | 10 - book/src/database-migrations.md | 6 +- 14 files changed, 15 insertions(+), 1354 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/types.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 59abb860c80..32ae742d86f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -997,46 +997,6 @@ impl BeaconChain { Ok(self.store.get_state(state_root, slot)?) } - /// Run a function with mutable access to a state for `block_root`. - /// - /// The primary purpose of this function is to borrow a state with its tree hash cache - /// from the snapshot cache *without moving it*. This means that calls to this function should - /// be kept to an absolute minimum, because holding the snapshot cache lock has the ability - /// to delay block import. - /// - /// If there is no appropriate state in the snapshot cache then one will be loaded from disk. - /// If no state is found on disk then `Ok(None)` will be returned. - /// - /// The 2nd parameter to the closure is a bool indicating whether the snapshot cache was used, - /// which can inform logging/metrics. - /// - /// NOTE: the medium-term plan is to delete this function and the snapshot cache in favour - /// of `tree-states`, where all caches are CoW and everything is good in the world. - pub fn with_mutable_state_for_block>( - &self, - block: &SignedBeaconBlock, - block_root: Hash256, - f: F, - ) -> Result, Error> - where - F: FnOnce(&mut BeaconState, bool) -> Result, - { - if let Some(state) = self - .snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(Error::SnapshotCacheLockTimeout)? - .borrow_unadvanced_state_mut(block_root) - { - let cache_hit = true; - f(state, cache_hit).map(Some) - } else if let Some(mut state) = self.get_state(&block.state_root(), Some(block.slot()))? { - let cache_hit = false; - f(&mut state, cache_hit).map(Some) - } else { - Ok(None) - } - } - /// Return the sync committee at `slot + 1` from the canonical chain. 
/// /// This is useful when dealing with sync committee messages, because messages are signed diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 4f6003fda1b..5cba5f3c3bb 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -61,7 +61,7 @@ pub fn get_effective_balances(state: &BeaconState) -> Vec { } #[superstruct( - variants(V1, V8), + variants(V8), variant_attributes(derive(PartialEq, Clone, Debug, Encode, Decode)), no_enum )] @@ -75,13 +75,11 @@ pub(crate) struct CacheItem { pub(crate) type CacheItem = CacheItemV8; #[superstruct( - variants(V1, V8), + variants(V8), variant_attributes(derive(PartialEq, Clone, Default, Debug, Encode, Decode)), no_enum )] pub struct BalancesCache { - #[superstruct(only(V1))] - pub(crate) items: Vec, #[superstruct(only(V8))] pub(crate) items: Vec, } @@ -366,26 +364,20 @@ where } /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. -#[superstruct( - variants(V1, V7, V8, V10, V11), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoiceStore { - #[superstruct(only(V1, V7))] - pub balances_cache: BalancesCacheV1, - #[superstruct(only(V8, V10, V11))] + #[superstruct(only(V11))] pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, pub justified_balances: Vec, pub best_justified_checkpoint: Checkpoint, - #[superstruct(only(V10, V11))] + #[superstruct(only(V11))] pub unrealized_justified_checkpoint: Checkpoint, - #[superstruct(only(V10, V11))] + #[superstruct(only(V11))] pub unrealized_finalized_checkpoint: Checkpoint, - #[superstruct(only(V7, V8, V10, V11))] + #[superstruct(only(V11))] pub proposer_boost_root: Hash256, #[superstruct(only(V11))] pub equivocating_indices: BTreeSet, diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index a60dacdc7c0..829dc2a8a77 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,7 +1,4 @@ -use crate::beacon_fork_choice_store::{ - PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11, - PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, -}; +use crate::beacon_fork_choice_store::PersistedForkChoiceStoreV11; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error, StoreItem}; @@ -10,21 +7,9 @@ use superstruct::superstruct; // If adding a new version you should update this type alias and fix the breakages. 
pub type PersistedForkChoice = PersistedForkChoiceV11; -#[superstruct( - variants(V1, V7, V8, V10, V11), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoice { pub fork_choice: fork_choice::PersistedForkChoice, - #[superstruct(only(V1))] - pub fork_choice_store: PersistedForkChoiceStoreV1, - #[superstruct(only(V7))] - pub fork_choice_store: PersistedForkChoiceStoreV7, - #[superstruct(only(V8))] - pub fork_choice_store: PersistedForkChoiceStoreV8, - #[superstruct(only(V10))] - pub fork_choice_store: PersistedForkChoiceStoreV10, #[superstruct(only(V11))] pub fork_choice_store: PersistedForkChoiceStoreV11, } @@ -47,8 +32,4 @@ macro_rules! impl_store_item { }; } -impl_store_item!(PersistedForkChoiceV1); -impl_store_item!(PersistedForkChoiceV7); -impl_store_item!(PersistedForkChoiceV8); -impl_store_item!(PersistedForkChoiceV10); impl_store_item!(PersistedForkChoiceV11); diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index fd55048c388..73906b1b586 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,20 +1,9 @@ //! Utilities for managing database schema changes. -mod migration_schema_v10; -mod migration_schema_v11; mod migration_schema_v12; mod migration_schema_v13; -mod migration_schema_v6; -mod migration_schema_v7; -mod migration_schema_v8; -mod migration_schema_v9; -mod types; -use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY}; +use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; -use crate::persisted_fork_choice::{ - PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7, - PersistedForkChoiceV8, -}; use crate::types::ChainSpec; use slog::{warn, Logger}; use std::sync::Arc; @@ -23,6 +12,7 @@ use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::{Error as StoreError, StoreItem}; /// Migrate the database from one schema version to another, applying all requisite mutations. +#[allow(clippy::only_used_in_recursion)] // spec is not used but likely to be used in future pub fn migrate_schema( db: Arc>, deposit_contract_deploy_block: u64, @@ -62,156 +52,9 @@ pub fn migrate_schema( } // - // Migrations from before SchemaVersion(5) are deprecated. + // Migrations from before SchemaVersion(11) are deprecated. // - // Migration for adding `execution_status` field to the fork choice store. - (SchemaVersion(5), SchemaVersion(6)) => { - // Database operations to be done atomically - let mut ops = vec![]; - - // The top-level `PersistedForkChoice` struct is still V1 but will have its internal - // bytes for the fork choice updated to V6. - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(mut persisted_fork_choice) = fork_choice_opt { - migration_schema_v6::update_execution_statuses::(&mut persisted_fork_choice) - .map_err(StoreError::SchemaMigrationError)?; - - // Store the converted fork choice store under the same key. - ops.push(persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // 1. Add `proposer_boost_root`. - // 2. Update `justified_epoch` to `justified_checkpoint` and `finalized_epoch` to - // `finalized_checkpoint`. - // 3. 
This migration also includes a potential update to the justified - // checkpoint in case the fork choice store's justified checkpoint and finalized checkpoint - // combination does not actually exist for any blocks in fork choice. This was possible in - // the consensus spec prior to v1.1.6. - // - // Relevant issues: - // - // https://github.com/sigp/lighthouse/issues/2741 - // https://github.com/ethereum/consensus-specs/pull/2727 - // https://github.com/ethereum/consensus-specs/pull/2730 - (SchemaVersion(6), SchemaVersion(7)) => { - // Database operations to be done atomically - let mut ops = vec![]; - - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(persisted_fork_choice_v1) = fork_choice_opt { - // This migrates the `PersistedForkChoiceStore`, adding the `proposer_boost_root` field. - let mut persisted_fork_choice_v7 = persisted_fork_choice_v1.into(); - - let result = migration_schema_v7::update_fork_choice::( - &mut persisted_fork_choice_v7, - db.clone(), - ); - - // Fall back to re-initializing fork choice from an anchor state if necessary. - if let Err(e) = result { - warn!(log, "Unable to migrate to database schema 7, re-initializing fork choice"; "error" => ?e); - migration_schema_v7::update_with_reinitialized_fork_choice::( - &mut persisted_fork_choice_v7, - db.clone(), - spec, - ) - .map_err(StoreError::SchemaMigrationError)?; - } - - // Store the converted fork choice store under the same key. - ops.push(persisted_fork_choice_v7.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Migration to add an `epoch` key to the fork choice's balances cache. - (SchemaVersion(7), SchemaVersion(8)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = - migration_schema_v8::update_fork_choice::(fork_choice, db.clone())?; - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Upgrade from v8 to v9 to separate the execution payloads into their own column. - (SchemaVersion(8), SchemaVersion(9)) => { - migration_schema_v9::upgrade_to_v9::(db.clone(), log)?; - db.store_schema_version(to) - } - // Downgrade from v9 to v8 to ignore the separation of execution payloads - // NOTE: only works before the Bellatrix fork epoch. - (SchemaVersion(9), SchemaVersion(8)) => { - migration_schema_v9::downgrade_from_v9::(db.clone(), log)?; - db.store_schema_version(to) - } - (SchemaVersion(9), SchemaVersion(10)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = migration_schema_v10::update_fork_choice(fork_choice)?; - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - (SchemaVersion(10), SchemaVersion(9)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = migration_schema_v10::downgrade_fork_choice(fork_choice)?; - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Upgrade from v10 to v11 adding support for equivocating indices to fork choice. 
- (SchemaVersion(10), SchemaVersion(11)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = migration_schema_v11::update_fork_choice(fork_choice); - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Downgrade from v11 to v10 removing support for equivocating indices from fork choice. - (SchemaVersion(11), SchemaVersion(10)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = - migration_schema_v11::downgrade_fork_choice(fork_choice, log); - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } // Upgrade from v11 to v12 to store richer metadata in the attestation op pool. (SchemaVersion(11), SchemaVersion(12)) => { let ops = migration_schema_v12::upgrade_to_v12::(db.clone(), log)?; diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs deleted file mode 100644 index 70e0007851c..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs +++ /dev/null @@ -1,97 +0,0 @@ -use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV8}; -use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV8}; -use crate::schema_change::{ - types::{SszContainerV10, SszContainerV7}, - StoreError, -}; -use proto_array::core::SszContainer; -use ssz::{Decode, Encode}; - -pub fn update_fork_choice( - mut fork_choice: PersistedForkChoiceV8, -) -> Result { - let ssz_container_v7 = SszContainerV7::from_ssz_bytes( - &fork_choice.fork_choice.proto_array_bytes, - ) - .map_err(|e| { - StoreError::SchemaMigrationError(format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - )) - })?; - - // These transformations instantiate `node.unrealized_justified_checkpoint` and - // `node.unrealized_finalized_checkpoint` to `None`. 
- let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); - let ssz_container: SszContainer = ssz_container_v10.into(); - fork_choice.fork_choice.proto_array_bytes = ssz_container.as_ssz_bytes(); - - Ok(fork_choice.into()) -} - -pub fn downgrade_fork_choice( - mut fork_choice: PersistedForkChoiceV10, -) -> Result { - let ssz_container_v10 = SszContainerV10::from_ssz_bytes( - &fork_choice.fork_choice.proto_array_bytes, - ) - .map_err(|e| { - StoreError::SchemaMigrationError(format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - )) - })?; - - let ssz_container_v7: SszContainerV7 = ssz_container_v10.into(); - fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); - - Ok(fork_choice.into()) -} - -impl From for PersistedForkChoiceStoreV10 { - fn from(other: PersistedForkChoiceStoreV8) -> Self { - Self { - balances_cache: other.balances_cache, - time: other.time, - finalized_checkpoint: other.finalized_checkpoint, - justified_checkpoint: other.justified_checkpoint, - justified_balances: other.justified_balances, - best_justified_checkpoint: other.best_justified_checkpoint, - unrealized_justified_checkpoint: other.best_justified_checkpoint, - unrealized_finalized_checkpoint: other.finalized_checkpoint, - proposer_boost_root: other.proposer_boost_root, - } - } -} - -impl From for PersistedForkChoiceV10 { - fn from(other: PersistedForkChoiceV8) -> Self { - Self { - fork_choice: other.fork_choice, - fork_choice_store: other.fork_choice_store.into(), - } - } -} - -impl From for PersistedForkChoiceStoreV8 { - fn from(other: PersistedForkChoiceStoreV10) -> Self { - Self { - balances_cache: other.balances_cache, - time: other.time, - finalized_checkpoint: other.finalized_checkpoint, - justified_checkpoint: other.justified_checkpoint, - justified_balances: other.justified_balances, - best_justified_checkpoint: other.best_justified_checkpoint, - proposer_boost_root: other.proposer_boost_root, - } - } -} - -impl From for PersistedForkChoiceV8 { - fn from(other: PersistedForkChoiceV10) -> Self { - Self { - fork_choice: other.fork_choice, - fork_choice_store: other.fork_choice_store.into(), - } - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs deleted file mode 100644 index dde80a5cac7..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11}; -use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV11}; -use slog::{warn, Logger}; -use std::collections::BTreeSet; - -/// Add the equivocating indices field. 
-pub fn update_fork_choice(fork_choice_v10: PersistedForkChoiceV10) -> PersistedForkChoiceV11 { - let PersistedForkChoiceStoreV10 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - } = fork_choice_v10.fork_choice_store; - - PersistedForkChoiceV11 { - fork_choice: fork_choice_v10.fork_choice, - fork_choice_store: PersistedForkChoiceStoreV11 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - equivocating_indices: BTreeSet::new(), - }, - } -} - -pub fn downgrade_fork_choice( - fork_choice_v11: PersistedForkChoiceV11, - log: Logger, -) -> PersistedForkChoiceV10 { - let PersistedForkChoiceStoreV11 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - equivocating_indices, - } = fork_choice_v11.fork_choice_store; - - if !equivocating_indices.is_empty() { - warn!( - log, - "Deleting slashed validators from fork choice store"; - "count" => equivocating_indices.len(), - "message" => "this may make your node more susceptible to following the wrong chain", - ); - } - - PersistedForkChoiceV10 { - fork_choice: fork_choice_v11.fork_choice, - fork_choice_store: PersistedForkChoiceStoreV10 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - }, - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs deleted file mode 100644 index 231da838cdc..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs +++ /dev/null @@ -1,28 +0,0 @@ -///! These functions and structs are only relevant to the database migration from schema 5 to 6. -use crate::persisted_fork_choice::PersistedForkChoiceV1; -use crate::schema_change::types::{SszContainerV1, SszContainerV6}; -use crate::BeaconChainTypes; -use ssz::four_byte_option_impl; -use ssz::{Decode, Encode}; - -// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union -// selector. -four_byte_option_impl!(four_byte_option_usize, usize); - -pub(crate) fn update_execution_statuses( - persisted_fork_choice: &mut PersistedForkChoiceV1, -) -> Result<(), String> { - let ssz_container_v1 = - SszContainerV1::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes) - .map_err(|e| { - format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - ) - })?; - - let ssz_container_v6: SszContainerV6 = ssz_container_v1.into(); - - persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v6.as_ssz_bytes(); - Ok(()) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs deleted file mode 100644 index d953d30027f..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ /dev/null @@ -1,341 +0,0 @@ -///! These functions and structs are only relevant to the database migration from schema 6 to 7. 
-use crate::beacon_chain::BeaconChainTypes; -use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7}; -use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; -use crate::schema_change::types::{ProtoNodeV6, SszContainerV10, SszContainerV6, SszContainerV7}; -use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; -use crate::{BeaconForkChoiceStore, BeaconSnapshot}; -use fork_choice::ForkChoice; -use proto_array::{core::ProtoNode, core::SszContainer, CountUnrealizedFull, ProtoArrayForkChoice}; -use ssz::four_byte_option_impl; -use ssz::{Decode, Encode}; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use store::hot_cold_store::HotColdDB; -use store::iter::BlockRootsIterator; -use store::Error as StoreError; - -// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union -// selector. -four_byte_option_impl!(four_byte_option_usize, usize); - -/// This method is used to re-initialize fork choice from the finalized state in case we hit an -/// error during this migration. -pub(crate) fn update_with_reinitialized_fork_choice( - persisted_fork_choice: &mut PersistedForkChoiceV7, - db: Arc>, - spec: &ChainSpec, -) -> Result<(), String> { - let anchor_block_root = persisted_fork_choice - .fork_choice_store - .finalized_checkpoint - .root; - let anchor_block = db - .get_full_block_prior_to_v9(&anchor_block_root) - .map_err(|e| format!("{:?}", e))? - .ok_or_else(|| "Missing anchor beacon block".to_string())?; - let anchor_state = db - .get_state(&anchor_block.state_root(), Some(anchor_block.slot())) - .map_err(|e| format!("{:?}", e))? - .ok_or_else(|| "Missing anchor beacon state".to_string())?; - let snapshot = BeaconSnapshot { - beacon_block: Arc::new(anchor_block), - beacon_block_root: anchor_block_root, - beacon_state: anchor_state, - }; - let store = BeaconForkChoiceStore::get_forkchoice_store(db, &snapshot); - let fork_choice = ForkChoice::from_anchor( - store, - anchor_block_root, - &snapshot.beacon_block, - &snapshot.beacon_state, - // Don't provide the current slot here, just use what's in the store. We don't need to know - // the head here, plus it's nice to avoid mutating fork choice during this process. - None, - // This config will get overwritten on startup. - CountUnrealizedFull::default(), - spec, - ) - .map_err(|e| format!("{:?}", e))?; - persisted_fork_choice.fork_choice = fork_choice.to_persisted(); - Ok(()) -} - -pub(crate) fn update_fork_choice( - persisted_fork_choice: &mut PersistedForkChoiceV7, - db: Arc>, -) -> Result<(), StoreError> { - // `PersistedForkChoice` stores the `ProtoArray` as a `Vec`. Deserialize these - // bytes assuming the legacy struct, and transform them to the new struct before - // re-serializing. - let ssz_container_v6 = - SszContainerV6::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes) - .map_err(|e| { - StoreError::SchemaMigrationError(format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - )) - })?; - - // Clone the V6 proto nodes in order to maintain information about `node.justified_epoch` - // and `node.finalized_epoch`. - let nodes_v6 = ssz_container_v6.nodes.clone(); - - let justified_checkpoint = persisted_fork_choice.fork_choice_store.justified_checkpoint; - let finalized_checkpoint = persisted_fork_choice.fork_choice_store.finalized_checkpoint; - - // These transformations instantiate `node.justified_checkpoint` and `node.finalized_checkpoint` - // to `None`. 
- let ssz_container_v7: SszContainerV7 = - ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint); - let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); - let ssz_container: SszContainer = ssz_container_v10.into(); - // `CountUnrealizedFull::default()` represents the count-unrealized-full config which will be overwritten on startup. - let mut fork_choice: ProtoArrayForkChoice = - (ssz_container, CountUnrealizedFull::default()).into(); - - update_checkpoints::(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db) - .map_err(StoreError::SchemaMigrationError)?; - - // Update the justified checkpoint in the store in case we have a discrepancy - // between the store and the proto array nodes. - update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice) - .map_err(StoreError::SchemaMigrationError)?; - - // Need to downgrade the SSZ container to V7 so that all migrations can be applied in sequence. - let ssz_container = SszContainer::from(&fork_choice); - let ssz_container_v7 = SszContainerV7::from(ssz_container); - - persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); - persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint; - - Ok(()) -} - -struct HeadInfo { - index: usize, - root: Hash256, - slot: Slot, -} - -fn update_checkpoints( - finalized_root: Hash256, - nodes_v6: &[ProtoNodeV6], - fork_choice: &mut ProtoArrayForkChoice, - db: Arc>, -) -> Result<(), String> { - let heads = find_finalized_descendant_heads(finalized_root, fork_choice); - - // For each head, first gather all epochs we will need to find justified or finalized roots for. - for head in heads { - // `relevant_epochs` are epochs for which we will need to find the root at the start slot. - // We don't need to worry about whether the are finalized or justified epochs. - let mut relevant_epochs = HashSet::new(); - let relevant_epoch_finder = |index, _: &mut ProtoNode| { - let (justified_epoch, finalized_epoch) = nodes_v6 - .get(index) - .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch)) - .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?; - relevant_epochs.insert(justified_epoch); - relevant_epochs.insert(finalized_epoch); - Ok(()) - }; - - apply_to_chain_of_ancestors( - finalized_root, - head.index, - fork_choice, - relevant_epoch_finder, - )?; - - // find the block roots associated with each relevant epoch. - let roots_by_epoch = - map_relevant_epochs_to_roots::(head.root, head.slot, relevant_epochs, db.clone())?; - - // Apply this mutator to the chain of descendants from this head, adding justified - // and finalized checkpoints for each. - let node_mutator = |index, node: &mut ProtoNode| { - let (justified_epoch, finalized_epoch) = nodes_v6 - .get(index) - .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch)) - .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?; - - // Update the checkpoints only if they haven't already been populated. 
- if node.justified_checkpoint.is_none() { - let justified_checkpoint = - roots_by_epoch - .get(&justified_epoch) - .map(|&root| Checkpoint { - epoch: justified_epoch, - root, - }); - node.justified_checkpoint = justified_checkpoint; - } - if node.finalized_checkpoint.is_none() { - let finalized_checkpoint = - roots_by_epoch - .get(&finalized_epoch) - .map(|&root| Checkpoint { - epoch: finalized_epoch, - root, - }); - node.finalized_checkpoint = finalized_checkpoint; - } - - Ok(()) - }; - - apply_to_chain_of_ancestors(finalized_root, head.index, fork_choice, node_mutator)?; - } - Ok(()) -} - -/// Coverts the given `HashSet` to a `Vec` then reverse sorts by `Epoch`. Next, a -/// single `BlockRootsIterator` is created which is used to iterate backwards from the given -/// `head_root` and `head_slot`, finding the block root at the start slot of each epoch. -fn map_relevant_epochs_to_roots( - head_root: Hash256, - head_slot: Slot, - epochs: HashSet, - db: Arc>, -) -> Result, String> { - // Convert the `HashSet` to a `Vec` and reverse sort the epochs. - let mut relevant_epochs = epochs.into_iter().collect::>(); - relevant_epochs.sort_unstable_by(|a, b| b.cmp(a)); - - // Iterate backwards from the given `head_root` and `head_slot` and find the block root at each epoch. - let mut iter = std::iter::once(Ok((head_root, head_slot))) - .chain(BlockRootsIterator::from_block(&db, head_root).map_err(|e| format!("{:?}", e))?); - let mut roots_by_epoch = HashMap::new(); - for epoch in relevant_epochs { - let start_slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); - - let root = iter - .find_map(|next| match next { - Ok((root, slot)) => (slot == start_slot).then_some(Ok(root)), - Err(e) => Some(Err(format!("{:?}", e))), - }) - .transpose()? - .ok_or_else(|| "Justified root not found".to_string())?; - roots_by_epoch.insert(epoch, root); - } - Ok(roots_by_epoch) -} - -/// Applies a mutator to every node in a chain, starting from the node at the given -/// `head_index` and iterating through ancestors until the `finalized_root` is reached. -fn apply_to_chain_of_ancestors( - finalized_root: Hash256, - head_index: usize, - fork_choice: &mut ProtoArrayForkChoice, - mut node_mutator: F, -) -> Result<(), String> -where - F: FnMut(usize, &mut ProtoNode) -> Result<(), String>, -{ - let head = fork_choice - .core_proto_array_mut() - .nodes - .get_mut(head_index) - .ok_or_else(|| "Head index not found in proto nodes".to_string())?; - - node_mutator(head_index, head)?; - - let mut parent_index_opt = head.parent; - let mut parent_opt = - parent_index_opt.and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index)); - - // Iterate backwards through all parents until there is no reference to a parent or we reach - // the `finalized_root` node. - while let (Some(parent), Some(parent_index)) = (parent_opt, parent_index_opt) { - node_mutator(parent_index, parent)?; - - // Break out of this while loop *after* the `node_mutator` has been applied to the finalized - // node. - if parent.root == finalized_root { - break; - } - - // Update parent values - parent_index_opt = parent.parent; - parent_opt = parent_index_opt - .and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index)); - } - Ok(()) -} - -/// Finds all heads by finding all nodes in the proto array that are not referenced as parents. Then -/// checks that these nodes are descendants of the finalized root in order to determine if they are -/// relevant. 
-fn find_finalized_descendant_heads( - finalized_root: Hash256, - fork_choice: &ProtoArrayForkChoice, -) -> Vec { - let nodes_referenced_as_parents: HashSet = fork_choice - .core_proto_array() - .nodes - .iter() - .filter_map(|node| node.parent) - .collect::>(); - - fork_choice - .core_proto_array() - .nodes - .iter() - .enumerate() - .filter_map(|(index, node)| { - (!nodes_referenced_as_parents.contains(&index) - && fork_choice.is_descendant(finalized_root, node.root)) - .then_some(HeadInfo { - index, - root: node.root, - slot: node.slot, - }) - }) - .collect::>() -} - -fn update_store_justified_checkpoint( - persisted_fork_choice: &mut PersistedForkChoiceV7, - fork_choice: &mut ProtoArrayForkChoice, -) -> Result<(), String> { - let justified_checkpoint = fork_choice - .core_proto_array() - .nodes - .iter() - .filter_map(|node| { - (node.finalized_checkpoint - == Some(persisted_fork_choice.fork_choice_store.finalized_checkpoint)) - .then_some(node.justified_checkpoint) - .flatten() - }) - .max_by_key(|justified_checkpoint| justified_checkpoint.epoch) - .ok_or("Proto node with current finalized checkpoint not found")?; - - fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint; - Ok(()) -} - -// Add a zero `proposer_boost_root` when migrating from V1-6 to V7. -impl From for PersistedForkChoiceStoreV7 { - fn from(other: PersistedForkChoiceStoreV1) -> Self { - Self { - balances_cache: other.balances_cache, - time: other.time, - finalized_checkpoint: other.finalized_checkpoint, - justified_checkpoint: other.justified_checkpoint, - justified_balances: other.justified_balances, - best_justified_checkpoint: other.best_justified_checkpoint, - proposer_boost_root: Hash256::zero(), - } - } -} - -impl From for PersistedForkChoiceV7 { - fn from(other: PersistedForkChoiceV1) -> Self { - Self { - fork_choice: other.fork_choice, - fork_choice_store: other.fork_choice_store.into(), - } - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs deleted file mode 100644 index ef3f7857f9a..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs +++ /dev/null @@ -1,50 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use crate::beacon_fork_choice_store::{ - BalancesCacheV8, CacheItemV8, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, -}; -use crate::persisted_fork_choice::{PersistedForkChoiceV7, PersistedForkChoiceV8}; -use std::sync::Arc; -use store::{Error as StoreError, HotColdDB}; -use types::EthSpec; - -pub fn update_fork_choice( - fork_choice: PersistedForkChoiceV7, - db: Arc>, -) -> Result { - let PersistedForkChoiceStoreV7 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - proposer_boost_root, - } = fork_choice.fork_choice_store; - let mut fork_choice_store = PersistedForkChoiceStoreV8 { - balances_cache: BalancesCacheV8::default(), - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - proposer_boost_root, - }; - - // Add epochs to the balances cache. It's safe to just use the block's epoch because - // before schema v8 the cache would always miss on skipped slots. - for item in balances_cache.items { - // Drop any blocks that aren't found, they're presumably too old and this is only a cache. - if let Some(block) = db.get_full_block_prior_to_v9(&item.block_root)? 
{ - fork_choice_store.balances_cache.items.push(CacheItemV8 { - block_root: item.block_root, - epoch: block.slot().epoch(T::EthSpec::slots_per_epoch()), - balances: item.balances, - }); - } - } - - Ok(PersistedForkChoiceV8 { - fork_choice: fork_choice.fork_choice, - fork_choice_store, - }) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs deleted file mode 100644 index e2c48d5c89d..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs +++ /dev/null @@ -1,176 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use slog::{debug, error, info, Logger}; -use slot_clock::SlotClock; -use std::sync::Arc; -use std::time::Duration; -use store::{DBColumn, Error, HotColdDB, KeyValueStore}; -use types::{EthSpec, Hash256, Slot}; - -const OPS_PER_BLOCK_WRITE: usize = 2; - -/// The slot clock isn't usually available before the database is initialized, so we construct a -/// temporary slot clock by reading the genesis state. It should always exist if the database is -/// initialized at a prior schema version, however we still handle the lack of genesis state -/// gracefully. -fn get_slot_clock( - db: &HotColdDB, - log: &Logger, -) -> Result, Error> { - // At schema v8 the genesis block must be a *full* block (with payload). In all likeliness it - // actually has no payload. - let spec = db.get_chain_spec(); - let genesis_block = if let Some(block) = db.get_full_block_prior_to_v9(&Hash256::zero())? { - block - } else { - error!(log, "Missing genesis block"); - return Ok(None); - }; - let genesis_state = - if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? { - state - } else { - error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); - return Ok(None); - }; - Ok(Some(T::SlotClock::new( - spec.genesis_slot, - Duration::from_secs(genesis_state.genesis_time()), - Duration::from_secs(spec.seconds_per_slot), - ))) -} - -pub fn upgrade_to_v9( - db: Arc>, - log: Logger, -) -> Result<(), Error> { - // This upgrade is a no-op if the Bellatrix fork epoch has not already passed. This migration - // was implemented before the activation of Bellatrix on all networks except Kiln, so the only - // users who will need to wait for the slow copying migration are Kiln users. - let slot_clock = if let Some(slot_clock) = get_slot_clock::(&db, &log)? { - slot_clock - } else { - error!( - log, - "Unable to complete migration because genesis state or genesis block is missing" - ); - return Err(Error::SlotClockUnavailableForMigration); - }; - - let current_epoch = if let Some(slot) = slot_clock.now() { - slot.epoch(T::EthSpec::slots_per_epoch()) - } else { - return Ok(()); - }; - - let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch { - fork_epoch - } else { - info!( - log, - "Upgrading database schema to v9 (no-op)"; - "info" => "To downgrade before the merge run `lighthouse db migrate`" - ); - return Ok(()); - }; - - if current_epoch >= bellatrix_fork_epoch { - info!( - log, - "Upgrading database schema to v9"; - "info" => "This will take several minutes. Each block will be read from and \ - re-written to the database. You may safely exit now (Ctrl-C) and resume \ - the migration later. Downgrading is no longer possible." 
- ); - - for res in db.hot_db.iter_column_keys(DBColumn::BeaconBlock) { - let block_root = res?; - let block = match db.get_full_block_prior_to_v9(&block_root) { - // A pre-v9 block is present. - Ok(Some(block)) => block, - // A block is missing. - Ok(None) => return Err(Error::BlockNotFound(block_root)), - // There was an error reading a pre-v9 block. Try reading it as a post-v9 block. - Err(_) => { - if db.try_get_full_block(&block_root)?.is_some() { - // The block is present as a post-v9 block, assume that it was already - // correctly migrated. - continue; - } else { - // This scenario should not be encountered since a prior check has ensured - // that this block exists. - return Err(Error::V9MigrationFailure(block_root)); - } - } - }; - - if block.message().execution_payload().is_ok() { - // Overwrite block with blinded block and store execution payload separately. - debug!( - log, - "Rewriting Bellatrix block"; - "block_root" => ?block_root, - ); - - let mut kv_batch = Vec::with_capacity(OPS_PER_BLOCK_WRITE); - db.block_as_kv_store_ops(&block_root, block, &mut kv_batch)?; - db.hot_db.do_atomically(kv_batch)?; - } - } - } else { - info!( - log, - "Upgrading database schema to v9 (no-op)"; - "info" => "To downgrade before the merge run `lighthouse db migrate`" - ); - } - - Ok(()) -} - -// This downgrade is conditional and will only succeed if the Bellatrix fork epoch hasn't been -// reached. -pub fn downgrade_from_v9( - db: Arc>, - log: Logger, -) -> Result<(), Error> { - let slot_clock = if let Some(slot_clock) = get_slot_clock::(&db, &log)? { - slot_clock - } else { - error!( - log, - "Unable to complete migration because genesis state or genesis block is missing" - ); - return Err(Error::SlotClockUnavailableForMigration); - }; - - let current_epoch = if let Some(slot) = slot_clock.now() { - slot.epoch(T::EthSpec::slots_per_epoch()) - } else { - return Ok(()); - }; - - let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch { - fork_epoch - } else { - info!( - log, - "Downgrading database schema from v9"; - "info" => "You need to upgrade to v9 again before the merge" - ); - return Ok(()); - }; - - if current_epoch >= bellatrix_fork_epoch { - error!( - log, - "Downgrading from schema v9 after the Bellatrix fork epoch is not supported"; - "current_epoch" => current_epoch, - "bellatrix_fork_epoch" => bellatrix_fork_epoch, - "reason" => "You need a v9 schema database to run on a merged version of Prater or \ - mainnet. On Kiln, you have to re-sync", - ); - Err(Error::ResyncRequiredForExecutionPayloadSeparation) - } else { - Ok(()) - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/types.rs b/beacon_node/beacon_chain/src/schema_change/types.rs deleted file mode 100644 index 02a54c1a3f8..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/types.rs +++ /dev/null @@ -1,315 +0,0 @@ -use crate::types::{AttestationShufflingId, Checkpoint, Epoch, Hash256, Slot}; -use proto_array::core::{ProposerBoost, ProtoNode, SszContainer, VoteTracker}; -use proto_array::ExecutionStatus; -use ssz::four_byte_option_impl; -use ssz::Encode; -use ssz_derive::{Decode, Encode}; -use superstruct::superstruct; - -// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union -// selector. 
-four_byte_option_impl!(four_byte_option_usize, usize); -four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); - -#[superstruct( - variants(V1, V6, V7, V10), - variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)), - no_enum -)] -pub struct ProtoNode { - pub slot: Slot, - pub state_root: Hash256, - pub target_root: Hash256, - pub current_epoch_shuffling_id: AttestationShufflingId, - pub next_epoch_shuffling_id: AttestationShufflingId, - pub root: Hash256, - #[ssz(with = "four_byte_option_usize")] - pub parent: Option, - #[superstruct(only(V1, V6))] - pub justified_epoch: Epoch, - #[superstruct(only(V1, V6))] - pub finalized_epoch: Epoch, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7, V10))] - pub justified_checkpoint: Option, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7, V10))] - pub finalized_checkpoint: Option, - pub weight: u64, - #[ssz(with = "four_byte_option_usize")] - pub best_child: Option, - #[ssz(with = "four_byte_option_usize")] - pub best_descendant: Option, - #[superstruct(only(V6, V7, V10))] - pub execution_status: ExecutionStatus, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V10))] - pub unrealized_justified_checkpoint: Option, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V10))] - pub unrealized_finalized_checkpoint: Option, -} - -impl Into for ProtoNodeV1 { - fn into(self) -> ProtoNodeV6 { - ProtoNodeV6 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_epoch: self.justified_epoch, - finalized_epoch: self.finalized_epoch, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - // We set the following execution value as if the block is a pre-merge-fork block. This - // is safe as long as we never import a merge block with the old version of proto-array. - // This will be safe since we can't actually process merge blocks until we've made this - // change to fork choice. 
- execution_status: ExecutionStatus::irrelevant(), - } - } -} - -impl Into for ProtoNodeV6 { - fn into(self) -> ProtoNodeV7 { - ProtoNodeV7 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: None, - finalized_checkpoint: None, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - } - } -} - -impl Into for ProtoNodeV7 { - fn into(self) -> ProtoNodeV10 { - ProtoNodeV10 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - unrealized_justified_checkpoint: None, - unrealized_finalized_checkpoint: None, - } - } -} - -impl Into for ProtoNodeV10 { - fn into(self) -> ProtoNodeV7 { - ProtoNodeV7 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - } - } -} - -impl Into for ProtoNodeV10 { - fn into(self) -> ProtoNode { - ProtoNode { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, - } - } -} - -impl From for ProtoNodeV7 { - fn from(container: ProtoNode) -> Self { - Self { - slot: container.slot, - state_root: container.state_root, - target_root: container.target_root, - current_epoch_shuffling_id: container.current_epoch_shuffling_id, - next_epoch_shuffling_id: container.next_epoch_shuffling_id, - root: container.root, - parent: container.parent, - justified_checkpoint: container.justified_checkpoint, - finalized_checkpoint: container.finalized_checkpoint, - weight: container.weight, - best_child: container.best_child, - best_descendant: container.best_descendant, - execution_status: container.execution_status, - } - } -} - -#[superstruct( - variants(V1, V6, V7, V10), - variant_attributes(derive(Encode, Decode)), - no_enum -)] -#[derive(Encode, Decode)] -pub struct SszContainer { - pub votes: Vec, - pub balances: Vec, - pub prune_threshold: usize, - #[superstruct(only(V1, V6))] - pub justified_epoch: Epoch, - #[superstruct(only(V1, V6))] - pub finalized_epoch: Epoch, - 
#[superstruct(only(V7, V10))] - pub justified_checkpoint: Checkpoint, - #[superstruct(only(V7, V10))] - pub finalized_checkpoint: Checkpoint, - #[superstruct(only(V1))] - pub nodes: Vec, - #[superstruct(only(V6))] - pub nodes: Vec, - #[superstruct(only(V7))] - pub nodes: Vec, - #[superstruct(only(V10))] - pub nodes: Vec, - pub indices: Vec<(Hash256, usize)>, - #[superstruct(only(V7, V10))] - pub previous_proposer_boost: ProposerBoost, -} - -impl Into for SszContainerV1 { - fn into(self) -> SszContainerV6 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV6 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_epoch: self.justified_epoch, - finalized_epoch: self.finalized_epoch, - nodes, - indices: self.indices, - } - } -} - -impl SszContainerV6 { - pub(crate) fn into_ssz_container_v7( - self, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, - ) -> SszContainerV7 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV7 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint, - finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: ProposerBoost::default(), - } - } -} - -impl Into for SszContainerV7 { - fn into(self) -> SszContainerV10 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV10 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: self.previous_proposer_boost, - } - } -} - -impl Into for SszContainerV10 { - fn into(self) -> SszContainerV7 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV7 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: self.previous_proposer_boost, - } - } -} - -impl Into for SszContainerV10 { - fn into(self) -> SszContainer { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainer { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: self.previous_proposer_boost, - } - } -} - -impl From for SszContainerV7 { - fn from(container: SszContainer) -> Self { - let nodes = container.nodes.into_iter().map(Into::into).collect(); - - Self { - votes: container.votes, - balances: container.balances, - prune_threshold: container.prune_threshold, - justified_checkpoint: container.justified_checkpoint, - finalized_checkpoint: container.finalized_checkpoint, - nodes, - indices: container.indices, - previous_proposer_boost: container.previous_proposer_boost, - } - } -} diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 33447bc2efd..40b73451cb0 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -298,27 +298,6 @@ impl SnapshotCache { }) } - /// Borrow the state corresponding to `block_root` if it exists in the cache *unadvanced*. 
-    ///
-    /// Care must be taken not to mutate the state in an invalid way. This function should only
-    /// be used to mutate the *caches* of the state, for example the tree hash cache when
-    /// calculating a light client merkle proof.
-    pub fn borrow_unadvanced_state_mut(
-        &mut self,
-        block_root: Hash256,
-    ) -> Option<&mut BeaconState<T>> {
-        self.snapshots
-            .iter_mut()
-            .find(|snapshot| {
-                // If the pre-state exists then state advance has already taken the state for
-                // `block_root` and mutated its tree hash cache. Rather than re-building it while
-                // holding the snapshot cache lock (>1 second), prefer to return `None` from this
-                // function and force the caller to load it from disk.
-                snapshot.beacon_block_root == block_root && snapshot.pre_state.is_none()
-            })
-            .map(|snapshot| &mut snapshot.beacon_state)
-    }
-
     /// If there is a snapshot with `block_root`, clone it and return the clone.
     pub fn get_cloned(
         &self,
diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs
index aff2be4cf14..4f63f4e7f97 100644
--- a/beacon_node/store/src/hot_cold_store.rs
+++ b/beacon_node/store/src/hot_cold_store.rs
@@ -386,16 +386,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         }
     }
 
-    /// Get a schema V8 or earlier full block by reading it and its payload from disk.
-    pub fn get_full_block_prior_to_v9(
-        &self,
-        block_root: &Hash256,
-    ) -> Result<Option<SignedBeaconBlock<E>>, Error> {
-        self.get_block_with(block_root, |bytes| {
-            SignedBeaconBlock::from_ssz_bytes(bytes, &self.spec)
-        })
-    }
-
     /// Convert a blinded block into a full block by loading its execution payload if necessary.
     pub fn make_full_block(
         &self,
diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md
index 2b0ac836a4e..0982e10ab90 100644
--- a/book/src/database-migrations.md
+++ b/book/src/database-migrations.md
@@ -19,13 +19,13 @@ validator client or the slasher**.
 | v2.0.0 | Oct 2021 | v5 | no |
 | v2.1.0 | Jan 2022 | v8 | no |
 | v2.2.0 | Apr 2022 | v8 | no |
-| v2.3.0 | May 2022 | v9 | yes (pre Bellatrix) |
-| v2.4.0 | Jul 2022 | v9 | yes (pre Bellatrix) |
+| v2.3.0 | May 2022 | v9 | yes from <= v3.3.0 |
+| v2.4.0 | Jul 2022 | v9 | yes from <= v3.3.0 |
 | v2.5.0 | Aug 2022 | v11 | yes |
 | v3.0.0 | Aug 2022 | v11 | yes |
 | v3.1.0 | Sep 2022 | v12 | yes |
 | v3.2.0 | Oct 2022 | v12 | yes |
-| v3.3.0 | TBD | v13 | yes |
+| v3.3.0 | Nov 2022 | v13 | yes |
 
 > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release
 > (e.g. v2.3.0).

From 8cb9b5e126f851139a1afcbe1a654f40f0fbde04 Mon Sep 17 00:00:00 2001
From: Mac L
Date: Fri, 9 Dec 2022 06:39:19 +0000
Subject: [PATCH 03/17] Expose certain `validator_monitor` metrics to the HTTP
 API (#3760)

## Issue Addressed

#3724

## Proposed Changes

Exposes certain `validator_monitor` metrics as an endpoint on the HTTP API. It will only return metrics for validators which are actively being monitored.

### Usage

The endpoint is a POST that takes the validator indices of interest in the request body:

```bash
curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indices": [12345]}' -H "Content-Type: application/json" | jq
```

```json
{
  "data": {
    "validators": {
      "12345": {
        "attestation_hits": 10,
        "attestation_misses": 0,
        "attestation_hit_percentage": 100,
        "attestation_head_hits": 10,
        "attestation_head_misses": 0,
        "attestation_head_hit_percentage": 100,
        "attestation_target_hits": 5,
        "attestation_target_misses": 5,
        "attestation_target_hit_percentage": 50
      }
    }
  }
}
```

## Additional Info

Based on #3756 which should be merged first.
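
Note that only validators registered with the validator monitor are reported, so the beacon node must be started with monitoring enabled for this endpoint to return anything. A sketch using the existing `--validator-monitor-*` flags (flag names per the beacon node CLI help; the pubkeys below are placeholders):

```bash
# Monitor specific validators by public key (placeholder pubkeys)...
lighthouse bn --http --validator-monitor-pubkeys 0xabcd...,0x1234...

# ...or automatically monitor every validator attached to this node.
lighthouse bn --http --validator-monitor-auto
```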
--- beacon_node/beacon_chain/src/lib.rs | 2 +- .../beacon_chain/src/validator_monitor.rs | 8 ++ beacon_node/http_api/src/lib.rs | 21 ++- beacon_node/http_api/src/ui.rs | 128 +++++++++++++++++- book/src/api-lighthouse.md | 27 ++++ 5 files changed, 181 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 3889fe4aa53..fd1c1cceb1f 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -22,7 +22,7 @@ pub mod fork_revert; mod head_tracker; pub mod historical_blocks; pub mod merge_readiness; -mod metrics; +pub mod metrics; pub mod migrate; mod naive_aggregation_pool; mod observed_aggregates; diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index f9203f74bf3..c99f85639cb 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -629,6 +629,14 @@ impl ValidatorMonitor { self.validators.len() } + // Return the `id`'s of all monitored validators. + pub fn get_all_monitored_validators(&self) -> Vec { + self.validators + .iter() + .map(|(_, val)| val.id.clone()) + .collect() + } + /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`. /// Otherwise, do nothing. pub fn auto_register_local_validator(&mut self, validator_index: u64) { diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 645c4ccfaba..b018f9c7375 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2899,6 +2899,22 @@ pub fn serve( }) }); + // POST lighthouse/ui/validator_metrics + let post_lighthouse_ui_validator_metrics = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("validator_metrics")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and_then( + |request_data: ui::ValidatorMetricsRequestData, chain: Arc>| { + blocking_json_task(move || { + ui::post_validator_monitor_metrics(request_data, chain) + .map(api_types::GenericResponse::from) + }) + }, + ); + // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) @@ -3349,6 +3365,7 @@ pub fn serve( .or(get_validator_sync_committee_contribution.boxed()) .or(get_lighthouse_health.boxed()) .or(get_lighthouse_ui_health.boxed()) + .or(get_lighthouse_ui_validator_count.boxed()) .or(get_lighthouse_syncing.boxed()) .or(get_lighthouse_nat.boxed()) .or(get_lighthouse_peers.boxed()) @@ -3366,7 +3383,6 @@ pub fn serve( .or(get_lighthouse_attestation_performance.boxed()) .or(get_lighthouse_block_packing_efficiency.boxed()) .or(get_lighthouse_merge_readiness.boxed()) - .or(get_lighthouse_ui_validator_count.boxed()) .or(get_events.boxed()), ) .boxed() @@ -3390,7 +3406,8 @@ pub fn serve( .or(post_lighthouse_liveness.boxed()) .or(post_lighthouse_database_reconstruct.boxed()) .or(post_lighthouse_database_historical_blocks.boxed()) - .or(post_lighthouse_block_rewards.boxed()), + .or(post_lighthouse_block_rewards.boxed()) + .or(post_lighthouse_ui_validator_metrics.boxed()), )) .recover(warp_utils::reject::handle_rejection) .with(slog_logging(log.clone())) diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index 8f9400dbbd0..a5b3a8b2f2e 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -1,10 +1,11 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{metrics, 
BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::ValidatorStatus; use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use warp_utils::reject::beacon_chain_error; -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorCountResponse { pub active_ongoing: u64, pub active_exiting: u64, @@ -69,3 +70,126 @@ pub fn get_validator_count( exited_slashed, }) } + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorMetricsRequestData { + indices: Vec, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorMetrics { + attestation_hits: u64, + attestation_misses: u64, + attestation_hit_percentage: f64, + attestation_head_hits: u64, + attestation_head_misses: u64, + attestation_head_hit_percentage: f64, + attestation_target_hits: u64, + attestation_target_misses: u64, + attestation_target_hit_percentage: f64, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorMetricsResponse { + validators: HashMap, +} + +pub fn post_validator_monitor_metrics( + request_data: ValidatorMetricsRequestData, + chain: Arc>, +) -> Result { + let validator_ids = chain + .validator_monitor + .read() + .get_all_monitored_validators() + .iter() + .cloned() + .collect::>(); + + let indices = request_data + .indices + .iter() + .map(|index| index.to_string()) + .collect::>(); + + let ids = validator_ids + .intersection(&indices) + .collect::>(); + + let mut validators = HashMap::new(); + + for id in ids { + let attestation_hits = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let attestation_misses = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let attestations = attestation_hits + attestation_misses; + let attestation_hit_percentage: f64 = if attestations == 0 { + 0.0 + } else { + (100 * attestation_hits / attestations) as f64 + }; + + let attestation_head_hits = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let attestation_head_misses = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let head_attestations = attestation_head_hits + attestation_head_misses; + let attestation_head_hit_percentage: f64 = if head_attestations == 0 { + 0.0 + } else { + (100 * attestation_head_hits / head_attestations) as f64 + }; + + let attestation_target_hits = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let attestation_target_misses = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let target_attestations = attestation_target_hits + attestation_target_misses; + let attestation_target_hit_percentage: f64 = if target_attestations == 0 { + 0.0 + } else { + (100 * attestation_target_hits / target_attestations) as f64 + }; + + let metrics = ValidatorMetrics { + attestation_hits, + attestation_misses, + attestation_hit_percentage, + attestation_head_hits, + 
attestation_head_misses, + attestation_head_hit_percentage, + attestation_target_hits, + attestation_target_misses, + attestation_target_hit_percentage, + }; + + validators.insert(id.clone(), metrics); + } + + Ok(ValidatorMetricsResponse { validators }) +} diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 763372692ee..2b7239361a4 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -121,6 +121,33 @@ curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: ap } ``` +### `/lighthouse/ui/validator_metrics` +Re-exposes certain metrics from the validator monitor to the HTTP API. +Will only return metrics for the validators currently being monitored and are present in the POST data. +```bash +curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indices": [12345]}' -H "Content-Type: application/json" | jq +``` + +```json +{ + "data": { + "validators": { + "12345": { + "attestation_hits": 10, + "attestation_misses": 0, + "attestation_hit_percentage": 100, + "attestation_head_hits": 10, + "attestation_head_misses": 0, + "attestation_head_hit_percentage": 100, + "attestation_target_hits": 5, + "attestation_target_misses": 5, + "attestation_target_hit_percentage": 50 + } + } + } +} +``` + ### `/lighthouse/syncing` ```bash From 80dd615fff736ae87c4c3979edaa9e019cd9b70c Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 9 Dec 2022 09:20:10 +0000 Subject: [PATCH 04/17] Update book with missing Lighthouse endpoints (#3769) ## Proposed Changes Adds docs for the following endpoints: - `/lighthouse/analysis/attestation_performance` - `/lighthouse/analysis/block_packing_efficiency` --- book/src/api-lighthouse.md | 144 +++++++++++++++++++++++++++++++++---- book/src/api.md | 2 +- 2 files changed, 132 insertions(+), 14 deletions(-) diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 2b7239361a4..05cb0b69cf8 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -501,6 +501,102 @@ The endpoint will return immediately. See the beacon node logs for an indication Manually provide `SignedBeaconBlock`s to backfill the database. This is intended for use by Lighthouse developers during testing only. +### `/lighthouse/merge_readiness` + +```bash +curl -X GET "http://localhost:5052/lighthouse/merge_readiness" | jq +``` + +``` +{ + "data":{ + "type":"ready", + "config":{ + "terminal_total_difficulty":"6400" + }, + "current_difficulty":"4800" + } + } +``` + +### `/lighthouse/analysis/attestation_performance/{index}` + +Fetch information about the attestation performance of a validator index or all validators for a +range of consecutive epochs. + +Two query parameters are required: + +* `start_epoch` (inclusive): the first epoch to compute attestation performance for. +* `end_epoch` (inclusive): the final epoch to compute attestation performance for. 
+ +Example: + +```bash +curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/1?start_epoch=1&end_epoch=1" | jq +``` + +```json +[ + { + "index": 1, + "epochs": { + "1": { + "active": true, + "head": true, + "target": true, + "source": true, + "delay": 1 + } + } + } +] +``` + +Instead of specifying a validator index, you can specify the entire validator set by using `global`: + +```bash +curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/global?start_epoch=1&end_epoch=1" | jq +``` + +```json +[ + { + "index": 0, + "epochs": { + "1": { + "active": true, + "head": true, + "target": true, + "source": true, + "delay": 1 + } + } + }, + { + "index": 1, + "epochs": { + "1": { + "active": true, + "head": true, + "target": true, + "source": true, + "delay": 1 + } + } + }, + { + .. + } +] + +``` + +Caveats: + +* For maximum efficiency the start_epoch should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. + This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, + and loading a state on a boundary is most efficient. + ### `/lighthouse/analysis/block_rewards` Fetch information about the block rewards paid to proposers for a range of consecutive blocks. @@ -513,7 +609,7 @@ Two query parameters are required: Example: ```bash -curl "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=32" | jq +curl -X GET "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=32" | jq ``` ```json @@ -541,21 +637,43 @@ Caveats: [block_reward_src]: https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs +### `/lighthouse/analysis/block_packing` -### `/lighthouse/merge_readiness` +Fetch information about the block packing efficiency of blocks for a range of consecutive +epochs. + +Two query parameters are required: + +* `start_epoch` (inclusive): the epoch of the first block to compute packing efficiency for. +* `end_epoch` (inclusive): the epoch of the last block to compute packing efficiency for. ```bash -curl -X GET "http://localhost:5052/lighthouse/merge_readiness" +curl -X GET "http://localhost:5052/lighthouse/analysis/block_packing_efficiency?start_epoch=1&end_epoch=1" | jq ``` +```json +[ + { + "slot": "33", + "block_hash": "0xb20970bb97c6c6de6b1e2b689d6381dd15b3d3518fbaee032229495f963bd5da", + "proposer_info": { + "validator_index": 855, + "graffiti": "poapZoJ7zWNfK7F3nWjEausWVBvKa6gA" + }, + "available_attestations": 3805, + "included_attestations": 1143, + "prior_skip_slots": 1 + }, + { + .. + } +] ``` -{ - "data":{ - "type":"ready", - "config":{ - "terminal_total_difficulty":"6400" - }, - "current_difficulty":"4800" - } - } -``` + +Caveats: + +* `start_epoch` must not be `0`. +* For maximum efficiency the `start_epoch` should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. + This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, and + loading a state on a boundary is most efficient. + diff --git a/book/src/api.md b/book/src/api.md index f8c54ad9a91..5837ad9654a 100644 --- a/book/src/api.md +++ b/book/src/api.md @@ -6,4 +6,4 @@ RESTful HTTP/JSON APIs. There are two APIs served by Lighthouse: - [Beacon Node API](./api-bn.md) -- [Validator Client API](./api-vc.md) (not yet released). 
+- [Validator Client API](./api-vc.md) From 979b73c9b62832b0be42f917bdd15378df00562c Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 9 Dec 2022 09:20:13 +0000 Subject: [PATCH 05/17] Add API endpoint to get VC graffiti (#3779) ## Issue Addressed #3766 ## Proposed Changes Adds an endpoint to get the graffiti that will be used for the next block proposal for each validator. ## Usage ```bash curl -H "Authorization: Bearer api-token" http://localhost:9095/lighthouse/ui/graffiti | jq ``` ```json { "data": { "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e": "mr f was here", "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b": "mr v was here", "0x872c61b4a7f8510ec809e5b023f5fdda2105d024c470ddbbeca4bc74e8280af0d178d749853e8f6a841083ac1b4db98f": null } } ``` ## Additional Info This will only return graffiti that the validator client knows about. That is from these 3 sources: 1. Graffiti File 2. validator_definitions.yml 3. The `--graffiti` flag on the VC If the graffiti is set on the BN, it will not be returned. This may warrant an additional endpoint on the BN side which can be used in the event the endpoint returns `null`. --- book/src/api-vc-endpoints.md | 25 ++++++++++ validator_client/src/block_service.rs | 20 +++----- validator_client/src/http_api/mod.rs | 50 ++++++++++++++++++- validator_client/src/http_api/tests.rs | 2 + .../src/initialized_validators.rs | 9 ++++ validator_client/src/lib.rs | 28 ++++++++++- 6 files changed, 118 insertions(+), 16 deletions(-) diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 76cffc0e4f5..80a14ae7710 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -117,6 +117,31 @@ Returns information regarding the health of the host machine. } ``` +## `GET /lighthouse/ui/graffiti` + +Returns the graffiti that will be used for the next block proposal of each validator. + +### HTTP Specification + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/ui/graffiti` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | + +### Example Response Body + +```json +{ + "data": { + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e": "mr f was here", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b": "mr v was here", + "0x872c61b4a7f8510ec809e5b023f5fdda2105d024c470ddbbeca4bc74e8280af0d178d749853e8f6a841083ac1b4db98f": null + } +} +``` + ## `GET /lighthouse/spec` Returns the Ethereum proof-of-stake consensus specification loaded for this validator. diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index b0b69a4f50d..f0d2c9081f0 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -1,6 +1,7 @@ use crate::beacon_node_fallback::{Error as FallbackError, Errors}; use crate::{ beacon_node_fallback::{BeaconNodeFallback, RequireSynced}, + determine_graffiti, graffiti_file::GraffitiFile, OfflineOnFailure, }; @@ -298,18 +299,13 @@ impl BlockService { })? 
.into(); - let graffiti = self - .graffiti_file - .clone() - .and_then(|mut g| match g.load_graffiti(&validator_pubkey) { - Ok(g) => g, - Err(e) => { - warn!(log, "Failed to read graffiti file"; "error" => ?e); - None - } - }) - .or_else(|| self.validator_store.graffiti(&validator_pubkey)) - .or(self.graffiti); + let graffiti = determine_graffiti( + &validator_pubkey, + log, + self.graffiti_file.clone(), + self.validator_store.graffiti(&validator_pubkey), + self.graffiti, + ); let randao_reveal_ref = &randao_reveal; let self_ref = &self; diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index df5d0c606e9..600e7a4c683 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -4,7 +4,7 @@ mod keystores; mod remotekeys; mod tests; -use crate::ValidatorStore; +use crate::{determine_graffiti, GraffitiFile, ValidatorStore}; use account_utils::{ mnemonic_from_phrase, validator_definitions::{SigningDefinition, ValidatorDefinition, Web3SignerDefinition}, @@ -13,13 +13,14 @@ pub use api_secret::ApiSecret; use create_validator::{create_validators_mnemonic, create_validators_web3signer}; use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, - types::{self as api_types, GenericResponse, PublicKey, PublicKeyBytes}, + types::{self as api_types, GenericResponse, Graffiti, PublicKey, PublicKeyBytes}, }; use lighthouse_version::version_with_platform; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use slog::{crit, info, warn, Logger}; use slot_clock::SlotClock; +use std::collections::HashMap; use std::future::Future; use std::marker::PhantomData; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; @@ -65,6 +66,8 @@ pub struct Context { pub api_secret: ApiSecret, pub validator_store: Option>>, pub validator_dir: Option, + pub graffiti_file: Option, + pub graffiti_flag: Option, pub spec: ChainSpec, pub config: Config, pub log: Logger, @@ -177,6 +180,12 @@ pub fn serve( }) }); + let inner_graffiti_file = ctx.graffiti_file.clone(); + let graffiti_file_filter = warp::any().map(move || inner_graffiti_file.clone()); + + let inner_graffiti_flag = ctx.graffiti_flag; + let graffiti_flag_filter = warp::any().map(move || inner_graffiti_flag); + let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); @@ -329,6 +338,42 @@ pub fn serve( }) }); + let get_lighthouse_ui_graffiti = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("graffiti")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(graffiti_file_filter) + .and(graffiti_flag_filter) + .and(signer.clone()) + .and(log_filter.clone()) + .and_then( + |validator_store: Arc>, + graffiti_file: Option, + graffiti_flag: Option, + signer, + log| { + blocking_signed_json_task(signer, move || { + let mut result = HashMap::new(); + for (key, graffiti_definition) in validator_store + .initialized_validators() + .read() + .get_all_validators_graffiti() + { + let graffiti = determine_graffiti( + key, + &log, + graffiti_file.clone(), + graffiti_definition, + graffiti_flag, + ); + result.insert(key.to_string(), graffiti.map(|g| g.as_utf8_lossy())); + } + Ok(api_types::GenericResponse::from(result)) + }) + }, + ); + // POST lighthouse/validators/ let post_validators = warp::path("lighthouse") .and(warp::path("validators")) @@ -945,6 +990,7 @@ pub fn serve( .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey) .or(get_lighthouse_ui_health) + 
.or(get_lighthouse_ui_graffiti) .or(get_fee_recipient) .or(get_gas_limit) .or(get_std_keystores) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index b121dda5b1a..5aa24a2b022 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -120,6 +120,8 @@ impl ApiTester { api_secret, validator_dir: Some(validator_dir.path().into()), validator_store: Some(validator_store.clone()), + graffiti_file: None, + graffiti_flag: Some(Graffiti::default()), spec: E::default_spec(), config: HttpConfig { enabled: true, diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 8d9fbe281fc..e8fe6ff2ff9 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -634,6 +634,15 @@ impl InitializedValidators { self.validators.get(public_key).and_then(|v| v.graffiti) } + /// Returns a `HashMap` of `public_key` -> `graffiti` for all initialized validators. + pub fn get_all_validators_graffiti(&self) -> HashMap<&PublicKeyBytes, Option> { + let mut result = HashMap::new(); + for public_key in self.validators.keys() { + result.insert(public_key, self.graffiti(public_key)); + } + result + } + /// Returns the `suggested_fee_recipient` for a given public key specified in the /// `ValidatorDefinitions`. pub fn suggested_fee_recipient(&self, public_key: &PublicKeyBytes) -> Option
{ diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 1f869562d19..819efec93c9 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -30,13 +30,14 @@ use crate::beacon_node_fallback::{ RequireSynced, }; use crate::doppelganger_service::DoppelgangerService; +use crate::graffiti_file::GraffitiFile; use account_utils::validator_definitions::ValidatorDefinitions; use attestation_service::{AttestationService, AttestationServiceBuilder}; use block_service::{BlockService, BlockServiceBuilder}; use clap::ArgMatches; use duties_service::DutiesService; use environment::RuntimeContext; -use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Timeouts}; +use eth2::{reqwest::ClientBuilder, types::Graffiti, BeaconNodeHttpClient, StatusCode, Timeouts}; use http_api::ApiSecret; use notifier::spawn_notifier; use parking_lot::RwLock; @@ -57,7 +58,7 @@ use tokio::{ sync::mpsc, time::{sleep, Duration}, }; -use types::{EthSpec, Hash256}; +use types::{EthSpec, Hash256, PublicKeyBytes}; use validator_store::ValidatorStore; /// The interval between attempts to contact the beacon node during startup. @@ -526,6 +527,8 @@ impl ProductionValidatorClient { api_secret, validator_store: Some(self.validator_store.clone()), validator_dir: Some(self.config.validator_dir.clone()), + graffiti_file: self.config.graffiti_file.clone(), + graffiti_flag: self.config.graffiti, spec: self.context.eth2_config.spec.clone(), config: self.config.http_api.clone(), log: log.clone(), @@ -726,3 +729,24 @@ pub fn load_pem_certificate>(pem_path: P) -> Result, + validator_definition_graffiti: Option, + graffiti_flag: Option, +) -> Option { + graffiti_file + .and_then(|mut g| match g.load_graffiti(validator_pubkey) { + Ok(g) => g, + Err(e) => { + warn!(log, "Failed to read graffiti file"; "error" => ?e); + None + } + }) + .or(validator_definition_graffiti) + .or(graffiti_flag) +} From c973bfc90c048def023b6203b42bfd79f9e9961c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Dec 2022 00:45:18 +0000 Subject: [PATCH 06/17] Reduce log severity for late and unrevealed blocks (#3775) ## Issue Addressed NA ## Proposed Changes In #3725 I introduced a `CRIT` log for unrevealed payloads, against @michaelsproul's [advice](https://github.com/sigp/lighthouse/pull/3725#discussion_r1034142113). After being woken up in the middle of the night by a block that was not revealed to the BN but *was* revealed to the network, I have capitulated. This PR implements @michaelsproul's suggestion and reduces the severity to `ERRO`. Additionally, I have dropped a `CRIT` to an `ERRO` for when a block is published late. The block in question was indeed published late on the network, however now that we have builders that can slow down block production I don't think the error is "actionable" enough to warrant a `CRIT` for the user. 
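For a concrete sense of the thresholds involved, a worked example, assuming mainnet's 12-second slots (where the unaggregated attestation production delay is one third of a slot; the constants below are illustrative, not new code in this PR):

```rust
use std::time::Duration;

fn main() {
    // With 12 s slots, the unaggregated attestation production delay is 4 s.
    let too_late_threshold = Duration::from_secs(12) / 3;
    let delayed_threshold = too_late_threshold / 2; // 2 s

    // A 5 s broadcast delay now logs "Block was broadcast too late" at ERRO
    // (previously CRIT), while a 3 s delay logs "Block broadcast was delayed".
    assert!(Duration::from_secs(5) >= too_late_threshold);
    assert!(Duration::from_secs(3) >= delayed_threshold);
    assert!(Duration::from_secs(3) < too_late_threshold);
}
```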
## Additional Info NA --- beacon_node/execution_layer/src/lib.rs | 2 +- beacon_node/http_api/src/publish_blocks.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2a2225cbdfd..dfce9745774 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1556,7 +1556,7 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, &[metrics::FAILURE], ); - crit!( + error!( self.log(), "Builder failed to reveal payload"; "info" => "this relay failure may cause a missed proposal", diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 08355c1d376..5d27f117b02 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -5,7 +5,7 @@ use beacon_chain::{ }; use lighthouse_network::PubsubMessage; use network::NetworkMessage; -use slog::{crit, error, info, warn, Logger}; +use slog::{error, info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; @@ -72,10 +72,10 @@ pub async fn publish_block( // // Check to see the thresholds are non-zero to avoid logging errors with small // slot times (e.g., during testing) - let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); - let error_threshold = crit_threshold / 2; - if delay >= crit_threshold { - crit!( + let too_late_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let delayed_threshold = too_late_threshold / 2; + if delay >= too_late_threshold { + error!( log, "Block was broadcast too late"; "msg" => "system may be overloaded, block likely to be orphaned", @@ -83,7 +83,7 @@ pub async fn publish_block( "slot" => block.slot(), "root" => ?root, ) - } else if delay >= error_threshold { + } else if delay >= delayed_threshold { error!( log, "Block broadcast was delayed"; From 173a0abab4cde6f30b6263372884eda77c986423 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 13 Dec 2022 17:03:21 +1100 Subject: [PATCH 07/17] Fix `Withdrawal` serialisation and check address change fork (#3789) * Disallow address changes before Capella * Quote u64s in Withdrawal serialisation --- beacon_node/beacon_chain/src/beacon_chain.rs | 7 +++++++ beacon_node/beacon_chain/src/errors.rs | 1 + consensus/types/src/withdrawal.rs | 2 ++ 3 files changed, 10 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7841f53e74b..0bbbe92356c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2191,7 +2191,14 @@ impl BeaconChain { ) -> Result, Error> { #[cfg(feature = "withdrawals-processing")] { + let current_fork = self.spec.fork_name_at_slot::(self.slot()?); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { + // Disallow BLS to execution changes prior to the Capella fork. 
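+            // Such changes cannot be included in a block until the Capella fork, so
+            // there is no benefit to accepting or propagating them any earlier.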
+            return Err(Error::BlsToExecutionChangeBadFork(current_fork));
+        }
+
         let wall_clock_state = self.wall_clock_state()?;
+
         Ok(self
             .observed_bls_to_execution_changes
             .lock()
diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs
index 60282426a5a..3a2e4a0bc53 100644
--- a/beacon_node/beacon_chain/src/errors.rs
+++ b/beacon_node/beacon_chain/src/errors.rs
@@ -206,6 +206,7 @@ pub enum BeaconChainError {
     MissingPersistedForkChoice,
     CommitteePromiseFailed(oneshot_broadcast::Error),
     MaxCommitteePromises(usize),
+    BlsToExecutionChangeBadFork(ForkName),
 }

 easy_from_to!(SlotProcessingError, BeaconChainError);
diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs
index 10530dcb507..6f14cf1c52e 100644
--- a/consensus/types/src/withdrawal.rs
+++ b/consensus/types/src/withdrawal.rs
@@ -12,8 +12,10 @@ use tree_hash_derive::TreeHash;
 pub struct Withdrawal {
     #[serde(with = "eth2_serde_utils::quoted_u64")]
     pub index: u64,
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
     pub validator_index: u64,
     pub address: Address,
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
     pub amount: u64,
 }

From 1b28ef8a8d89d3867aaa86aebb36046fe29b2a85 Mon Sep 17 00:00:00 2001
From: GeemoCandama
Date: Tue, 13 Dec 2022 06:24:51 +0000
Subject: [PATCH 08/17] Adding light_client gossip topics (#3693)

## Issue Addressed
Implementing the light client gossip topics; this is still a work in progress. Partially addresses #3651.

## Proposed Changes
Add the light client gossip topics: I'm going to implement the `light_client_finality_update` and `light_client_optimistic_update` gossip topics. Currently I've attempted the former and I'm seeking feedback.

## Additional Info
I've only implemented the `light_client_finality_update` topic because I wanted to make sure I was on the correct path. Also, checking that the gossiped `LightClientFinalityUpdate` matches the locally constructed one is not implemented yet, because caching the updates will make this much easier. Could someone give me some feedback on this, please?
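One detail of the implementation worth highlighting: both verifiers apply the same propagation-timing rule, rejecting an update until one third of its signature slot has elapsed. A minimal sketch of that check (the free-standing helper and its signature are illustrative; the real logic lives inline in the two new verification modules):

```rust
use std::time::Duration;

/// Returns `true` when an update was seen before one third of its signature
/// slot has elapsed (allowing for gossip clock disparity); the gossip
/// verifiers reject such updates with `Error::TooEarly`.
fn too_early(
    seen_timestamp: Duration,
    slot_start: Duration,
    slot_duration: Duration,
    max_clock_disparity: Duration,
) -> bool {
    seen_timestamp + max_clock_disparity < slot_start + slot_duration / 3
}
```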
Co-authored-by: GeemoCandama <104614073+GeemoCandama@users.noreply.github.com> --- beacon_node/beacon_chain/src/beacon_chain.rs | 44 ++++++ beacon_node/beacon_chain/src/builder.rs | 2 + beacon_node/beacon_chain/src/lib.rs | 2 + ...ght_client_finality_update_verification.rs | 135 +++++++++++++++++ ...t_client_optimistic_update_verification.rs | 125 ++++++++++++++++ beacon_node/beacon_chain/src/metrics.rs | 18 +++ .../src/service/gossip_cache.rs | 26 ++++ .../lighthouse_network/src/service/utils.rs | 2 + .../lighthouse_network/src/types/mod.rs | 5 +- .../lighthouse_network/src/types/pubsub.rs | 40 ++++- .../lighthouse_network/src/types/topics.rs | 15 ++ .../network/src/beacon_processor/mod.rs | 110 +++++++++++++- .../beacon_processor/worker/gossip_methods.rs | 141 +++++++++++++++++- beacon_node/network/src/metrics.rs | 23 +++ beacon_node/network/src/router/mod.rs | 24 +++ beacon_node/network/src/router/processor.rs | 33 +++- beacon_node/network/src/service.rs | 23 +++ consensus/types/src/lib.rs | 3 + .../types/src/light_client_finality_update.rs | 37 ++--- .../src/light_client_optimistic_update.rs | 10 +- 20 files changed, 778 insertions(+), 40 deletions(-) create mode 100644 beacon_node/beacon_chain/src/light_client_finality_update_verification.rs create mode 100644 beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 32ae742d86f..309f6a83e07 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -22,6 +22,12 @@ use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, Prep use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::head_tracker::HeadTracker; use crate::historical_blocks::HistoricalBlockError; +use crate::light_client_finality_update_verification::{ + Error as LightClientFinalityUpdateError, VerifiedLightClientFinalityUpdate, +}; +use crate::light_client_optimistic_update_verification::{ + Error as LightClientOptimisticUpdateError, VerifiedLightClientOptimisticUpdate, +}; use crate::migrate::BackgroundMigrator; use crate::naive_aggregation_pool::{ AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool, @@ -335,6 +341,10 @@ pub struct BeaconChain { /// Maintains a record of which validators we've seen attester slashings for. pub(crate) observed_attester_slashings: Mutex, T::EthSpec>>, + /// The most recently validated light client finality update received on gossip. + pub latest_seen_finality_update: Mutex>>, + /// The most recently validated light client optimistic update received on gossip. + pub latest_seen_optimistic_update: Mutex>>, /// Provides information from the Ethereum 1 (PoW) chain. pub eth1_chain: Option>, /// Interfaces with the execution client. 
@@ -1779,6 +1789,40 @@ impl BeaconChain { }) } + /// Accepts some 'LightClientFinalityUpdate' from the network and attempts to verify it + pub fn verify_finality_update_for_gossip( + self: &Arc, + light_client_finality_update: LightClientFinalityUpdate, + seen_timestamp: Duration, + ) -> Result, LightClientFinalityUpdateError> { + VerifiedLightClientFinalityUpdate::verify( + light_client_finality_update, + self, + seen_timestamp, + ) + .map(|v| { + metrics::inc_counter(&metrics::FINALITY_UPDATE_PROCESSING_SUCCESSES); + v + }) + } + + /// Accepts some 'LightClientOptimisticUpdate' from the network and attempts to verify it + pub fn verify_optimistic_update_for_gossip( + self: &Arc, + light_client_optimistic_update: LightClientOptimisticUpdate, + seen_timestamp: Duration, + ) -> Result, LightClientOptimisticUpdateError> { + VerifiedLightClientOptimisticUpdate::verify( + light_client_optimistic_update, + self, + seen_timestamp, + ) + .map(|v| { + metrics::inc_counter(&metrics::OPTIMISTIC_UPDATE_PROCESSING_SUCCESSES); + v + }) + } + /// Accepts some attestation-type object and attempts to verify it in the context of fork /// choice. If it is valid it is applied to `self.fork_choice`. /// diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 58bbb2b5c6a..f5bd85dec28 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -780,6 +780,8 @@ where observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), + latest_seen_finality_update: <_>::default(), + latest_seen_optimistic_update: <_>::default(), eth1_chain: self.eth1_chain, execution_layer: self.execution_layer, genesis_validators_root, diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index fd1c1cceb1f..a55532ac12f 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -21,6 +21,8 @@ pub mod fork_choice_signal; pub mod fork_revert; mod head_tracker; pub mod historical_blocks; +pub mod light_client_finality_update_verification; +pub mod light_client_optimistic_update_verification; pub mod merge_readiness; pub mod metrics; pub mod migrate; diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs new file mode 100644 index 00000000000..7c431ebccca --- /dev/null +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -0,0 +1,135 @@ +use crate::{ + beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, +}; +use derivative::Derivative; +use slot_clock::SlotClock; +use std::time::Duration; +use strum::AsRefStr; +use types::{ + light_client_update::Error as LightClientUpdateError, LightClientFinalityUpdate, Slot, +}; + +/// Returned when a light client finality update was not successfully verified. It might not have been verified for +/// two reasons: +/// +/// - The light client finality message is malformed or inappropriate for the context (indicated by all variants +/// other than `BeaconChainError`). +/// - The application encountered an internal error whilst attempting to determine validity +/// (the `BeaconChainError` variant) +#[derive(Debug, AsRefStr)] +pub enum Error { + /// Light client finality update message with a lower or equal finalized_header slot already forwarded. 
+    FinalityUpdateAlreadySeen,
+    /// The light client finality message was received prior to one-third of the slot duration
+    /// elapsing (with respect to the gossip clock disparity and slot clock duration).
+    ///
+    /// ## Peer scoring
+    ///
+    /// Assuming the local clock is correct, the peer has sent an invalid message.
+    TooEarly,
+    /// Light client finality update message does not match the locally constructed one.
+    ///
+    /// ## Peer Scoring
+    ///
+    InvalidLightClientFinalityUpdate,
+    /// Signature slot start time is none.
+    SigSlotStartIsNone,
+    /// Failed to construct a LightClientFinalityUpdate from state.
+    FailedConstructingUpdate,
+    /// Beacon chain error occurred.
+    BeaconChainError(BeaconChainError),
+    LightClientUpdateError(LightClientUpdateError),
+}
+
+impl From<BeaconChainError> for Error {
+    fn from(e: BeaconChainError) -> Self {
+        Error::BeaconChainError(e)
+    }
+}
+
+impl From<LightClientUpdateError> for Error {
+    fn from(e: LightClientUpdateError) -> Self {
+        Error::LightClientUpdateError(e)
+    }
+}
+
+/// Wraps a `LightClientFinalityUpdate` that has been verified for propagation on the gossip network.
+#[derive(Derivative)]
+#[derivative(Clone(bound = "T: BeaconChainTypes"))]
+pub struct VerifiedLightClientFinalityUpdate<T: BeaconChainTypes> {
+    light_client_finality_update: LightClientFinalityUpdate<T::EthSpec>,
+    seen_timestamp: Duration,
+}
+
+impl<T: BeaconChainTypes> VerifiedLightClientFinalityUpdate<T> {
+    /// Returns `Ok(Self)` if the `light_client_finality_update` is valid to be (re)published on the gossip
+    /// network.
+    pub fn verify(
+        light_client_finality_update: LightClientFinalityUpdate<T::EthSpec>,
+        chain: &BeaconChain<T>,
+        seen_timestamp: Duration,
+    ) -> Result<Self, Error> {
+        let gossiped_finality_slot = light_client_finality_update.finalized_header.slot;
+        let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0);
+        let signature_slot = light_client_finality_update.signature_slot;
+        let start_time = chain.slot_clock.start_of(signature_slot);
+        let mut latest_seen_finality_update = chain.latest_seen_finality_update.lock();
+
+        let head = chain.canonical_head.cached_head();
+        let head_block = &head.snapshot.beacon_block;
+        let attested_block_root = head_block.message().parent_root();
+        let attested_block = chain
+            .get_blinded_block(&attested_block_root)?
+            .ok_or(Error::FailedConstructingUpdate)?;
+        let mut attested_state = chain
+            .get_state(&attested_block.state_root(), Some(attested_block.slot()))?
+            .ok_or(Error::FailedConstructingUpdate)?;
+
+        let finalized_block_root = attested_state.finalized_checkpoint().root;
+        let finalized_block = chain
+            .get_blinded_block(&finalized_block_root)?
+            .ok_or(Error::FailedConstructingUpdate)?;
+        let latest_seen_finality_update_slot = match latest_seen_finality_update.as_ref() {
+            Some(update) => update.finalized_header.slot,
+            None => Slot::new(0),
+        };
+
+        // verify that no other finality_update with a lower or equal
+        // finalized_header.slot was already forwarded on the network
+        if gossiped_finality_slot <= latest_seen_finality_update_slot {
+            return Err(Error::FinalityUpdateAlreadySeen);
+        }
+
+        // verify that enough time has passed for the block to have been propagated
+        match start_time {
+            Some(time) => {
+                if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration
+                {
+                    return Err(Error::TooEarly);
+                }
+            }
+            None => return Err(Error::SigSlotStartIsNone),
+        }
+
+        let head_state = &head.snapshot.beacon_state;
+        let finality_update = LightClientFinalityUpdate::new(
+            &chain.spec,
+            head_state,
+            head_block,
+            &mut attested_state,
+            &finalized_block,
+        )?;
+
+        // verify that the gossiped finality update is the same as the locally constructed one.
+        if finality_update != light_client_finality_update {
+            return Err(Error::InvalidLightClientFinalityUpdate);
+        }
+
+        *latest_seen_finality_update = Some(light_client_finality_update.clone());
+
+        Ok(Self {
+            light_client_finality_update,
+            seen_timestamp,
+        })
+    }
+}
diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs
new file mode 100644
index 00000000000..ec9c90e7355
--- /dev/null
+++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs
@@ -0,0 +1,125 @@
+use crate::{
+    beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes,
+};
+use derivative::Derivative;
+use slot_clock::SlotClock;
+use std::time::Duration;
+use strum::AsRefStr;
+use types::{
+    light_client_update::Error as LightClientUpdateError, LightClientOptimisticUpdate, Slot,
+};
+
+/// Returned when a light client optimistic update was not successfully verified. It might not have been verified for
+/// two reasons:
+///
+/// - The light client optimistic message is malformed or inappropriate for the context (indicated by all variants
+///   other than `BeaconChainError`).
+/// - The application encountered an internal error whilst attempting to determine validity
+///   (the `BeaconChainError` variant)
#[derive(Debug, AsRefStr)]
+pub enum Error {
+    /// Light client optimistic update message with a lower or equal optimistic_header slot already forwarded.
+    OptimisticUpdateAlreadySeen,
+    /// The light client optimistic message was received prior to one-third of the slot duration
+    /// elapsing (with respect to the gossip clock disparity and slot clock duration).
+    ///
+    /// ## Peer scoring
+    ///
+    /// Assuming the local clock is correct, the peer has sent an invalid message.
+    TooEarly,
+    /// Light client optimistic update message does not match the locally constructed one.
+    ///
+    /// ## Peer Scoring
+    ///
+    InvalidLightClientOptimisticUpdate,
+    /// Signature slot start time is none.
+    SigSlotStartIsNone,
+    /// Failed to construct a LightClientOptimisticUpdate from state.
+    FailedConstructingUpdate,
+    /// Beacon chain error occurred.
+ BeaconChainError(BeaconChainError), + LightClientUpdateError(LightClientUpdateError), +} + +impl From for Error { + fn from(e: BeaconChainError) -> Self { + Error::BeaconChainError(e) + } +} + +impl From for Error { + fn from(e: LightClientUpdateError) -> Self { + Error::LightClientUpdateError(e) + } +} + +/// Wraps a `LightClientOptimisticUpdate` that has been verified for propagation on the gossip network. +#[derive(Derivative)] +#[derivative(Clone(bound = "T: BeaconChainTypes"))] +pub struct VerifiedLightClientOptimisticUpdate { + light_client_optimistic_update: LightClientOptimisticUpdate, + seen_timestamp: Duration, +} + +impl VerifiedLightClientOptimisticUpdate { + /// Returns `Ok(Self)` if the `light_client_optimistic_update` is valid to be (re)published on the gossip + /// network. + pub fn verify( + light_client_optimistic_update: LightClientOptimisticUpdate, + chain: &BeaconChain, + seen_timestamp: Duration, + ) -> Result { + let gossiped_optimistic_slot = light_client_optimistic_update.attested_header.slot; + let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); + let signature_slot = light_client_optimistic_update.signature_slot; + let start_time = chain.slot_clock.start_of(signature_slot); + let mut latest_seen_optimistic_update = chain.latest_seen_optimistic_update.lock(); + + let head = chain.canonical_head.cached_head(); + let head_block = &head.snapshot.beacon_block; + let attested_block_root = head_block.message().parent_root(); + let attested_block = chain + .get_blinded_block(&attested_block_root)? + .ok_or(Error::FailedConstructingUpdate)?; + + let attested_state = chain + .get_state(&attested_block.state_root(), Some(attested_block.slot()))? + .ok_or(Error::FailedConstructingUpdate)?; + let latest_seen_optimistic_update_slot = match latest_seen_optimistic_update.as_ref() { + Some(update) => update.attested_header.slot, + None => Slot::new(0), + }; + + // verify that no other optimistic_update with a lower or equal + // optimistic_header.slot was already forwarded on the network + if gossiped_optimistic_slot <= latest_seen_optimistic_update_slot { + return Err(Error::OptimisticUpdateAlreadySeen); + } + + // verify that enough time has passed for the block to have been propagated + match start_time { + Some(time) => { + if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration + { + return Err(Error::TooEarly); + } + } + None => return Err(Error::SigSlotStartIsNone), + } + + let optimistic_update = + LightClientOptimisticUpdate::new(&chain.spec, head_block, &attested_state)?; + + // verify that the gossiped optimistic update is the same as the locally constructed one. + if optimistic_update != light_client_optimistic_update { + return Err(Error::InvalidLightClientOptimisticUpdate); + } + + *latest_seen_optimistic_update = Some(light_client_optimistic_update.clone()); + + Ok(Self { + light_client_optimistic_update, + seen_timestamp, + }) + } +} diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index b37c5afc35f..c681570b110 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -948,6 +948,24 @@ lazy_static! { ); } +// Fifth lazy-static block is used to account for macro recursion limit. +lazy_static! 
{ + /* + * Light server message verification + */ + pub static ref FINALITY_UPDATE_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "light_client_finality_update_verification_success_total", + "Number of light client finality updates verified for gossip" + ); + /* + * Light server message verification + */ + pub static ref OPTIMISTIC_UPDATE_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "light_client_optimistic_update_verification_success_total", + "Number of light client optimistic updates verified for gossip" + ); +} + /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, /// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`. pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 4842605f7aa..c784191cd30 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -34,6 +34,10 @@ pub struct GossipCache { signed_contribution_and_proof: Option, /// Timeout for sync committee messages. sync_committee_message: Option, + /// Timeout for light client finality updates. + light_client_finality_update: Option, + /// Timeout for light client optimistic updates. + light_client_optimistic_update: Option, } #[derive(Default)] @@ -55,6 +59,10 @@ pub struct GossipCacheBuilder { signed_contribution_and_proof: Option, /// Timeout for sync committee messages. sync_committee_message: Option, + /// Timeout for light client finality updates. + light_client_finality_update: Option, + /// Timeout for light client optimistic updates. + light_client_optimistic_update: Option, } #[allow(dead_code)] @@ -113,6 +121,18 @@ impl GossipCacheBuilder { self } + /// Timeout for light client finality update messages. + pub fn light_client_finality_update_timeout(mut self, timeout: Duration) -> Self { + self.light_client_finality_update = Some(timeout); + self + } + + /// Timeout for light client optimistic update messages. 
+ pub fn light_client_optimistic_update_timeout(mut self, timeout: Duration) -> Self { + self.light_client_optimistic_update = Some(timeout); + self + } + pub fn build(self) -> GossipCache { let GossipCacheBuilder { default_timeout, @@ -124,6 +144,8 @@ impl GossipCacheBuilder { attester_slashing, signed_contribution_and_proof, sync_committee_message, + light_client_finality_update, + light_client_optimistic_update, } = self; GossipCache { expirations: DelayQueue::default(), @@ -136,6 +158,8 @@ impl GossipCacheBuilder { attester_slashing: attester_slashing.or(default_timeout), signed_contribution_and_proof: signed_contribution_and_proof.or(default_timeout), sync_committee_message: sync_committee_message.or(default_timeout), + light_client_finality_update: light_client_finality_update.or(default_timeout), + light_client_optimistic_update: light_client_optimistic_update.or(default_timeout), } } } @@ -158,6 +182,8 @@ impl GossipCache { GossipKind::AttesterSlashing => self.attester_slashing, GossipKind::SignedContributionAndProof => self.signed_contribution_and_proof, GossipKind::SyncCommitteeMessage(_) => self.sync_committee_message, + GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, + GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, }; let expire_timeout = match expire_timeout { Some(expire_timeout) => expire_timeout, diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 8073ae77683..09a8d1a8636 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -253,6 +253,8 @@ pub(crate) fn create_whitelist_filter( add(ProposerSlashing); add(AttesterSlashing); add(SignedContributionAndProof); + add(LightClientFinalityUpdate); + add(LightClientOptimisticUpdate); for id in 0..attestation_subnet_count { add(Attestation(SubnetId::new(id))); } diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index ad02e07fb70..2a5ca6c8062 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -16,4 +16,7 @@ pub use globals::NetworkGlobals; pub use pubsub::{PubsubMessage, SnappyTransform}; pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::{BackFillState, SyncState}; -pub use topics::{subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS}; +pub use topics::{ + subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS, + LIGHT_CLIENT_GOSSIP_TOPICS, +}; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index a01072f8e4e..b036e558c99 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -9,10 +9,10 @@ use std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ - Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockMerge, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, + LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockBase, 
SignedBeaconBlockMerge,
+    SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId,
 };

 #[derive(Debug, Clone, PartialEq)]
@@ -33,6 +33,10 @@ pub enum PubsubMessage<T: EthSpec> {
     SignedContributionAndProof(Box<SignedContributionAndProof<T>>),
     /// Gossipsub message providing notification of unaggregated sync committee signatures with its subnet id.
     SyncCommitteeMessage(Box<(SyncSubnetId, SyncCommitteeMessage)>),
+    /// Gossipsub message providing notification of a light client finality update.
+    LightClientFinalityUpdate(Box<LightClientFinalityUpdate<T>>),
+    /// Gossipsub message providing notification of a light client optimistic update.
+    LightClientOptimisticUpdate(Box<LightClientOptimisticUpdate<T>>),
 }

 // Implements the `DataTransform` trait of gossipsub to employ snappy compression
@@ -115,6 +119,10 @@ impl<T: EthSpec> PubsubMessage<T> {
             PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing,
             PubsubMessage::SignedContributionAndProof(_) => GossipKind::SignedContributionAndProof,
             PubsubMessage::SyncCommitteeMessage(data) => GossipKind::SyncCommitteeMessage(data.0),
+            PubsubMessage::LightClientFinalityUpdate(_) => GossipKind::LightClientFinalityUpdate,
+            PubsubMessage::LightClientOptimisticUpdate(_) => {
+                GossipKind::LightClientOptimisticUpdate
+            }
         }
     }

@@ -206,6 +214,22 @@ impl<T: EthSpec> PubsubMessage<T> {
                             sync_committee,
                         ))))
                     }
+                    GossipKind::LightClientFinalityUpdate => {
+                        let light_client_finality_update =
+                            LightClientFinalityUpdate::from_ssz_bytes(data)
+                                .map_err(|e| format!("{:?}", e))?;
+                        Ok(PubsubMessage::LightClientFinalityUpdate(Box::new(
+                            light_client_finality_update,
+                        )))
+                    }
+                    GossipKind::LightClientOptimisticUpdate => {
+                        let light_client_optimistic_update =
+                            LightClientOptimisticUpdate::from_ssz_bytes(data)
+                                .map_err(|e| format!("{:?}", e))?;
+                        Ok(PubsubMessage::LightClientOptimisticUpdate(Box::new(
+                            light_client_optimistic_update,
+                        )))
+                    }
                 }
             }
         }
@@ -227,6 +251,8 @@ impl<T: EthSpec> PubsubMessage<T> {
             PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(),
             PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(),
             PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(),
+            PubsubMessage::LightClientFinalityUpdate(data) => data.as_ssz_bytes(),
+            PubsubMessage::LightClientOptimisticUpdate(data) => data.as_ssz_bytes(),
         }
     }
 }
@@ -261,6 +287,12 @@ impl<T: EthSpec> std::fmt::Display for PubsubMessage<T> {
             PubsubMessage::SyncCommitteeMessage(data) => {
                 write!(f, "Sync committee message: subnet_id: {}", *data.0)
             }
+            PubsubMessage::LightClientFinalityUpdate(_data) => {
+                write!(f, "Light Client Finality Update")
+            }
+            PubsubMessage::LightClientOptimisticUpdate(_data) => {
+                write!(f, "Light Client Optimistic Update")
+            }
         }
     }
 }
diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs
index 47d703c2600..e7e3cf4abbe 100644
--- a/beacon_node/lighthouse_network/src/types/topics.rs
+++ b/beacon_node/lighthouse_network/src/types/topics.rs
@@ -18,6 +18,8 @@ pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing";
 pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing";
 pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof";
 pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_";
+pub const LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update";
+pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update";

 pub const CORE_TOPICS: [GossipKind; 6] = [
     GossipKind::BeaconBlock,
@@ -28,6 +30,11 @@ pub const CORE_TOPICS: [GossipKind; 6] = [
     GossipKind::SignedContributionAndProof,
 ];

+pub const LIGHT_CLIENT_GOSSIP_TOPICS: 
[GossipKind; 2] = [ + GossipKind::LightClientFinalityUpdate, + GossipKind::LightClientOptimisticUpdate, +]; + /// A gossipsub topic which encapsulates the type of messages that should be sent and received over /// the pubsub protocol and the way the messages should be encoded. #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] @@ -63,6 +70,10 @@ pub enum GossipKind { /// Topic for publishing unaggregated sync committee signatures on a particular subnet. #[strum(serialize = "sync_committee")] SyncCommitteeMessage(SyncSubnetId), + /// Topic for publishing finality updates for light clients. + LightClientFinalityUpdate, + /// Topic for publishing optimistic updates for light clients. + LightClientOptimisticUpdate, } impl std::fmt::Display for GossipKind { @@ -136,6 +147,8 @@ impl GossipTopic { VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit, PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, + LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate, + LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate, topic => match committee_topic_index(topic) { Some(subnet) => match subnet { Subnet::Attestation(s) => GossipKind::Attestation(s), @@ -194,6 +207,8 @@ impl std::fmt::Display for GossipTopic { GossipKind::SyncCommitteeMessage(index) => { format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) } + GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), + GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), }; write!( f, diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 9528cfd1dfb..743a97a29c2 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -62,9 +62,9 @@ use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, + SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, @@ -129,6 +129,14 @@ const MAX_GOSSIP_PROPOSER_SLASHING_QUEUE_LEN: usize = 4_096; /// before we start dropping them. const MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN: usize = 4_096; +/// The maximum number of queued `LightClientFinalityUpdate` objects received on gossip that will be stored +/// before we start dropping them. +const MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN: usize = 1_024; + +/// The maximum number of queued `LightClientOptimisticUpdate` objects received on gossip that will be stored +/// before we start dropping them. +const MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN: usize = 1_024; + /// The maximum number of queued `SyncCommitteeMessage` objects that will be stored before we start dropping /// them. 
const MAX_SYNC_MESSAGE_QUEUE_LEN: usize = 2048; @@ -195,6 +203,8 @@ pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; pub const GOSSIP_ATTESTER_SLASHING: &str = "gossip_attester_slashing"; pub const GOSSIP_SYNC_SIGNATURE: &str = "gossip_sync_signature"; pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution"; +pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; +pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const RPC_BLOCK: &str = "rpc_block"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const STATUS_PROCESSING: &str = "status_processing"; @@ -476,6 +486,42 @@ impl WorkEvent { } } + /// Create a new `Work` event for some light client finality update. + pub fn gossip_light_client_finality_update( + message_id: MessageId, + peer_id: PeerId, + light_client_finality_update: Box>, + seen_timestamp: Duration, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::GossipLightClientFinalityUpdate { + message_id, + peer_id, + light_client_finality_update, + seen_timestamp, + }, + } + } + + /// Create a new `Work` event for some light client optimistic update. + pub fn gossip_light_client_optimistic_update( + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: Box>, + seen_timestamp: Duration, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::GossipLightClientOptimisticUpdate { + message_id, + peer_id, + light_client_optimistic_update, + seen_timestamp, + }, + } + } + /// Create a new `Work` event for some attester slashing. pub fn gossip_attester_slashing( message_id: MessageId, @@ -730,6 +776,18 @@ pub enum Work { sync_contribution: Box>, seen_timestamp: Duration, }, + GossipLightClientFinalityUpdate { + message_id: MessageId, + peer_id: PeerId, + light_client_finality_update: Box>, + seen_timestamp: Duration, + }, + GossipLightClientOptimisticUpdate { + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: Box>, + seen_timestamp: Duration, + }, RpcBlock { block_root: Hash256, block: Arc>, @@ -777,6 +835,8 @@ impl Work { Work::GossipAttesterSlashing { .. } => GOSSIP_ATTESTER_SLASHING, Work::GossipSyncSignature { .. } => GOSSIP_SYNC_SIGNATURE, Work::GossipSyncContribution { .. } => GOSSIP_SYNC_CONTRIBUTION, + Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, + Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::Status { .. } => STATUS_PROCESSING, @@ -916,6 +976,10 @@ impl BeaconProcessor { let mut gossip_attester_slashing_queue = FifoQueue::new(MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN); + // Using a FIFO queue for light client updates to maintain sequence order. + let mut finality_update_queue = FifoQueue::new(MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN); + let mut optimistic_update_queue = FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN); + // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); @@ -1250,6 +1314,12 @@ impl BeaconProcessor { Work::GossipSyncContribution { .. } => { sync_contribution_queue.push(work) } + Work::GossipLightClientFinalityUpdate { .. } => { + finality_update_queue.push(work, work_id, &self.log) + } + Work::GossipLightClientOptimisticUpdate { .. 
} => { + optimistic_update_queue.push(work, work_id, &self.log) + } Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), Work::ChainSegment { ref process_id, .. } => match process_id { ChainSegmentProcessId::RangeBatchId { .. } @@ -1551,7 +1621,7 @@ impl BeaconProcessor { ) }), /* - * Syn contribution verification. + * Sync contribution verification. */ Work::GossipSyncContribution { message_id, @@ -1566,6 +1636,38 @@ impl BeaconProcessor { seen_timestamp, ) }), + /* + * Light client finality update verification. + */ + Work::GossipLightClientFinalityUpdate { + message_id, + peer_id, + light_client_finality_update, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_finality_update( + message_id, + peer_id, + *light_client_finality_update, + seen_timestamp, + ) + }), + /* + * Light client optimistic update verification. + */ + Work::GossipLightClientOptimisticUpdate { + message_id, + peer_id, + light_client_optimistic_update, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_optimistic_update( + message_id, + peer_id, + *light_client_optimistic_update, + seen_timestamp, + ) + }), /* * Verification for beacon blocks received during syncing via RPC. */ diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 947d9cfe274..ef23f6761f6 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -3,6 +3,8 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::store::Error; use beacon_chain::{ attestation_verification::{self, Error as AttnError, VerifiedAttestation}, + light_client_finality_update_verification::Error as LightClientFinalityUpdateError, + light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, @@ -18,9 +20,10 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, - Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, + LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, + SyncSubnetId, }; use super::{ @@ -1303,6 +1306,138 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL); } + pub fn process_gossip_finality_update( + self, + message_id: MessageId, + peer_id: PeerId, + light_client_finality_update: LightClientFinalityUpdate, + seen_timestamp: Duration, + ) { + match self + .chain + .verify_finality_update_for_gossip(light_client_finality_update, seen_timestamp) + { + Ok(_verified_light_client_finality_update) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + } + Err(e) => { + metrics::register_finality_update_error(&e); + match e { + LightClientFinalityUpdateError::InvalidLightClientFinalityUpdate => { + debug!( + self.log, + "LC 
invalid finality update"; + "peer" => %peer_id, + "error" => ?e, + ); + + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "light_client_gossip_error", + ); + } + LightClientFinalityUpdateError::TooEarly => { + debug!( + self.log, + "LC finality update too early"; + "peer" => %peer_id, + "error" => ?e, + ); + + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "light_client_gossip_error", + ); + } + LightClientFinalityUpdateError::FinalityUpdateAlreadySeen => debug!( + self.log, + "LC finality update already seen"; + "peer" => %peer_id, + "error" => ?e, + ), + LightClientFinalityUpdateError::BeaconChainError(_) + | LightClientFinalityUpdateError::LightClientUpdateError(_) + | LightClientFinalityUpdateError::SigSlotStartIsNone + | LightClientFinalityUpdateError::FailedConstructingUpdate => debug!( + self.log, + "LC error constructing finality update"; + "peer" => %peer_id, + "error" => ?e, + ), + } + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + }; + } + + pub fn process_gossip_optimistic_update( + self, + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: LightClientOptimisticUpdate, + seen_timestamp: Duration, + ) { + match self + .chain + .verify_optimistic_update_for_gossip(light_client_optimistic_update, seen_timestamp) + { + Ok(_verified_light_client_optimistic_update) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + } + Err(e) => { + metrics::register_optimistic_update_error(&e); + match e { + LightClientOptimisticUpdateError::InvalidLightClientOptimisticUpdate => { + debug!( + self.log, + "LC invalid optimistic update"; + "peer" => %peer_id, + "error" => ?e, + ); + + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "light_client_gossip_error", + ) + } + LightClientOptimisticUpdateError::TooEarly => { + debug!( + self.log, + "LC optimistic update too early"; + "peer" => %peer_id, + "error" => ?e, + ); + + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "light_client_gossip_error", + ); + } + LightClientOptimisticUpdateError::OptimisticUpdateAlreadySeen => debug!( + self.log, + "LC optimistic update already seen"; + "peer" => %peer_id, + "error" => ?e, + ), + LightClientOptimisticUpdateError::BeaconChainError(_) + | LightClientOptimisticUpdateError::LightClientUpdateError(_) + | LightClientOptimisticUpdateError::SigSlotStartIsNone + | LightClientOptimisticUpdateError::FailedConstructingUpdate => debug!( + self.log, + "LC error constructing optimistic update"; + "peer" => %peer_id, + "error" => ?e, + ), + } + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + }; + } + /// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the /// network. fn handle_attestation_verification_failure( diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index b4e7a3bace3..b4f3f29f934 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -1,5 +1,7 @@ use beacon_chain::{ attestation_verification::Error as AttnError, + light_client_finality_update_verification::Error as LightClientFinalityUpdateError, + light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, sync_committee_verification::Error as SyncCommitteeError, }; use fnv::FnvHashMap; @@ -252,6 +254,19 @@ lazy_static! 
{ "Gossipsub sync_committee errors per error type", &["type"] ); + pub static ref GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE: Result = + try_create_int_counter_vec( + "gossipsub_light_client_finality_update_errors_per_type", + "Gossipsub light_client_finality_update errors per error type", + &["type"] + ); + pub static ref GOSSIP_OPTIMISTIC_UPDATE_ERRORS_PER_TYPE: Result = + try_create_int_counter_vec( + "gossipsub_light_client_optimistic_update_errors_per_type", + "Gossipsub light_client_optimistic_update errors per error type", + &["type"] + ); + /* * Network queue metrics @@ -358,6 +373,14 @@ pub fn update_bandwidth_metrics(bandwidth: Arc) { ); } +pub fn register_finality_update_error(error: &LightClientFinalityUpdateError) { + inc_counter_vec(&GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE, &[error.as_ref()]); +} + +pub fn register_optimistic_update_error(error: &LightClientOptimisticUpdateError) { + inc_counter_vec(&GOSSIP_OPTIMISTIC_UPDATE_ERRORS_PER_TYPE, &[error.as_ref()]); +} + pub fn register_attestation_error(error: &AttnError) { inc_counter_vec(&GOSSIP_ATTESTATION_ERRORS_PER_TYPE, &[error.as_ref()]); } diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 5df308f2592..ce98337cfed 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -280,6 +280,30 @@ impl Router { sync_committtee_msg.0, ); } + PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => { + trace!( + self.log, + "Received light client finality update"; + "peer_id" => %peer_id + ); + self.processor.on_light_client_finality_update_gossip( + id, + peer_id, + light_client_finality_update, + ); + } + PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => { + trace!( + self.log, + "Received light client optimistic update"; + "peer_id" => %peer_id + ); + self.processor.on_light_client_optimistic_update_gossip( + id, + peer_id, + light_client_optimistic_update, + ); + } } } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 3c9a4a81fb9..999ba29e90a 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -17,8 +17,9 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::SyncCommitteeMessage; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, + Attestation, AttesterSlashing, EthSpec, LightClientFinalityUpdate, LightClientOptimisticUpdate, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, + SignedVoluntaryExit, SubnetId, SyncSubnetId, }; /// Processes validated messages from the network. 
It relays necessary data to the syncing thread @@ -368,6 +369,34 @@ impl Processor { )) } + pub fn on_light_client_finality_update_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + light_client_finality_update: Box>, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_light_client_finality_update( + message_id, + peer_id, + light_client_finality_update, + timestamp_now(), + )) + } + + pub fn on_light_client_optimistic_update_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: Box>, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_light_client_optimistic_update( + message_id, + peer_id, + light_client_optimistic_update, + timestamp_now(), + )) + } + fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent) { self.beacon_processor_send .try_send(work) diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 31c42b860de..4568ed1a229 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -208,6 +208,8 @@ pub struct NetworkService { metrics_update: tokio::time::Interval, /// gossipsub_parameter_update timer gossipsub_parameter_update: tokio::time::Interval, + /// enable_light_client_server indicator + enable_light_client_server: bool, /// The logger for the network service. fork_context: Arc, log: slog::Logger, @@ -345,6 +347,7 @@ impl NetworkService { gossipsub_parameter_update, fork_context, log: network_log, + enable_light_client_server: config.enable_light_client_server, }; network_service.spawn_service(executor); @@ -679,6 +682,7 @@ impl NetworkService { } return; } + let mut subscribed_topics: Vec = vec![]; for topic_kind in lighthouse_network::types::CORE_TOPICS.iter() { for fork_digest in self.required_gossip_fork_digests() { @@ -695,6 +699,25 @@ impl NetworkService { } } + if self.enable_light_client_server { + for light_client_topic_kind in + lighthouse_network::types::LIGHT_CLIENT_GOSSIP_TOPICS.iter() + { + for fork_digest in self.required_gossip_fork_digests() { + let light_client_topic = GossipTopic::new( + light_client_topic_kind.clone(), + GossipEncoding::default(), + fork_digest, + ); + if self.libp2p.subscribe(light_client_topic.clone()) { + subscribed_topics.push(light_client_topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %light_client_topic); + } + } + } + } + // If we are to subscribe to all subnets we do it here if self.subscribe_all_subnets { for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 4a6cc57b119..37bab8b4806 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -51,6 +51,7 @@ pub mod graffiti; pub mod historical_batch; pub mod indexed_attestation; pub mod light_client_bootstrap; +pub mod light_client_finality_update; pub mod light_client_optimistic_update; pub mod light_client_update; pub mod pending_attestation; @@ -136,6 +137,8 @@ pub use crate::free_attestation::FreeAttestation; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; +pub use crate::light_client_finality_update::LightClientFinalityUpdate; +pub use crate::light_client_optimistic_update::LightClientOptimisticUpdate; pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; pub use crate::payload::{BlindedPayload, 
BlockType, ExecPayload, FullPayload}; diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index fe26c0fa3eb..cae6266f9e7 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -1,10 +1,10 @@ -use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; -use crate::{light_client_update::*, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec}; -use safe_arith::ArithError; +use super::{ + BeaconBlockHeader, EthSpec, FixedVector, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, + Slot, SyncAggregate, +}; +use crate::{light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::typenum::{U5, U6}; -use std::sync::Arc; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -28,43 +28,38 @@ pub struct LightClientFinalityUpdate { impl LightClientFinalityUpdate { pub fn new( - chain_spec: ChainSpec, - beacon_state: BeaconState, - block: BeaconBlock, + chain_spec: &ChainSpec, + beacon_state: &BeaconState, + block: &SignedBeaconBlock, attested_state: &mut BeaconState, - finalized_block: BeaconBlock, + finalized_block: &SignedBlindedBeaconBlock, ) -> Result { let altair_fork_epoch = chain_spec .altair_fork_epoch .ok_or(Error::AltairForkNotActive)?; - if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch { + if beacon_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch { return Err(Error::AltairForkNotActive); } - let sync_aggregate = block.body().sync_aggregate()?; + let sync_aggregate = block.message().body().sync_aggregate()?; if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { return Err(Error::NotEnoughSyncCommitteeParticipants); } // Compute and validate attested header. 
let mut attested_header = attested_state.latest_block_header().clone(); - attested_header.state_root = attested_state.tree_hash_root(); + attested_header.state_root = attested_state.update_tree_hash_cache()?; // Build finalized header from finalized block - let finalized_header = BeaconBlockHeader { - slot: finalized_block.slot(), - proposer_index: finalized_block.proposer_index(), - parent_root: finalized_block.parent_root(), - state_root: finalized_block.state_root(), - body_root: finalized_block.body_root(), - }; + let finalized_header = finalized_block.message().block_header(); + if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { return Err(Error::InvalidFinalizedBlock); } let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; Ok(Self { - attested_header: attested_header, - finalized_header: finalized_header, + attested_header, + finalized_header, finality_branch: FixedVector::new(finality_branch)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block.slot(), diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 9592bf1c23b..8dda8cd5aed 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -1,6 +1,6 @@ use super::{BeaconBlockHeader, EthSpec, Slot, SyncAggregate}; use crate::{ - light_client_update::Error, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec, + light_client_update::Error, test_utils::TestRandom, BeaconState, ChainSpec, SignedBeaconBlock, }; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -23,9 +23,9 @@ pub struct LightClientOptimisticUpdate { impl LightClientOptimisticUpdate { pub fn new( - chain_spec: ChainSpec, - block: BeaconBlock, - attested_state: BeaconState, + chain_spec: &ChainSpec, + block: &SignedBeaconBlock, + attested_state: &BeaconState, ) -> Result { let altair_fork_epoch = chain_spec .altair_fork_epoch @@ -34,7 +34,7 @@ impl LightClientOptimisticUpdate { return Err(Error::AltairForkNotActive); } - let sync_aggregate = block.body().sync_aggregate()?; + let sync_aggregate = block.message().body().sync_aggregate()?; if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { return Err(Error::NotEnoughSyncCommitteeParticipants); } From 6f79263a2191030b98f2030800486f4e833f3900 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Dec 2022 06:24:52 +0000 Subject: [PATCH 09/17] Make all validator monitor logs `INFO` (#3727) ## Issue Addressed NA ## Proposed Changes This is a *potentially* contentious change, but I find it annoying that the validator monitor logs `WARN` and `ERRO` for imperfect attestations. Perfect attestation performance is unachievable (don't believe those photo-shopped beauty magazines!) since missed and poorly-packed blocks by other validators will reduce your performance. When the validator monitor is on with 10s or more validators, I find the logs are washed out with ERROs that are not worth investigating. I suspect that users who really want to know if validators are missing attestations can do so by matching the content of the log, rather than the log level. I'm open to feedback about this, especially from anyone who is relying on the current log levels. ## Additional Info NA ## Breaking Changes Notes The validator monitor will no longer emit `WARN` and `ERRO` logs for sub-optimal attestation performance. The logs will now be emitted at `INFO` level. 
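Operators who previously alerted on log level can match on message content instead. A minimal sketch, assuming slog's default text output and a hypothetical log file path (the message strings come from the `validator_monitor.rs` diff below):

```bash
# Alert on missed or mismatched attestations by message text rather than level.
# "beacon_node.log" is a placeholder; the path depends on how the node is run.
grep -E "Previous epoch attestation\(s\) (missing|failed to match (head|target))" beacon_node.log
```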
This change was introduced to avoid cluttering the `WARN` and `ERRO` logs with alerts that are frequently triggered by the actions of other network participants (e.g., a missed block) and require no action from the user. --- beacon_node/beacon_chain/src/validator_monitor.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index c99f85639cb..2d093ff886e 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -4,7 +4,7 @@ use crate::metrics; use parking_lot::RwLock; -use slog::{crit, debug, error, info, warn, Logger}; +use slog::{crit, debug, info, Logger}; use slot_clock::SlotClock; use state_processing::per_epoch_processing::{ errors::EpochProcessingError, EpochProcessingSummary, @@ -580,7 +580,7 @@ impl ValidatorMonitor { ); } if !attestation_miss.is_empty() { - error!( + info!( self.log, "Previous epoch attestation(s) missing"; "epoch" => prev_epoch, @@ -589,7 +589,7 @@ impl ValidatorMonitor { } if !head_miss.is_empty() { - warn!( + info!( self.log, "Previous epoch attestation(s) failed to match head"; "epoch" => prev_epoch, @@ -598,7 +598,7 @@ impl ValidatorMonitor { } if !target_miss.is_empty() { - warn!( + info!( self.log, "Previous epoch attestation(s) failed to match target"; "epoch" => prev_epoch, @@ -607,7 +607,7 @@ impl ValidatorMonitor { } if !suboptimal_inclusion.is_empty() { - warn!( + info!( self.log, "Previous epoch attestation(s) had sub-optimal inclusion delay"; "epoch" => prev_epoch, From 775d22229903c560a0468d844b1b4daadc3b2e1e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 13 Dec 2022 09:57:26 +0000 Subject: [PATCH 10/17] Enable proposer boost re-orging (#2860) ## Proposed Changes With proposer boosting implemented (#2822) we have an opportunity to re-org out late blocks. This PR adds three flags to the BN to control this behaviour: * `--disable-proposer-reorgs`: turn aggressive re-orging off (it's on by default). * `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled. * `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs, meaning re-orgs will only be attempted when the chain is finalizing optimally. For safety Lighthouse will only attempt a re-org under very specific conditions: 1. The block being proposed is 1 slot after the canonical head, and the canonical head is 1 slot after its parent. i.e. at slot `n + 1` rather than building on the block from slot `n` we build on the block from slot `n - 1`. 2. The current canonical head received less than N% of the committee vote. N should be set depending on the proposer boost fraction itself, the fraction of the network that is believed to be applying it, and the size of the largest entity that could be hoarding votes. 3. The current canonical head arrived after the attestation deadline from our perspective. This condition was only added to support suppression of forkchoiceUpdated messages, but makes intuitive sense. 4. The block is being proposed in the first 2 seconds of the slot. This gives it time to propagate and receive the proposer boost. 
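To illustrate the flags described above, a node opting into a more conservative threshold might be started like this (a sketch, not a recommendation; the flag names are from this PR, the values are arbitrary):

```bash
# Re-orging is on by default; these flags only tune it.
# --proposer-reorg-threshold: attempt to orphan a head with less than N% of the committee vote (default 20).
# --proposer-reorg-epochs-since-finalization: give up when finality lags by more than N epochs (default 2).
lighthouse bn \
  --proposer-reorg-threshold 10 \
  --proposer-reorg-epochs-since-finalization 2
```

To turn the behaviour off entirely, pass `--disable-proposer-reorgs`.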
## Additional Info For the initial idea and background, see: https://github.com/ethereum/consensus-specs/pull/2353#issuecomment-950238004 There is also a specification for this feature here: https://github.com/ethereum/consensus-specs/pull/3034 Co-authored-by: Michael Sproul Co-authored-by: pawan --- .github/custom/clippy.toml | 1 + Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 643 ++++++++++++++---- .../src/beacon_fork_choice_store.rs | 53 +- beacon_node/beacon_chain/src/builder.rs | 26 +- .../beacon_chain/src/canonical_head.rs | 31 +- beacon_node/beacon_chain/src/chain_config.rs | 25 +- beacon_node/beacon_chain/src/errors.rs | 4 +- beacon_node/beacon_chain/src/fork_revert.rs | 3 +- beacon_node/beacon_chain/src/lib.rs | 4 +- beacon_node/beacon_chain/src/metrics.rs | 34 +- .../beacon_chain/src/proposer_prep_service.rs | 13 +- .../beacon_chain/src/snapshot_cache.rs | 7 +- .../beacon_chain/src/state_advance_timer.rs | 15 +- beacon_node/beacon_chain/src/test_utils.rs | 127 +++- beacon_node/beacon_chain/tests/merge.rs | 2 + .../tests/payload_invalidation.rs | 21 +- beacon_node/client/src/builder.rs | 6 +- .../src/engine_api/json_structures.rs | 10 +- beacon_node/execution_layer/src/lib.rs | 28 +- .../test_utils/execution_block_generator.rs | 162 ++++- .../src/test_utils/handle_rpc.rs | 8 + .../execution_layer/src/test_utils/hook.rs | 27 + .../src/test_utils/mock_execution_layer.rs | 15 + .../execution_layer/src/test_utils/mod.rs | 7 +- beacon_node/http_api/tests/common.rs | 27 +- .../http_api/tests/interactive_tests.rs | 506 +++++++++++++- beacon_node/src/cli.rs | 32 + beacon_node/src/config.rs | 26 + book/src/SUMMARY.md | 1 + book/src/builders.md | 12 +- book/src/late-block-re-orgs.md | 60 ++ common/task_executor/src/test_utils.rs | 7 + consensus/fork_choice/src/fork_choice.rs | 83 ++- .../fork_choice/src/fork_choice_store.rs | 3 +- consensus/fork_choice/tests/tests.rs | 8 +- consensus/proto_array/Cargo.toml | 1 + consensus/proto_array/src/error.rs | 9 + .../src/fork_choice_test_definition.rs | 17 +- .../execution_status.rs | 2 +- .../proto_array/src/justified_balances.rs | 62 ++ consensus/proto_array/src/lib.rs | 11 +- consensus/proto_array/src/proto_array.rs | 43 +- .../src/proto_array_fork_choice.rs | 278 +++++++- consensus/proto_array/src/ssz_container.rs | 18 +- lcli/src/main.rs | 8 + lcli/src/new_testnet.rs | 4 + lighthouse/tests/beacon_node.rs | 72 ++ lighthouse/tests/validator_client.rs | 18 + scripts/local_testnet/setup.sh | 1 + scripts/local_testnet/vars.env | 3 + validator_client/src/block_service.rs | 21 + validator_client/src/cli.rs | 14 +- validator_client/src/config.rs | 13 + validator_client/src/lib.rs | 1 + 55 files changed, 2301 insertions(+), 333 deletions(-) create mode 100644 beacon_node/execution_layer/src/test_utils/hook.rs create mode 100644 book/src/late-block-re-orgs.md create mode 100644 consensus/proto_array/src/justified_balances.rs diff --git a/.github/custom/clippy.toml b/.github/custom/clippy.toml index f50e35bcdfd..3ccbeee44a7 100644 --- a/.github/custom/clippy.toml +++ b/.github/custom/clippy.toml @@ -16,6 +16,7 @@ async-wrapper-methods = [ "task_executor::TaskExecutor::spawn_blocking_handle", "warp_utils::task::blocking_task", "warp_utils::task::blocking_json_task", + "beacon_chain::beacon_chain::BeaconChain::spawn_blocking_handle", "validator_client::http_api::blocking_signed_json_task", "execution_layer::test_utils::MockServer::new", "execution_layer::test_utils::MockServer::new_with_config", diff --git a/Cargo.lock b/Cargo.lock index 
12d44d3e161..2f7cd873dd7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4965,6 +4965,7 @@ version = "0.2.0" dependencies = [ "eth2_ssz", "eth2_ssz_derive", + "safe_arith", "serde", "serde_derive", "serde_yaml", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 309f6a83e07..55d6ae29efb 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -44,9 +44,8 @@ use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; -use crate::proposer_prep_service::PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; -use crate::snapshot_cache::SnapshotCache; +use crate::snapshot_cache::{BlockProductionPreState, SnapshotCache}; use crate::sync_committee_verification::{ Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution, }; @@ -56,9 +55,7 @@ use crate::validator_monitor::{ HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS, }; use crate::validator_pubkey_cache::ValidatorPubkeyCache; -use crate::BeaconForkChoiceStore; -use crate::BeaconSnapshot; -use crate::{metrics, BeaconChainError}; +use crate::{metrics, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead}; use eth2::types::{EventKind, SseBlock, SyncDuty}; use execution_layer::{ BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, @@ -72,7 +69,7 @@ use itertools::process_results; use itertools::Itertools; use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; -use proto_array::CountUnrealizedFull; +use proto_array::{CountUnrealizedFull, DoNotReOrg, ProposerHeadError}; use safe_arith::SafeArith; use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; @@ -103,6 +100,7 @@ use store::{ use task_executor::{ShutdownReason, TaskExecutor}; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; +use types::consts::merge::INTERVALS_PER_SLOT; use types::*; pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; @@ -127,6 +125,12 @@ pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1) /// The timeout for the eth1 finalization cache pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200); +/// The latest delay from the start of the slot at which to attempt a 1-slot re-org. +fn max_re_org_slot_delay(seconds_per_slot: u64) -> Duration { + // Allow at least half of the attestation deadline for the block to propagate. + Duration::from_secs(seconds_per_slot) / INTERVALS_PER_SLOT as u32 / 2 +} + // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); @@ -188,6 +192,21 @@ pub enum ProduceBlockVerification { NoVerification, } +/// Payload attributes for which the `beacon_chain` crate is responsible. +pub struct PrePayloadAttributes { + pub proposer_index: u64, + pub prev_randao: Hash256, +} + +/// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already +/// been checked (`AlreadyApplied`). It is safe to specify `Yes` even if re-orgs are disabled. 
+#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +pub enum OverrideForkchoiceUpdate { + #[default] + Yes, + AlreadyApplied, +} + /// The accepted clock drift for nodes gossiping blocks and attestations. See: /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration @@ -2756,6 +2775,7 @@ impl BeaconChain { if !payload_verification_status.is_optimistic() && block.slot() + EARLY_ATTESTER_CACHE_HISTORIC_SLOTS >= current_slot { + let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE); match fork_choice.get_head(current_slot, &self.spec) { // This block became the head, add it to the early attester cache. Ok(new_head_root) if new_head_root == block_root => { @@ -2789,6 +2809,7 @@ impl BeaconChain { "error" => ?e ), } + drop(fork_choice_timer); } drop(post_exec_timer); @@ -3475,6 +3496,7 @@ impl BeaconChain { // signed. If we miss the cache or we're producing a block that conflicts with the head, // fall back to getting the head from `slot - 1`. let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); + // Atomically read some values from the head whilst avoiding holding cached head `Arc` any // longer than necessary. let (head_slot, head_block_root) = { @@ -3482,8 +3504,19 @@ impl BeaconChain { (head.head_slot(), head.head_block_root()) }; let (state, state_root_opt) = if head_slot < slot { + // Attempt an aggressive re-org if configured and the conditions are right. + if let Some(re_org_state) = self.get_state_for_re_org(slot, head_slot, head_block_root) + { + info!( + self.log, + "Proposing block to re-org current head"; + "slot" => slot, + "head_to_reorg" => %head_block_root, + ); + (re_org_state.pre_state, re_org_state.state_root) + } // Normal case: proposing a block atop the current head. Use the snapshot cache. - if let Some(pre_state) = self + else if let Some(pre_state) = self .snapshot_cache .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .and_then(|snapshot_cache| { @@ -3523,6 +3556,400 @@ impl BeaconChain { Ok((state, state_root_opt)) } + /// Fetch the beacon state to use for producing a block if a 1-slot proposer re-org is viable. + /// + /// This function will return `None` if proposer re-orgs are disabled. + fn get_state_for_re_org( + &self, + slot: Slot, + head_slot: Slot, + canonical_head: Hash256, + ) -> Option> { + let re_org_threshold = self.config.re_org_threshold?; + + if self.spec.proposer_score_boost.is_none() { + warn!( + self.log, + "Ignoring proposer re-org configuration"; + "reason" => "this network does not have proposer boost enabled" + ); + return None; + } + + let slot_delay = self + .slot_clock + .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .or_else(|| { + warn!( + self.log, + "Not attempting re-org"; + "error" => "unable to read slot clock" + ); + None + })?; + + // Attempt a proposer re-org if: + // + // 1. It seems we have time to propagate and still receive the proposer boost. + // 2. The current head block was seen late. + // 3. The `get_proposer_head` conditions from fork choice pass. 
+ let proposing_on_time = slot_delay < max_re_org_slot_delay(self.spec.seconds_per_slot); + if !proposing_on_time { + debug!( + self.log, + "Not attempting re-org"; + "reason" => "not proposing on time", + ); + return None; + } + + let head_late = self.block_observed_after_attestation_deadline(canonical_head, head_slot); + if !head_late { + debug!( + self.log, + "Not attempting re-org"; + "reason" => "head not late" + ); + return None; + } + + // Is the current head weak and appropriate for re-orging? + let proposer_head_timer = + metrics::start_timer(&metrics::BLOCK_PRODUCTION_GET_PROPOSER_HEAD_TIMES); + let proposer_head = self + .canonical_head + .fork_choice_read_lock() + .get_proposer_head( + slot, + canonical_head, + re_org_threshold, + self.config.re_org_max_epochs_since_finalization, + ) + .map_err(|e| match e { + ProposerHeadError::DoNotReOrg(reason) => { + debug!( + self.log, + "Not attempting re-org"; + "reason" => %reason, + ); + } + ProposerHeadError::Error(e) => { + warn!( + self.log, + "Not attempting re-org"; + "error" => ?e, + ); + } + }) + .ok()?; + drop(proposer_head_timer); + let re_org_parent_block = proposer_head.parent_node.root; + + // Only attempt a re-org if we hit the snapshot cache. + let pre_state = self + .snapshot_cache + .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .and_then(|snapshot_cache| { + snapshot_cache.get_state_for_block_production(re_org_parent_block) + }) + .or_else(|| { + debug!( + self.log, + "Not attempting re-org"; + "reason" => "missed snapshot cache", + "parent_block" => ?re_org_parent_block, + ); + None + })?; + + info!( + self.log, + "Attempting re-org due to weak head"; + "weak_head" => ?canonical_head, + "parent" => ?re_org_parent_block, + "head_weight" => proposer_head.head_node.weight, + "threshold_weight" => proposer_head.re_org_weight_threshold + ); + + Some(pre_state) + } + + /// Get the proposer index and `prev_randao` value for a proposal at slot `proposal_slot`. + /// + /// The `proposer_head` may be the head block of `cached_head` or its parent. An error will + /// be returned for any other value. + pub fn get_pre_payload_attributes( + &self, + proposal_slot: Slot, + proposer_head: Hash256, + cached_head: &CachedHead, + ) -> Result, Error> { + let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); + + let head_block_root = cached_head.head_block_root(); + let parent_block_root = cached_head.parent_block_root(); + + // The proposer head must be equal to the canonical head or its parent. + if proposer_head != head_block_root && proposer_head != parent_block_root { + warn!( + self.log, + "Unable to compute payload attributes"; + "block_root" => ?proposer_head, + "head_block_root" => ?head_block_root, + ); + return Ok(None); + } + + // Compute the proposer index. + let head_epoch = cached_head.head_slot().epoch(T::EthSpec::slots_per_epoch()); + let shuffling_decision_root = if head_epoch == proposal_epoch { + cached_head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(proposer_head)? 
+ } else { + proposer_head + }; + let cached_proposer = self + .beacon_proposer_cache + .lock() + .get_slot::(shuffling_decision_root, proposal_slot); + let proposer_index = if let Some(proposer) = cached_proposer { + proposer.index as u64 + } else { + if head_epoch + 2 < proposal_epoch { + warn!( + self.log, + "Skipping proposer preparation"; + "msg" => "this is a non-critical issue that can happen on unhealthy nodes or \ + networks.", + "proposal_epoch" => proposal_epoch, + "head_epoch" => head_epoch, + ); + + // Don't skip the head forward more than two epochs. This avoids burdening an + // unhealthy node. + // + // Although this node might miss out on preparing for a proposal, they should still + // be able to propose. This will prioritise beacon chain health over efficient + // packing of execution blocks. + return Ok(None); + } + + let (proposers, decision_root, _, fork) = + compute_proposer_duties_from_head(proposal_epoch, self)?; + + let proposer_offset = (proposal_slot % T::EthSpec::slots_per_epoch()).as_usize(); + let proposer = *proposers + .get(proposer_offset) + .ok_or(BeaconChainError::NoProposerForSlot(proposal_slot))?; + + self.beacon_proposer_cache.lock().insert( + proposal_epoch, + decision_root, + proposers, + fork, + )?; + + // It's possible that the head changes whilst computing these duties. If so, abandon + // this routine since the change of head would have also spawned another instance of + // this routine. + // + // Exit now, after updating the cache. + if decision_root != shuffling_decision_root { + warn!( + self.log, + "Head changed during proposer preparation"; + ); + return Ok(None); + } + + proposer as u64 + }; + + // Get the `prev_randao` value. + let prev_randao = if proposer_head == parent_block_root { + cached_head.parent_random() + } else { + cached_head.head_random() + }?; + + Ok(Some(PrePayloadAttributes { + proposer_index, + prev_randao, + })) + } + + /// Determine whether a fork choice update to the execution layer should be overridden. + /// + /// This is *only* necessary when proposer re-orgs are enabled, because we have to prevent the + /// execution layer from enshrining the block we want to re-org as the head. + /// + /// This function uses heuristics that align quite closely but not exactly with the re-org + /// conditions set out in `get_state_for_re_org` and `get_proposer_head`. The differences are + /// documented below. + fn overridden_forkchoice_update_params( + &self, + canonical_forkchoice_params: ForkchoiceUpdateParameters, + ) -> Result { + self.overridden_forkchoice_update_params_or_failure_reason(&canonical_forkchoice_params) + .or_else(|e| match e { + ProposerHeadError::DoNotReOrg(reason) => { + trace!( + self.log, + "Not suppressing fork choice update"; + "reason" => %reason, + ); + Ok(canonical_forkchoice_params) + } + ProposerHeadError::Error(e) => Err(e), + }) + } + + fn overridden_forkchoice_update_params_or_failure_reason( + &self, + canonical_forkchoice_params: &ForkchoiceUpdateParameters, + ) -> Result> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_OVERRIDE_FCU_TIMES); + + // Never override if proposer re-orgs are disabled. + let re_org_threshold = self + .config + .re_org_threshold + .ok_or(DoNotReOrg::ReOrgsDisabled)?; + + let head_block_root = canonical_forkchoice_params.head_root; + + // Perform initial checks and load the relevant info from fork choice. 
+ let info = self + .canonical_head + .fork_choice_read_lock() + .get_preliminary_proposer_head( + head_block_root, + re_org_threshold, + self.config.re_org_max_epochs_since_finalization, + ) + .map_err(|e| e.map_inner_error(Error::ProposerHeadForkChoiceError))?; + + // The slot of our potential re-org block is always 1 greater than the head block because we + // only attempt single-slot re-orgs. + let head_slot = info.head_node.slot; + let re_org_block_slot = head_slot + 1; + let fork_choice_slot = info.current_slot; + + // If a re-orging proposal isn't made by the `max_re_org_slot_delay` then we give up + // and allow the fork choice update for the canonical head through so that we may attest + // correctly. + let current_slot_ok = if head_slot == fork_choice_slot { + true + } else if re_org_block_slot == fork_choice_slot { + self.slot_clock + .start_of(re_org_block_slot) + .and_then(|slot_start| { + let now = self.slot_clock.now_duration()?; + let slot_delay = now.saturating_sub(slot_start); + Some(slot_delay <= max_re_org_slot_delay(self.spec.seconds_per_slot)) + }) + .unwrap_or(false) + } else { + false + }; + if !current_slot_ok { + return Err(DoNotReOrg::HeadDistance.into()); + } + + // Only attempt a re-org if we have a proposer registered for the re-org slot. + let proposing_at_re_org_slot = { + // The proposer shuffling has the same decision root as the next epoch attestation + // shuffling. We know our re-org block is not on the epoch boundary, so it has the + // same proposer shuffling as the head (but not necessarily the parent which may lie + // in the previous epoch). + let shuffling_decision_root = info + .head_node + .next_epoch_shuffling_id + .shuffling_decision_block; + let proposer_index = self + .beacon_proposer_cache + .lock() + .get_slot::(shuffling_decision_root, re_org_block_slot) + .ok_or_else(|| { + debug!( + self.log, + "Fork choice override proposer shuffling miss"; + "slot" => re_org_block_slot, + "decision_root" => ?shuffling_decision_root, + ); + DoNotReOrg::NotProposing + })? + .index as u64; + + self.execution_layer + .as_ref() + .ok_or(ProposerHeadError::Error(Error::ExecutionLayerMissing))? + .has_proposer_preparation_data_blocking(proposer_index) + }; + if !proposing_at_re_org_slot { + return Err(DoNotReOrg::NotProposing.into()); + } + + // If the current slot is already equal to the proposal slot (or we are in the tail end of + // the prior slot), then check the actual weight of the head against the re-org threshold. + let head_weak = if fork_choice_slot == re_org_block_slot { + info.head_node.weight < info.re_org_weight_threshold + } else { + true + }; + if !head_weak { + return Err(DoNotReOrg::HeadNotWeak { + head_weight: info.head_node.weight, + re_org_weight_threshold: info.re_org_weight_threshold, + } + .into()); + } + + // Check that the head block arrived late and is vulnerable to a re-org. This check is only + // a heuristic compared to the proper weight check in `get_state_for_re_org`, the reason + // being that we may have only *just* received the block and not yet processed any + // attestations for it. We also can't dequeue attestations for the block during the + // current slot, which would be necessary for determining its weight. 
+ let head_block_late = + self.block_observed_after_attestation_deadline(head_block_root, head_slot); + if !head_block_late { + return Err(DoNotReOrg::HeadNotLate.into()); + } + + let parent_head_hash = info.parent_node.execution_status.block_hash(); + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: info.parent_node.root, + head_hash: parent_head_hash, + justified_hash: canonical_forkchoice_params.justified_hash, + finalized_hash: canonical_forkchoice_params.finalized_hash, + }; + + debug!( + self.log, + "Fork choice update overridden"; + "canonical_head" => ?head_block_root, + "override" => ?info.parent_node.root, + "slot" => fork_choice_slot, + ); + + Ok(forkchoice_update_params) + } + + /// Check if the block with `block_root` was observed after the attestation deadline of `slot`. + fn block_observed_after_attestation_deadline(&self, block_root: Hash256, slot: Slot) -> bool { + let block_delays = self.block_times_cache.read().get_block_delays( + block_root, + self.slot_clock + .start_of(slot) + .unwrap_or_else(|| Duration::from_secs(0)), + ); + block_delays.observed.map_or(false, |delay| { + delay > self.slot_clock.unagg_attestation_production_delay() + }) + } + /// Produce a block for some `slot` upon the given `state`. /// /// Typically the `self.produce_block()` function should be used, instead of calling this @@ -4085,17 +4512,13 @@ impl BeaconChain { /// The `PayloadAttributes` are used by the EL to give it a look-ahead for preparing an optimal /// set of transactions for a new `ExecutionPayload`. /// - /// This function will result in a call to `forkchoiceUpdated` on the EL if: - /// - /// 1. We're in the tail-end of the slot (as defined by PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR) - /// 2. The head block is one slot (or less) behind the prepare slot (e.g., we're preparing for - /// the next slot and the block at the current slot is already known). + /// This function will result in a call to `forkchoiceUpdated` on the EL if we're in the + /// tail-end of the slot (as defined by `self.config.prepare_payload_lookahead`). pub async fn prepare_beacon_proposer( self: &Arc, current_slot: Slot, ) -> Result<(), Error> { let prepare_slot = current_slot + 1; - let prepare_epoch = prepare_slot.epoch(T::EthSpec::slots_per_epoch()); // There's no need to run the proposer preparation routine before the bellatrix fork. if self.slot_is_prior_to_bellatrix(prepare_slot) { @@ -4113,158 +4536,99 @@ impl BeaconChain { return Ok(()); } - // Atomically read some values from the canonical head, whilst avoiding holding the cached - // head `Arc` any longer than necessary. + // Load the cached head and its forkchoice update parameters. // // Use a blocking task since blocking the core executor on the canonical head read lock can // block the core tokio executor. let chain = self.clone(); - let (head_slot, head_root, head_decision_root, head_random, forkchoice_update_params) = - self.spawn_blocking_handle( + let maybe_prep_data = self + .spawn_blocking_handle( move || { let cached_head = chain.canonical_head.cached_head(); - let head_block_root = cached_head.head_block_root(); - let decision_root = cached_head - .snapshot - .beacon_state - .proposer_shuffling_decision_root(head_block_root)?; - Ok::<_, Error>(( - cached_head.head_slot(), - head_block_root, - decision_root, - cached_head.head_random()?, - cached_head.forkchoice_update_parameters(), - )) + + // Don't bother with proposer prep if the head is more than + // `PREPARE_PROPOSER_HISTORIC_EPOCHS` prior to the current slot. 
+ // + // This prevents the routine from running during sync. + let head_slot = cached_head.head_slot(); + if head_slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS + < current_slot + { + debug!( + chain.log, + "Head too old for proposer prep"; + "head_slot" => head_slot, + "current_slot" => current_slot, + ); + return Ok(None); + } + + let canonical_fcu_params = cached_head.forkchoice_update_parameters(); + let fcu_params = + chain.overridden_forkchoice_update_params(canonical_fcu_params)?; + let pre_payload_attributes = chain.get_pre_payload_attributes( + prepare_slot, + fcu_params.head_root, + &cached_head, + )?; + Ok::<_, Error>(Some((fcu_params, pre_payload_attributes))) }, - "prepare_beacon_proposer_fork_choice_read", + "prepare_beacon_proposer_head_read", ) .await??; - let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); - // Don't bother with proposer prep if the head is more than - // `PREPARE_PROPOSER_HISTORIC_EPOCHS` prior to the current slot. - // - // This prevents the routine from running during sync. - if head_slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS - < current_slot - { - debug!( - self.log, - "Head too old for proposer prep"; - "head_slot" => head_slot, - "current_slot" => current_slot, - ); - return Ok(()); - } - - // Ensure that the shuffling decision root is correct relative to the epoch we wish to - // query. - let shuffling_decision_root = if head_epoch == prepare_epoch { - head_decision_root - } else { - head_root - }; - - // Read the proposer from the proposer cache. - let cached_proposer = self - .beacon_proposer_cache - .lock() - .get_slot::(shuffling_decision_root, prepare_slot); - let proposer = if let Some(proposer) = cached_proposer { - proposer.index - } else { - if head_epoch + 2 < prepare_epoch { - warn!( - self.log, - "Skipping proposer preparation"; - "msg" => "this is a non-critical issue that can happen on unhealthy nodes or \ - networks.", - "prepare_epoch" => prepare_epoch, - "head_epoch" => head_epoch, - ); - - // Don't skip the head forward more than two epochs. This avoids burdening an - // unhealthy node. - // - // Although this node might miss out on preparing for a proposal, they should still - // be able to propose. This will prioritise beacon chain health over efficient - // packing of execution blocks. - return Ok(()); - } - - let (proposers, decision_root, _, fork) = - compute_proposer_duties_from_head(prepare_epoch, self)?; - - let proposer_index = prepare_slot.as_usize() % (T::EthSpec::slots_per_epoch() as usize); - let proposer = *proposers - .get(proposer_index) - .ok_or(BeaconChainError::NoProposerForSlot(prepare_slot))?; - - self.beacon_proposer_cache.lock().insert( - prepare_epoch, - decision_root, - proposers, - fork, - )?; - - // It's possible that the head changes whilst computing these duties. If so, abandon - // this routine since the change of head would have also spawned another instance of - // this routine. - // - // Exit now, after updating the cache. - if decision_root != shuffling_decision_root { - warn!( - self.log, - "Head changed during proposer preparation"; - ); + let (forkchoice_update_params, pre_payload_attributes) = + if let Some((fcu, Some(pre_payload))) = maybe_prep_data { + (fcu, pre_payload) + } else { + // Appropriate log messages have already been logged above and in + // `get_pre_payload_attributes`. 
return Ok(()); - } - - proposer - }; + }; // If the execution layer doesn't have any proposer data for this validator then we assume // it's not connected to this BN and no action is required. + let proposer = pre_payload_attributes.proposer_index; if !execution_layer - .has_proposer_preparation_data(proposer as u64) + .has_proposer_preparation_data(proposer) .await { return Ok(()); } + let head_root = forkchoice_update_params.head_root; let payload_attributes = PayloadAttributes { timestamp: self .slot_clock .start_of(prepare_slot) .ok_or(Error::InvalidSlot(prepare_slot))? .as_secs(), - prev_randao: head_random, - suggested_fee_recipient: execution_layer - .get_suggested_fee_recipient(proposer as u64) - .await, + prev_randao: pre_payload_attributes.prev_randao, + suggested_fee_recipient: execution_layer.get_suggested_fee_recipient(proposer).await, }; debug!( self.log, "Preparing beacon proposer"; "payload_attributes" => ?payload_attributes, - "head_root" => ?head_root, "prepare_slot" => prepare_slot, "validator" => proposer, + "parent_root" => ?head_root, ); let already_known = execution_layer - .insert_proposer(prepare_slot, head_root, proposer as u64, payload_attributes) + .insert_proposer(prepare_slot, head_root, proposer, payload_attributes) .await; + // Only push a log to the user if this is the first time we've seen this proposer for this // slot. if !already_known { info!( self.log, "Prepared beacon proposer"; - "already_known" => already_known, "prepare_slot" => prepare_slot, "validator" => proposer, + "parent_root" => ?head_root, ); } @@ -4286,27 +4650,22 @@ impl BeaconChain { return Ok(()); }; - // If either of the following are true, send a fork-choice update message to the - // EL: - // - // 1. We're in the tail-end of the slot (as defined by - // PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR) - // 2. The head block is one slot (or less) behind the prepare slot (e.g., we're - // preparing for the next slot and the block at the current slot is already - // known). - if till_prepare_slot - <= self.slot_clock.slot_duration() / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR - || head_slot + 1 >= prepare_slot - { + // If we are close enough to the proposal slot, send an fcU, which will have payload + // attributes filled in by the execution layer cache we just primed. + if till_prepare_slot <= self.config.prepare_payload_lookahead { debug!( self.log, - "Pushing update to prepare proposer"; + "Sending forkchoiceUpdate for proposer prep"; "till_prepare_slot" => ?till_prepare_slot, "prepare_slot" => prepare_slot ); - self.update_execution_engine_forkchoice(current_slot, forkchoice_update_params) - .await?; + self.update_execution_engine_forkchoice( + current_slot, + forkchoice_update_params, + OverrideForkchoiceUpdate::AlreadyApplied, + ) + .await?; } Ok(()) @@ -4315,7 +4674,8 @@ impl BeaconChain { pub async fn update_execution_engine_forkchoice( self: &Arc, current_slot: Slot, - params: ForkchoiceUpdateParameters, + input_params: ForkchoiceUpdateParameters, + override_forkchoice_update: OverrideForkchoiceUpdate, ) -> Result<(), Error> { let next_slot = current_slot + 1; @@ -4337,6 +4697,19 @@ impl BeaconChain { .as_ref() .ok_or(Error::ExecutionLayerMissing)?; + // Determine whether to override the forkchoiceUpdated message if we want to re-org + // the current head at the next slot. 
+ let params = if override_forkchoice_update == OverrideForkchoiceUpdate::Yes { + let chain = self.clone(); + self.spawn_blocking_handle( + move || chain.overridden_forkchoice_update_params(input_params), + "update_execution_engine_forkchoice_override", + ) + .await?? + } else { + input_params + }; + // Take the global lock for updating the execution engine fork choice. // // Whilst holding this lock we must: diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 5cba5f3c3bb..0b789b8b615 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -7,6 +7,8 @@ use crate::{metrics, BeaconSnapshot}; use derivative::Derivative; use fork_choice::ForkChoiceStore; +use proto_array::JustifiedBalances; +use safe_arith::ArithError; use ssz_derive::{Decode, Encode}; use std::collections::BTreeSet; use std::marker::PhantomData; @@ -31,6 +33,7 @@ pub enum Error { MissingState(Hash256), InvalidPersistedBytes(ssz::DecodeError), BeaconStateError(BeaconStateError), + Arith(ArithError), } impl From for Error { @@ -39,27 +42,15 @@ impl From for Error { } } +impl From for Error { + fn from(e: ArithError) -> Self { + Error::Arith(e) + } +} + /// The number of validator balance sets that are cached within `BalancesCache`. const MAX_BALANCE_CACHE_SIZE: usize = 4; -/// Returns the effective balances for every validator in the given `state`. -/// -/// Any validator who is not active in the epoch of the given `state` is assigned a balance of -/// zero. -pub fn get_effective_balances(state: &BeaconState) -> Vec { - state - .validators() - .iter() - .map(|validator| { - if validator.is_active_at(state.current_epoch()) { - validator.effective_balance - } else { - 0 - } - }) - .collect() -} - #[superstruct( variants(V8), variant_attributes(derive(PartialEq, Clone, Debug, Encode, Decode)), @@ -113,7 +104,7 @@ impl BalancesCache { let item = CacheItem { block_root: epoch_boundary_root, epoch, - balances: get_effective_balances(state), + balances: JustifiedBalances::from_justified_state(state)?.effective_balances, }; if self.items.len() == MAX_BALANCE_CACHE_SIZE { @@ -152,7 +143,7 @@ pub struct BeaconForkChoiceStore, Cold: ItemStore< time: Slot, finalized_checkpoint: Checkpoint, justified_checkpoint: Checkpoint, - justified_balances: Vec, + justified_balances: JustifiedBalances, best_justified_checkpoint: Checkpoint, unrealized_justified_checkpoint: Checkpoint, unrealized_finalized_checkpoint: Checkpoint, @@ -181,7 +172,7 @@ where pub fn get_forkchoice_store( store: Arc>, anchor: &BeaconSnapshot, - ) -> Self { + ) -> Result { let anchor_state = &anchor.beacon_state; let mut anchor_block_header = anchor_state.latest_block_header().clone(); if anchor_block_header.state_root == Hash256::zero() { @@ -194,13 +185,14 @@ where root: anchor_root, }; let finalized_checkpoint = justified_checkpoint; + let justified_balances = JustifiedBalances::from_justified_state(anchor_state)?; - Self { + Ok(Self { store, balances_cache: <_>::default(), time: anchor_state.slot(), justified_checkpoint, - justified_balances: anchor_state.balances().clone().into(), + justified_balances, finalized_checkpoint, best_justified_checkpoint: justified_checkpoint, unrealized_justified_checkpoint: justified_checkpoint, @@ -208,7 +200,7 @@ where proposer_boost_root: Hash256::zero(), equivocating_indices: BTreeSet::new(), _phantom: PhantomData, - } + }) } /// Save the current state of `Self` to a 
`PersistedForkChoiceStore` which can be stored to the @@ -219,7 +211,7 @@ where time: self.time, finalized_checkpoint: self.finalized_checkpoint, justified_checkpoint: self.justified_checkpoint, - justified_balances: self.justified_balances.clone(), + justified_balances: self.justified_balances.effective_balances.clone(), best_justified_checkpoint: self.best_justified_checkpoint, unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, @@ -233,13 +225,15 @@ where persisted: PersistedForkChoiceStore, store: Arc>, ) -> Result { + let justified_balances = + JustifiedBalances::from_effective_balances(persisted.justified_balances)?; Ok(Self { store, balances_cache: persisted.balances_cache, time: persisted.time, finalized_checkpoint: persisted.finalized_checkpoint, justified_checkpoint: persisted.justified_checkpoint, - justified_balances: persisted.justified_balances, + justified_balances, best_justified_checkpoint: persisted.best_justified_checkpoint, unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint, @@ -279,7 +273,7 @@ where &self.justified_checkpoint } - fn justified_balances(&self) -> &[u64] { + fn justified_balances(&self) -> &JustifiedBalances { &self.justified_balances } @@ -314,8 +308,9 @@ where self.justified_checkpoint.root, self.justified_checkpoint.epoch, ) { + // NOTE: could avoid this re-calculation by introducing a `PersistedCacheItem`. metrics::inc_counter(&metrics::BALANCES_CACHE_HITS); - self.justified_balances = balances; + self.justified_balances = JustifiedBalances::from_effective_balances(balances)?; } else { metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES); let justified_block = self @@ -332,7 +327,7 @@ where .map_err(Error::FailedToReadState)? .ok_or_else(|| Error::MissingState(justified_block.state_root()))?; - self.justified_balances = get_effective_balances(&state); + self.justified_balances = JustifiedBalances::from_justified_state(&state)?; } Ok(()) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index f5bd85dec28..eff50701d7a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -22,6 +22,7 @@ use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; +use proto_array::ReOrgThreshold; use slasher::Slasher; use slog::{crit, error, info, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; @@ -31,8 +32,8 @@ use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use types::{ - BeaconBlock, BeaconState, ChainSpec, Checkpoint, EthSpec, Graffiti, Hash256, PublicKeyBytes, - Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256, + PublicKeyBytes, Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -159,6 +160,21 @@ where self } + /// Sets the proposer re-org threshold. + pub fn proposer_re_org_threshold(mut self, threshold: Option) -> Self { + self.chain_config.re_org_threshold = threshold; + self + } + + /// Sets the proposer re-org max epochs since finalization. 
+ pub fn proposer_re_org_max_epochs_since_finalization( + mut self, + epochs_since_finalization: Epoch, + ) -> Self { + self.chain_config.re_org_max_epochs_since_finalization = epochs_since_finalization; + self + } + /// Sets the store (database). /// /// Should generally be called early in the build chain. @@ -358,7 +374,8 @@ where let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?; self = updated_builder; - let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis); + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis) + .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; let current_slot = None; let fork_choice = ForkChoice::from_anchor( @@ -476,7 +493,8 @@ where beacon_state: weak_subj_state, }; - let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot); + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot) + .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; let current_slot = Some(snapshot.beacon_block.slot()); let fork_choice = ForkChoice::from_anchor( diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index c9bd6db0e67..dd64e02edf7 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -34,7 +34,8 @@ use crate::persisted_fork_choice::PersistedForkChoice; use crate::{ beacon_chain::{ - BeaconForkChoice, BeaconStore, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY, + BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate, + BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY, }, block_times_cache::BlockTimesCache, events::ServerSentEventHandler, @@ -114,6 +115,11 @@ impl CachedHead { self.snapshot.beacon_block_root } + /// Returns the root of the parent of the head block. + pub fn parent_block_root(&self) -> Hash256 { + self.snapshot.beacon_block.parent_root() + } + /// Returns root of the `BeaconState` at the head of the beacon chain. /// /// ## Note @@ -146,6 +152,21 @@ impl CachedHead { Ok(root) } + /// Returns the randao mix for the parent of the block at the head of the chain. + /// + /// This is useful for re-orging the current head. The parent's RANDAO value is read from + /// the head's execution payload because it is unavailable in the beacon state's RANDAO mixes + /// array after being overwritten by the head block's RANDAO mix. + /// + /// This will error if the head block is not execution-enabled (post Bellatrix). + pub fn parent_random(&self) -> Result { + self.snapshot + .beacon_block + .message() + .execution_payload() + .map(|payload| payload.prev_randao()) + } + /// Returns the active validator count for the current epoch of the head state. 
/// /// Should only return `None` if the caches have not been built on the head state (this should @@ -765,6 +786,7 @@ impl BeaconChain { new_cached_head: &CachedHead, new_head_proto_block: ProtoBlock, ) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_AFTER_NEW_HEAD_TIMES); let old_snapshot = &old_cached_head.snapshot; let new_snapshot = &new_cached_head.snapshot; let new_head_is_optimistic = new_head_proto_block @@ -902,6 +924,7 @@ impl BeaconChain { new_view: ForkChoiceView, finalized_proto_block: ProtoBlock, ) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_AFTER_FINALIZATION_TIMES); let new_snapshot = &new_cached_head.snapshot; let finalized_block_is_optimistic = finalized_proto_block .execution_status @@ -1124,7 +1147,11 @@ fn spawn_execution_layer_updates( } if let Err(e) = chain - .update_execution_engine_forkchoice(current_slot, forkchoice_update_params) + .update_execution_engine_forkchoice( + current_slot, + forkchoice_update_params, + OverrideForkchoiceUpdate::Yes, + ) .await { crit!( diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 286cc17a963..c4c6966732d 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,9 +1,18 @@ -pub use proto_array::CountUnrealizedFull; +pub use proto_array::{CountUnrealizedFull, ReOrgThreshold}; use serde_derive::{Deserialize, Serialize}; -use types::Checkpoint; +use std::time::Duration; +use types::{Checkpoint, Epoch}; +pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); +pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2); pub const DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT: u64 = 250; +/// Default fraction of a slot lookahead for payload preparation (12/3 = 4 seconds on mainnet). +pub const DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR: u32 = 3; + +/// Fraction of a slot lookahead for fork choice in the state advance timer (500ms on mainnet). +pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24; + #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] pub struct ChainConfig { /// Maximum number of slots to skip when importing a consensus message (e.g., block, @@ -21,6 +30,10 @@ pub struct ChainConfig { pub enable_lock_timeouts: bool, /// The max size of a message that can be sent over the network. pub max_network_size: usize, + /// Maximum percentage of committee weight at which to attempt re-orging the canonical head. + pub re_org_threshold: Option, + /// Maximum number of epochs since finalization for attempting a proposer re-org. + pub re_org_max_epochs_since_finalization: Epoch, /// Number of milliseconds to wait for fork choice before proposing a block. /// /// If set to 0 then block proposal will not wait for fork choice at all. @@ -47,6 +60,11 @@ pub struct ChainConfig { pub count_unrealized_full: CountUnrealizedFull, /// Optionally set timeout for calls to checkpoint sync endpoint. pub checkpoint_sync_url_timeout: u64, + /// The offset before the start of a proposal slot at which payload attributes should be sent. + /// + /// Low values are useful for execution engines which don't improve their payload after the + /// first call, and high values are useful for ensuring the EL is given ample notice. 
+ pub prepare_payload_lookahead: Duration, } impl Default for ChainConfig { @@ -57,6 +75,8 @@ impl Default for ChainConfig { reconstruct_historic_states: false, enable_lock_timeouts: true, max_network_size: 10 * 1_048_576, // 10M + re_org_threshold: Some(DEFAULT_RE_ORG_THRESHOLD), + re_org_max_epochs_since_finalization: DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, // Builder fallback configs that are set in `clap` will override these. builder_fallback_skips: 3, @@ -68,6 +88,7 @@ impl Default for ChainConfig { paranoid_block_proposal: false, count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, + prepare_payload_lookahead: Duration::from_secs(4), } } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 704cba489d2..17f58b223f4 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -204,6 +204,7 @@ pub enum BeaconChainError { MissingPersistedForkChoice, CommitteePromiseFailed(oneshot_broadcast::Error), MaxCommitteePromises(usize), + ProposerHeadForkChoiceError(fork_choice::Error), } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -234,6 +235,7 @@ pub enum BlockProductionError { UnableToProduceAtSlot(Slot), SlotProcessingError(SlotProcessingError), BlockProcessingError(BlockProcessingError), + ForkChoiceError(ForkChoiceError), Eth1ChainError(Eth1ChainError), BeaconStateError(BeaconStateError), StateAdvanceError(StateAdvanceError), @@ -252,7 +254,6 @@ pub enum BlockProductionError { FailedToReadFinalizedBlock(store::Error), MissingFinalizedBlock(Hash256), BlockTooLarge(usize), - ForkChoiceError(BeaconChainError), ShuttingDown, MissingSyncAggregate, MissingExecutionPayload, @@ -265,3 +266,4 @@ easy_from_to!(BeaconStateError, BlockProductionError); easy_from_to!(SlotProcessingError, BlockProductionError); easy_from_to!(Eth1ChainError, BlockProductionError); easy_from_to!(StateAdvanceError, BlockProductionError); +easy_from_to!(ForkChoiceError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 3d48dfd8f60..6d5b5ddc4ae 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -147,7 +147,8 @@ pub fn reset_fork_choice_to_finalization, Cold: It beacon_state: finalized_state, }; - let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), &finalized_snapshot); + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), &finalized_snapshot) + .map_err(|e| format!("Unable to reset fork choice store for revert: {e:?}"))?; let mut fork_choice = ForkChoice::from_anchor( fc_store, diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index a55532ac12f..ae1c5e4b766 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -48,8 +48,8 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - CountUnrealized, ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + CountUnrealized, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, + StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, 
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index c681570b110..b52c4258fe7 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -77,6 +77,11 @@ lazy_static! { "beacon_block_processing_attestation_observation_seconds", "Time spent hashing and remembering all the attestations in the block" ); + pub static ref BLOCK_PROCESSING_FORK_CHOICE: Result = try_create_histogram_with_buckets( + "beacon_block_processing_fork_choice_seconds", + "Time spent running fork choice's `get_head` during block import", + exponential_buckets(1e-3, 2.0, 8) + ); pub static ref BLOCK_SYNC_AGGREGATE_SET_BITS: Result = try_create_int_gauge( "block_sync_aggregate_set_bits", "The number of true bits in the last sync aggregate in a block" @@ -99,6 +104,11 @@ lazy_static! { "beacon_block_production_fork_choice_seconds", "Time taken to run fork choice before block production" ); + pub static ref BLOCK_PRODUCTION_GET_PROPOSER_HEAD_TIMES: Result = try_create_histogram_with_buckets( + "beacon_block_production_get_proposer_head_times", + "Time taken for fork choice to compute the proposer head before block production", + exponential_buckets(1e-3, 2.0, 8) + ); pub static ref BLOCK_PRODUCTION_STATE_LOAD_TIMES: Result = try_create_histogram( "beacon_block_production_state_load_seconds", "Time taken to load the base state for block production" @@ -322,10 +332,26 @@ lazy_static! { "beacon_reorgs_total", "Count of occasions fork choice has switched to a different chain" ); - pub static ref FORK_CHOICE_TIMES: Result = - try_create_histogram("beacon_fork_choice_seconds", "Full runtime of fork choice"); - pub static ref FORK_CHOICE_FIND_HEAD_TIMES: Result = - try_create_histogram("beacon_fork_choice_find_head_seconds", "Full runtime of fork choice find_head function"); + pub static ref FORK_CHOICE_TIMES: Result = try_create_histogram_with_buckets( + "beacon_fork_choice_seconds", + "Full runtime of fork choice", + linear_buckets(10e-3, 20e-3, 10) + ); + pub static ref FORK_CHOICE_OVERRIDE_FCU_TIMES: Result = try_create_histogram_with_buckets( + "beacon_fork_choice_override_fcu_seconds", + "Time taken to compute the optional forkchoiceUpdated override", + exponential_buckets(1e-3, 2.0, 8) + ); + pub static ref FORK_CHOICE_AFTER_NEW_HEAD_TIMES: Result = try_create_histogram_with_buckets( + "beacon_fork_choice_after_new_head_seconds", + "Time taken to run `after_new_head`", + exponential_buckets(1e-3, 2.0, 10) + ); + pub static ref FORK_CHOICE_AFTER_FINALIZATION_TIMES: Result = try_create_histogram_with_buckets( + "beacon_fork_choice_after_finalization_seconds", + "Time taken to run `after_finalization`", + exponential_buckets(1e-3, 2.0, 10) + ); pub static ref FORK_CHOICE_PROCESS_BLOCK_TIMES: Result = try_create_histogram( "beacon_fork_choice_process_block_seconds", "Time taken to add a block and all attestations to fork choice" diff --git a/beacon_node/beacon_chain/src/proposer_prep_service.rs b/beacon_node/beacon_chain/src/proposer_prep_service.rs index 9cd177b3409..140a9659fce 100644 --- a/beacon_node/beacon_chain/src/proposer_prep_service.rs +++ b/beacon_node/beacon_chain/src/proposer_prep_service.rs @@ -5,13 +5,9 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::time::sleep; -/// At 12s slot times, the means that the payload preparation routine will run 4s before the start -/// of each slot (`12 / 
3 = 4`). -pub const PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR: u32 = 3; - /// Spawns a routine which ensures the EL is provided advance notice of any block producers. /// -/// This routine will run once per slot, at `slot_duration / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR` +/// This routine will run once per slot, at `chain.prepare_payload_lookahead()` /// before the start of each slot. /// /// The service will not be started if there is no `execution_layer` on the `chain`. @@ -38,8 +34,8 @@ async fn proposer_prep_service( loop { match chain.slot_clock.duration_to_next_slot() { Some(duration) => { - let additional_delay = slot_duration - - chain.slot_clock.slot_duration() / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR; + let additional_delay = + slot_duration.saturating_sub(chain.config.prepare_payload_lookahead); sleep(duration + additional_delay).await; debug!( @@ -65,14 +61,11 @@ async fn proposer_prep_service( }, "proposer_prep_update", ); - - continue; } None => { error!(chain.log, "Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. sleep(slot_duration).await; - continue; } }; } diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 40b73451cb0..d2846c08569 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -13,7 +13,10 @@ pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4; /// The minimum block delay to clone the state in the cache instead of removing it. /// This helps keep block processing fast during re-orgs from late blocks. -const MINIMUM_BLOCK_DELAY_FOR_CLONE: Duration = Duration::from_secs(6); +fn minimum_block_delay_for_clone(seconds_per_slot: u64) -> Duration { + // If the block arrived at the attestation deadline or later, it might get re-orged. + Duration::from_secs(seconds_per_slot) / 3 +} /// This snapshot is to be used for verifying a child of `self.beacon_block`. #[derive(Debug)] @@ -256,7 +259,7 @@ impl SnapshotCache { if let Some(cache) = self.snapshots.get(i) { // Avoid cloning the block during sync (when the `block_delay` is `None`). if let Some(delay) = block_delay { - if delay >= MINIMUM_BLOCK_DELAY_FOR_CLONE + if delay >= minimum_block_delay_for_clone(spec.seconds_per_slot) && delay <= Duration::from_secs(spec.seconds_per_slot) * 4 || block_slot > cache.beacon_block.slot() + 1 { diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 72fc973e546..f73223fa540 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -16,6 +16,7 @@ use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::{ beacon_chain::{ATTESTATION_CACHE_LOCK_TIMEOUT, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT}, + chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, snapshot_cache::StateAdvance, BeaconChain, BeaconChainError, BeaconChainTypes, }; @@ -133,7 +134,7 @@ async fn state_advance_timer( // Run fork choice 23/24s of the way through the slot (11.5s on mainnet). // We need to run after the state advance, so use the same condition as above. 
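Before the substitution below, a numeric sanity check of both lookahead factors on mainnet's 12-second slots (a standalone sketch with the constants inlined; names mirror the diff but nothing here is the actual implementation):

```rust
use std::time::Duration;

fn main() {
    let slot_duration = Duration::from_secs(12); // mainnet

    // FORK_CHOICE_LOOKAHEAD_FACTOR = 24: fork choice runs 500ms before the
    // next slot, i.e. 23/24 of the way through the current one (11.5s in).
    let fork_choice_offset = slot_duration / 24;
    assert_eq!(fork_choice_offset, Duration::from_millis(500));

    // DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR = 3: payload attributes are
    // sent 4s before the proposal slot starts (12 / 3 = 4).
    let prepare_payload_lookahead = slot_duration / 3;
    assert_eq!(prepare_payload_lookahead, Duration::from_secs(4));
}
```
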
- let fork_choice_offset = slot_duration / 24; + let fork_choice_offset = slot_duration / FORK_CHOICE_LOOKAHEAD_FACTOR; let fork_choice_instant = if duration_to_next_slot > state_advance_offset { Instant::now() + duration_to_next_slot - fork_choice_offset } else { @@ -224,8 +225,20 @@ async fn state_advance_timer( return; } + // Re-compute the head, dequeuing attestations for the current slot early. beacon_chain.recompute_head_at_slot(next_slot).await; + // Prepare proposers so that the node can send payload attributes in the case where + // it decides to abandon a proposer boost re-org. + if let Err(e) = beacon_chain.prepare_beacon_proposer(current_slot).await { + warn!( + log, + "Unable to prepare proposer with lookahead"; + "error" => ?e, + "slot" => next_slot, + ); + } + // Use a blocking task to avoid blocking the core executor whilst waiting for locks // in `ForkChoiceSignalTx`. beacon_chain.task_executor.clone().spawn_blocking( diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index b88966b41a9..d6e8787f4e0 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -32,7 +32,7 @@ use rand::SeedableRng; use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slog::Logger; -use slot_clock::TestingSlotClock; +use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::{ state_advance::{complete_state_advance, partial_state_advance}, @@ -319,6 +319,12 @@ where self } + pub fn logger(mut self, log: Logger) -> Self { + self.log = log.clone(); + self.runtime.set_logger(log); + self + } + /// This mutator will be run before the `store_mutator`. pub fn initial_mutator(mut self, mutator: BoxedMutator) -> Self { assert!( @@ -524,10 +530,9 @@ pub struct BeaconChainHarness { pub rng: Mutex, } -pub type HarnessAttestations = Vec<( - Vec<(Attestation, SubnetId)>, - Option>, -)>; +pub type CommitteeAttestations = Vec<(Attestation, SubnetId)>; +pub type HarnessAttestations = + Vec<(CommitteeAttestations, Option>)>; pub type HarnessSyncContributions = Vec<( Vec<(SyncCommitteeMessage, usize)>, @@ -778,6 +783,21 @@ where sk.sign(message) } + /// Sign a beacon block using the proposer's key. + pub fn sign_beacon_block( + &self, + block: BeaconBlock, + state: &BeaconState, + ) -> SignedBeaconBlock { + let proposer_index = block.proposer_index() as usize; + block.sign( + &self.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ) + } + /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to /// `beacon_block_root`. The provided `state` should match the `block.state_root` for the /// `block` identified by `beacon_block_root`. 
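The `_with_limit` variants added in the next hunk thread an attester list through a parallel committee map; the core pattern, distilled (a sketch with hypothetical names, plain `usize` stand-ins for attestations, and `std::sync::Mutex` in place of `parking_lot`):

```rust
use std::sync::Mutex;

/// Walk candidate validators, keep at most `limit` of them, and return both
/// the produced items and the indices that actually participated.
fn take_attesters_with_limit(
    candidates: &[usize],
    limit: Option<usize>,
) -> (Vec<usize>, Vec<usize>) {
    // A mutex mirrors the harness, where committees are mapped in parallel.
    let attesters = Mutex::new(Vec::new());
    let items: Vec<usize> = candidates
        .iter()
        .filter_map(|&validator_index| {
            let mut attesters = attesters.lock().unwrap();
            if let Some(limit) = limit {
                if attesters.len() >= limit {
                    return None; // limit reached: skip remaining candidates
                }
            }
            attesters.push(validator_index);
            Some(validator_index) // the real code produces an `Attestation` here
        })
        .collect();
    let attesters = attesters.into_inner().unwrap();
    if let Some(limit) = limit {
        // The harness asserts the limit was met exactly, so tests fail loudly
        // rather than silently under-attesting.
        assert_eq!(limit, attesters.len(), "failed to generate `limit` items");
    }
    (items, attesters)
}

fn main() {
    let (items, attesters) = take_attesters_with_limit(&[5, 7, 9, 11], Some(2));
    assert_eq!(items, vec![5, 7]);
    assert_eq!(attesters, vec![5, 7]);
}
```
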
@@ -851,13 +871,35 @@ where state_root: Hash256, head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, - ) -> Vec, SubnetId)>> { + ) -> Vec> { + self.make_unaggregated_attestations_with_limit( + attesting_validators, + state, + state_root, + head_block_root, + attestation_slot, + None, + ) + .0 + } + + pub fn make_unaggregated_attestations_with_limit( + &self, + attesting_validators: &[usize], + state: &BeaconState, + state_root: Hash256, + head_block_root: SignedBeaconBlockHash, + attestation_slot: Slot, + limit: Option, + ) -> (Vec>, Vec) { let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap(); let fork = self .spec .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch())); - state + let attesters = Mutex::new(vec![]); + + let attestations = state .get_beacon_committees_at_slot(attestation_slot) .expect("should get committees") .iter() @@ -869,6 +911,15 @@ where if !attesting_validators.contains(validator_index) { return None; } + + let mut attesters = attesters.lock(); + if let Some(limit) = limit { + if attesters.len() >= limit { + return None; + } + } + attesters.push(*validator_index); + let mut attestation = self .produce_unaggregated_attestation_for_block( attestation_slot, @@ -909,9 +960,19 @@ where Some((attestation, subnet_id)) }) - .collect() + .collect::>() }) - .collect() + .collect::>(); + + let attesters = attesters.into_inner(); + if let Some(limit) = limit { + assert_eq!( + limit, + attesters.len(), + "failed to generate `limit` attestations" + ); + } + (attestations, attesters) } /// A list of sync messages for the given state. @@ -1004,13 +1065,38 @@ where block_hash: SignedBeaconBlockHash, slot: Slot, ) -> HarnessAttestations { - let unaggregated_attestations = self.make_unaggregated_attestations( + self.make_attestations_with_limit( attesting_validators, state, state_root, block_hash, slot, - ); + None, + ) + .0 + } + + /// Produce exactly `limit` attestations. + /// + /// Return attestations and vec of validator indices that attested. + pub fn make_attestations_with_limit( + &self, + attesting_validators: &[usize], + state: &BeaconState, + state_root: Hash256, + block_hash: SignedBeaconBlockHash, + slot: Slot, + limit: Option, + ) -> (HarnessAttestations, Vec) { + let (unaggregated_attestations, attesters) = self + .make_unaggregated_attestations_with_limit( + attesting_validators, + state, + state_root, + block_hash, + slot, + limit, + ); let fork = self.spec.fork_at_epoch(slot.epoch(E::slots_per_epoch())); let aggregated_attestations: Vec>> = @@ -1029,7 +1115,7 @@ where .committee .iter() .find(|&validator_index| { - if !attesting_validators.contains(validator_index) { + if !attesters.contains(validator_index) { return false; } @@ -1080,10 +1166,13 @@ where }) .collect(); - unaggregated_attestations - .into_iter() - .zip(aggregated_attestations) - .collect() + ( + unaggregated_attestations + .into_iter() + .zip(aggregated_attestations) + .collect(), + attesters, + ) } pub fn make_sync_contributions( @@ -1736,6 +1825,12 @@ where self.chain.slot_clock.advance_slot(); } + /// Advance the clock to `lookahead` before the start of `slot`. + pub fn advance_to_slot_lookahead(&self, slot: Slot, lookahead: Duration) { + let time = self.chain.slot_clock.start_of(slot).unwrap() - lookahead; + self.chain.slot_clock.set_current_time(time); + } + /// Deprecated: Use make_block() instead /// /// Returns a newly created block, signed by the proposer for the given slot. 
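For orientation, the `advance_to_slot_lookahead` helper added above is plain slot arithmetic; a self-contained sketch with a stand-in clock (all names and types here are hypothetical, not the real `SlotClock` API):

```rust
use std::time::Duration;

/// Stand-in slot clock: 12-second slots starting at `genesis`.
struct Clock {
    genesis: Duration,
    slot_duration: Duration,
    now: Duration,
}

impl Clock {
    fn start_of(&self, slot: u64) -> Duration {
        self.genesis + self.slot_duration * slot as u32
    }

    /// Equivalent of `advance_to_slot_lookahead`: park the clock `lookahead`
    /// before `slot` begins, e.g. to mimic the proposer-prep timer firing.
    fn advance_to_slot_lookahead(&mut self, slot: u64, lookahead: Duration) {
        self.now = self.start_of(slot) - lookahead;
    }
}

fn main() {
    let mut clock = Clock {
        genesis: Duration::ZERO,
        slot_duration: Duration::from_secs(12),
        now: Duration::ZERO,
    };
    // A 4s lookahead matches the default `prepare_payload_lookahead` (12 / 3).
    clock.advance_to_slot_lookahead(10, Duration::from_secs(4));
    assert_eq!(clock.now, Duration::from_secs(116)); // 10 * 12 - 4
}
```
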
diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index 19e8902a3e8..c8c47c99041 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -53,6 +53,7 @@ async fn merge_with_terminal_block_hash_override() { let harness = BeaconChainHarness::builder(E::default()) .spec(spec) + .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() @@ -109,6 +110,7 @@ async fn base_altair_merge_with_terminal_block_after_fork() { let harness = BeaconChainHarness::builder(E::default()) .spec(spec) + .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index d77cc19678d..0b9eaaee0f0 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -7,8 +7,9 @@ use beacon_chain::otb_verification_service::{ use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, StateSkipConfig, - WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, + OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ @@ -19,6 +20,7 @@ use execution_layer::{ use fork_choice::{ CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, }; +use logging::test_logger; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; use std::collections::HashMap; @@ -59,6 +61,7 @@ impl InvalidPayloadRig { let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec) + .logger(test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .mock_execution_layer() .fresh_ephemeral_store() @@ -383,7 +386,7 @@ impl InvalidPayloadRig { .fork_choice_write_lock() .get_head(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - Err(ForkChoiceError::ProtoArrayError(e)) if e.contains(s) => (), + Err(ForkChoiceError::ProtoArrayStringError(e)) if e.contains(s) => (), other => panic!("expected {} error, got {:?}", s, other), }; } @@ -978,6 +981,10 @@ async fn payload_preparation() { ) .await; + rig.harness.advance_to_slot_lookahead( + next_slot, + rig.harness.chain.config.prepare_payload_lookahead, + ); rig.harness .chain .prepare_beacon_proposer(rig.harness.chain.slot().unwrap()) @@ -1054,7 +1061,7 @@ async fn invalid_parent() { &rig.harness.chain.spec, CountUnrealized::True, ), - Err(ForkChoiceError::ProtoArrayError(message)) + Err(ForkChoiceError::ProtoArrayStringError(message)) if message.contains(&format!( "{:?}", ProtoArrayError::ParentExecutionStatusIsInvalid { @@ -1121,7 +1128,11 @@ async fn payload_preparation_before_transition_block() { .get_forkchoice_update_parameters(); rig.harness .chain - .update_execution_engine_forkchoice(current_slot, forkchoice_update_params) + .update_execution_engine_forkchoice( + current_slot, + forkchoice_update_params, + OverrideForkchoiceUpdate::Yes, + ) .await .unwrap(); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 
75b865407e2..f3e937b2e5f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -769,7 +769,11 @@ where runtime_context.executor.spawn( async move { let result = inner_chain - .update_execution_engine_forkchoice(current_slot, params) + .update_execution_engine_forkchoice( + current_slot, + params, + Default::default(), + ) .await; // No need to exit early if setting the head fails. It will be set again if/when the diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 2b0c3a4c983..560569c92f2 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -29,7 +29,7 @@ pub struct JsonResponseBody { pub id: serde_json::Value, } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(transparent)] pub struct TransparentJsonPayloadId(#[serde(with = "eth2_serde_utils::bytes_8_hex")] pub PayloadId); @@ -227,7 +227,7 @@ impl From> for ExecutionPayload { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonPayloadAttributesV1 { #[serde(with = "eth2_serde_utils::u64_hex_be")] @@ -270,7 +270,7 @@ impl From for PayloadAttributes { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonForkChoiceStateV1 { pub head_block_hash: ExecutionBlockHash, @@ -323,7 +323,7 @@ pub enum JsonPayloadStatusV1Status { InvalidBlockHash, } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonPayloadStatusV1 { pub status: JsonPayloadStatusV1Status, @@ -388,7 +388,7 @@ impl From for PayloadStatusV1 { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonForkchoiceUpdatedV1Response { pub payload_status: JsonPayloadStatusV1, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index dfce9745774..ec1415f80bb 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -514,6 +514,16 @@ impl ExecutionLayer { .contains_key(&proposer_index) } + /// Check if a proposer is registered as a local validator, *from a synchronous context*. + /// + /// This method MUST NOT be called from an async task. 
+ pub fn has_proposer_preparation_data_blocking(&self, proposer_index: u64) -> bool { + self.inner + .proposer_preparation_data + .blocking_lock() + .contains_key(&proposer_index) + } + /// Returns the fee-recipient address that should be used to build a block pub async fn get_suggested_fee_recipient(&self, proposer_index: u64) -> Address { if let Some(preparation_data_entry) = @@ -1141,12 +1151,14 @@ impl ExecutionLayer { &[metrics::FORKCHOICE_UPDATED], ); - trace!( + debug!( self.log(), "Issuing engine_forkchoiceUpdated"; "finalized_block_hash" => ?finalized_block_hash, "justified_block_hash" => ?justified_block_hash, "head_block_hash" => ?head_block_hash, + "head_block_root" => ?head_block_root, + "current_slot" => current_slot, ); let next_slot = current_slot + 1; @@ -1762,6 +1774,20 @@ mod test { .await; } + #[tokio::test] + async fn test_forked_terminal_block() { + let runtime = TestRuntime::default(); + let (mock, block_hash) = MockExecutionLayer::default_params(runtime.task_executor.clone()) + .move_to_terminal_block() + .produce_forked_pow_block(); + assert!(mock + .el + .is_valid_terminal_pow_block_hash(block_hash, &mock.spec) + .await + .unwrap() + .unwrap()); + } + #[tokio::test] async fn finds_valid_terminal_block_hash() { let runtime = TestRuntime::default(); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 3620a02dfbb..22dcb400708 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -105,13 +105,15 @@ pub struct PoWBlock { pub timestamp: u64, } -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ExecutionBlockGenerator { /* * Common database */ + head_block: Option>, + finalized_block_hash: Option, blocks: HashMap>, - block_hashes: HashMap, + block_hashes: HashMap>, /* * PoW block parameters */ @@ -133,6 +135,8 @@ impl ExecutionBlockGenerator { terminal_block_hash: ExecutionBlockHash, ) -> Self { let mut gen = Self { + head_block: <_>::default(), + finalized_block_hash: <_>::default(), blocks: <_>::default(), block_hashes: <_>::default(), terminal_total_difficulty, @@ -149,13 +153,7 @@ impl ExecutionBlockGenerator { } pub fn latest_block(&self) -> Option> { - let hash = *self - .block_hashes - .iter() - .max_by_key(|(number, _)| *number) - .map(|(_, hash)| hash)?; - - self.block_by_hash(hash) + self.head_block.clone() } pub fn latest_execution_block(&self) -> Option { @@ -164,8 +162,18 @@ impl ExecutionBlockGenerator { } pub fn block_by_number(&self, number: u64) -> Option> { - let hash = *self.block_hashes.get(&number)?; - self.block_by_hash(hash) + // Get the latest canonical head block + let mut latest_block = self.latest_block()?; + loop { + let block_number = latest_block.block_number(); + if block_number < number { + return None; + } + if block_number == number { + return Some(latest_block); + } + latest_block = self.block_by_hash(latest_block.parent_hash())?; + } } pub fn execution_block_by_number(&self, number: u64) -> Option { @@ -226,10 +234,16 @@ impl ExecutionBlockGenerator { } pub fn insert_pow_block(&mut self, block_number: u64) -> Result<(), String> { + if let Some(finalized_block_hash) = self.finalized_block_hash { + return Err(format!( + "terminal block {} has been finalized. 
PoW chain has stopped building", + finalized_block_hash + )); + } let parent_hash = if block_number == 0 { ExecutionBlockHash::zero() - } else if let Some(hash) = self.block_hashes.get(&(block_number - 1)) { - *hash + } else if let Some(block) = self.block_by_number(block_number - 1) { + block.block_hash() } else { return Err(format!( "parent with block number {} not found", @@ -244,49 +258,118 @@ impl ExecutionBlockGenerator { parent_hash, )?; - self.insert_block(Block::PoW(block)) + // Insert block into block tree + self.insert_block(Block::PoW(block))?; + + // Set head + if let Some(head_total_difficulty) = + self.head_block.as_ref().and_then(|b| b.total_difficulty()) + { + if block.total_difficulty >= head_total_difficulty { + self.head_block = Some(Block::PoW(block)); + } + } else { + self.head_block = Some(Block::PoW(block)); + } + Ok(()) } - pub fn insert_block(&mut self, block: Block) -> Result<(), String> { + /// Insert a PoW block given the parent hash. + /// + /// Returns `Ok(hash)` of the inserted block. + /// Returns an error if the `parent_hash` does not exist in the block tree or + /// if the parent block is the terminal block. + pub fn insert_pow_block_by_hash( + &mut self, + parent_hash: ExecutionBlockHash, + unique_id: u64, + ) -> Result { + let parent_block = self.block_by_hash(parent_hash).ok_or_else(|| { + format!( + "Block corresponding to parent hash does not exist: {}", + parent_hash + ) + })?; + + let mut block = generate_pow_block( + self.terminal_total_difficulty, + self.terminal_block_number, + parent_block.block_number() + 1, + parent_hash, + )?; + + // Hack the block hash to make this block distinct from any other block with a different + // `unique_id` (the default is 0). + block.block_hash = ExecutionBlockHash::from_root(Hash256::from_low_u64_be(unique_id)); + block.block_hash = ExecutionBlockHash::from_root(block.tree_hash_root()); + + let hash = self.insert_block(Block::PoW(block))?; + + // Set head + if let Some(head_total_difficulty) = + self.head_block.as_ref().and_then(|b| b.total_difficulty()) + { + if block.total_difficulty >= head_total_difficulty { + self.head_block = Some(Block::PoW(block)); + } + } else { + self.head_block = Some(Block::PoW(block)); + } + Ok(hash) + } + + pub fn insert_block(&mut self, block: Block) -> Result { if self.blocks.contains_key(&block.block_hash()) { return Err(format!("{:?} is already known", block.block_hash())); - } else if self.block_hashes.contains_key(&block.block_number()) { - return Err(format!( - "block {} is already known, forking is not supported", - block.block_number() - )); - } else if block.block_number() != 0 && !self.blocks.contains_key(&block.parent_hash()) { + } else if block.parent_hash() != ExecutionBlockHash::zero() + && !self.blocks.contains_key(&block.parent_hash()) + { return Err(format!("parent block {:?} is unknown", block.parent_hash())); } - self.insert_block_without_checks(block) + Ok(self.insert_block_without_checks(block)) } - pub fn insert_block_without_checks(&mut self, block: Block) -> Result<(), String> { + pub fn insert_block_without_checks(&mut self, block: Block) -> ExecutionBlockHash { + let block_hash = block.block_hash(); self.block_hashes - .insert(block.block_number(), block.block_hash()); - self.blocks.insert(block.block_hash(), block); + .entry(block.block_number()) + .or_insert_with(Vec::new) + .push(block_hash); + self.blocks.insert(block_hash, block); - Ok(()) + block_hash } pub fn modify_last_block(&mut self, block_modifier: impl FnOnce(&mut Block)) { - if let 
Some((last_block_hash, block_number)) = - self.block_hashes.keys().max().and_then(|block_number| { - self.block_hashes - .get(block_number) - .map(|block| (block, *block_number)) + if let Some(last_block_hash) = self + .block_hashes + .iter_mut() + .max_by_key(|(block_number, _)| *block_number) + .and_then(|(_, block_hashes)| { + // Remove block hash, we will re-insert with the new block hash after modifying it. + block_hashes.pop() }) { - let mut block = self.blocks.remove(last_block_hash).unwrap(); + let mut block = self.blocks.remove(&last_block_hash).unwrap(); block_modifier(&mut block); + // Update the block hash after modifying the block match &mut block { Block::PoW(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), Block::PoS(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), } - self.block_hashes.insert(block_number, block.block_hash()); - self.blocks.insert(block.block_hash(), block); + + // Update head. + if self + .head_block + .as_ref() + .map_or(true, |head| head.block_hash() == last_block_hash) + { + self.head_block = Some(block.clone()); + } + + self.insert_block_without_checks(block); } } @@ -405,6 +488,17 @@ impl ExecutionBlockGenerator { } }; + self.head_block = Some( + self.blocks + .get(&forkchoice_state.head_block_hash) + .unwrap() + .clone(), + ); + + if forkchoice_state.finalized_block_hash != ExecutionBlockHash::zero() { + self.finalized_block_hash = Some(forkchoice_state.finalized_block_hash); + } + Ok(JsonForkchoiceUpdatedV1Response { payload_status: JsonPayloadStatusV1 { status: JsonPayloadStatusV1Status::Valid, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index ac677bf331f..97c52357559 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -123,6 +123,14 @@ pub async fn handle_rpc( let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; let payload_attributes: Option = get_param(params, 1)?; + if let Some(hook_response) = ctx + .hook + .lock() + .on_forkchoice_updated(forkchoice_state.clone(), payload_attributes.clone()) + { + return Ok(serde_json::to_value(hook_response).unwrap()); + } + let head_block_hash = forkchoice_state.head_block_hash; // Canned responses set by block hash take priority. 
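The `hook` consulted above is defined in the new file below; its contract (return `Some` to short-circuit the handler, `None` to fall through to the canned responses) is easiest to see in a toy registration. This is a hedged sketch using `String` stand-ins for the JSON types, not the mock server's real signatures:

```rust
/// Toy version of the mock server's fcU hook.
type FcuHook = dyn Fn(String, Option<String>) -> Option<String> + Send + Sync;

#[derive(Default)]
struct Hook {
    forkchoice_updated: Option<Box<FcuHook>>,
}

impl Hook {
    fn on_forkchoice_updated(&self, state: String, attrs: Option<String>) -> Option<String> {
        // No hook installed, or the hook declined: the normal RPC path runs.
        (self.forkchoice_updated.as_ref()?)(state, attrs)
    }
}

fn main() {
    let mut hook = Hook::default();
    // Observe every fcU but decline to override the response, which is the
    // pattern the proposer re-org test uses later in this diff.
    hook.forkchoice_updated = Some(Box::new(|state, _attrs| {
        println!("fcU observed: {state}");
        None
    }));
    assert!(hook
        .on_forkchoice_updated("head=0xabc".into(), None)
        .is_none());
}
```
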
diff --git a/beacon_node/execution_layer/src/test_utils/hook.rs b/beacon_node/execution_layer/src/test_utils/hook.rs new file mode 100644 index 00000000000..a3748103e3e --- /dev/null +++ b/beacon_node/execution_layer/src/test_utils/hook.rs @@ -0,0 +1,27 @@ +use crate::json_structures::*; + +type ForkChoiceUpdatedHook = dyn Fn( + JsonForkChoiceStateV1, + Option, + ) -> Option + + Send + + Sync; + +#[derive(Default)] +pub struct Hook { + forkchoice_updated: Option>, +} + +impl Hook { + pub fn on_forkchoice_updated( + &self, + state: JsonForkChoiceStateV1, + payload_attributes: Option, + ) -> Option { + (self.forkchoice_updated.as_ref()?)(state, payload_attributes) + } + + pub fn set_forkchoice_updated_hook(&mut self, f: Box) { + self.forkchoice_updated = Some(f); + } +} diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 065abc93609..e9d4b2121be 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -234,6 +234,21 @@ impl MockExecutionLayer { self } + pub fn produce_forked_pow_block(self) -> (Self, ExecutionBlockHash) { + let head_block = self + .server + .execution_block_generator() + .latest_block() + .unwrap(); + + let block_hash = self + .server + .execution_block_generator() + .insert_pow_block_by_hash(head_block.parent_hash(), 1) + .unwrap(); + (self, block_hash) + } + pub async fn with_terminal_block<'a, U, V>(self, func: U) -> Self where U: Fn(ChainSpec, ExecutionLayer, Option) -> V, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index f5066879a78..f18ecbe6226 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -23,6 +23,7 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; +pub use hook::Hook; pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; pub use mock_execution_layer::MockExecutionLayer; @@ -33,6 +34,7 @@ pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; mod execution_block_generator; mod handle_rpc; +mod hook; mod mock_builder; mod mock_execution_layer; @@ -99,6 +101,7 @@ impl MockServer { static_new_payload_response: <_>::default(), static_forkchoice_updated_response: <_>::default(), static_get_block_by_hash_response: <_>::default(), + hook: <_>::default(), new_payload_statuses: <_>::default(), fcu_payload_statuses: <_>::default(), _phantom: PhantomData, @@ -359,8 +362,7 @@ impl MockServer { .write() // The EF tests supply blocks out of order, so we must import them "without checks" and // trust they form valid chains. - .insert_block_without_checks(block) - .unwrap() + .insert_block_without_checks(block); } pub fn get_block(&self, block_hash: ExecutionBlockHash) -> Option> { @@ -441,6 +443,7 @@ pub struct Context { pub static_new_payload_response: Arc>>, pub static_forkchoice_updated_response: Arc>>, pub static_get_block_by_hash_response: Arc>>>, + pub hook: Arc>, // Canned responses by block hash. 
// diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index ec1448df7b3..9d6ad4050bf 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -1,5 +1,5 @@ use beacon_chain::{ - test_utils::{BeaconChainHarness, EphemeralHarnessType}, + test_utils::{BeaconChainHarness, BoxedMutator, EphemeralHarnessType}, BeaconChain, BeaconChainTypes, }; use directory::DEFAULT_ROOT_DIR; @@ -12,6 +12,7 @@ use lighthouse_network::{ types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, ConnectedPoint, Enr, NetworkGlobals, PeerId, PeerManager, }; +use logging::test_logger; use network::{NetworkReceivers, NetworkSenders}; use sensitive_url::SensitiveUrl; use slog::Logger; @@ -19,6 +20,7 @@ use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use std::time::Duration; +use store::MemoryStore; use tokio::sync::oneshot; use types::{ChainSpec, EthSpec}; @@ -47,13 +49,30 @@ pub struct ApiServer> { pub external_peer_id: PeerId, } +type Mutator = BoxedMutator, MemoryStore>; + impl InteractiveTester { pub async fn new(spec: Option, validator_count: usize) -> Self { - let harness = BeaconChainHarness::builder(E::default()) + Self::new_with_mutator(spec, validator_count, None).await + } + + pub async fn new_with_mutator( + spec: Option, + validator_count: usize, + mutator: Option>, + ) -> Self { + let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec) .deterministic_keypairs(validator_count) - .fresh_ephemeral_store() - .build(); + .logger(test_logger()) + .mock_execution_layer() + .fresh_ephemeral_store(); + + if let Some(mutator) = mutator { + harness_builder = harness_builder.initial_mutator(mutator); + } + + let harness = harness_builder.build(); let ApiServer { server, diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index b3227d7723a..17a3624afed 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,9 +1,22 @@ //! Generic tests that make use of the (newer) `InteractiveApiTester` use crate::common::*; -use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; +use beacon_chain::{ + chain_config::ReOrgThreshold, + test_utils::{AttestationStrategy, BlockStrategy}, +}; use eth2::types::DepositContractData; +use execution_layer::{ForkChoiceState, PayloadAttributes}; +use parking_lot::Mutex; +use slot_clock::SlotClock; +use state_processing::state_advance::complete_state_advance; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Duration; use tree_hash::TreeHash; -use types::{EthSpec, FullPayload, MainnetEthSpec, Slot}; +use types::{ + Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, + MainnetEthSpec, ProposerPreparationData, Slot, +}; type E = MainnetEthSpec; @@ -33,6 +46,495 @@ async fn deposit_contract_custom_network() { assert_eq!(result, expected); } +/// Data structure for tracking fork choice updates received by the mock execution layer. 
+#[derive(Debug, Default)] +struct ForkChoiceUpdates { + updates: HashMap>, +} + +#[derive(Debug, Clone)] +struct ForkChoiceUpdateMetadata { + received_at: Duration, + state: ForkChoiceState, + payload_attributes: Option, +} + +impl ForkChoiceUpdates { + fn insert(&mut self, update: ForkChoiceUpdateMetadata) { + self.updates + .entry(update.state.head_block_hash) + .or_insert_with(Vec::new) + .push(update); + } + + fn contains_update_for(&self, block_hash: ExecutionBlockHash) -> bool { + self.updates.contains_key(&block_hash) + } + + /// Find the first fork choice update for `head_block_hash` with payload attributes for a + /// block proposal at `proposal_timestamp`. + fn first_update_with_payload_attributes( + &self, + head_block_hash: ExecutionBlockHash, + proposal_timestamp: u64, + ) -> Option { + self.updates + .get(&head_block_hash)? + .iter() + .find(|update| { + update + .payload_attributes + .as_ref() + .map_or(false, |payload_attributes| { + payload_attributes.timestamp == proposal_timestamp + }) + }) + .cloned() + } +} + +pub struct ReOrgTest { + head_slot: Slot, + /// Number of slots between parent block and canonical head. + parent_distance: u64, + /// Number of slots between head block and block proposal slot. + head_distance: u64, + re_org_threshold: u64, + max_epochs_since_finalization: u64, + percent_parent_votes: usize, + percent_empty_votes: usize, + percent_head_votes: usize, + should_re_org: bool, + misprediction: bool, +} + +impl Default for ReOrgTest { + /// Default config represents a regular easy re-org. + fn default() -> Self { + Self { + head_slot: Slot::new(30), + parent_distance: 1, + head_distance: 1, + re_org_threshold: 20, + max_epochs_since_finalization: 2, + percent_parent_votes: 100, + percent_empty_votes: 100, + percent_head_votes: 0, + should_re_org: true, + misprediction: false, + } + } +} + +// Test that the beacon node will try to perform proposer boost re-orgs on late blocks when +// configured. 
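The test cases below differ mainly in their vote percentages; the arithmetic they all rely on (taken from `proposer_boost_re_org_test` further down) can be checked in isolation. A standalone sketch with the values inlined:

```rust
fn main() {
    // attesters_per_slot = 10 in the test body, so each attester carries
    // exactly 10% of a slot's committee weight.
    let attesters_per_slot = 10_usize;
    assert_eq!(100 % attesters_per_slot, 0);
    let percent_per_attester = 100 / attesters_per_slot; // 10

    // Default scenario: 100% parent votes, 100% empty votes, 0% head votes.
    for (name, percent) in [("parent", 100), ("empty", 100), ("head", 0)] {
        assert_eq!(percent % percent_per_attester, 0);
        let num_votes = attesters_per_slot * percent / 100;
        println!("{name} votes: {num_votes} attesters");
    }

    // With the default 20% threshold, a late head block holding 0% of the
    // committee weight is re-orgable; at 30% (the misprediction case) the
    // realized weight exceeds the threshold and the re-org is abandoned.
    let re_org_threshold = 20_usize;
    assert!(0 < re_org_threshold && 30 > re_org_threshold);
}
```
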
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_zero_weight() { + proposer_boost_re_org_test(ReOrgTest::default()).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_epoch_boundary() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(31), + should_re_org: false, + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_slot_after_epoch_boundary() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(33), + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_bad_ffg() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(64 + 22), + should_re_org: false, + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_no_finality() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(96), + percent_parent_votes: 100, + percent_empty_votes: 0, + percent_head_votes: 100, + should_re_org: false, + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_finality() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(129), + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_parent_distance() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(30), + parent_distance: 2, + should_re_org: false, + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_head_distance() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(29), + head_distance: 2, + should_re_org: false, + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_very_unhealthy() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(31), + parent_distance: 2, + head_distance: 2, + percent_parent_votes: 10, + percent_empty_votes: 10, + percent_head_votes: 10, + should_re_org: false, + ..Default::default() + }) + .await; +} + +/// The head block is late but still receives 30% of the committee vote, leading to a misprediction. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_weight_misprediction() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(30), + percent_empty_votes: 70, + percent_head_votes: 30, + should_re_org: false, + misprediction: true, + ..Default::default() + }) + .await; +} + +/// Run a proposer boost re-org test. 
+/// +/// - `head_slot`: the slot of the canonical head to be reorged +/// - `reorg_threshold`: committee percentage value for reorging +/// - `num_empty_votes`: percentage of comm of attestations for the parent block +/// - `num_head_votes`: number of attestations for the head block +/// - `should_re_org`: whether the proposer should build on the parent rather than the head +pub async fn proposer_boost_re_org_test( + ReOrgTest { + head_slot, + parent_distance, + head_distance, + re_org_threshold, + max_epochs_since_finalization, + percent_parent_votes, + percent_empty_votes, + percent_head_votes, + should_re_org, + misprediction, + }: ReOrgTest, +) { + assert!(head_slot > 0); + + // We require a network with execution enabled so we can check EL message timings. + let mut spec = ForkName::Merge.make_genesis_spec(E::default_spec()); + spec.terminal_total_difficulty = 1.into(); + + // Ensure there are enough validators to have `attesters_per_slot`. + let attesters_per_slot = 10; + let validator_count = E::slots_per_epoch() as usize * attesters_per_slot; + let all_validators = (0..validator_count).collect::>(); + let num_initial = head_slot.as_u64().checked_sub(parent_distance + 1).unwrap(); + + // Check that the required vote percentages can be satisfied exactly using `attesters_per_slot`. + assert_eq!(100 % attesters_per_slot, 0); + let percent_per_attester = 100 / attesters_per_slot; + assert_eq!(percent_parent_votes % percent_per_attester, 0); + assert_eq!(percent_empty_votes % percent_per_attester, 0); + assert_eq!(percent_head_votes % percent_per_attester, 0); + let num_parent_votes = Some(attesters_per_slot * percent_parent_votes / 100); + let num_empty_votes = Some(attesters_per_slot * percent_empty_votes / 100); + let num_head_votes = Some(attesters_per_slot * percent_head_votes / 100); + + let tester = InteractiveTester::::new_with_mutator( + Some(spec), + validator_count, + Some(Box::new(move |builder| { + builder + .proposer_re_org_threshold(Some(ReOrgThreshold(re_org_threshold))) + .proposer_re_org_max_epochs_since_finalization(Epoch::new( + max_epochs_since_finalization, + )) + })), + ) + .await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + let execution_ctx = mock_el.server.ctx.clone(); + let slot_clock = &harness.chain.slot_clock; + + // Move to terminal block. + mock_el.server.all_payloads_valid(); + execution_ctx + .execution_block_generator + .write() + .move_to_terminal_block() + .unwrap(); + + // Send proposer preparation data for all validators. + let proposer_preparation_data = all_validators + .iter() + .map(|i| ProposerPreparationData { + validator_index: *i as u64, + fee_recipient: Address::from_low_u64_be(*i as u64), + }) + .collect::>(); + harness + .chain + .execution_layer + .as_ref() + .unwrap() + .update_proposer_preparation( + head_slot.epoch(E::slots_per_epoch()) + 1, + &proposer_preparation_data, + ) + .await; + + // Create some chain depth. + harness.advance_slot(); + harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Start collecting fork choice updates. 
+ let forkchoice_updates = Arc::new(Mutex::new(ForkChoiceUpdates::default())); + let forkchoice_updates_inner = forkchoice_updates.clone(); + let chain_inner = harness.chain.clone(); + + execution_ctx + .hook + .lock() + .set_forkchoice_updated_hook(Box::new(move |state, payload_attributes| { + let received_at = chain_inner.slot_clock.now_duration().unwrap(); + let state = ForkChoiceState::from(state); + let payload_attributes = payload_attributes.map(Into::into); + let update = ForkChoiceUpdateMetadata { + received_at, + state, + payload_attributes, + }; + forkchoice_updates_inner.lock().insert(update); + None + })); + + // We set up the following block graph, where B is a block that arrives late and is re-orged + // by C. + // + // A | B | - | + // ^ | - | C | + + let slot_a = Slot::new(num_initial + 1); + let slot_b = slot_a + parent_distance; + let slot_c = slot_b + head_distance; + + harness.advance_slot(); + let (block_a_root, block_a, state_a) = harness + .add_block_at_slot(slot_a, harness.get_current_state()) + .await + .unwrap(); + + // Attest to block A during slot A. + let (block_a_parent_votes, _) = harness.make_attestations_with_limit( + &all_validators, + &state_a, + state_a.canonical_root(), + block_a_root, + slot_a, + num_parent_votes, + ); + harness.process_attestations(block_a_parent_votes); + + // Attest to block A during slot B. + for _ in 0..parent_distance { + harness.advance_slot(); + } + let (block_a_empty_votes, block_a_attesters) = harness.make_attestations_with_limit( + &all_validators, + &state_a, + state_a.canonical_root(), + block_a_root, + slot_b, + num_empty_votes, + ); + harness.process_attestations(block_a_empty_votes); + + let remaining_attesters = all_validators + .iter() + .copied() + .filter(|index| !block_a_attesters.contains(index)) + .collect::>(); + + // Produce block B and process it halfway through the slot. + let (block_b, mut state_b) = harness.make_block(state_a.clone(), slot_b).await; + let block_b_root = block_b.canonical_root(); + + let obs_time = slot_clock.start_of(slot_b).unwrap() + slot_clock.slot_duration() / 2; + slot_clock.set_current_time(obs_time); + harness.chain.block_times_cache.write().set_time_observed( + block_b_root, + slot_b, + obs_time, + None, + None, + ); + harness.process_block_result(block_b.clone()).await.unwrap(); + + // Add attestations to block B. + let (block_b_head_votes, _) = harness.make_attestations_with_limit( + &remaining_attesters, + &state_b, + state_b.canonical_root(), + block_b_root.into(), + slot_b, + num_head_votes, + ); + harness.process_attestations(block_b_head_votes); + + let payload_lookahead = harness.chain.config.prepare_payload_lookahead; + let fork_choice_lookahead = Duration::from_millis(500); + while harness.get_current_slot() != slot_c { + let current_slot = harness.get_current_slot(); + let next_slot = current_slot + 1; + + // Simulate the scheduled call to prepare proposers at 8 seconds into the slot. + harness.advance_to_slot_lookahead(next_slot, payload_lookahead); + harness + .chain + .prepare_beacon_proposer(current_slot) + .await + .unwrap(); + + // Simulate the scheduled call to fork choice + prepare proposers 500ms before the + // next slot. + harness.advance_to_slot_lookahead(next_slot, fork_choice_lookahead); + harness.chain.recompute_head_at_slot(next_slot).await; + harness + .chain + .prepare_beacon_proposer(current_slot) + .await + .unwrap(); + + harness.advance_slot(); + harness.chain.per_slot_task().await; + } + + // Produce block C. 
+ // Advance state_b so we can get the proposer. + complete_state_advance(&mut state_b, None, slot_c, &harness.chain.spec).unwrap(); + + let proposer_index = state_b + .get_beacon_proposer_index(slot_c, &harness.chain.spec) + .unwrap(); + let randao_reveal = harness + .sign_randao_reveal(&state_b, proposer_index, slot_c) + .into(); + let unsigned_block_c = tester + .client + .get_validator_blocks(slot_c, &randao_reveal, None) + .await + .unwrap() + .data; + let block_c = harness.sign_beacon_block(unsigned_block_c, &state_b); + + if should_re_org { + // Block C should build on A. + assert_eq!(block_c.parent_root(), block_a_root.into()); + } else { + // Block C should build on B. + assert_eq!(block_c.parent_root(), block_b_root); + } + + // Applying block C should cause it to become head regardless (re-org or continuation). + let block_root_c = harness + .process_block_result(block_c.clone()) + .await + .unwrap() + .into(); + assert_eq!(harness.head_block_root(), block_root_c); + + // Check the fork choice updates that were sent. + let forkchoice_updates = forkchoice_updates.lock(); + let block_a_exec_hash = block_a.message().execution_payload().unwrap().block_hash(); + let block_b_exec_hash = block_b.message().execution_payload().unwrap().block_hash(); + + let block_c_timestamp = block_c.message().execution_payload().unwrap().timestamp(); + + // If we re-orged then no fork choice update for B should have been sent. + assert_eq!( + should_re_org, + !forkchoice_updates.contains_update_for(block_b_exec_hash), + "{block_b_exec_hash:?}" + ); + + // Check the timing of the first fork choice update with payload attributes for block C. + let c_parent_hash = if should_re_org { + block_a_exec_hash + } else { + block_b_exec_hash + }; + let first_update = forkchoice_updates + .first_update_with_payload_attributes(c_parent_hash, block_c_timestamp) + .unwrap(); + let payload_attribs = first_update.payload_attributes.as_ref().unwrap(); + + let lookahead = slot_clock + .start_of(slot_c) + .unwrap() + .checked_sub(first_update.received_at) + .unwrap(); + + if !misprediction { + assert_eq!( + lookahead, payload_lookahead, + "lookahead={lookahead:?}, timestamp={}, prev_randao={:?}", + payload_attribs.timestamp, payload_attribs.prev_randao, + ); + } else { + // On a misprediction we issue the first fcU 500ms before creating a block! + assert_eq!( + lookahead, fork_choice_lookahead, + "timestamp={}, prev_randao={:?}", + payload_attribs.timestamp, payload_attribs.prev_randao, + ); + } +} + // Test that running fork choice before proposing results in selection of the correct head. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn fork_choice_before_proposal() { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 44a995176d1..c5f4cc8adf4 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -761,6 +761,38 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { experimental as it may obscure performance issues.") .takes_value(false) ) + .arg( + Arg::with_name("disable-proposer-reorgs") + .long("disable-proposer-reorgs") + .help("Do not attempt to reorg late blocks from other validators when proposing.") + .takes_value(false) + ) + .arg( + Arg::with_name("proposer-reorg-threshold") + .long("proposer-reorg-threshold") + .value_name("PERCENT") + .help("Percentage of vote weight below which to attempt a proposer reorg. 
\ + Default: 20%") + .conflicts_with("disable-proposer-reorgs") + ) + .arg( + Arg::with_name("proposer-reorg-epochs-since-finalization") + .long("proposer-reorg-epochs-since-finalization") + .value_name("EPOCHS") + .help("Maximum number of epochs since finalization at which proposer reorgs are \ + allowed. Default: 2") + .conflicts_with("disable-proposer-reorgs") + ) + .arg( + Arg::with_name("prepare-payload-lookahead") + .long("prepare-payload-lookahead") + .value_name("MILLISECONDS") + .help("The time before the start of a proposal slot at which payload attributes \ + should be sent. Low values are useful for execution nodes which don't \ + improve their payload after the first call, and high values are useful \ + for ensuring the EL is given ample notice. Default: 1/3 of a slot.") + .takes_value(true) + ) .arg( Arg::with_name("fork-choice-before-proposal-timeout") .long("fork-choice-before-proposal-timeout") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index e98b585f5f3..b3bfa696edf 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,3 +1,7 @@ +use beacon_chain::chain_config::{ + ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, +}; use clap::ArgMatches; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use client::{ClientConfig, ClientGenesis}; @@ -18,6 +22,7 @@ use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::path::{Path, PathBuf}; use std::str::FromStr; +use std::time::Duration; use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAFFITI_BYTES_LEN}; use unused_port::{unused_tcp_port, unused_udp_port}; @@ -674,11 +679,32 @@ pub fn get_config( client_config.chain.enable_lock_timeouts = false; } + if cli_args.is_present("disable-proposer-reorgs") { + client_config.chain.re_org_threshold = None; + } else { + client_config.chain.re_org_threshold = Some( + clap_utils::parse_optional(cli_args, "proposer-reorg-threshold")? + .map(ReOrgThreshold) + .unwrap_or(DEFAULT_RE_ORG_THRESHOLD), + ); + client_config.chain.re_org_max_epochs_since_finalization = + clap_utils::parse_optional(cli_args, "proposer-reorg-epochs-since-finalization")? + .unwrap_or(DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION); + } + // Note: This overrides any previous flags that enable this option. if cli_args.is_present("disable-deposit-contract-sync") { client_config.sync_eth1_chain = false; } + client_config.chain.prepare_payload_lookahead = + clap_utils::parse_optional(cli_args, "prepare-payload-lookahead")? + .map(Duration::from_millis) + .unwrap_or_else(|| { + Duration::from_secs(spec.seconds_per_slot) + / DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR + }); + if let Some(timeout) = clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")? { diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index a43fa10e649..470407ebee9 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -47,6 +47,7 @@ * [Release Candidates](./advanced-release-candidates.md) * [MEV and Lighthouse](./builders.md) * [Merge Migration](./merge-migration.md) + * [Late Block Re-orgs](./late-block-re-orgs.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/builders.md b/book/src/builders.md index 99fae5b3e76..f2a4b3936a5 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -200,19 +200,23 @@ for `INFO` and `WARN` messages indicating why the builder was not used. 
Examples of messages indicating fallback to a locally produced block are: ``` -INFO No payload provided by connected builder. +INFO Builder did not return a payload ``` ``` -WARN Unable to retrieve a payload from a connected builder +WARN Builder error when requesting payload ``` ``` -INFO The value offered by the connected builder does not meet the configured profit threshold. +WARN Builder returned invalid payload ``` ``` -INFO Due to poor chain health the local execution engine will be used for payload construction. +INFO Builder payload ignored +``` + +``` +INFO Chain is unhealthy, using local payload ``` In case of fallback you should see a log indicating that the locally produced payload was diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md new file mode 100644 index 00000000000..0014af8f152 --- /dev/null +++ b/book/src/late-block-re-orgs.md @@ -0,0 +1,60 @@ +# Late Block Re-orgs + +Since v3.4.0 Lighthouse will opportunistically re-org late blocks when proposing. + +This feature is intended to disincentivise late blocks and improve network health. Proposing a +re-orging block is also more profitable for the proposer because it increases the number of +attestations and transactions that can be included. + +## Command line flags + +There are three flags which control the re-orging behaviour: + +* `--disable-proposer-reorgs`: turn re-orging off (it's on by default). +* `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled. +* `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs, + meaning re-orgs will only be attempted when the chain is finalizing optimally. + +All flags should be applied to `lighthouse bn`. The default configuration is recommended as it +balances the chance of the re-org succeeding against the chance of failure due to attestations +arriving late and making the re-org block non-viable. + +## Safeguards + +To prevent excessive re-orgs there are several safeguards in place that limit when a re-org +will be attempted. + +The full conditions are described in [the spec][] but the most important ones are: + +* Only single-slot re-orgs: Lighthouse will build a block at N + 1 to re-org N by building on the + parent N - 1. The result is a chain with exactly one skipped slot. +* No epoch boundaries: to ensure that the selected proposer does not change, Lighthouse will + not propose a re-orging block in the 0th slot of an epoch. + +## Logs + +You can track the reasons for re-orgs being attempted (or not) via Lighthouse's logs. + +A pair of messages at `INFO` level will be logged if a re-org opportunity is detected: + +> INFO Attempting re-org due to weak head threshold_weight: 45455983852725, head_weight: 0, parent: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, weak_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 + +> INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 + +This should be followed shortly after by a `WARN` log indicating that a re-org occurred. 
This is +expected and normal: + +> WARN Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 + +In case a re-org is not viable (which should be most of the time), Lighthouse will just propose a +block as normal and log the reason the re-org was not attempted at debug level: + +> DEBG Not attempting re-org reason: head not late + +If you are interested in digging into the timing of `forkchoiceUpdated` messages sent to the +execution layer, there is also a debug log for the suppression of `forkchoiceUpdated` messages +when Lighthouse thinks that a re-org is likely: + +> DEBG Fork choice update overridden slot: 1105320, override: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, canonical_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 + +[the spec]: https://github.com/ethereum/consensus-specs/pull/3034 diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs index 7d59cdf022c..c6e5ad01e68 100644 --- a/common/task_executor/src/test_utils.rs +++ b/common/task_executor/src/test_utils.rs @@ -60,6 +60,13 @@ impl Drop for TestRuntime { } } +impl TestRuntime { + pub fn set_logger(&mut self, log: Logger) { + self.log = log.clone(); + self.task_executor.log = log; + } +} + pub fn null_logger() -> Result { let log_builder = NullLoggerBuilder; log_builder diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 7f12e1d8973..290cef78ab5 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,6 +1,7 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{ - Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, ProtoArrayForkChoice, + Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, + ProtoArrayForkChoice, ReOrgThreshold, }; use slog::{crit, debug, warn, Logger}; use ssz_derive::{Decode, Encode}; @@ -22,7 +23,8 @@ pub enum Error { InvalidAttestation(InvalidAttestation), InvalidAttesterSlashing(AttesterSlashingValidationError), InvalidBlock(InvalidBlock), - ProtoArrayError(String), + ProtoArrayStringError(String), + ProtoArrayError(proto_array::Error), InvalidProtoArrayBytes(String), InvalidLegacyProtoArrayBytes(String), FailedToProcessInvalidExecutionPayload(String), @@ -44,6 +46,7 @@ pub enum Error { ForkChoiceStoreError(T), UnableToSetJustifiedCheckpoint(T), AfterBlockFailed(T), + ProposerHeadError(T), InvalidAnchor { block_slot: Slot, state_slot: Slot, @@ -59,6 +62,13 @@ pub enum Error { MissingFinalizedBlock { finalized_checkpoint: Checkpoint, }, + WrongSlotForGetProposerHead { + current_slot: Slot, + fc_store_slot: Slot, + }, + ProposerBoostNotExpiredForGetProposerHead { + proposer_boost_root: Hash256, + }, UnrealizedVoteProcessing(state_processing::EpochProcessingError), ParticipationCacheBuild(BeaconStateError), ValidatorStatuses(BeaconStateError), @@ -153,6 +163,12 @@ pub enum InvalidAttestation { impl From for Error { fn from(e: String) -> Self { + Error::ProtoArrayStringError(e) + } +} + +impl From for Error { + fn from(e: proto_array::Error) -> Self { Error::ProtoArrayError(e) } } @@ -554,6 +570,69 @@ where Ok(head_root) } + /// Get the block to build on as proposer, taking into account proposer re-orgs. 
+    ///
+    /// You *must* call `get_head` for the proposal slot prior to calling this function and pass
+    /// in the result of `get_head` as `canonical_head`.
+    pub fn get_proposer_head(
+        &self,
+        current_slot: Slot,
+        canonical_head: Hash256,
+        re_org_threshold: ReOrgThreshold,
+        max_epochs_since_finalization: Epoch,
+    ) -> Result<ProposerHeadInfo, ProposerHeadError<Error<T::Error>>> {
+        // Ensure that fork choice has already been updated for the current slot. This prevents
+        // us from having to take a write lock or do any dequeueing of attestations in this
+        // function.
+        let fc_store_slot = self.fc_store.get_current_slot();
+        if current_slot != fc_store_slot {
+            return Err(ProposerHeadError::Error(
+                Error::WrongSlotForGetProposerHead {
+                    current_slot,
+                    fc_store_slot,
+                },
+            ));
+        }
+
+        // Similarly, the proposer boost for the previous head should already have expired.
+        let proposer_boost_root = self.fc_store.proposer_boost_root();
+        if !proposer_boost_root.is_zero() {
+            return Err(ProposerHeadError::Error(
+                Error::ProposerBoostNotExpiredForGetProposerHead {
+                    proposer_boost_root,
+                },
+            ));
+        }
+
+        self.proto_array
+            .get_proposer_head::<E>(
+                current_slot,
+                canonical_head,
+                self.fc_store.justified_balances(),
+                re_org_threshold,
+                max_epochs_since_finalization,
+            )
+            .map_err(ProposerHeadError::convert_inner_error)
+    }
+
+    pub fn get_preliminary_proposer_head(
+        &self,
+        canonical_head: Hash256,
+        re_org_threshold: ReOrgThreshold,
+        max_epochs_since_finalization: Epoch,
+    ) -> Result<ProposerHeadInfo, ProposerHeadError<Error<T::Error>>> {
+        let current_slot = self.fc_store.get_current_slot();
+        self.proto_array
+            .get_proposer_head_info::<E>(
+                current_slot,
+                canonical_head,
+                self.fc_store.justified_balances(),
+                re_org_threshold,
+                max_epochs_since_finalization,
+            )
+            .map_err(ProposerHeadError::convert_inner_error)
+    }
+
     /// Return information about:
     ///
     /// - The LMD head of the chain.
diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs
index 9604e254754..60c58859ed8 100644
--- a/consensus/fork_choice/src/fork_choice_store.rs
+++ b/consensus/fork_choice/src/fork_choice_store.rs
@@ -1,3 +1,4 @@
+use proto_array::JustifiedBalances;
 use std::collections::BTreeSet;
 use std::fmt::Debug;
 use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot};
@@ -44,7 +45,7 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized {
     fn justified_checkpoint(&self) -> &Checkpoint;
 
     /// Returns balances from the `state` identified by `justified_checkpoint.root`.
-    fn justified_balances(&self) -> &[u64];
+    fn justified_balances(&self) -> &JustifiedBalances;
 
     /// Returns the `best_justified_checkpoint`.
     fn best_justified_checkpoint(&self) -> &Checkpoint;
diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs
index 850f7c4a120..00bd1f763dc 100644
--- a/consensus/fork_choice/tests/tests.rs
+++ b/consensus/fork_choice/tests/tests.rs
@@ -378,9 +378,13 @@ impl ForkChoiceTest {
 
         assert_eq!(
             &balances[..],
-            fc.fc_store().justified_balances(),
+            &fc.fc_store().justified_balances().effective_balances,
             "balances should match"
-        )
+        );
+        assert_eq!(
+            balances.iter().sum::<u64>(),
+            fc.fc_store().justified_balances().total_effective_balance
+        );
     }
 
     /// Returns an attestation that is valid for some slot in the given `chain`.
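
For context, a minimal sketch of how a proposer could consume the new `get_proposer_head` API. This is hypothetical glue code, not part of this diff: `fork_choice`, `log`, `current_slot`, and `canonical_head` are assumed bindings inside a hypothetical function returning `Result<Hash256, _>`, and `get_head` is assumed to have already run for `current_slot`.

```rust
// Hypothetical caller sketch (not part of this patch).
let parent_to_build_on = match fork_choice.get_proposer_head(
    current_slot,
    canonical_head,
    ReOrgThreshold(20), // mirrors --proposer-reorg-threshold
    Epoch::new(2),      // mirrors --proposer-reorg-epochs-since-finalization
) {
    // All safeguards passed: build on the parent to orphan the weak head.
    Ok(info) => info.parent_node.root,
    // A named reason not to re-org: propose on the canonical head as usual.
    Err(ProposerHeadError::DoNotReOrg(reason)) => {
        debug!(log, "Not attempting re-org"; "reason" => %reason);
        canonical_head
    }
    // A hard error, e.g. fork choice was not yet updated for `current_slot`.
    Err(ProposerHeadError::Error(e)) => return Err(e),
};
```

Note how the `DoNotReOrg` variant short-circuits to the normal proposal path, matching the `DEBG Not attempting re-org` log documented in the book chapter above.
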
diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index ad79ecc1e6b..dfab6fda567 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -15,3 +15,4 @@ eth2_ssz_derive = "0.3.0" serde = "1.0.116" serde_derive = "1.0.116" serde_yaml = "0.8.13" +safe_arith = { path = "../safe_arith" } diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 826bf6c3a79..c55739da792 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -1,3 +1,4 @@ +use safe_arith::ArithError; use types::{Checkpoint, Epoch, ExecutionBlockHash, Hash256, Slot}; #[derive(Clone, PartialEq, Debug)] @@ -15,6 +16,7 @@ pub enum Error { InvalidNodeDelta(usize), DeltaOverflow(usize), ProposerBoostOverflow(usize), + ReOrgThresholdOverflow, IndexOverflow(&'static str), InvalidExecutionDeltaOverflow(usize), InvalidDeltaLen { @@ -48,6 +50,13 @@ pub enum Error { block_root: Hash256, parent_root: Hash256, }, + Arith(ArithError), +} + +impl From for Error { + fn from(e: ArithError) -> Self { + Error::Arith(e) + } } #[derive(Clone, PartialEq, Debug)] diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index ba6f3170dc1..035fb799eea 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -5,7 +5,7 @@ mod votes; use crate::proto_array::CountUnrealizedFull; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; -use crate::InvalidationOperation; +use crate::{InvalidationOperation, JustifiedBalances}; use serde_derive::{Deserialize, Serialize}; use std::collections::BTreeSet; use types::{ @@ -101,11 +101,14 @@ impl ForkChoiceTestDefinition { justified_state_balances, expected_head, } => { + let justified_balances = + JustifiedBalances::from_effective_balances(justified_state_balances) + .unwrap(); let head = fork_choice .find_head::( justified_checkpoint, finalized_checkpoint, - &justified_state_balances, + &justified_balances, Hash256::zero(), &equivocating_indices, Slot::new(0), @@ -129,11 +132,14 @@ impl ForkChoiceTestDefinition { expected_head, proposer_boost_root, } => { + let justified_balances = + JustifiedBalances::from_effective_balances(justified_state_balances) + .unwrap(); let head = fork_choice .find_head::( justified_checkpoint, finalized_checkpoint, - &justified_state_balances, + &justified_balances, proposer_boost_root, &equivocating_indices, Slot::new(0), @@ -155,10 +161,13 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, justified_state_balances, } => { + let justified_balances = + JustifiedBalances::from_effective_balances(justified_state_balances) + .unwrap(); let result = fork_choice.find_head::( justified_checkpoint, finalized_checkpoint, - &justified_state_balances, + &justified_balances, Hash256::zero(), &equivocating_indices, Slot::new(0), diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs index f1b0e512d7d..ede5bb39481 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -999,7 +999,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }); ops.push(Operation::AssertWeight { block_root: get_root(3), - // This is a "magic number" generated from 
`calculate_proposer_boost`.
+        // This is a "magic number" generated from `calculate_committee_fraction`.
         weight: 31_000,
     });
 
diff --git a/consensus/proto_array/src/justified_balances.rs b/consensus/proto_array/src/justified_balances.rs
new file mode 100644
index 00000000000..75f6c2f7c80
--- /dev/null
+++ b/consensus/proto_array/src/justified_balances.rs
@@ -0,0 +1,62 @@
+use safe_arith::{ArithError, SafeArith};
+use types::{BeaconState, EthSpec};
+
+#[derive(Debug, PartialEq, Clone, Default)]
+pub struct JustifiedBalances {
+    /// The effective balances for every validator in a given justified state.
+    ///
+    /// Any validator who is not active in the epoch of the justified state is assigned a balance of
+    /// zero.
+    pub effective_balances: Vec<u64>,
+    /// The sum of `self.effective_balances`.
+    pub total_effective_balance: u64,
+    /// The number of active validators included in `self.effective_balances`.
+    pub num_active_validators: u64,
+}
+
+impl JustifiedBalances {
+    pub fn from_justified_state<T: EthSpec>(state: &BeaconState<T>) -> Result<Self, ArithError> {
+        let current_epoch = state.current_epoch();
+        let mut total_effective_balance = 0u64;
+        let mut num_active_validators = 0u64;
+
+        let effective_balances = state
+            .validators()
+            .iter()
+            .map(|validator| {
+                if validator.is_active_at(current_epoch) {
+                    total_effective_balance.safe_add_assign(validator.effective_balance)?;
+                    num_active_validators.safe_add_assign(1)?;
+
+                    Ok(validator.effective_balance)
+                } else {
+                    Ok(0)
+                }
+            })
+            .collect::<Result<Vec<_>, _>>()?;
+
+        Ok(Self {
+            effective_balances,
+            total_effective_balance,
+            num_active_validators,
+        })
+    }
+
+    pub fn from_effective_balances(effective_balances: Vec<u64>) -> Result<Self, ArithError> {
+        let mut total_effective_balance = 0;
+        let mut num_active_validators = 0;
+
+        for &balance in &effective_balances {
+            if balance != 0 {
+                total_effective_balance.safe_add_assign(balance)?;
+                num_active_validators.safe_add_assign(1)?;
+            }
+        }
+
+        Ok(Self {
+            effective_balances,
+            total_effective_balance,
+            num_active_validators,
+        })
+    }
+}
diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs
index e7bd9c0ed56..f2b29e1c7b2 100644
--- a/consensus/proto_array/src/lib.rs
+++ b/consensus/proto_array/src/lib.rs
@@ -1,11 +1,18 @@
 mod error;
 pub mod fork_choice_test_definition;
+mod justified_balances;
 mod proto_array;
 mod proto_array_fork_choice;
 mod ssz_container;
 
-pub use crate::proto_array::{CountUnrealizedFull, InvalidationOperation};
-pub use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice};
+pub use crate::justified_balances::JustifiedBalances;
+pub use crate::proto_array::{
+    calculate_committee_fraction, CountUnrealizedFull, InvalidationOperation,
+};
+pub use crate::proto_array_fork_choice::{
+    Block, DoNotReOrg, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice,
+    ReOrgThreshold,
+};
 pub use error::Error;
 
 pub mod core {
diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs
index 590407d7eb8..add84f54787 100644
--- a/consensus/proto_array/src/proto_array.rs
+++ b/consensus/proto_array/src/proto_array.rs
@@ -1,5 +1,5 @@
 use crate::error::InvalidBestNodeInfo;
-use crate::{error::Error, Block, ExecutionStatus};
+use crate::{error::Error, Block, ExecutionStatus, JustifiedBalances};
 use serde_derive::{Deserialize, Serialize};
 use ssz::four_byte_option_impl;
 use ssz::Encode;
@@ -169,7 +169,7 @@ impl ProtoArray {
         mut deltas: Vec<i64>,
         justified_checkpoint: Checkpoint,
         finalized_checkpoint: Checkpoint,
-        new_balances: &[u64],
+
new_justified_balances: &JustifiedBalances, proposer_boost_root: Hash256, current_slot: Slot, spec: &ChainSpec, @@ -241,9 +241,11 @@ impl ProtoArray { // Invalid nodes (or their ancestors) should not receive a proposer boost. && !execution_status_is_invalid { - proposer_score = - calculate_proposer_boost::(new_balances, proposer_score_boost) - .ok_or(Error::ProposerBoostOverflow(node_index))?; + proposer_score = calculate_committee_fraction::( + new_justified_balances, + proposer_score_boost, + ) + .ok_or(Error::ProposerBoostOverflow(node_index))?; node_delta = node_delta .checked_add(proposer_score as i64) .ok_or(Error::DeltaOverflow(node_index))?; @@ -1006,32 +1008,19 @@ impl ProtoArray { } } -/// A helper method to calculate the proposer boost based on the given `validator_balances`. -/// This does *not* do any verification about whether a boost should or should not be applied. -/// The `validator_balances` array used here is assumed to be structured like the one stored in -/// the `BalancesCache`, where *effective* balances are stored and inactive balances are defaulted -/// to zero. -/// -/// Returns `None` if there is an overflow or underflow when calculating the score. +/// A helper method to calculate the proposer boost based on the given `justified_balances`. /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance -pub fn calculate_proposer_boost( - validator_balances: &[u64], +pub fn calculate_committee_fraction( + justified_balances: &JustifiedBalances, proposer_score_boost: u64, ) -> Option { - let mut total_balance: u64 = 0; - let mut num_validators: u64 = 0; - for &balance in validator_balances { - // We need to filter zero balances here to get an accurate active validator count. - // This is because we default inactive validator balances to zero when creating - // this balances array. - if balance != 0 { - total_balance = total_balance.checked_add(balance)?; - num_validators = num_validators.checked_add(1)?; - } - } - let average_balance = total_balance.checked_div(num_validators)?; - let committee_size = num_validators.checked_div(E::slots_per_epoch())?; + let average_balance = justified_balances + .total_effective_balance + .checked_div(justified_balances.num_active_validators)?; + let committee_size = justified_balances + .num_active_validators + .checked_div(E::slots_per_epoch())?; let committee_weight = committee_size.checked_mul(average_balance)?; committee_weight .checked_mul(proposer_score_boost)? diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 8f5d062ec6a..cbd369ae6ec 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,9 +1,12 @@ -use crate::error::Error; -use crate::proto_array::CountUnrealizedFull; -use crate::proto_array::{ - calculate_proposer_boost, InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode, +use crate::{ + error::Error, + proto_array::{ + calculate_committee_fraction, CountUnrealizedFull, InvalidationOperation, Iter, + ProposerBoost, ProtoArray, ProtoNode, + }, + ssz_container::SszContainer, + JustifiedBalances, }; -use crate::ssz_container::SszContainer; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -170,11 +173,128 @@ where } } +/// Information about the proposer head used for opportunistic re-orgs. 
+#[derive(Clone)]
+pub struct ProposerHeadInfo {
+    /// Information about the *current* head block, which may be re-orged.
+    pub head_node: ProtoNode,
+    /// Information about the parent of the current head, which should be selected as the parent
+    /// for a new proposal *if* a re-org is decided on.
+    pub parent_node: ProtoNode,
+    /// The computed fraction of the active committee balance below which we can re-org.
+    pub re_org_weight_threshold: u64,
+    /// The current slot from fork choice's point of view, may lead the wall-clock slot by up to
+    /// 500ms.
+    pub current_slot: Slot,
+}
+
+/// Error type to enable short-circuiting checks in `get_proposer_head`.
+///
+/// This type intentionally does not implement `Debug` so that callers are forced to handle the
+/// enum.
+#[derive(Clone, PartialEq)]
+pub enum ProposerHeadError<E> {
+    DoNotReOrg(DoNotReOrg),
+    Error(E),
+}
+
+impl<E> From<DoNotReOrg> for ProposerHeadError<E> {
+    fn from(e: DoNotReOrg) -> ProposerHeadError<E> {
+        Self::DoNotReOrg(e)
+    }
+}
+
+impl From<Error> for ProposerHeadError<Error> {
+    fn from(e: Error) -> Self {
+        Self::Error(e)
+    }
+}
+
+impl<E1> ProposerHeadError<E1> {
+    pub fn convert_inner_error<E2>(self) -> ProposerHeadError<E2>
+    where
+        E2: From<E1>,
+    {
+        self.map_inner_error(E2::from)
+    }
+
+    pub fn map_inner_error<E2>(self, f: impl FnOnce(E1) -> E2) -> ProposerHeadError<E2> {
+        match self {
+            ProposerHeadError::DoNotReOrg(reason) => ProposerHeadError::DoNotReOrg(reason),
+            ProposerHeadError::Error(error) => ProposerHeadError::Error(f(error)),
+        }
+    }
+}
+
+/// Reasons why a re-org should not be attempted.
+///
+/// This type intentionally does not implement `Debug` so that the `Display` impl must be used.
+#[derive(Clone, PartialEq)]
+pub enum DoNotReOrg {
+    MissingHeadOrParentNode,
+    MissingHeadFinalizedCheckpoint,
+    ParentDistance,
+    HeadDistance,
+    ShufflingUnstable,
+    JustificationAndFinalizationNotCompetitive,
+    ChainNotFinalizing {
+        epochs_since_finalization: u64,
+    },
+    HeadNotWeak {
+        head_weight: u64,
+        re_org_weight_threshold: u64,
+    },
+    HeadNotLate,
+    NotProposing,
+    ReOrgsDisabled,
+}
+
+impl std::fmt::Display for DoNotReOrg {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        match self {
+            Self::MissingHeadOrParentNode => write!(f, "unknown head or parent"),
+            Self::MissingHeadFinalizedCheckpoint => write!(f, "finalized checkpoint missing"),
+            Self::ParentDistance => write!(f, "parent too far from head"),
+            Self::HeadDistance => write!(f, "head too far from current slot"),
+            Self::ShufflingUnstable => write!(f, "shuffling unstable at epoch boundary"),
+            Self::JustificationAndFinalizationNotCompetitive => {
+                write!(f, "justification or finalization not competitive")
+            }
+            Self::ChainNotFinalizing {
+                epochs_since_finalization,
+            } => write!(
+                f,
+                "chain not finalizing ({epochs_since_finalization} epochs since finalization)"
+            ),
+            Self::HeadNotWeak {
+                head_weight,
+                re_org_weight_threshold,
+            } => {
+                write!(f, "head not weak ({head_weight}/{re_org_weight_threshold})")
+            }
+            Self::HeadNotLate => {
+                write!(f, "head arrived on time")
+            }
+            Self::NotProposing => {
+                write!(f, "not proposing at next slot")
+            }
+            Self::ReOrgsDisabled => {
+                write!(f, "re-orgs disabled in config")
+            }
+        }
+    }
+}
+
+/// New-type for the re-org threshold percentage.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct ReOrgThreshold(pub u64);
+
 #[derive(PartialEq)]
 pub struct ProtoArrayForkChoice {
     pub(crate) proto_array: ProtoArray,
     pub(crate) votes: ElasticList<VoteTracker>,
-    pub(crate) balances: Vec<u64>,
+    pub(crate) balances: JustifiedBalances,
 }
 
 impl ProtoArrayForkChoice {
@@ -223,7 +343,7 @@ impl ProtoArrayForkChoice {
         Ok(Self {
             proto_array,
             votes: ElasticList::default(),
-            balances: vec![],
+            balances: JustifiedBalances::default(),
         })
     }
 
@@ -282,21 +402,21 @@ impl ProtoArrayForkChoice {
         &mut self,
         justified_checkpoint: Checkpoint,
         finalized_checkpoint: Checkpoint,
-        justified_state_balances: &[u64],
+        justified_state_balances: &JustifiedBalances,
         proposer_boost_root: Hash256,
         equivocating_indices: &BTreeSet<u64>,
         current_slot: Slot,
         spec: &ChainSpec,
     ) -> Result<Hash256, String> {
         let old_balances = &mut self.balances;
         let new_balances = justified_state_balances;
 
         let deltas = compute_deltas(
             &self.proto_array.indices,
             &mut self.votes,
-            old_balances,
-            new_balances,
+            &old_balances.effective_balances,
+            &new_balances.effective_balances,
             equivocating_indices,
         )
         .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?;
@@ -313,13 +432,129 @@ impl ProtoArrayForkChoice {
         )
         .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?;
 
-        *old_balances = new_balances.to_vec();
+        *old_balances = new_balances.clone();
 
         self.proto_array
             .find_head::<E>(&justified_checkpoint.root, current_slot)
             .map_err(|e| format!("find_head failed: {:?}", e))
     }
 
+    /// Get the block to propose on during `current_slot`.
+    ///
+    /// This function returns a *definitive* result which should be acted on.
+    pub fn get_proposer_head<E: EthSpec>(
+        &self,
+        current_slot: Slot,
+        canonical_head: Hash256,
+        justified_balances: &JustifiedBalances,
+        re_org_threshold: ReOrgThreshold,
+        max_epochs_since_finalization: Epoch,
+    ) -> Result<ProposerHeadInfo, ProposerHeadError<Error>> {
+        let info = self.get_proposer_head_info::<E>(
+            current_slot,
+            canonical_head,
+            justified_balances,
+            re_org_threshold,
+            max_epochs_since_finalization,
+        )?;
+
+        // Only re-org a single slot. This prevents cascading failures during asynchrony.
+        let head_slot_ok = info.head_node.slot + 1 == current_slot;
+        if !head_slot_ok {
+            return Err(DoNotReOrg::HeadDistance.into());
+        }
+
+        // Only re-org if the head's weight is less than the configured committee fraction.
+        let head_weight = info.head_node.weight;
+        let re_org_weight_threshold = info.re_org_weight_threshold;
+        let weak_head = head_weight < re_org_weight_threshold;
+        if !weak_head {
+            return Err(DoNotReOrg::HeadNotWeak {
+                head_weight,
+                re_org_weight_threshold,
+            }
+            .into());
+        }
+
+        // All checks have passed, build upon the parent to re-org the head.
+        Ok(info)
+    }
+
+    /// Get information about the block to propose on during `current_slot`.
+    ///
+    /// This function returns a *partial* result which must be processed further.
+    pub fn get_proposer_head_info<E: EthSpec>(
+        &self,
+        current_slot: Slot,
+        canonical_head: Hash256,
+        justified_balances: &JustifiedBalances,
+        re_org_threshold: ReOrgThreshold,
+        max_epochs_since_finalization: Epoch,
+    ) -> Result<ProposerHeadInfo, ProposerHeadError<Error>> {
+        let mut nodes = self
+            .proto_array
+            .iter_nodes(&canonical_head)
+            .take(2)
+            .cloned()
+            .collect::<Vec<_>>();
+
+        let parent_node = nodes.pop().ok_or(DoNotReOrg::MissingHeadOrParentNode)?;
+        let head_node = nodes.pop().ok_or(DoNotReOrg::MissingHeadOrParentNode)?;
+
+        let parent_slot = parent_node.slot;
+        let head_slot = head_node.slot;
+        let re_org_block_slot = head_slot + 1;
+
+        // Check finalization distance.
+ let proposal_epoch = re_org_block_slot.epoch(E::slots_per_epoch()); + let finalized_epoch = head_node + .unrealized_finalized_checkpoint + .ok_or(DoNotReOrg::MissingHeadFinalizedCheckpoint)? + .epoch; + let epochs_since_finalization = proposal_epoch.saturating_sub(finalized_epoch).as_u64(); + if epochs_since_finalization > max_epochs_since_finalization.as_u64() { + return Err(DoNotReOrg::ChainNotFinalizing { + epochs_since_finalization, + } + .into()); + } + + // Check parent distance from head. + // Do not check head distance from current slot, as that condition needs to be + // late-evaluated and is elided when `current_slot == head_slot`. + let parent_slot_ok = parent_slot + 1 == head_slot; + if !parent_slot_ok { + return Err(DoNotReOrg::ParentDistance.into()); + } + + // Check shuffling stability. + let shuffling_stable = re_org_block_slot % E::slots_per_epoch() != 0; + if !shuffling_stable { + return Err(DoNotReOrg::ShufflingUnstable.into()); + } + + // Check FFG. + let ffg_competitive = parent_node.unrealized_justified_checkpoint + == head_node.unrealized_justified_checkpoint + && parent_node.unrealized_finalized_checkpoint + == head_node.unrealized_finalized_checkpoint; + if !ffg_competitive { + return Err(DoNotReOrg::JustificationAndFinalizationNotCompetitive.into()); + } + + // Compute re-org weight threshold. + let re_org_weight_threshold = + calculate_committee_fraction::(justified_balances, re_org_threshold.0) + .ok_or(Error::ReOrgThresholdOverflow)?; + + Ok(ProposerHeadInfo { + head_node, + parent_node, + re_org_weight_threshold, + current_slot, + }) + } + /// Returns `true` if there are any blocks in `self` with an `INVALID` execution payload status. /// /// This will operate on *all* blocks, even those that do not descend from the finalized @@ -368,7 +603,7 @@ impl ProtoArrayForkChoice { if vote.current_root == node.root { // Any voting validator that does not have a balance should be // ignored. This is consistent with `compute_deltas`. - self.balances.get(validator_index) + self.balances.effective_balances.get(validator_index) } else { None } @@ -382,9 +617,11 @@ impl ProtoArrayForkChoice { // Compute the score based upon the current balances. We can't rely on // the `previous_proposr_boost.score` since it is set to zero with an // invalid node. - let proposer_score = - calculate_proposer_boost::(&self.balances, proposer_score_boost) - .ok_or("Failed to compute proposer boost")?; + let proposer_score = calculate_committee_fraction::( + &self.balances, + proposer_score_boost, + ) + .ok_or("Failed to compute proposer boost")?; // Store the score we've applied here so it can be removed in // a later call to `apply_score_changes`. self.proto_array.previous_proposer_boost.score = proposer_score; @@ -538,10 +775,11 @@ impl ProtoArrayForkChoice { bytes: &[u8], count_unrealized_full: CountUnrealizedFull, ) -> Result { - SszContainer::from_ssz_bytes(bytes) - .map(|container| (container, count_unrealized_full)) - .map(Into::into) - .map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e)) + let container = SszContainer::from_ssz_bytes(bytes) + .map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e))?; + (container, count_unrealized_full) + .try_into() + .map_err(|e| format!("Failed to initialize ProtoArrayForkChoice: {e:?}")) } /// Returns a read-lock to core `ProtoArray` struct. 
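
For intuition, the `calculate_committee_fraction` threshold used above can be worked through with hypothetical round numbers. This is an illustrative sketch, not part of the patch; the validator count and balances are assumed for the sake of arithmetic:

```rust
// Worked example of the re-org weight threshold, assuming 500k active
// validators each with a 32 ETH effective balance (illustrative numbers).
fn main() {
    let num_active_validators: u64 = 500_000;
    let average_balance: u64 = 32_000_000_000; // 32 ETH in Gwei
    let slots_per_epoch: u64 = 32;

    // Mirrors calculate_committee_fraction: the per-slot committee size
    // times the average balance, scaled by the threshold percentage.
    let committee_size = num_active_validators / slots_per_epoch; // 15_625
    let committee_weight = committee_size * average_balance; // ~500_000 ETH
    let re_org_weight_threshold = committee_weight * 20 / 100; // default 20%

    // A head must attract less than ~100_000 ETH of attesting weight
    // (expressed in Gwei) to be considered weak enough to re-org.
    assert_eq!(re_org_weight_threshold, 100_000_000_000_000);
}
```
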
diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 63f75ed0a2f..1a20ef967ad 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -2,10 +2,12 @@ use crate::proto_array::ProposerBoost; use crate::{ proto_array::{CountUnrealizedFull, ProtoArray, ProtoNode}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, + Error, JustifiedBalances, }; use ssz::{four_byte_option_impl, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; +use std::convert::TryFrom; use types::{Checkpoint, Hash256}; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union @@ -30,7 +32,7 @@ impl From<&ProtoArrayForkChoice> for SszContainer { Self { votes: from.votes.0.clone(), - balances: from.balances.clone(), + balances: from.balances.effective_balances.clone(), prune_threshold: proto_array.prune_threshold, justified_checkpoint: proto_array.justified_checkpoint, finalized_checkpoint: proto_array.finalized_checkpoint, @@ -41,8 +43,12 @@ impl From<&ProtoArrayForkChoice> for SszContainer { } } -impl From<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { - fn from((from, count_unrealized_full): (SszContainer, CountUnrealizedFull)) -> Self { +impl TryFrom<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { + type Error = Error; + + fn try_from( + (from, count_unrealized_full): (SszContainer, CountUnrealizedFull), + ) -> Result { let proto_array = ProtoArray { prune_threshold: from.prune_threshold, justified_checkpoint: from.justified_checkpoint, @@ -53,10 +59,10 @@ impl From<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { count_unrealized_full, }; - Self { + Ok(Self { proto_array, votes: ElasticList(from.votes), - balances: from.balances, - } + balances: JustifiedBalances::from_effective_balances(from.balances)?, + }) } } diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 9d548b0499a..de6039f35a0 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -597,6 +597,14 @@ fn main() { .takes_value(true) .help("The genesis time when generating a genesis state."), ) + .arg( + Arg::with_name("proposer-score-boost") + .long("proposer-score-boost") + .value_name("INTEGER") + .takes_value(true) + .help("The proposer score boost to apply as a percentage, e.g. 70 = 70%"), + ) + ) .subcommand( SubCommand::with_name("check-deposit-data") diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 5254ff5a62e..b2760829cb8 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -58,6 +58,10 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul spec.genesis_fork_version = v; } + if let Some(proposer_score_boost) = parse_optional(matches, "proposer-score-boost")? { + spec.proposer_score_boost = Some(proposer_score_boost); + } + if let Some(fork_epoch) = parse_optional(matches, "altair-fork-epoch")? 
{ spec.altair_fork_epoch = Some(fork_epoch); } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index d39235cb136..07c583da5cb 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1,6 +1,9 @@ use beacon_node::{beacon_chain::CountUnrealizedFull, ClientConfig as Config}; use crate::exec::{CommandLineTestExec, CompletedTest}; +use beacon_node::beacon_chain::chain_config::{ + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, +}; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; use std::fs::File; @@ -10,6 +13,7 @@ use std::path::PathBuf; use std::process::Command; use std::str::FromStr; use std::string::ToString; +use std::time::Duration; use tempfile::TempDir; use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec}; use unused_port::{unused_tcp_port, unused_udp_port}; @@ -153,6 +157,31 @@ fn checkpoint_sync_url_timeout_default() { }); } +#[test] +fn prepare_payload_lookahead_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.prepare_payload_lookahead, + Duration::from_secs(4), + ) + }); +} + +#[test] +fn prepare_payload_lookahead_shorter() { + CommandLineTest::new() + .flag("prepare-payload-lookahead", Some("1500")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.prepare_payload_lookahead, + Duration::from_millis(1500) + ) + }); +} + #[test] fn paranoid_block_proposal_default() { CommandLineTest::new() @@ -1500,6 +1529,49 @@ fn ensure_panic_on_failed_launch() { }); } +#[test] +fn enable_proposer_re_orgs_default() { + CommandLineTest::new().run().with_config(|config| { + assert_eq!( + config.chain.re_org_threshold, + Some(DEFAULT_RE_ORG_THRESHOLD) + ); + assert_eq!( + config.chain.re_org_max_epochs_since_finalization, + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + ); + }); +} + +#[test] +fn disable_proposer_re_orgs() { + CommandLineTest::new() + .flag("disable-proposer-reorgs", None) + .run() + .with_config(|config| assert_eq!(config.chain.re_org_threshold, None)); +} + +#[test] +fn proposer_re_org_threshold() { + CommandLineTest::new() + .flag("proposer-reorg-threshold", Some("90")) + .run() + .with_config(|config| assert_eq!(config.chain.re_org_threshold.unwrap().0, 90)); +} + +#[test] +fn proposer_re_org_max_epochs_since_finalization() { + CommandLineTest::new() + .flag("proposer-reorg-epochs-since-finalization", Some("8")) + .run() + .with_config(|config| { + assert_eq!( + config.chain.re_org_max_epochs_since_finalization.as_u64(), + 8 + ) + }); +} + #[test] fn monitoring_endpoint() { CommandLineTest::new() diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 6608b7ca64f..f0ed4f737d4 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -389,6 +389,24 @@ fn no_doppelganger_protection_flag() { .with_config(|config| assert!(!config.enable_doppelganger_protection)); } #[test] +fn block_delay_ms() { + CommandLineTest::new() + .flag("block-delay-ms", Some("2000")) + .run() + .with_config(|config| { + assert_eq!( + config.block_delay, + Some(std::time::Duration::from_millis(2000)) + ) + }); +} +#[test] +fn no_block_delay_ms() { + CommandLineTest::new() + .run() + .with_config(|config| assert_eq!(config.block_delay, None)); +} +#[test] fn no_gas_limit_flag() { CommandLineTest::new() .run() diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index 
a1348363a9b..82336984afb 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -36,6 +36,7 @@ lcli \ --eth1-follow-distance 1 \ --seconds-per-slot $SECONDS_PER_SLOT \ --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ + --proposer-score-boost "$PROPOSER_SCORE_BOOST" \ --force echo Specification generated at $TESTNET_DIR. diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index b6ea89794f0..2506e9e1cdf 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -44,5 +44,8 @@ SECONDS_PER_SLOT=3 # Seconds per Eth1 block SECONDS_PER_ETH1_BLOCK=1 +# Proposer score boost percentage +PROPOSER_SCORE_BOOST=70 + # Command line arguments for validator client VC_ARGS="" diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index f0d2c9081f0..d4acbe7563d 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -12,7 +12,9 @@ use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; +use std::time::Duration; use tokio::sync::mpsc; +use tokio::time::sleep; use types::{BlindedPayload, BlockType, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot}; #[derive(Debug)] @@ -44,6 +46,7 @@ pub struct BlockServiceBuilder { context: Option>, graffiti: Option, graffiti_file: Option, + block_delay: Option, } impl BlockServiceBuilder { @@ -55,6 +58,7 @@ impl BlockServiceBuilder { context: None, graffiti: None, graffiti_file: None, + block_delay: None, } } @@ -88,6 +92,11 @@ impl BlockServiceBuilder { self } + pub fn block_delay(mut self, block_delay: Option) -> Self { + self.block_delay = block_delay; + self + } + pub fn build(self) -> Result, String> { Ok(BlockService { inner: Arc::new(Inner { @@ -105,6 +114,7 @@ impl BlockServiceBuilder { .ok_or("Cannot build BlockService without runtime_context")?, graffiti: self.graffiti, graffiti_file: self.graffiti_file, + block_delay: self.block_delay, }), }) } @@ -118,6 +128,7 @@ pub struct Inner { context: RuntimeContext, graffiti: Option, graffiti_file: Option, + block_delay: Option, } /// Attempts to produce attestations for any block producer(s) at the start of the epoch. @@ -162,6 +173,16 @@ impl BlockService { async move { while let Some(notif) = notification_rx.recv().await { let service = self.clone(); + + if let Some(delay) = service.block_delay { + debug!( + service.context.log(), + "Delaying block production by {}ms", + delay.as_millis() + ); + sleep(delay).await; + } + service.do_update(notif).await.ok(); } debug!(log, "Block service shutting down"); diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index ef2e66676a5..c82a1a9d362 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -308,5 +308,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { by this validator client. Note this will not necessarily be used if the gas limit \ set here moves too far from the previous block's gas limit. [default: 30,000,000]") .requires("builder-proposals"), - ) + ) + /* + * Experimental/development options. + */ + .arg( + Arg::with_name("block-delay-ms") + .long("block-delay-ms") + .value_name("MILLIS") + .hidden(true) + .help("Time to delay block production from the start of the slot. 
Should only be \ + used for testing.") + .takes_value(true), + ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 277a4bd8ded..22741dabbd7 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -13,6 +13,7 @@ use slog::{info, warn, Logger}; use std::fs; use std::net::IpAddr; use std::path::PathBuf; +use std::time::Duration; use types::{Address, GRAFFITI_BYTES_LEN}; pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; @@ -61,6 +62,10 @@ pub struct Config { /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. pub beacon_nodes_tls_certs: Option>, + /// Delay from the start of the slot to wait before publishing a block. + /// + /// This is *not* recommended in prod and should only be used for testing. + pub block_delay: Option, /// Disables publishing http api requests to all beacon nodes for select api calls. pub disable_run_on_all: bool, } @@ -95,6 +100,7 @@ impl Default for Config { monitoring_api: None, enable_doppelganger_protection: false, beacon_nodes_tls_certs: None, + block_delay: None, builder_proposals: false, builder_registration_timestamp_override: None, gas_limit: None, @@ -341,6 +347,13 @@ impl Config { ); } + /* + * Experimental + */ + if let Some(delay_ms) = parse_optional::(cli_args, "block-delay-ms")? { + config.block_delay = Some(Duration::from_millis(delay_ms)); + } + Ok(config) } } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 819efec93c9..4db9804054a 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -427,6 +427,7 @@ impl ProductionValidatorClient { .runtime_context(context.service_context("block".into())) .graffiti(config.graffiti) .graffiti_file(config.graffiti_file.clone()) + .block_delay(config.block_delay) .build()?; let attestation_service = AttestationServiceBuilder::new() From b1c33361ea1559fd32bd23f1c3e5940b320a3ae9 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Tue, 13 Dec 2022 10:50:24 -0600 Subject: [PATCH 11/17] Fixed Clippy Complaints & Some Failing Tests (#3791) * Fixed Clippy Complaints & Some Failing Tests * Update Dockerfile to Rust-1.65 * EF test file renamed * Touch up comments based on feedback --- Dockerfile | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 10 ++-- .../src/engine_api/json_structures.rs | 6 +-- beacon_node/execution_layer/src/engines.rs | 2 +- beacon_node/execution_layer/src/lib.rs | 2 +- .../src/test_utils/handle_rpc.rs | 27 ++++++++-- beacon_node/http_api/src/publish_blocks.rs | 2 +- .../lighthouse_network/src/types/pubsub.rs | 6 +-- beacon_node/operation_pool/src/lib.rs | 4 ++ consensus/types/src/payload.rs | 49 +++++++++++-------- lcli/src/new_testnet.rs | 4 +- .../src/cases/merkle_proof_validity.rs | 2 +- testing/ef_tests/src/cases/operations.rs | 6 +-- testing/ef_tests/src/lib.rs | 4 +- testing/ef_tests/tests/tests.rs | 2 + 15 files changed, 79 insertions(+), 49 deletions(-) diff --git a/Dockerfile b/Dockerfile index 72423b17c68..7a0602a2213 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.62.1-bullseye AS builder +FROM rust:1.65.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . 
lighthouse ARG FEATURES diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0bbbe92356c..fcd097d4d3f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2205,8 +2205,10 @@ impl BeaconChain { .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?) } + // TODO: remove this whole block once withdrawals-processing is removed #[cfg(not(feature = "withdrawals-processing"))] { + #[allow(clippy::drop_non_drop)] drop(bls_to_execution_change); Ok(ObservationOutcome::AlreadyKnown) } @@ -4342,17 +4344,17 @@ impl BeaconChain { // Might implement caching here in the future.. let prepare_state = self .state_at_slot(prepare_slot, StateSkipConfig::WithoutStateRoots) - .or_else(|e| { + .map_err(|e| { error!(self.log, "State advance for withdrawals failed"; "error" => ?e); - Err(e) + e })?; Some(get_expected_withdrawals(&prepare_state, &self.spec)) } } .transpose() - .or_else(|e| { + .map_err(|e| { error!(self.log, "Error preparing beacon proposer"; "error" => ?e); - Err(e) + e }) .map(|withdrawals_opt| withdrawals_opt.map(|w| w.into())) .map_err(Error::PrepareProposerFailed)?; diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 1b125cde44d..ea2bb4941d1 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -176,7 +176,7 @@ impl JsonExecutionPayload { .collect::>() .into() }) - .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadCapella".to_string()))? + .ok_or_else(|| Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadCapella".to_string()))? })), ForkName::Eip4844 => Ok(ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { parent_hash: v2.parent_hash, @@ -191,7 +191,7 @@ impl JsonExecutionPayload { timestamp: v2.timestamp, extra_data: v2.extra_data, base_fee_per_gas: v2.base_fee_per_gas, - excess_data_gas: v2.excess_data_gas.ok_or(Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?, + excess_data_gas: v2.excess_data_gas.ok_or_else(|| Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?, block_hash: v2.block_hash, transactions: v2.transactions, #[cfg(feature = "withdrawals")] @@ -204,7 +204,7 @@ impl JsonExecutionPayload { .collect::>() .into() }) - .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))? + .ok_or_else(|| Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))? 
})), _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV2 for {}", fork_name))), } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 16562267ca4..271cca26cba 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -342,7 +342,7 @@ impl Engine { impl PayloadIdCacheKey { fn new(head_block_hash: &ExecutionBlockHash, attributes: &PayloadAttributes) -> Self { Self { - head_block_hash: head_block_hash.clone(), + head_block_hash: *head_block_hash, payload_attributes: attributes.clone(), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index b6e85f67dcd..a97bbc4faf0 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1582,7 +1582,7 @@ impl ExecutionLayer { let transactions = VariableList::new( block .transactions() - .into_iter() + .iter() .map(|transaction| VariableList::new(transaction.rlp().to_vec())) .collect::>() .map_err(ApiError::DeserializeTransaction)?, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index fe765cc9495..c83aeccdc5f 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -74,11 +74,29 @@ pub async fn handle_rpc( .unwrap()) } } - ENGINE_NEW_PAYLOAD_V1 => { - let request: JsonExecutionPayload = get_param(params, 0)?; + ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => { + let request = match method { + ENGINE_NEW_PAYLOAD_V1 => { + JsonExecutionPayload::V1(get_param::>(params, 0)?) + } + ENGINE_NEW_PAYLOAD_V2 => { + JsonExecutionPayload::V2(get_param::>(params, 0)?) + } + _ => unreachable!(), + }; + let fork = match request { + JsonExecutionPayload::V1(_) => ForkName::Merge, + JsonExecutionPayload::V2(ref payload) => { + if payload.withdrawals.is_none() { + ForkName::Merge + } else { + ForkName::Capella + } + } + }; // Canned responses set by block hash take priority. - if let Some(status) = ctx.get_new_payload_status(&request.block_hash()) { + if let Some(status) = ctx.get_new_payload_status(request.block_hash()) { return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()); } @@ -97,8 +115,7 @@ pub async fn handle_rpc( Some( ctx.execution_block_generator .write() - // FIXME: should this worry about other forks? 
- .new_payload(request.try_into_execution_payload(ForkName::Merge).unwrap()), + .new_payload(request.try_into_execution_payload(fork).unwrap()), ) } else { None diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index fb296168db0..c471da7d584 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -41,7 +41,7 @@ pub async fn publish_block( )) } else { //TODO(pawan): return an empty sidecar instead - return Err(warp_utils::reject::broadcast_without_import(format!(""))); + return Err(warp_utils::reject::broadcast_without_import(String::new())); } } _ => PubsubMessage::BeaconBlock(block.clone()), diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 02f2bfff1df..9cce98db946 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -224,12 +224,10 @@ impl PubsubMessage { | ForkName::Merge | ForkName::Capella, ) - | None => { - return Err(format!( + | None => Err(format!( "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}", gossip_topic.fork_digest - )) - } + )), } } GossipKind::VoluntaryExit => { diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 159454b9e98..37fa6893873 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -558,8 +558,10 @@ impl OperationPool { ) } + // TODO: remove this whole block once withdrwals-processing is removed #[cfg(not(feature = "withdrawals-processing"))] { + #[allow(clippy::drop_copy)] drop((state, spec)); vec![] } @@ -597,8 +599,10 @@ impl OperationPool { ); } + // TODO: remove this whole block once withdrwals-processing is removed #[cfg(not(feature = "withdrawals-processing"))] { + #[allow(clippy::drop_copy)] drop((head_block, head_state, spec)); } } diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index dba94cfd7c1..2d9e37b81ab 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -261,7 +261,7 @@ impl ExecPayload for FullPayload { }) } - fn is_default_with_empty_roots<'a>(&'a self) -> bool { + fn is_default_with_empty_roots(&self) -> bool { // For full payloads the empty/zero distinction does not exist. self.is_default_with_zero_roots() } @@ -536,7 +536,7 @@ impl ExecPayload for BlindedPayload { } } - fn is_default_with_zero_roots<'a>(&'a self) -> bool { + fn is_default_with_zero_roots(&self) -> bool { self.to_ref().is_default_with_zero_roots() } @@ -643,13 +643,13 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { } macro_rules! impl_exec_payload_common { - ($wrapper_type:ident, - $wrapped_type:ident, - $wrapped_type_full:ident, - $wrapped_type_header:ident, - $wrapped_field:ident, - $fork_variant:ident, - $block_type_variant:ident, + ($wrapper_type:ident, // BlindedPayloadMerge | FullPayloadMerge + $wrapped_type:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadMerge + $wrapped_type_full:ident, // ExecutionPayloadMerge | ExecutionPayloadMerge + $wrapped_type_header:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadHeaderMerge + $wrapped_field:ident, // execution_payload_header | execution_payload + $fork_variant:ident, // Merge | Merge + $block_type_variant:ident, // Blinded | Full $f:block, $g:block) => { impl ExecPayload for $wrapper_type { @@ -696,7 +696,15 @@ macro_rules! 
 }
 
             fn is_default_with_empty_roots(&self) -> bool {
-                self.$wrapped_field == $wrapped_type::from($wrapped_type_full::default())
+                // FIXME: is there a better way than ignoring this lint?
+                // This is necessary because the first invocation of this macro might expand to:
+                // self.execution_payload_header == ExecutionPayloadHeaderMerge::from(ExecutionPayloadMerge::default())
+                // but the second invocation might expand to:
+                // self.execution_payload == ExecutionPayloadMerge::from(ExecutionPayloadMerge::default())
+                #[allow(clippy::cmp_owned)]
+                {
+                    self.$wrapped_field == $wrapped_type::from($wrapped_type_full::default())
+                }
             }
 
             fn transactions(&self) -> Option<&Transactions<T>> {
@@ -720,16 +728,17 @@ macro_rules! impl_exec_payload_common {
 }
 
 macro_rules! impl_exec_payload_for_fork {
+    // BlindedPayloadMerge, FullPayloadMerge, ExecutionPayloadHeaderMerge, ExecutionPayloadMerge, Merge
     ($wrapper_type_header:ident, $wrapper_type_full:ident, $wrapped_type_header:ident, $wrapped_type_full:ident, $fork_variant:ident) => {
         //*************** Blinded payload implementations ******************//
 
         impl_exec_payload_common!(
-            $wrapper_type_header,
-            $wrapped_type_header,
-            $wrapped_type_full,
-            $wrapped_type_header,
+            $wrapper_type_header, // BlindedPayloadMerge
+            $wrapped_type_header, // ExecutionPayloadHeaderMerge
+            $wrapped_type_full,   // ExecutionPayloadMerge
+            $wrapped_type_header, // ExecutionPayloadHeaderMerge
             execution_payload_header,
-            $fork_variant,
+            $fork_variant, // Merge
             Blinded,
             { |_| { None } },
             {
@@ -794,12 +803,12 @@ macro_rules! impl_exec_payload_for_fork {
 
         //*************** Full payload implementations ******************//
 
         impl_exec_payload_common!(
-            $wrapper_type_full,
-            $wrapped_type_full,
-            $wrapped_type_full,
-            $wrapped_type_header,
+            $wrapper_type_full,   // FullPayloadMerge
+            $wrapped_type_full,   // ExecutionPayloadMerge
+            $wrapped_type_full,   // ExecutionPayloadMerge
+            $wrapped_type_header, // ExecutionPayloadHeaderMerge
             execution_payload,
-            $fork_variant,
+            $fork_variant, // Merge
             Full,
             {
                 let c: for<'a> fn(&'a $wrapper_type_full<T>) -> Option<&'a Transactions<T>> =
diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs
index 69356045724..58a7c49b397 100644
--- a/lcli/src/new_testnet.rs
+++ b/lcli/src/new_testnet.rs
@@ -87,9 +87,9 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul
         execution_payload_header.as_ref()
     {
         let eth1_block_hash =
-            parse_optional(matches, "eth1-block-hash")?.unwrap_or(payload.block_hash());
+            parse_optional(matches, "eth1-block-hash")?.unwrap_or_else(|| payload.block_hash());
         let genesis_time =
-            parse_optional(matches, "genesis-time")?.unwrap_or(payload.timestamp());
+            parse_optional(matches, "genesis-time")?.unwrap_or_else(|| payload.timestamp());
         (eth1_block_hash, genesis_time)
     } else {
         let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| {
diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs
index a57abc2e070..c180774bb64 100644
--- a/testing/ef_tests/src/cases/merkle_proof_validity.rs
+++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs
@@ -29,7 +29,7 @@ pub struct MerkleProofValidity<E: EthSpec> {
 impl<E: EthSpec> LoadCase for MerkleProofValidity<E> {
     fn load_from_dir(path: &Path, fork_name: ForkName) -> Result<Self, Error> {
         let spec = &testing_spec::<E>(fork_name);
-        let state = ssz_decode_state(&path.join("state.ssz_snappy"), spec)?;
+        let state = ssz_decode_state(&path.join("object.ssz_snappy"), spec)?;
         let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?;
         // Metadata does not exist in these tests but it is left like this just in case.
         let meta_path = path.join("meta.yaml");
diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs
index 431fd829f67..f5487a6940d 100644
--- a/testing/ef_tests/src/cases/operations.rs
+++ b/testing/ef_tests/src/cases/operations.rs
@@ -21,8 +21,6 @@ use state_processing::{
     ConsensusContext,
 };
 use std::fmt::Debug;
-#[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))]
-use std::marker::PhantomData;
 use std::path::Path;
 #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
 use types::SignedBlsToExecutionChange;
@@ -44,12 +42,10 @@ struct ExecutionMetadata {
 }
 
 /// Newtype for testing withdrawals.
+#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
 #[derive(Debug, Clone, Deserialize)]
 pub struct WithdrawalsPayload<T: EthSpec> {
-    #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
     payload: FullPayload<T>,
-    #[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))]
-    _phantom_data: PhantomData<T>,
 }
 
 #[derive(Debug, Clone)]
diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs
index d45b1e15c7a..fd3bf2bd1b5 100644
--- a/testing/ef_tests/src/lib.rs
+++ b/testing/ef_tests/src/lib.rs
@@ -1,9 +1,11 @@
 pub use case_result::CaseResult;
+#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
+pub use cases::WithdrawalsPayload;
 pub use cases::{
     Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates,
     JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates,
     RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset,
-    SyncCommitteeUpdates, WithdrawalsPayload,
+    SyncCommitteeUpdates,
 };
 pub use decode::log_file_access;
 pub use error::Error;
diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs
index f84be64dad9..0227b92ec86 100644
--- a/testing/ef_tests/tests/tests.rs
+++ b/testing/ef_tests/tests/tests.rs
@@ -82,12 +82,14 @@ fn operations_execution_payload_blinded() {
     OperationsHandler::<MainnetEthSpec, BlindedPayload<MainnetEthSpec>>::default().run();
 }
 
+#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
 #[test]
 fn operations_withdrawals() {
     OperationsHandler::<MinimalEthSpec, WithdrawalsPayload<MinimalEthSpec>>::default().run();
     OperationsHandler::<MainnetEthSpec, WithdrawalsPayload<MainnetEthSpec>>::default().run();
 }
 
+#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
 #[test]
 fn operations_bls_to_execution_change() {
     OperationsHandler::<MinimalEthSpec, SignedBlsToExecutionChange>::default().run();

From 07d6ef749a6adfcf5e7476449a59b13a40deced8 Mon Sep 17 00:00:00 2001
From: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
Date: Tue, 13 Dec 2022 18:49:30 -0600
Subject: [PATCH 12/17] Fixed Payload Reconstruction Bug (#3796)
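
This patch threads each block's fork name through payload reconstruction so the
execution layer decodes the EL response into the correct `ExecutionPayload`
variant instead of assuming Merge. As a minimal sketch of the new call path
(assuming `chain`, a `blinded_block`, and its `exec_block_hash` are already in
scope; error variants as in the diff below):

```rust
// Sketch only: reconstruct the full payload for a blinded block.
// The fork is now derived from the block itself instead of hard-coded to Merge.
let fork = blinded_block.fork_name(&chain.spec)?;
let payload = chain
    .execution_layer
    .as_ref()
    .ok_or(Error::ExecutionLayerMissing)?
    // `fork` selects which ExecutionPayload variant the EL response decodes into
    .get_payload_by_block_hash(exec_block_hash, fork)
    .await
    .map_err(|e| Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, e))?
    .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?;
```
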
---
 beacon_node/beacon_chain/src/beacon_chain.rs |  3 +-
 beacon_node/beacon_chain/src/errors.rs       |  2 +
 .../execution_layer/src/engine_api/http.rs   | 39 ++++++++++++++++---
 beacon_node/execution_layer/src/lib.rs       | 20 ++++++++--
 .../src/test_rig.rs                          |  3 +-
 5 files changed, 55 insertions(+), 12 deletions(-)

diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index fcd097d4d3f..e51cdacf6a5 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -939,6 +939,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             Some(DatabaseBlock::Blinded(block)) => block,
             None => return Ok(None),
         };
+        let fork = blinded_block.fork_name(&self.spec)?;
 
         // If we only have a blinded block, load the execution payload from the EL.
         let block_message = blinded_block.message();
@@ -953,7 +954,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .execution_layer
             .as_ref()
             .ok_or(Error::ExecutionLayerMissing)?
-            .get_payload_by_block_hash(exec_block_hash)
+            .get_payload_by_block_hash(exec_block_hash, fork)
             .await
             .map_err(|e| Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, e))?
             .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?;
diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs
index 3a2e4a0bc53..5f1f0595ca7 100644
--- a/beacon_node/beacon_chain/src/errors.rs
+++ b/beacon_node/beacon_chain/src/errors.rs
@@ -207,6 +207,7 @@ pub enum BeaconChainError {
     CommitteePromiseFailed(oneshot_broadcast::Error),
     MaxCommitteePromises(usize),
     BlsToExecutionChangeBadFork(ForkName),
+    InconsistentFork(InconsistentFork),
 }
 
 easy_from_to!(SlotProcessingError, BeaconChainError);
@@ -230,6 +231,7 @@ easy_from_to!(ForkChoiceStoreError, BeaconChainError);
 easy_from_to!(HistoricalBlockError, BeaconChainError);
 easy_from_to!(StateAdvanceError, BeaconChainError);
 easy_from_to!(BlockReplayError, BeaconChainError);
+easy_from_to!(InconsistentFork, BeaconChainError);
 
 #[derive(Debug)]
 pub enum BlockProductionError {
diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs
index 8eef7aece3f..c71cfa0c04b 100644
--- a/beacon_node/execution_layer/src/engine_api/http.rs
+++ b/beacon_node/execution_layer/src/engine_api/http.rs
@@ -664,14 +664,41 @@ impl HttpJsonRpc {
     pub async fn get_block_by_hash_with_txns<T: EthSpec>(
         &self,
         block_hash: ExecutionBlockHash,
+        fork: ForkName,
     ) -> Result<Option<ExecutionBlockWithTransactions<T>>, Error> {
         let params = json!([block_hash, true]);
-        self.rpc_request(
-            ETH_GET_BLOCK_BY_HASH,
-            params,
-            ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
-        )
-        .await
+        Ok(Some(match fork {
+            ForkName::Merge => ExecutionBlockWithTransactions::Merge(
+                self.rpc_request(
+                    ETH_GET_BLOCK_BY_HASH,
+                    params,
+                    ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
+                )
+                .await?,
+            ),
+            ForkName::Capella => ExecutionBlockWithTransactions::Capella(
+                self.rpc_request(
+                    ETH_GET_BLOCK_BY_HASH,
+                    params,
+                    ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
+                )
+                .await?,
+            ),
+            ForkName::Eip4844 => ExecutionBlockWithTransactions::Eip4844(
+                self.rpc_request(
+                    ETH_GET_BLOCK_BY_HASH,
+                    params,
+                    ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
+                )
+                .await?,
+            ),
+            ForkName::Base | ForkName::Altair => {
+                return Err(Error::UnsupportedForkVariant(format!(
+                    "called get_block_by_hash_with_txns with fork {:?}",
+                    fork
+                )))
+            }
+        }))
     }
 
     pub async fn new_payload_v1<T: EthSpec>(
diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs
index a97bbc4faf0..2aaa7608e30 100644
--- a/beacon_node/execution_layer/src/lib.rs
+++ b/beacon_node/execution_layer/src/lib.rs
@@ -1550,10 +1550,11 @@ impl<T: EthSpec> ExecutionLayer<T> {
     pub async fn get_payload_by_block_hash(
         &self,
         hash: ExecutionBlockHash,
+        fork: ForkName,
     ) -> Result<Option<ExecutionPayload<T>>, Error> {
         self.engine()
             .request(|engine| async move {
-                self.get_payload_by_block_hash_from_engine(engine, hash)
+                self.get_payload_by_block_hash_from_engine(engine, hash, fork)
                     .await
             })
             .await
@@ -1565,15 +1566,26 @@
         &self,
         engine: &Engine,
         hash: ExecutionBlockHash,
+        fork: ForkName,
     ) -> Result<Option<ExecutionPayload<T>>, ApiError> {
         let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH);
 
         if hash == ExecutionBlockHash::zero() {
-            // FIXME: how to handle forks properly here?
-            return Ok(Some(ExecutionPayloadMerge::default().into()));
+            return match fork {
+                ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())),
+                ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())),
+                ForkName::Eip4844 => Ok(Some(ExecutionPayloadEip4844::default().into())),
+                ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant(
+                    format!("called get_payload_by_block_hash_from_engine with {}", fork),
+                )),
+            };
         }
 
-        let block = if let Some(block) = engine.api.get_block_by_hash_with_txns::<T>(hash).await? {
+        let block = if let Some(block) = engine
+            .api
+            .get_block_by_hash_with_txns::<T>(hash, fork)
+            .await?
+        {
             block
         } else {
             return Ok(None);
diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs
index 944e2fef6fe..4dab00689c9 100644
--- a/testing/execution_engine_integration/src/test_rig.rs
+++ b/testing/execution_engine_integration/src/test_rig.rs
@@ -616,7 +616,8 @@ async fn check_payload_reconstruction(
 ) {
     let reconstructed = ee
         .execution_layer
-        .get_payload_by_block_hash(payload.block_hash())
+        // FIXME: handle other forks here?
+        .get_payload_by_block_hash(payload.block_hash(), ForkName::Merge)
         .await
         .unwrap()
         .unwrap();

From 75dd8780e0a7d8e095a7d92dba8aab30308b40de Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Wed, 14 Dec 2022 11:52:46 +1100
Subject: [PATCH 13/17] Use JsonPayload for payload reconstruction (#3797)
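
The conversion from `ExecutionPayload<T>` to `ExecutionBlockWithTransactions<T>`
becomes fallible: RLP decoding failures now surface as `Error::RlpDecoderError`
instead of being silently swallowed into an empty transaction list, and
withdrawals round-trip through `JsonWithdrawal`. A hedged sketch of the
resulting call-site change (variable names are illustrative):

```rust
// Before: infallible conversion that discarded undecodable transactions.
// let block: ExecutionBlockWithTransactions<T> = payload.into();

// After: fallible conversion, so rlp::DecoderError propagates to the caller.
let block: ExecutionBlockWithTransactions<T> = payload.clone().try_into()?;
```
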
---
 beacon_node/execution_layer/src/engine_api.rs | 44 ++++++++++++-------
 beacon_node/execution_layer/src/lib.rs        | 20 +++++++--
 .../test_utils/execution_block_generator.rs   |  2 +-
 3 files changed, 46 insertions(+), 20 deletions(-)

diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs
index 988b04826eb..424ca30d137 100644
--- a/beacon_node/execution_layer/src/engine_api.rs
+++ b/beacon_node/execution_layer/src/engine_api.rs
@@ -1,10 +1,11 @@
 use crate::engines::ForkchoiceState;
 pub use ethers_core::types::Transaction;
-use ethers_core::utils::rlp::{Decodable, Rlp};
+use ethers_core::utils::rlp::{self, Decodable, Rlp};
 use http::deposit_methods::RpcError;
-pub use json_structures::TransitionConfigurationV1;
+pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1};
 use reqwest::StatusCode;
 use serde::{Deserialize, Serialize};
+use std::convert::TryFrom;
 use strum::IntoStaticStr;
 use superstruct::superstruct;
 pub use types::{
@@ -46,6 +47,7 @@ pub enum Error {
     RequiredMethodUnsupported(&'static str),
     UnsupportedForkVariant(String),
     BadConversion(String),
+    RlpDecoderError(rlp::DecoderError),
 }
 
 impl From for Error {
@@ -79,6 +81,12 @@ impl From for Error {
     }
 }
 
+impl From<rlp::DecoderError> for Error {
+    fn from(e: rlp::DecoderError) -> Self {
+        Error::RlpDecoderError(e)
+    }
+}
+
 #[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)]
 #[strum(serialize_all = "snake_case")]
 pub enum PayloadStatusV1Status {
@@ -159,12 +167,14 @@ pub struct ExecutionBlockWithTransactions<T: EthSpec> {
     pub transactions: Vec<Transaction>,
     #[cfg(feature = "withdrawals")]
     #[superstruct(only(Capella, Eip4844))]
-    pub withdrawals: Vec<Withdrawal>,
+    pub withdrawals: Vec<JsonWithdrawal>,
 }
 
-impl<T: EthSpec> From<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T> {
-    fn from(payload: ExecutionPayload<T>) -> Self {
-        match payload {
+impl<T: EthSpec> TryFrom<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T> {
+    type Error = Error;
+
+    fn try_from(payload: ExecutionPayload<T>) -> Result<Self, Error> {
+        let json_payload = match payload {
             ExecutionPayload::Merge(block) => Self::Merge(ExecutionBlockWithTransactionsMerge {
                 parent_hash: block.parent_hash,
                 fee_recipient: block.fee_recipient,
@@ -183,8 +193,7 @@ impl<T: EthSpec> From<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T>
                     .transactions
                     .iter()
                     .map(|tx| Transaction::decode(&Rlp::new(tx)))
-                    .collect::<Result<Vec<_>, _>>()
-                    .unwrap_or_else(|_| Vec::new()),
+                    .collect::<Result<Vec<_>, _>>()?,
             }),
             ExecutionPayload::Capella(block) => {
                 Self::Capella(ExecutionBlockWithTransactionsCapella {
@@ -205,10 +214,12 @@ impl<T: EthSpec> From<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T>
                     .transactions
                     .iter()
                     .map(|tx| Transaction::decode(&Rlp::new(tx)))
-                    .collect::<Result<Vec<_>, _>>()
-                    .unwrap_or_else(|_| Vec::new()),
+                    .collect::<Result<Vec<_>, _>>()?,
                     #[cfg(feature = "withdrawals")]
-                    withdrawals: block.withdrawals.into(),
+                    withdrawals: Vec::from(block.withdrawals)
+                        .into_iter()
+                        .map(|withdrawal| withdrawal.into())
+                        .collect(),
                 })
             }
             ExecutionPayload::Eip4844(block) => {
                 Self::Eip4844(ExecutionBlockWithTransactionsEip4844 {
@@ -231,13 +242,16 @@ impl<T: EthSpec> From<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T>
                     .transactions
                     .iter()
                     .map(|tx| Transaction::decode(&Rlp::new(tx)))
-                    .collect::<Result<Vec<_>, _>>()
-                    .unwrap_or_else(|_| Vec::new()),
+                    .collect::<Result<Vec<_>, _>>()?,
                     #[cfg(feature = "withdrawals")]
-                    withdrawals: block.withdrawals.into(),
+                    withdrawals: Vec::from(block.withdrawals)
+                        .into_iter()
+                        .map(|withdrawal| withdrawal.into())
+                        .collect(),
                 })
             }
-        }
+        };
+        Ok(json_payload)
     }
 }
diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs
index 2aaa7608e30..1980e82ce30 100644
--- a/beacon_node/execution_layer/src/lib.rs
+++ b/beacon_node/execution_layer/src/lib.rs
@@ -1622,8 +1622,14 @@
             }
             ExecutionBlockWithTransactions::Capella(capella_block) => {
                 #[cfg(feature = "withdrawals")]
-                let withdrawals = VariableList::new(capella_block.withdrawals.clone())
-                    .map_err(ApiError::DeserializeWithdrawals)?;
+                let withdrawals = VariableList::new(
+                    capella_block
+                        .withdrawals
+                        .into_iter()
+                        .map(|w| w.into())
+                        .collect(),
+                )
+                .map_err(ApiError::DeserializeWithdrawals)?;
 
                 ExecutionPayload::Capella(ExecutionPayloadCapella {
                     parent_hash: capella_block.parent_hash,
@@ -1646,8 +1652,14 @@
             }
             ExecutionBlockWithTransactions::Eip4844(eip4844_block) => {
                 #[cfg(feature = "withdrawals")]
-                let withdrawals = VariableList::new(eip4844_block.withdrawals.clone())
-                    .map_err(ApiError::DeserializeWithdrawals)?;
+                let withdrawals = VariableList::new(
+                    eip4844_block
+                        .withdrawals
+                        .into_iter()
+                        .map(|w| w.into())
+                        .collect(),
+                )
+                .map_err(ApiError::DeserializeWithdrawals)?;
 
                 ExecutionPayload::Eip4844(ExecutionPayloadEip4844 {
                     parent_hash: eip4844_block.parent_hash,
diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
index f2282c6039d..a7ec429e456 100644
--- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
+++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
@@ -76,7 +76,7 @@ impl<T: EthSpec> Block<T> {
     pub fn as_execution_block_with_tx(&self) -> Option<ExecutionBlockWithTransactions<T>> {
         match self {
-            Block::PoS(payload) => Some(payload.clone().try_into().unwrap()),
+            Block::PoS(payload) => Some(payload.clone().try_into().unwrap()),
             Block::PoW(_) => None,
         }
     }

From 63d3dd27fc2cc050083861a7a82ed15c3bb08ca9 Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Wed, 14 Dec 2022 12:01:33 +1100
Subject: [PATCH 14/17] Batch API for address changes (#3798)
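
The handler now accepts a JSON array of `SignedBlsToExecutionChange` objects
and reports per-item failures rather than rejecting the whole request at the
first invalid change. A hedged usage sketch (the pool route and all field
values below are illustrative placeholders, not confirmed by this diff):

```bash
curl -X POST "http://localhost:5052/eth/v1/beacon/pool/bls_to_execution_changes" \
  -H "Content-Type: application/json" \
  -d '[
    {
      "message": {
        "validator_index": "1",
        "from_bls_pubkey": "0x...",
        "to_execution_address": "0x..."
      },
      "signature": "0x..."
    }
  ]'
```

Invalid items come back as indexed failures (via `indexed_bad_request`), while
valid changes in the same batch are still imported.
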
---
 beacon_node/http_api/src/lib.rs | 72 ++++++++++++++++++++++-----------
 1 file changed, 49 insertions(+), 23 deletions(-)

diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index b75e583fc76..47ea99c8733 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -1673,36 +1673,62 @@
         .and(warp::path::end())
         .and(warp::body::json())
         .and(network_tx_filter.clone())
+        .and(log_filter.clone())
         .and_then(
             |chain: Arc<BeaconChain<T>>,
-             address_change: SignedBlsToExecutionChange,
-             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
+             address_changes: Vec<SignedBlsToExecutionChange>,
+             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+             log: Logger| {
                 blocking_json_task(move || {
-                    let outcome = chain
-                        .verify_bls_to_execution_change_for_gossip(address_change)
-                        .map_err(|e| {
-                            warp_utils::reject::object_invalid(format!(
-                                "gossip verification failed: {:?}",
-                                e
-                            ))
-                        })?;
+                    let mut failures = vec![];
+
+                    for (index, address_change) in address_changes.into_iter().enumerate() {
+                        let validator_index = address_change.message.validator_index;
+
+                        match chain.verify_bls_to_execution_change_for_gossip(address_change) {
+                            Ok(ObservationOutcome::New(verified_address_change)) => {
+                                #[cfg(feature = "withdrawals-processing")]
+                                {
+                                    publish_pubsub_message(
+                                        &network_tx,
+                                        PubsubMessage::BlsToExecutionChange(Box::new(
+                                            verified_address_change.as_inner().clone(),
+                                        )),
+                                    )?;
+                                }
 
-                    if let ObservationOutcome::New(address_change) = outcome {
-                        #[cfg(feature = "withdrawals-processing")]
-                        {
-                            publish_pubsub_message(
-                                &network_tx,
-                                PubsubMessage::BlsToExecutionChange(Box::new(
-                                    address_change.as_inner().clone(),
-                                )),
-                            )?;
+                                chain.import_bls_to_execution_change(verified_address_change);
+                            }
+                            Ok(ObservationOutcome::AlreadyKnown) => {
+                                debug!(
+                                    log,
+                                    "BLS to execution change already known";
+                                    "validator_index" => validator_index,
+                                );
+                            }
+                            Err(e) => {
+                                error!(
+                                    log,
+                                    "Invalid BLS to execution change";
+                                    "validator_index" => validator_index,
+                                    "source" => "HTTP API",
+                                );
+                                failures.push(api_types::Failure::new(
+                                    index,
+                                    format!("invalid: {e:?}"),
+                                ));
+                            }
                         }
-                        drop(network_tx);
-
-                        chain.import_bls_to_execution_change(address_change);
                     }
-                    Ok(())
+
+                    if failures.is_empty() {
+                        Ok(())
+                    } else {
+                        Err(warp_utils::reject::indexed_bad_request(
+                            "some BLS to execution changes failed to verify".into(),
+                            failures,
+                        ))
+                    }
                 })
             },
         );

From f3e8ca852e9003a6b9aaadb8c81c7021e996dced Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Wed, 14 Dec 2022 14:04:13 +1100
Subject: [PATCH 15/17] Fix Clippy

---
 beacon_node/http_api/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index e573e6a5d8b..783b8b68f8d 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -1677,7 +1677,7 @@ pub fn serve<T: BeaconChainTypes>(
         .and_then(
             |chain: Arc<BeaconChain<T>>,
              address_changes: Vec<SignedBlsToExecutionChange>,
-             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+             #[allow(unused)] network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
              log: Logger| {
                 blocking_json_task(move || {
                     let mut failures = vec![];

From d48460782bfa3738f20e7823e6c304313fbf14e5 Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Thu, 15 Dec 2022 11:42:35 +1100
Subject: [PATCH 16/17] Publish capella images on push (#3803)

---
 .github/workflows/docker.yml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 13b84116955..0643165c9fd 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -5,6 +5,7 @@ on:
     branches:
       - unstable
       - stable
+      - capella
     tags:
       - v*
@@ -34,6 +35,11 @@ jobs:
         run: |
           echo "VERSION=latest" >> $GITHUB_ENV
           echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV
+      - name: Extract version (if capella)
+        if: github.event.ref == 'refs/heads/capella'
+        run: |
+          echo "VERSION=capella" >> $GITHUB_ENV
+          echo "VERSION_SUFFIX=" >> $GITHUB_ENV
       - name: Extract version (if tagged release)
         if: startsWith(github.event.ref, 'refs/tags')
         run: |

From 2c7ebc7278aa46b997f0a1959fdaebdf41719f6f Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Thu, 15 Dec 2022 12:25:45 +1100
Subject: [PATCH 17/17] Enable withdrawals features in Capella docker images (#3805)

---
 .github/workflows/docker.yml | 1 +
 lcli/Dockerfile              | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 0643165c9fd..25d2cdab302 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -66,6 +66,7 @@ jobs:
       DOCKER_CLI_EXPERIMENTAL: enabled
       VERSION: ${{ needs.extract-version.outputs.VERSION }}
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
+      CROSS_FEATURES: withdrawals,withdrawals-processing
     steps:
       - uses: actions/checkout@v3
      - name: Update Rust
diff --git a/lcli/Dockerfile b/lcli/Dockerfile
index 1129e710f46..feda81d0302 100644
--- a/lcli/Dockerfile
+++ b/lcli/Dockerfile
@@ -1,7 +1,7 @@
 # `lcli` requires the full project to be in scope, so this should be built either:
 # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerfile .`
 # - from the current directory with the command: `docker build -f ./Dockerfile ../`
-FROM rust:1.62.1-bullseye AS builder
+FROM rust:1.65.0-bullseye AS builder
 RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
 COPY . lighthouse
 ARG PORTABLE