From a732a8784643d053051d386294ce53f542cf8237 Mon Sep 17 00:00:00 2001
From: Michael Sproul <michael@sigmaprime.io>
Date: Mon, 21 Oct 2024 12:28:55 +1100
Subject: [PATCH 1/8] Remove TTD flags and `safe-slots-to-import-*` (#6489)

* Delete SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY
* Update fork choice tests
* Remove TTD related flags
* Add deprecation warning
* Remove more dead code
* Delete EF on_merge_block tests
* Remove even more dead code
* Address Mac's review comments
---
 .../beacon_chain/src/block_verification.rs | 34 +-
 .../beacon_chain/src/execution_payload.rs | 34 +-
 .../tests/payload_invalidation.rs | 552 ------
 book/src/api-vc-endpoints.md | 2 -
 book/src/help_bn.md | 29 -
 book/src/help_general.md | 29 -
 book/src/help_vc.md | 29 -
 book/src/help_vm.md | 29 -
 book/src/help_vm_create.md | 29 -
 book/src/help_vm_import.md | 29 -
 book/src/help_vm_move.md | 29 -
 common/clap_utils/src/lib.rs | 32 +-
 consensus/fork_choice/src/fork_choice.rs | 37 --
 consensus/fork_choice/tests/tests.rs | 122 +---
 consensus/types/presets/gnosis/phase0.yaml | 6 -
 consensus/types/presets/mainnet/phase0.yaml | 6 -
 consensus/types/presets/minimal/phase0.yaml | 6 -
 consensus/types/src/chain_spec.rs | 19 -
 consensus/types/src/preset.rs | 3 -
 lighthouse/src/main.rs | 44 +-
 lighthouse/tests/beacon_node.rs | 43 +-
 lighthouse/tests/exec.rs | 5 -
 testing/ef_tests/check_all_files_accessed.py | 2 +
 testing/ef_tests/src/handler.rs | 4 +-
 testing/ef_tests/tests/tests.rs | 6 -
 25 files changed, 44 insertions(+), 1116 deletions(-)

diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs
index a8233f170f6..661b539fbe1 100644
--- a/beacon_node/beacon_chain/src/block_verification.rs
+++ b/beacon_node/beacon_chain/src/block_verification.rs
@@ -55,8 +55,8 @@ use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlo
 use crate::data_column_verification::GossipDataColumnError;
 use crate::eth1_finalization_cache::Eth1FinalizationData;
 use crate::execution_payload::{
-    is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
-    AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier,
+    validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport,
+    NotifyExecutionLayer, PayloadNotifier,
 };
 use crate::kzg_utils::blobs_to_data_column_sidecars;
 use crate::observed_block_producers::SeenBlock;
@@ -74,7 +74,7 @@ use lighthouse_metrics::TryExt;
 use parking_lot::RwLockReadGuard;
 use proto_array::Block as ProtoBlock;
 use safe_arith::ArithError;
-use slog::{debug, error, warn, Logger};
+use slog::{debug, error, Logger};
 use slot_clock::SlotClock;
 use ssz::Encode;
 use ssz_derive::{Decode, Encode};
@@ -95,9 +95,9 @@ use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp};
 use task_executor::JoinHandle;
 use types::{
     data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError,
-    BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecPayload, ExecutionBlockHash,
-    FullPayload, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch,
-    SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
+    BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload,
+    Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock,
+    SignedBeaconBlockHeader, Slot,
 };
 
 pub const POS_PANDA_BANNER: &str = r#"
@@ -1388,28 +1388,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
         }
 
         let payload_verification_status =
payload_notifier.notify_new_payload().await?;
 
-        // If the payload did not validate or invalidate the block, check to see if this block is
-        // valid for optimistic import.
-        if payload_verification_status.is_optimistic() {
-            let block_hash_opt = block
-                .message()
-                .body()
-                .execution_payload()
-                .map(|full_payload| full_payload.block_hash());
-
-            // Ensure the block is a candidate for optimistic import.
-            if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await?
-            {
-                warn!(
-                    chain.log,
-                    "Rejecting optimistic block";
-                    "block_hash" => ?block_hash_opt,
-                    "msg" => "the execution engine is not synced"
-                );
-                return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into());
-            }
-        }
-
         Ok(PayloadVerificationOutcome {
             payload_verification_status,
             is_valid_merge_transition_block,
diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs
index b9b98bfbc00..f2420eea0d2 100644
--- a/beacon_node/beacon_chain/src/execution_payload.rs
+++ b/beacon_node/beacon_chain/src/execution_payload.rs
@@ -277,9 +277,7 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>(
         }
         .into()),
         None => {
-            if allow_optimistic_import == AllowOptimisticImport::Yes
-                && is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await?
-            {
+            if allow_optimistic_import == AllowOptimisticImport::Yes {
                 debug!(
                     chain.log,
                     "Optimistically importing merge transition block";
@@ -297,36 +295,6 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>(
     }
 }
 
-/// Check to see if a block with the given parameters is valid to be imported optimistically.
-pub async fn is_optimistic_candidate_block<T: BeaconChainTypes>(
-    chain: &Arc<BeaconChain<T>>,
-    block_slot: Slot,
-    block_parent_root: Hash256,
-) -> Result<bool, BeaconChainError> {
-    let current_slot = chain.slot()?;
-    let inner_chain = chain.clone();
-
-    // Use a blocking task to check if the block is an optimistic candidate. Interacting
-    // with the `fork_choice` lock in an async task can block the core executor.
-    chain
-        .spawn_blocking_handle(
-            move || {
-                inner_chain
-                    .canonical_head
-                    .fork_choice_read_lock()
-                    .is_optimistic_candidate_block(
-                        current_slot,
-                        block_slot,
-                        &block_parent_root,
-                        &inner_chain.spec,
-                    )
-            },
-            "validate_merge_block_optimistic_candidate",
-        )
-        .await?
-        .map_err(BeaconChainError::from)
-}
-
 /// Validate the gossip block's execution_payload according to the checks described here:
 /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block
 pub fn validate_execution_payload_for_gossip(
diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs
index dd195048e87..1325875a275 100644
--- a/beacon_node/beacon_chain/tests/payload_invalidation.rs
+++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs
@@ -1,20 +1,14 @@
 #![cfg(not(debug_assertions))]
 
-use beacon_chain::otb_verification_service::{
-    load_optimistic_transition_blocks, validate_optimistic_transition_blocks,
-    OptimisticTransitionBlock,
-};
 use beacon_chain::{
     canonical_head::{CachedHead, CanonicalHead},
     test_utils::{BeaconChainHarness, EphemeralHarnessType},
     BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer,
     OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped,
-    INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
 };
 use execution_layer::{
     json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1},
-    test_utils::ExecutionBlockGenerator,
     ExecutionLayer, ForkchoiceState, PayloadAttributes,
 };
 use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus};
@@ -1270,552 +1264,6 @@ async fn attesting_to_optimistic_head() {
     get_aggregated_by_slot_and_root().unwrap();
 }
 
-/// A helper struct to build out a chain of some configurable length which undergoes the merge
-/// transition.
-struct OptimisticTransitionSetup {
-    blocks: Vec<Arc<SignedBeaconBlock<E>>>,
-    execution_block_generator: ExecutionBlockGenerator<E>,
-}
-
-impl OptimisticTransitionSetup {
-    async fn new(num_blocks: usize, ttd: u64) -> Self {
-        let mut spec = E::default_spec();
-        spec.terminal_total_difficulty = Uint256::from(ttd);
-        let mut rig = InvalidPayloadRig::new_with_spec(spec).enable_attestations();
-        rig.move_to_terminal_block();
-
-        let mut blocks = Vec::with_capacity(num_blocks);
-        for _ in 0..num_blocks {
-            let root = rig.import_block(Payload::Valid).await;
-            let block = rig.harness.chain.get_block(&root).await.unwrap().unwrap();
-            blocks.push(Arc::new(block));
-        }
-
-        let execution_block_generator = rig
-            .harness
-            .mock_execution_layer
-            .as_ref()
-            .unwrap()
-            .server
-            .execution_block_generator()
-            .clone();
-
-        Self {
-            blocks,
-            execution_block_generator,
-        }
-    }
-}
-
-/// Build a chain which has optimistically imported a transition block.
-///
-/// The initial chain will be built with respect to `block_ttd`, whilst the `rig` which imports the
-/// chain will operate with respect to `rig_ttd`. This allows for testing mismatched TTDs.
-async fn build_optimistic_chain(
-    block_ttd: u64,
-    rig_ttd: u64,
-    num_blocks: usize,
-) -> InvalidPayloadRig {
-    let OptimisticTransitionSetup {
-        blocks,
-        execution_block_generator,
-    } = OptimisticTransitionSetup::new(num_blocks, block_ttd).await;
-    // Build a brand-new testing harness. We will apply the blocks from the previous harness to
-    // this one.
-    let mut spec = E::default_spec();
-    spec.terminal_total_difficulty = Uint256::from(rig_ttd);
-    let rig = InvalidPayloadRig::new_with_spec(spec);
-
-    let spec = &rig.harness.chain.spec;
-    let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap();
-
-    // Ensure all the execution blocks from the first rig are available in the second rig.
- *mock_execution_layer.server.execution_block_generator() = execution_block_generator; - - // Make the execution layer respond `SYNCING` to all `newPayload` requests. - mock_execution_layer - .server - .all_payloads_syncing_on_new_payload(true); - // Make the execution layer respond `SYNCING` to all `forkchoiceUpdated` requests. - mock_execution_layer - .server - .all_payloads_syncing_on_forkchoice_updated(); - // Make the execution layer respond `None` to all `getBlockByHash` requests. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_none(); - - let current_slot = std::cmp::max( - blocks[0].slot() + spec.safe_slots_to_import_optimistically, - num_blocks.into(), - ); - rig.harness.set_current_slot(current_slot); - - for block in blocks { - rig.harness - .chain - .process_block( - block.canonical_root(), - block, - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ) - .await - .unwrap(); - } - - rig.harness.chain.recompute_head_at_current_slot().await; - - // Make the execution layer respond normally to `getBlockByHash` requests. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - // Perform some sanity checks to ensure that the transition happened exactly where we expected. - let pre_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(0), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let pre_transition_block = rig - .harness - .chain - .get_block(&pre_transition_block_root) - .await - .unwrap() - .unwrap(); - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - assert_eq!( - pre_transition_block_root, - post_transition_block.parent_root(), - "the blocks form a single chain" - ); - assert!( - pre_transition_block - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "the block *has not* undergone the merge transition" - ); - assert!( - !post_transition_block - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "the block *has* undergone the merge transition" - ); - - // Assert that the transition block was optimistically imported. - // - // Note: we're using the "fallback" check for optimistic status, so if the block was - // pre-finality then we'll just use the optimistic status of the finalized block. - assert!( - rig.harness - .chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_or_invalid_block(&post_transition_block_root) - .unwrap(), - "the transition block should be imported optimistically" - ); - - // Get the mock execution layer to respond to `getBlockByHash` requests normally again. 
- mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - rig -} - -#[tokio::test] -async fn optimistic_transition_block_valid_unfinalized() { - let ttd = 42; - let num_blocks = 16_usize; - let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - valid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should validate fine"); - // now that the transition block has been validated, it should have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert!( - otbs.is_empty(), - "The valid optimistic transition block should have been removed from the database", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_valid_finalized() { - let ttd = 42; - let num_blocks = 130_usize; - let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - > post_transition_block.slot(), - "the transition block should be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - valid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should validate fine"); - // now that the transition block has been validated, it should have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert!( - otbs.is_empty(), - "The valid optimistic transition block should have been removed from the database", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_unfinalized() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 22_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), 
WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It shouldn't be known as invalid yet - assert!(!rig - .execution_status(post_transition_block_root) - .is_invalid()); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It should be marked invalid now - assert!(rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 22_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It shouldn't be known as invalid yet - assert!(!rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // Make the execution layer respond `None` to all `getBlockByHash` requests to simulate a - // syncing EE. 
- let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_none(); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - - // It should still be marked as optimistic. - assert!(rig - .execution_status(post_transition_block_root) - .is_strictly_optimistic()); - - // the optimistic merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The optimistic merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // Allow the EL to respond to `getBlockByHash`, as if it has finished syncing. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It should be marked invalid now - assert!(rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_finalized() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 130_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - > post_transition_block.slot(), - "the transition block should be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered yet. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should invalidate merge transition block and shutdown the client"); - - // The beacon chain should have triggered a shutdown. 
- assert_eq!( - rig.harness.shutdown_reasons(), - vec![ShutdownReason::Failure( - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON - )] - ); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - /// Helper for running tests where we generate a chain with an invalid head and then a /// `fork_block` to recover it. struct InvalidHeadSetup { diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 6cb66859128..80eba7a0590 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -230,7 +230,6 @@ Example Response Body "TERMINAL_TOTAL_DIFFICULTY": "10790000", "TERMINAL_BLOCK_HASH": "0x0000000000000000000000000000000000000000000000000000000000000000", "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH": "18446744073709551615", - "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY": "128", "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT": "16384", "MIN_GENESIS_TIME": "1614588812", "GENESIS_FORK_VERSION": "0x00001020", @@ -263,7 +262,6 @@ Example Response Body "HYSTERESIS_QUOTIENT": "4", "HYSTERESIS_DOWNWARD_MULTIPLIER": "1", "HYSTERESIS_UPWARD_MULTIPLIER": "5", - "SAFE_SLOTS_TO_UPDATE_JUSTIFIED": "8", "MIN_DEPOSIT_AMOUNT": "1000000000", "MAX_EFFECTIVE_BALANCE": "32000000000", "EFFECTIVE_BALANCE_INCREMENT": "1000000000", diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 338905a4fbf..69701a3ad93 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -326,14 +326,6 @@ Options: --quic-port6 The UDP port that quic will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to `port6` + 1 - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --self-limiter-protocols Enables the outbound rate limiter (requests made by this node).Rate limit quotas per protocol can be set in the form of @@ -387,27 +379,6 @@ Options: database. --target-peers The target number of peers. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. 
- --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --trusted-peers One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system. diff --git a/book/src/help_general.md b/book/src/help_general.md index 48314d5108c..aa0ae768553 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -77,39 +77,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. -V, --version Print version diff --git a/book/src/help_vc.md b/book/src/help_vc.md index aa24ab3d91f..2cfbfbc857a 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -118,14 +118,6 @@ Options: specify nodes that are used to send beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes. - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. 
Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --secrets-dir The directory which contains the password to unlock the validator voting keypairs. Each password should be contained in a file where the @@ -140,27 +132,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validator-registration-batch-size Defines the number of validators per validator/register_validator request sent to the BN. This value can be reduced to avoid timeouts diff --git a/book/src/help_vm.md b/book/src/help_vm.md index f787985b215..9b6c5d4f3bd 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -69,39 +69,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. 
Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. Flags: --disable-log-timestamp diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index cde822e8946..2743117eae2 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -91,14 +91,6 @@ Options: If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --suggested-fee-recipient All created validators will use this value for the suggested fee recipient. Omit this flag to use the default value from the VC. @@ -106,27 +98,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. Flags: --disable-deposits diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index 0883139ad21..b4999d3fe31 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -50,39 +50,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. 
This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validators-file The path to a JSON file containing a list of validators to be imported to the validator client. This file is usually named "validators.json". diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index 12dd1e91402..99eee32c782 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -74,14 +74,6 @@ Options: If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --src-vc-token The file containing a token required by the source validator client. --src-vc-url @@ -95,27 +87,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. 
This flag should only be used if the user has a clear
-          understanding that the broad Ethereum community has elected to
-          override the terminal PoW block. Incorrect use of this flag will cause
-          your node to experience a consensus failure. Be extremely careful with
-          this flag.
-      --terminal-total-difficulty-override
-          Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY
-          parameter. Accepts a 256-bit decimal integer (not a hex value). This
-          flag should only be used if the user has a clear understanding that
-          the broad Ethereum community has elected to override the terminal
-          difficulty. Incorrect use of this flag will cause your node to
-          experience a consensus failure. Be extremely careful with this flag.
      --validators
          The validators to be moved. Either a list of 0x-prefixed validator
          pubkeys or the keyword "all".
diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs
index cba7399c9bf..a4b5f4dc1c4 100644
--- a/common/clap_utils/src/lib.rs
+++ b/common/clap_utils/src/lib.rs
@@ -1,6 +1,5 @@
 //! A helper library for parsing values from `clap::ArgMatches`.
 
-use alloy_primitives::U256 as Uint256;
 use clap::builder::styling::*;
 use clap::ArgMatches;
 use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK};
@@ -30,38 +29,9 @@ pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result<Eth2NetworkConfig, String>
-    if let Some(string) =
-        parse_optional::<String>(cli_args, "terminal-total-difficulty-override")?
-    {
-        let stripped = string.replace(',', "");
-        let terminal_total_difficulty = Uint256::from_str(&stripped).map_err(|e| {
-            format!(
-                "Could not parse --terminal-total-difficulty-override as decimal value: {:?}",
-                e
-            )
-        })?;
-
-        eth2_network_config.config.terminal_total_difficulty = terminal_total_difficulty;
-    }
-
-    if let Some(hash) = parse_optional(cli_args, "terminal-block-hash-override")? {
-        eth2_network_config.config.terminal_block_hash = hash;
-    }
-
-    if let Some(epoch) = parse_optional(cli_args, "terminal-block-hash-epoch-override")? {
-        eth2_network_config
-            .config
-            .terminal_block_hash_activation_epoch = epoch;
-    }
-
-    if let Some(slots) = parse_optional(cli_args, "safe-slots-to-import-optimistically")? {
-        eth2_network_config
-            .config
-            .safe_slots_to_import_optimistically = slots;
-    }
-
     Ok(eth2_network_config)
 }
 
diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs
index ca59a6adfb6..85704042df4 100644
--- a/consensus/fork_choice/src/fork_choice.rs
+++ b/consensus/fork_choice/src/fork_choice.rs
@@ -1300,43 +1300,6 @@ where
         }
     }
 
-    /// Returns `Ok(false)` if a block is not viable to be imported optimistically.
-    ///
-    /// ## Notes
-    ///
-    /// Equivalent to the function with the same name in the optimistic sync specs:
-    ///
-    /// https://github.com/ethereum/consensus-specs/blob/dev/sync/optimistic.md#helpers
-    pub fn is_optimistic_candidate_block(
-        &self,
-        current_slot: Slot,
-        block_slot: Slot,
-        block_parent_root: &Hash256,
-        spec: &ChainSpec,
-    ) -> Result<bool, Error<T::Error>> {
-        // If the block is sufficiently old, import it.
-        if block_slot + spec.safe_slots_to_import_optimistically <= current_slot {
-            return Ok(true);
-        }
-
-        // If the parent block has execution enabled, always import the block.
-        //
-        // See:
-        //
-        // https://github.com/ethereum/consensus-specs/pull/2844
-        if self
-            .proto_array
-            .get_block(block_parent_root)
-            .map_or(false, |parent| {
-                parent.execution_status.is_execution_enabled()
-            })
-        {
-            return Ok(true);
-        }
-
-        Ok(false)
-    }
-
     /// Return the current finalized checkpoint.
pub fn finalized_checkpoint(&self) -> Checkpoint { *self.fc_store.finalized_checkpoint() diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index ce19d68203e..29265e34e4d 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -256,36 +256,6 @@ impl ForkChoiceTest { self } - /// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - /// - /// If the chain is presently in an unsafe period, transition through it and the following safe - /// period. - /// - /// Note: the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` variable has been removed - /// from the fork choice spec in Q1 2023. We're still leaving references to - /// it in our tests because (a) it's easier and (b) it allows us to easily - /// test for the absence of that parameter. - pub fn move_to_next_unsafe_period(self) -> Self { - self.move_inside_safe_to_update() - .move_outside_safe_to_update() - } - - /// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - pub fn move_outside_safe_to_update(self) -> Self { - while is_safe_to_update(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - self.harness.advance_slot() - } - self - } - - /// Moves to the next slot that is *inside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - pub fn move_inside_safe_to_update(self) -> Self { - while !is_safe_to_update(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - self.harness.advance_slot() - } - self - } - /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts the block was applied successfully. @@ -516,10 +486,6 @@ impl ForkChoiceTest { } } -fn is_safe_to_update(slot: Slot, spec: &ChainSpec) -> bool { - slot % E::slots_per_epoch() < spec.safe_slots_to_update_justified -} - #[test] fn justified_and_finalized_blocks() { let tester = ForkChoiceTest::new(); @@ -536,15 +502,13 @@ fn justified_and_finalized_blocks() { assert!(fork_choice.get_finalized_block().is_ok()); } -/// - The new justified checkpoint descends from the current. -/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` +/// - The new justified checkpoint descends from the current. Near genesis. #[tokio::test] -async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { +async fn justified_checkpoint_updates_with_descendent_first_justification() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .await .unwrap() - .move_inside_safe_to_update() .assert_justified_epoch(0) .apply_blocks(1) .await @@ -552,77 +516,29 @@ async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { } /// - The new justified checkpoint descends from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is **not** the first justification since genesis #[tokio::test] -async fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { +async fn justified_checkpoint_updates_with_descendent() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch <= 2) .await .unwrap() - .move_outside_safe_to_update() .assert_justified_epoch(2) .apply_blocks(1) .await .assert_justified_epoch(3); } -/// - The new justified checkpoint descends from the current. 
-/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - This is the first justification since genesis -#[tokio::test] -async fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .move_to_next_unsafe_period() - .assert_justified_epoch(0) - .apply_blocks(1) - .await - .assert_justified_epoch(2); -} - -/// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - Finalized epoch has **not** increased. -#[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .apply_blocks(1) - .await - .move_inside_safe_to_update() - .assert_justified_epoch(2) - .apply_block_directly_to_fork_choice(|_, state| { - // The finalized checkpoint should not change. - state.finalized_checkpoint().epoch = Epoch::new(0); - - // The justified checkpoint has changed. - state.current_justified_checkpoint_mut().epoch = Epoch::new(3); - // The new block should **not** include the current justified block as an ancestor. - state.current_justified_checkpoint_mut().root = *state - .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) - .unwrap(); - }) - .await - .assert_justified_epoch(3); -} - /// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`. /// - Finalized epoch has **not** increased. #[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { +async fn justified_checkpoint_updates_with_non_descendent() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .await .unwrap() .apply_blocks(1) .await - .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { // The finalized checkpoint should not change. @@ -636,36 +552,6 @@ async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_wit .unwrap(); }) .await - // Now that `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` has been removed, the new - // block should have updated the justified checkpoint. - .assert_justified_epoch(3); -} - -/// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - Finalized epoch has increased. -#[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .apply_blocks(1) - .await - .move_to_next_unsafe_period() - .assert_justified_epoch(2) - .apply_block_directly_to_fork_choice(|_, state| { - // The finalized checkpoint should change. - state.finalized_checkpoint_mut().epoch = Epoch::new(1); - - // The justified checkpoint has changed. - state.current_justified_checkpoint_mut().epoch = Epoch::new(3); - // The new block should **not** include the current justified block as an ancestor. 
-            state.current_justified_checkpoint_mut().root = *state
-                .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch()))
-                .unwrap();
-        })
-        .await
-        .assert_justified_epoch(3);
-}
diff --git a/consensus/types/presets/gnosis/phase0.yaml b/consensus/types/presets/gnosis/phase0.yaml
index 87c73e6fb7a..48129cb47ea 100644
--- a/consensus/types/presets/gnosis/phase0.yaml
+++ b/consensus/types/presets/gnosis/phase0.yaml
@@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1
 HYSTERESIS_UPWARD_MULTIPLIER: 5
 
-# Fork Choice
-# ---------------------------------------------------------------
-# 2**3 (= 8)
-SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
-
-
 # Gwei values
 # ---------------------------------------------------------------
 # 2**0 * 10**9 (= 1,000,000,000) Gwei
diff --git a/consensus/types/presets/mainnet/phase0.yaml b/consensus/types/presets/mainnet/phase0.yaml
index 89bb97d6a87..02bc96c8cdb 100644
--- a/consensus/types/presets/mainnet/phase0.yaml
+++ b/consensus/types/presets/mainnet/phase0.yaml
@@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1
 HYSTERESIS_UPWARD_MULTIPLIER: 5
 
-# Fork Choice
-# ---------------------------------------------------------------
-# 2**3 (= 8)
-SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
-
-
 # Gwei values
 # ---------------------------------------------------------------
 # 2**0 * 10**9 (= 1,000,000,000) Gwei
diff --git a/consensus/types/presets/minimal/phase0.yaml b/consensus/types/presets/minimal/phase0.yaml
index c9c81325f1b..1f756031421 100644
--- a/consensus/types/presets/minimal/phase0.yaml
+++ b/consensus/types/presets/minimal/phase0.yaml
@@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1
 HYSTERESIS_UPWARD_MULTIPLIER: 5
 
-# Fork Choice
-# ---------------------------------------------------------------
-# 2**1 (= 2)
-SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2
-
-
 # Gwei values
 # ---------------------------------------------------------------
 # 2**0 * 10**9 (= 1,000,000,000) Gwei
diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index d8b75260b68..1c4effb4aec 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -114,7 +114,6 @@ pub struct ChainSpec {
     /*
      * Fork choice
      */
-    pub safe_slots_to_update_justified: u64,
     pub proposer_score_boost: Option<u64>,
     pub reorg_head_weight_threshold: Option<u64>,
     pub reorg_parent_weight_threshold: Option<u64>,
@@ -157,7 +156,6 @@ pub struct ChainSpec {
     pub terminal_total_difficulty: Uint256,
     pub terminal_block_hash: ExecutionBlockHash,
     pub terminal_block_hash_activation_epoch: Epoch,
-    pub safe_slots_to_import_optimistically: u64,
 
     /*
      * Capella hard fork params
@@ -705,7 +703,6 @@ impl ChainSpec {
         /*
          * Fork choice
          */
-        safe_slots_to_update_justified: 8,
         proposer_score_boost: Some(40),
         reorg_head_weight_threshold: Some(20),
         reorg_parent_weight_threshold: Some(160),
@@ -756,7 +753,6 @@ impl ChainSpec {
             .expect("terminal_total_difficulty is a valid integer"),
         terminal_block_hash: ExecutionBlockHash::zero(),
         terminal_block_hash_activation_epoch: Epoch::new(u64::MAX),
-        safe_slots_to_import_optimistically: 128u64,
 
         /*
          * Capella hard fork params
@@ -886,7 +882,6 @@ impl ChainSpec {
         inactivity_penalty_quotient: u64::checked_pow(2, 25).expect("pow does not overflow"),
         min_slashing_penalty_quotient: 64,
         proportional_slashing_multiplier: 2,
-        safe_slots_to_update_justified: 2,
         // Altair
         epochs_per_sync_committee_period: Epoch::new(8),
         altair_fork_version: [0x01, 0x00, 0x00, 0x01],
@@ -1026,7 +1021,6 @@ impl ChainSpec {
         /*
          * Fork choice
          */
-        safe_slots_to_update_justified: 8,
         proposer_score_boost: Some(40),
reorg_head_weight_threshold: Some(20), reorg_parent_weight_threshold: Some(160), @@ -1077,7 +1071,6 @@ impl ChainSpec { .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), - safe_slots_to_import_optimistically: 128u64, /* * Capella hard fork params @@ -1212,9 +1205,6 @@ pub struct Config { pub terminal_block_hash: ExecutionBlockHash, #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, - #[serde(default = "default_safe_slots_to_import_optimistically")] - #[serde(with = "serde_utils::quoted_u64")] - pub safe_slots_to_import_optimistically: u64, #[serde(with = "serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, @@ -1425,10 +1415,6 @@ fn default_terminal_block_hash_activation_epoch() -> Epoch { Epoch::new(u64::MAX) } -fn default_safe_slots_to_import_optimistically() -> u64 { - 128u64 -} - fn default_subnets_per_node() -> u8 { 2u8 } @@ -1649,7 +1635,6 @@ impl Config { terminal_total_difficulty: spec.terminal_total_difficulty, terminal_block_hash: spec.terminal_block_hash, terminal_block_hash_activation_epoch: spec.terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically: spec.safe_slots_to_import_optimistically, min_genesis_active_validator_count: spec.min_genesis_active_validator_count, min_genesis_time: spec.min_genesis_time, @@ -1751,7 +1736,6 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically, min_genesis_active_validator_count, min_genesis_time, genesis_fork_version, @@ -1851,7 +1835,6 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically, gossip_max_size, min_epochs_for_block_requests, max_chunk_size, @@ -2103,7 +2086,6 @@ mod yaml_tests { #TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638911 #TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000001 #TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551614 - #SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY: 2 MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 MIN_GENESIS_TIME: 1606824000 GENESIS_FORK_VERSION: 0x00000000 @@ -2152,7 +2134,6 @@ mod yaml_tests { check_default!(terminal_total_difficulty); check_default!(terminal_block_hash); check_default!(terminal_block_hash_activation_epoch); - check_default!(safe_slots_to_import_optimistically); check_default!(bellatrix_fork_version); check_default!(gossip_max_size); check_default!(min_epochs_for_block_requests); diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 2c576ed332c..435a74bdc35 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -27,8 +27,6 @@ pub struct BasePreset { #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_upward_multiplier: u64, #[serde(with = "serde_utils::quoted_u64")] - pub safe_slots_to_update_justified: u64, - #[serde(with = "serde_utils::quoted_u64")] pub min_deposit_amount: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_effective_balance: u64, @@ -90,7 +88,6 @@ impl BasePreset { hysteresis_quotient: spec.hysteresis_quotient, hysteresis_downward_multiplier: spec.hysteresis_downward_multiplier, hysteresis_upward_multiplier: spec.hysteresis_upward_multiplier, - safe_slots_to_update_justified: spec.safe_slots_to_update_justified, 
min_deposit_amount: spec.min_deposit_amount, max_effective_balance: spec.max_effective_balance, effective_balance_increment: spec.effective_balance_increment, diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 4f4dabff89b..e33e4cb9b81 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -323,57 +323,43 @@ fn main() { Arg::new("terminal-total-difficulty-override") .long("terminal-total-difficulty-override") .value_name("INTEGER") - .help("Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. \ - Accepts a 256-bit decimal integer (not a hex value). \ - This flag should only be used if the user has a clear understanding that \ - the broad Ethereum community has elected to override the terminal difficulty. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("terminal-block-hash-override") .long("terminal-block-hash-override") .value_name("TERMINAL_BLOCK_HASH") - .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. \ - This flag should only be used if the user has a clear understanding that \ - the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .requires("terminal-block-hash-epoch-override") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("terminal-block-hash-epoch-override") .long("terminal-block-hash-epoch-override") .value_name("EPOCH") - .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH \ - parameter. This flag should only be used if the user has a clear understanding \ - that the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .requires("terminal-block-hash-override") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("safe-slots-to-import-optimistically") .long("safe-slots-to-import-optimistically") .value_name("INTEGER") - .help("Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY \ - parameter. This flag should only be used if the user has a clear understanding \ - that the broad Ethereum community has elected to override this parameter in the event \ - of an attack at the PoS transition block. Incorrect use of this flag can cause your \ - node to possibly accept an invalid chain or sync more slowly. Be extremely careful with \ - this flag.") + .help("DEPRECATED") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("genesis-state-url") @@ -631,6 +617,20 @@ fn run( ); } + // Warn for DEPRECATED global flags. This code should be removed when we finish deleting these + // flags. 
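+    // The flags are still parsed (their values are simply ignored) so that
+    // existing command lines keep working; each one is hidden from --help and
+    // now only triggers the warning below.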
+    let deprecated_flags = [
+        "terminal-total-difficulty-override",
+        "terminal-block-hash-override",
+        "terminal-block-hash-epoch-override",
+        "safe-slots-to-import-optimistically",
+    ];
+    for flag in deprecated_flags {
+        if matches.get_one::<String>(flag).is_some() {
+            slog::warn!(log, "The {} flag is deprecated and does nothing", flag);
+        }
+    }
+
     // Note: the current code technically allows for starting a beacon node _and_ a validator
     // client at the same time.
     //
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs
index f22e4387008..ac7ddcdbd98 100644
--- a/lighthouse/tests/beacon_node.rs
+++ b/lighthouse/tests/beacon_node.rs
@@ -21,7 +21,7 @@ use std::string::ToString;
 use std::time::Duration;
 use tempfile::TempDir;
 use types::non_zero_usize::new_non_zero_usize;
-use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, Hash256, MainnetEthSpec};
+use types::{Address, Checkpoint, Epoch, Hash256, MainnetEthSpec};
 use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port};
 
 const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/";
@@ -742,16 +742,14 @@ fn jwt_optional_flags() {
 fn jwt_optional_alias_flags() {
     run_jwt_optional_flags_test("jwt-secrets", "jwt-id", "jwt-version");
 }
+// DEPRECATED. This flag is deprecated but should not cause a crash.
 #[test]
 fn terminal_total_difficulty_override_flag() {
-    use beacon_node::beacon_chain::types::Uint256;
     CommandLineTest::new()
         .flag("terminal-total-difficulty-override", Some("1337424242"))
-        .run_with_zero_port()
-        .with_spec::<MainnetEthSpec, _>(|spec| {
-            assert_eq!(spec.terminal_total_difficulty, Uint256::from(1337424242))
-        });
+        .run_with_zero_port();
 }
+// DEPRECATED. This flag is deprecated but should not cause a crash.
 #[test]
 fn terminal_block_hash_and_activation_epoch_override_flags() {
     CommandLineTest::new()
@@ -760,43 +758,14 @@ fn terminal_block_hash_and_activation_epoch_override_flags() {
             "terminal-block-hash-override",
             Some("0x4242424242424242424242424242424242424242424242424242424242424242"),
         )
-        .run_with_zero_port()
-        .with_spec::<MainnetEthSpec, _>(|spec| {
-            assert_eq!(
-                spec.terminal_block_hash,
-                ExecutionBlockHash::from_str(
-                    "0x4242424242424242424242424242424242424242424242424242424242424242"
-                )
-                .unwrap()
-            );
-            assert_eq!(spec.terminal_block_hash_activation_epoch, 1337);
-        });
-}
-#[test]
-#[should_panic]
-fn terminal_block_hash_missing_activation_epoch() {
-    CommandLineTest::new()
-        .flag(
-            "terminal-block-hash-override",
-            Some("0x4242424242424242424242424242424242424242424242424242424242424242"),
-        )
-        .run_with_zero_port();
-}
-#[test]
-#[should_panic]
-fn epoch_override_missing_terminal_block_hash() {
-    CommandLineTest::new()
-        .flag("terminal-block-hash-epoch-override", Some("1337"))
-        .run_with_zero_port();
-}
+        .run_with_zero_port();
+}
+// DEPRECATED. This flag is deprecated but should not cause a crash.
 #[test]
 fn safe_slots_to_import_optimistically_flag() {
     CommandLineTest::new()
         .flag("safe-slots-to-import-optimistically", Some("421337"))
-        .run_with_zero_port()
-        .with_spec::<MainnetEthSpec, _>(|spec| {
-            assert_eq!(spec.safe_slots_to_import_optimistically, 421337)
-        });
+        .run_with_zero_port();
 }
 
 // Tests for Network flags.
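The deprecation shim above reduces to a small, reusable clap pattern: keep accepting a value so existing command lines still parse, hide the flag from `--help`, and log instead of acting on it. A minimal standalone sketch, assuming clap 4's builder API (the same API the diff uses); the binary name and helper are illustrative:

```rust
use clap::{Arg, ArgAction, Command};

// Builds a flag that is still accepted but does nothing. Hypothetical helper,
// shown only to illustrate the pattern applied in the diff above.
fn deprecated_flag(name: &'static str) -> Arg {
    Arg::new(name)
        .long(name)
        .help("DEPRECATED")
        .action(ArgAction::Set) // still takes a value, so old invocations parse
        .hide(true) // no longer listed in --help output
}

fn main() {
    let matches = Command::new("node")
        .arg(deprecated_flag("terminal-total-difficulty-override"))
        .get_matches();

    // The parsed value is never read; the only effect is a warning.
    if matches
        .get_one::<String>("terminal-total-difficulty-override")
        .is_some()
    {
        eprintln!("The terminal-total-difficulty-override flag is deprecated and does nothing");
    }
}
```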
diff --git a/lighthouse/tests/exec.rs b/lighthouse/tests/exec.rs index 9d6453908c8..5379912c131 100644 --- a/lighthouse/tests/exec.rs +++ b/lighthouse/tests/exec.rs @@ -140,11 +140,6 @@ impl CompletedTest { func(&self.config); } - pub fn with_spec(self, func: F) { - let spec = ChainSpec::from_config::(&self.chain_config).unwrap(); - func(spec); - } - pub fn with_config_and_dir(self, func: F) { func(&self.config, &self.dir); } diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 9495047e7f9..117c89a22f5 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -25,6 +25,8 @@ # Intentionally omitted, as per https://github.com/sigp/lighthouse/issues/1835 "tests/.*/.*/ssz_static/Eth1Block/", "tests/.*/.*/ssz_static/PowBlock/", + # We no longer implement merge logic. + "tests/.*/bellatrix/fork_choice/on_merge_block", # light_client "tests/.*/.*/light_client/single_merkle_proof", "tests/.*/.*/light_client/sync", diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 97b449dab91..5e928d22441 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -627,8 +627,8 @@ impl Handler for ForkChoiceHandler { } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - // Merge block tests are only enabled for Bellatrix. - if self.handler_name == "on_merge_block" && fork_name != ForkName::Bellatrix { + // We no longer run on_merge_block tests since removing merge support. + if self.handler_name == "on_merge_block" { return false; } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 1812a101ca6..c2524c14e28 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -826,12 +826,6 @@ fn fork_choice_on_block() { ForkChoiceHandler::::new("on_block").run(); } -#[test] -fn fork_choice_on_merge_block() { - ForkChoiceHandler::::new("on_merge_block").run(); - ForkChoiceHandler::::new("on_merge_block").run(); -} - #[test] fn fork_choice_ex_ante() { ForkChoiceHandler::::new("ex_ante").run(); From 56a9befaa1e96d971e2f2ddf036ebeddb90e695e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 21 Oct 2024 17:54:42 +1100 Subject: [PATCH 2/8] Clarify command-line reference docs (#6524) * Clarify command-line reference docs * Update page title * Merge remote-tracking branch 'origin/unstable' into cli-reference * Update CLI script --- book/src/SUMMARY.md | 4 +-- book/src/cli.md | 55 ---------------------------------------- book/src/help_general.md | 2 +- scripts/cli.sh | 2 +- 4 files changed, 4 insertions(+), 59 deletions(-) delete mode 100644 book/src/cli.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 7fb0b2f4e70..86c97af0da1 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -54,13 +54,13 @@ * [Merge Migration](./merge-migration.md) * [Late Block Re-orgs](./late-block-re-orgs.md) * [Blobs](./advanced-blobs.md) -* [Built-In Documentation](./help_general.md) +* [Command Line Reference (CLI)](./help_general.md) * [Beacon Node](./help_bn.md) * [Validator Client](./help_vc.md) * [Validator Manager](./help_vm.md) * [Create](./help_vm_create.md) * [Import](./help_vm_import.md) - * [Move](./help_vm_move.md) + * [Move](./help_vm_move.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/cli.md b/book/src/cli.md deleted file mode 100644 index f9e7df07488..00000000000 --- a/book/src/cli.md +++ /dev/null 
@@ -1,55 +0,0 @@ -# Command-Line Interface (CLI) - -The `lighthouse` binary provides all necessary Ethereum consensus client functionality. It -has two primary sub-commands: - -- `$ lighthouse beacon_node`: the largest and most fundamental component which connects to - the p2p network, processes messages and tracks the head of the beacon - chain. -- `$ lighthouse validator_client`: a lightweight but important component which loads a validators private - key and signs messages using a `beacon_node` as a source-of-truth. - -There are also some ancillary binaries like `lcli` and `account_manager`, but -these are primarily for testing. - -> **Note:** documentation sometimes uses `$ lighthouse bn` and `$ lighthouse -> vc` instead of the long-form `beacon_node` and `validator_client`. These -> commands are valid on the CLI too. - -## Installation - -Typical users may install `lighthouse` to `CARGO_HOME` with `cargo install ---path lighthouse` from the root of the repository. See ["Configuring the -`PATH` environment variable"](https://www.rust-lang.org/tools/install) for more -information. - -For developers, we recommend building Lighthouse using the `$ cargo build --release ---bin lighthouse` command and executing binaries from the -`/target/release` directory. This is more ergonomic when -modifying and rebuilding regularly. - -## Documentation - -Each binary supports the `--help` flag, this is the best source of -documentation. - -```bash -lighthouse beacon_node --help -``` - -```bash -lighthouse validator_client --help -``` - -## Creating a new database/testnet - -Lighthouse should run out-of-the box and connect to the current testnet -maintained by Sigma Prime. - -However, for developers, testnets can be created by following the instructions -outlined in [testnets](./testnets.md). The steps listed here will create a -local database specified to a new testnet. - -## Resuming from an existing database - -Once a database/testnet has been created, it can be resumed by running `$ lighthouse bn`. diff --git a/book/src/help_general.md b/book/src/help_general.md index aa0ae768553..996b048d10a 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -1,4 +1,4 @@ -# Lighthouse General Commands +# Lighthouse CLI Reference ``` Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a diff --git a/scripts/cli.sh b/scripts/cli.sh index e43c05a834f..ef4ed158ad8 100755 --- a/scripts/cli.sh +++ b/scripts/cli.sh @@ -40,7 +40,7 @@ vm_import=./help_vm_import.md vm_move=./help_vm_move.md # create .md files -write_to_file "$general_cli" "$general" "Lighthouse General Commands" +write_to_file "$general_cli" "$general" "Lighthouse CLI Reference" write_to_file "$bn_cli" "$bn" "Beacon Node" write_to_file "$vc_cli" "$vc" "Validator Client" write_to_file "$vm_cli" "$vm" "Validator Manager" From 9aefb5539baff637d68deb3dd386ff45312f3573 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 21 Oct 2024 23:42:51 +1100 Subject: [PATCH 3/8] Fix BlobsByRange by reverting PR6462 (#6526) * Revert "Remove generic E from RequestId (#6462)" This reverts commit 772929fae27bd9a2978884c7648dc10fecf3d876. 
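The core of the fix is visible in `methods.rs` below: requests are SSZ-encoded straight onto the wire (`req.as_ssz_bytes()` in the codec), so the `max_blobs_per_block` field that #6462 added to `BlobsByRangeRequest` changed the request's wire encoding. Restoring the `E: EthSpec` type parameter lets the blob limit be derived from the spec type again. A standalone sketch with a stand-in trait (the real `EthSpec` carries far more than this one constant):

```rust
// Stand-in for the real EthSpec trait, reduced to the one value we need here.
pub trait EthSpec {
    fn max_blobs_per_block() -> usize;
}

pub struct MainnetEthSpec;
impl EthSpec for MainnetEthSpec {
    fn max_blobs_per_block() -> usize {
        6 // mainnet Deneb value
    }
}

// The request once again carries only wire data; no spec-derived field.
pub struct BlobsByRangeRequest {
    pub start_slot: u64,
    pub count: u64,
}

impl BlobsByRangeRequest {
    // The response bound comes from the spec type, not from untrusted input.
    pub fn max_blobs_requested<E: EthSpec>(&self) -> u64 {
        self.count.saturating_mul(E::max_blobs_per_block() as u64)
    }
}

fn main() {
    let req = BlobsByRangeRequest { start_slot: 0, count: 32 };
    assert_eq!(req.max_blobs_requested::<MainnetEthSpec>(), 192);
}
```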
--- .../lighthouse_network/src/rpc/codec.rs | 19 ++++++------- .../lighthouse_network/src/rpc/handler.rs | 14 ++++------ .../lighthouse_network/src/rpc/methods.rs | 28 +++++++++++-------- beacon_node/lighthouse_network/src/rpc/mod.rs | 10 +++---- .../lighthouse_network/src/rpc/outbound.rs | 8 ++---- .../lighthouse_network/src/rpc/protocol.rs | 14 +++++----- .../src/rpc/rate_limiter.rs | 3 +- .../src/rpc/self_limiter.rs | 12 ++++---- .../lighthouse_network/src/service/mod.rs | 6 ++-- .../lighthouse_network/tests/rpc_tests.rs | 1 - .../network_beacon_processor/rpc_methods.rs | 4 +-- .../src/network_beacon_processor/tests.rs | 7 ++--- beacon_node/network/src/router.rs | 8 +++--- beacon_node/network/src/service.rs | 2 +- .../network/src/sync/network_context.rs | 1 - 15 files changed, 68 insertions(+), 69 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 19f1b8def73..9bdecab70b1 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -28,7 +28,7 @@ const CONTEXT_BYTES_LEN: usize = 4; /* Inbound Codec */ -pub struct SSZSnappyInboundCodec { +pub struct SSZSnappyInboundCodec { protocol: ProtocolId, inner: Uvi, len: Option, @@ -143,7 +143,7 @@ impl Encoder> for SSZSnappyInboundCodec { // Decoder for inbound streams: Decodes RPC requests from peers impl Decoder for SSZSnappyInboundCodec { - type Item = RequestType; + type Item = RequestType; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -195,7 +195,7 @@ impl Decoder for SSZSnappyInboundCodec { } /* Outbound Codec: Codec for initiating RPC requests */ -pub struct SSZSnappyOutboundCodec { +pub struct SSZSnappyOutboundCodec { inner: Uvi, len: Option, protocol: ProtocolId, @@ -322,10 +322,10 @@ impl SSZSnappyOutboundCodec { } // Encoder for outbound streams: Encodes RPC Requests to peers -impl Encoder for SSZSnappyOutboundCodec { +impl Encoder> for SSZSnappyOutboundCodec { type Error = RPCError; - fn encode(&mut self, item: RequestType, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RequestType, dst: &mut BytesMut) -> Result<(), Self::Error> { let bytes = match item { RequestType::Status(req) => req.as_ssz_bytes(), RequestType::Goodbye(req) => req.as_ssz_bytes(), @@ -549,11 +549,11 @@ fn handle_length( /// Decodes an `InboundRequest` from the byte stream. /// `decoded_buffer` should be an ssz-encoded bytestream with // length = length-prefix received in the beginning of the stream. -fn handle_rpc_request( +fn handle_rpc_request( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], spec: &ChainSpec, -) -> Result, RPCError> { +) -> Result>, RPCError> { match versioned_protocol { SupportedProtocol::StatusV1 => Ok(Some(RequestType::Status( StatusMessage::from_ssz_bytes(decoded_buffer)?, @@ -1035,7 +1035,6 @@ mod tests { BlobsByRangeRequest { start_slot: 0, count: 10, - max_blobs_per_block: Spec::max_blobs_per_block(), } } @@ -1181,7 +1180,7 @@ mod tests { } /// Verifies that requests we send are encoded in a way that we would correctly decode too. 
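    /// Each request is encoded with the outbound codec and decoded back with
    /// the inbound codec; the test asserts the round-tripped request equals
    /// the original.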
- fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { + fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); @@ -1778,7 +1777,7 @@ mod tests { fn test_encode_then_decode_request() { let chain_spec = Spec::default_spec(); - let requests: &[RequestType] = &[ + let requests: &[RequestType] = &[ RequestType::Ping(ping_message()), RequestType::Status(status_message()), RequestType::Goodbye(GoodbyeReason::Fault), diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 74ccb85dccc..e76d6d27866 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -20,7 +20,6 @@ use slog::{crit, debug, trace}; use smallvec::SmallVec; use std::{ collections::{hash_map::Entry, VecDeque}, - marker::PhantomData, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -97,7 +96,7 @@ where events_out: SmallVec<[HandlerEvent; 4]>, /// Queue of outbound substreams to open. - dial_queue: SmallVec<[(Id, RequestType); 4]>, + dial_queue: SmallVec<[(Id, RequestType); 4]>, /// Current number of concurrent outbound substreams being opened. dial_negotiated: u32, @@ -207,7 +206,7 @@ pub enum OutboundSubstreamState { /// The framed negotiated substream. substream: Box>, /// Keeps track of the actual request sent. - request: RequestType, + request: RequestType, }, /// Closing an outbound substream> Closing(Box>), @@ -275,7 +274,7 @@ where } /// Opens an outbound substream with a request. - fn send_request(&mut self, id: Id, req: RequestType) { + fn send_request(&mut self, id: Id, req: RequestType) { match self.state { HandlerState::Active => { self.dial_queue.push((id, req)); @@ -331,7 +330,7 @@ where type ToBehaviour = HandlerEvent; type InboundProtocol = RPCProtocol; type OutboundProtocol = OutboundRequestContainer; - type OutboundOpenInfo = (Id, RequestType); // Keep track of the id and the request + type OutboundOpenInfo = (Id, RequestType); // Keep track of the id and the request type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { @@ -789,7 +788,6 @@ where req: req.clone(), fork_context: self.fork_context.clone(), max_rpc_size: self.listen_protocol().upgrade().max_rpc_size, - phantom: PhantomData, }, (), ) @@ -907,7 +905,7 @@ where fn on_fully_negotiated_outbound( &mut self, substream: OutboundFramed, - (id, request): (Id, RequestType), + (id, request): (Id, RequestType), ) { self.dial_negotiated -= 1; // Reset any io-retries counter. 
@@ -963,7 +961,7 @@ where } fn on_dial_upgrade_error( &mut self, - request_info: (Id, RequestType), + request_info: (Id, RequestType), error: StreamUpgradeError, ) { let (id, req) = request_info; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 912fda36060..bb8bfb0e206 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -8,6 +8,7 @@ use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::U256, VariableList}; use std::collections::BTreeMap; use std::fmt::Display; +use std::marker::PhantomData; use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; @@ -93,19 +94,27 @@ pub struct Ping { variant_attributes(derive(Clone, Debug, PartialEq, Serialize),) )] #[derive(Clone, Debug, PartialEq)] -pub struct MetadataRequest; +pub struct MetadataRequest { + _phantom_data: PhantomData, +} -impl MetadataRequest { +impl MetadataRequest { pub fn new_v1() -> Self { - Self::V1(MetadataRequestV1 {}) + Self::V1(MetadataRequestV1 { + _phantom_data: PhantomData, + }) } pub fn new_v2() -> Self { - Self::V2(MetadataRequestV2 {}) + Self::V2(MetadataRequestV2 { + _phantom_data: PhantomData, + }) } pub fn new_v3() -> Self { - Self::V3(MetadataRequestV3 {}) + Self::V3(MetadataRequestV3 { + _phantom_data: PhantomData, + }) } } @@ -315,14 +324,11 @@ pub struct BlobsByRangeRequest { /// The number of slots from the start slot. pub count: u64, - - /// maximum number of blobs in a single block. - pub max_blobs_per_block: usize, } impl BlobsByRangeRequest { - pub fn max_blobs_requested(&self) -> u64 { - self.count.saturating_mul(self.max_blobs_per_block as u64) + pub fn max_blobs_requested(&self) -> u64 { + self.count.saturating_mul(E::max_blobs_per_block() as u64) } } @@ -338,7 +344,7 @@ pub struct DataColumnsByRangeRequest { } impl DataColumnsByRangeRequest { - pub fn max_requested(&self) -> u64 { + pub fn max_requested(&self) -> u64 { self.count.saturating_mul(self.columns.len() as u64) } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index ed4da463ffd..7d091da7660 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -61,7 +61,7 @@ pub enum RPCSend { /// /// The `Id` is given by the application making the request. These /// go over *outbound* connections. - Request(Id, RequestType), + Request(Id, RequestType), /// A response sent from Lighthouse. /// /// The `SubstreamId` must correspond to the RPC-given ID of the original request received from the @@ -79,7 +79,7 @@ pub enum RPCReceived { /// /// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the /// *inbound* substream over which it is managed. - Request(Request), + Request(Request), /// A response received from the outside. /// /// The `Id` corresponds to the application given ID of the original request sent to the @@ -113,10 +113,10 @@ impl RequestId { /// An Rpc Request. #[derive(Debug, Clone)] -pub struct Request { +pub struct Request { pub id: RequestId, pub substream_id: SubstreamId, - pub r#type: RequestType, + pub r#type: RequestType, } impl std::fmt::Display for RPCSend { @@ -221,7 +221,7 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. 
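    /// When the self rate-limiter is enabled, the request may be queued and
    /// forwarded to the handler later instead of being sent immediately.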
- pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: RequestType) { + pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: RequestType) { let event = if let Some(self_limiter) = self.self_limiter.as_mut() { match self_limiter.allows(peer_id, request_id, req) { Ok(event) => event, diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 1037139f2fa..b614313a84b 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -7,7 +7,6 @@ use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, SinkExt}; use libp2p::core::{OutboundUpgrade, UpgradeInfo}; -use std::marker::PhantomData; use std::sync::Arc; use tokio_util::{ codec::Framed, @@ -20,14 +19,13 @@ use types::{EthSpec, ForkContext}; // `OutboundUpgrade` #[derive(Debug, Clone)] -pub struct OutboundRequestContainer { - pub req: RequestType, +pub struct OutboundRequestContainer { + pub req: RequestType, pub fork_context: Arc, pub max_rpc_size: usize, - pub phantom: PhantomData, } -impl UpgradeInfo for OutboundRequestContainer { +impl UpgradeInfo for OutboundRequestContainer { type Info = ProtocolId; type InfoIter = Vec; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index b4f6dac4faf..16c3a133912 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -686,7 +686,7 @@ pub fn rpc_data_column_limits() -> RpcLimits { // The inbound protocol reads the request, decodes it and returns the stream to the protocol // handler to respond to once ready. -pub type InboundOutput = (RequestType, InboundFramed); +pub type InboundOutput = (RequestType, InboundFramed); pub type InboundFramed = Framed>>>, SSZSnappyInboundCodec>; @@ -754,7 +754,7 @@ where } #[derive(Debug, Clone, PartialEq)] -pub enum RequestType { +pub enum RequestType { Status(StatusMessage), Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), @@ -768,11 +768,11 @@ pub enum RequestType { LightClientFinalityUpdate, LightClientUpdatesByRange(LightClientUpdatesByRangeRequest), Ping(Ping), - MetaData(MetadataRequest), + MetaData(MetadataRequest), } /// Implements the encoding per supported protocol for `RPCRequest`. -impl RequestType { +impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. 
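    /// Used for rate limiting, where a request is priced by the maximum
    /// number of response chunks it can elicit.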
@@ -782,10 +782,10 @@ impl RequestType { RequestType::Goodbye(_) => 0, RequestType::BlocksByRange(req) => *req.count(), RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, - RequestType::BlobsByRange(req) => req.max_blobs_requested(), + RequestType::BlobsByRange(req) => req.max_blobs_requested::(), RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, - RequestType::DataColumnsByRange(req) => req.max_requested(), + RequestType::DataColumnsByRange(req) => req.max_requested::(), RequestType::Ping(_) => 1, RequestType::MetaData(_) => 1, RequestType::LightClientBootstrap(_) => 1, @@ -1027,7 +1027,7 @@ impl std::error::Error for RPCError { } } -impl std::fmt::Display for RequestType { +impl std::fmt::Display for RequestType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { RequestType::Status(status) => write!(f, "Status Message: {}", status), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index e11f7f0e73e..ecbacc8c112 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -9,6 +9,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use tokio::time::Interval; +use types::EthSpec; /// Nanoseconds since a given time. // Maintained as u64 to reduce footprint @@ -261,7 +262,7 @@ pub trait RateLimiterItem { fn max_responses(&self) -> u64; } -impl RateLimiterItem for super::RequestType { +impl RateLimiterItem for super::RequestType { fn protocol(&self) -> Protocol { self.versioned_protocol().protocol() } diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index 9c68e0793d9..e968ad11e3d 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -19,8 +19,8 @@ use super::{ /// A request that was rate limited or waiting on rate limited requests for the same peer and /// protocol. -struct QueuedRequest { - req: RequestType, +struct QueuedRequest { + req: RequestType, request_id: Id, } @@ -28,7 +28,7 @@ pub(crate) struct SelfRateLimiter { /// Requests queued for sending per peer. This requests are stored when the self rate /// limiter rejects them. Rate limiting is based on a Peer and Protocol basis, therefore /// are stored in the same way. - delayed_requests: HashMap<(PeerId, Protocol), VecDeque>>, + delayed_requests: HashMap<(PeerId, Protocol), VecDeque>>, /// The delay required to allow a peer's outbound request per protocol. next_peer_request: DelayQueue<(PeerId, Protocol)>, /// Rate limiter for our own requests. @@ -70,7 +70,7 @@ impl SelfRateLimiter { &mut self, peer_id: PeerId, request_id: Id, - req: RequestType, + req: RequestType, ) -> Result, Error> { let protocol = req.versioned_protocol().protocol(); // First check that there are not already other requests waiting to be sent. 
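        // Requests the limiter refuses are parked in `delayed_requests` and
        // retried when `next_peer_request` fires for the same (peer, protocol).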
@@ -101,9 +101,9 @@ impl SelfRateLimiter { limiter: &mut RateLimiter, peer_id: PeerId, request_id: Id, - req: RequestType, + req: RequestType, log: &Logger, - ) -> Result, (QueuedRequest, Duration)> { + ) -> Result, (QueuedRequest, Duration)> { match limiter.allows(&peer_id, &req) { Ok(()) => Ok(BehaviourAction::NotifyHandler { peer_id, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 462612e40a5..056b6be24d3 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -80,7 +80,7 @@ pub enum NetworkEvent { /// Identifier of the request. All responses to this request must use this id. id: PeerRequestId, /// Request the peer sent. - request: rpc::Request, + request: rpc::Request, }, ResponseReceived { /// Peer that sent the response. @@ -966,7 +966,7 @@ impl Network { &mut self, peer_id: PeerId, request_id: AppRequestId, - request: RequestType, + request: RequestType, ) -> Result<(), (AppRequestId, RPCError)> { // Check if the peer is connected before sending an RPC request if !self.swarm.is_connected(&peer_id) { @@ -1179,7 +1179,7 @@ impl Network { /// Sends a METADATA response to a peer. fn send_meta_data_response( &mut self, - _req: MetadataRequest, + _req: MetadataRequest, id: PeerRequestId, request_id: rpc::RequestId, peer_id: PeerId, diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index b5125a2d6bf..f721c8477cf 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -327,7 +327,6 @@ fn test_blobs_by_range_chunked_rpc() { let rpc_request = RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: 0, count: slot_count, - max_blobs_per_block: E::max_blobs_per_block(), }); // BlocksByRange Response diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 709cbe5b120..6d32806713d 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -893,7 +893,7 @@ impl NetworkBeaconProcessor { ); // Should not send more than max request blocks - if req.max_blobs_requested() > self.chain.spec.max_request_blob_sidecars { + if req.max_blobs_requested::() > self.chain.spec.max_request_blob_sidecars { return Err(( RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", @@ -1098,7 +1098,7 @@ impl NetworkBeaconProcessor { ); // Should not send more than max request data columns - if req.max_requested() > self.chain.spec.max_request_data_column_sidecars { + if req.max_requested::() > self.chain.spec.max_request_data_column_sidecars { return Err(( RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index e9805eb5ba7..9d774d97c15 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -30,9 +30,9 @@ use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use types::{ - Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, EthSpec, Hash256, - MainnetEthSpec, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedVoluntaryExit, Slot, 
SubnetId, + Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, Hash256, MainnetEthSpec, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, + SubnetId, }; type E = MainnetEthSpec; @@ -366,7 +366,6 @@ impl TestRig { BlobsByRangeRequest { start_slot: 0, count, - max_blobs_per_block: E::max_blobs_per_block(), }, ) .unwrap(); diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index a445cd6ea36..e1badfda9d5 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -58,7 +58,7 @@ pub enum RouterMessage { RPCRequestReceived { peer_id: PeerId, id: PeerRequestId, - request: rpc::Request, + request: rpc::Request, }, /// An RPC response has been received. RPCResponseReceived { @@ -193,11 +193,11 @@ impl Router { /* RPC - Related functionality */ /// A new RPC request has been received from the network. - fn handle_rpc_request( + fn handle_rpc_request( &mut self, peer_id: PeerId, request_id: PeerRequestId, - rpc_request: rpc::Request, + rpc_request: rpc::Request, ) { if !self.network_globals.peers.read().is_connected(&peer_id) { debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?rpc_request); @@ -836,7 +836,7 @@ impl HandlerNetworkContext { } /// Sends a request to the network task. - pub fn send_processor_request(&mut self, peer_id: PeerId, request: RequestType) { + pub fn send_processor_request(&mut self, peer_id: PeerId, request: RequestType) { self.inform_network(NetworkMessage::SendRequest { peer_id, request_id: AppRequestId::Router, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 269744dc054..5a66cb7f30d 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -62,7 +62,7 @@ pub enum NetworkMessage { /// Send an RPC request to the libp2p service. SendRequest { peer_id: PeerId, - request: RequestType, + request: RequestType, request_id: AppRequestId, }, /// Send a successful Response to the libp2p service. diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index eb42e697cd2..5f7778ffcc6 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -401,7 +401,6 @@ impl SyncNetworkContext { request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: *request.start_slot(), count: *request.count(), - max_blobs_per_block: T::EthSpec::max_blobs_per_block(), }), request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), }) From b88cb8ced3f41e6c0f99e427d9381876cade7213 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 22 Oct 2024 09:52:19 +1100 Subject: [PATCH 4/8] VC: use block publication v2 SSZ API (#6523) * VC: use block publication v2 SSZ API --- validator_client/src/block_service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 665eaf0a0f7..9903324cade 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -525,7 +525,7 @@ impl BlockService { &[metrics::BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blocks(signed_block) + .post_beacon_blocks_v2_ssz(signed_block, None) .await .or_else(|e| handle_block_post_error(e, slot, log))? 
} @@ -535,7 +535,7 @@ impl BlockService { &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blinded_blocks(signed_block) + .post_beacon_blinded_blocks_v2_ssz(signed_block, None) .await .or_else(|e| handle_block_post_error(e, slot, log))? } From ad229a63c033de0b51441963e195fd6fa6594abc Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 23 Oct 2024 09:51:42 +1100 Subject: [PATCH 5/8] Use `make cli-local` in CI test suite to remove redundant docker (#6531) * Remove docker command from `make cli`. * Run `cli-local` on CI. * Update Makefile Co-authored-by: Mac L --- .github/workflows/test-suite.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 7cda3e477d6..a80470cf167 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -420,7 +420,7 @@ jobs: channel: stable cache-target: release - name: Run Makefile to trigger the bash script - run: make cli + run: make cli-local # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether # a PR is safe to merge. New jobs should be added here. test-suite-success: diff --git a/Makefile b/Makefile index 32665d43aed..fd7d45f26a0 100644 --- a/Makefile +++ b/Makefile @@ -183,7 +183,7 @@ test-exec-engine: # test vectors. test: test-release -# Updates the CLI help text pages in the Lighthouse book, building with Docker. +# Updates the CLI help text pages in the Lighthouse book, building with Docker (primarily for Windows users). cli: docker run --rm --user=root \ -v ${PWD}:/home/runner/actions-runner/lighthouse sigmaprime/github-runner \ From 40d3423193e6f97f44c42eb3673af607627c8f7f Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Wed, 23 Oct 2024 08:47:18 +0900 Subject: [PATCH 6/8] RequestType::max_responses for LightClientUpdatesByRange (#6534) * return the actual number of instances the request requires --- beacon_node/lighthouse_network/src/rpc/protocol.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 16c3a133912..d0dbffe9326 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -791,7 +791,7 @@ impl RequestType { RequestType::LightClientBootstrap(_) => 1, RequestType::LightClientOptimisticUpdate => 1, RequestType::LightClientFinalityUpdate => 1, - RequestType::LightClientUpdatesByRange(req) => req.max_requested(), + RequestType::LightClientUpdatesByRange(req) => req.count, } } From 9d069a9588faf859ac0e7e4155a7a18ee62a9af6 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 24 Oct 2024 22:19:13 -0700 Subject: [PATCH 7/8] Fix electra light client types (#6361) * persist light client updates * update beacon chain to serve light client updates * resolve todos * cache best update * extend cache parts * is better light client update * resolve merge conflict * initial api changes * add lc update db column * fmt * added tests * add sim * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * fix some weird issues with the simulator * tests * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * test changes * merge conflict * testing * started work on ef tests and some code clean up * update tests * linting * noop pre altair, were still failing on electra though * allow 
for zeroed light client header * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * merge unstable * remove unwraps * remove unwraps * fetch bootstrap without always querying for state * storing bootstrap parts in db * mroe code cleanup * test * prune sync committee branches from dropped chains * Update light_client_update.rs * merge unstable * move functionality to helper methods * refactor is best update fn * refactor is best update fn * improve organization of light client server cache logic * fork diget calc, and only spawn as many blcoks as we need for the lc update test * resovle merge conflict * add electra bootstrap logic, add logic to cache current sync committee * add latest sync committe branch cache * fetch lc update from the cache if it exists * fmt * Fix beacon_chain tests * Add debug code to update ranking_order ef test * Fix compare code * merge conflicts * merge conflict * add better error messaging * resolve merge conflicts * remove lc update from basicsim * rename sync comittte variable and fix persist condition * refactor get_light_client_update logic * add better comments, return helpful error messages over http and rpc * pruning canonical non checkpoint slots * fix test * rerun test * update pruning logic, add tests * fix tests * fix imports * fmt * refactor db code * Refactor db method * Refactor db method * lc electra changes * Merge branch 'unstable' of https://github.com/sigp/lighthouse into light-client-electra * add additional comments * testing lc merkle changes * lc electra * update struct defs * Merge branch 'unstable' of https://github.com/sigp/lighthouse into light-client-electra * fix merge * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-bootstrap * fix merge * linting * merge conflict * prevent overflow * enable lc server for http api tests * Merge branch 'unstable' of https://github.com/sigp/lighthouse into light-client-electra * get tests working: * remove related TODOs * fix test lint * Merge branch 'persist-light-client-bootstrap' of https://github.com/eserilev/lighthouse into light-client-electra * fix tests * fix conflicts * remove prints * Merge branch 'persist-light-client-bootstrap' of https://github.com/eserilev/lighthouse into light-client-electra * remove warning * resolve conflicts * merge conflicts * linting * remove comments * cleanup * linting * Merge branch 'unstable' of https://github.com/sigp/lighthouse into light-client-electra * pre/post electra light client cached data * add proof type alias * move is_empty_branch method out of impl * add ssz tests for all forks * refactor beacon state proof codepaths * rename method * fmt * clean up proof logic * refactor merkle proof api * fmt * Merge branch 'unstable' into light-client-electra * Use superstruct mapping macros * Merge branch 'unstable' of https://github.com/sigp/lighthouse into light-client-electra * rename proof to merkleproof * fmt * Resolve merge conflicts * merge conflicts --- .../src/light_client_server_cache.rs | 46 ++---- beacon_node/beacon_chain/tests/store_tests.rs | 12 -- beacon_node/store/src/hot_cold_store.rs | 8 +- consensus/types/src/beacon_state.rs | 92 +++++++---- consensus/types/src/lib.rs | 2 +- consensus/types/src/light_client_bootstrap.rs | 57 +++++-- .../types/src/light_client_finality_update.rs | 43 ++++- consensus/types/src/light_client_header.rs | 28 ++++ .../src/light_client_optimistic_update.rs | 26 ++- consensus/types/src/light_client_update.rs | 156 
++++++++++++++---- testing/ef_tests/check_all_files_accessed.py | 5 - .../src/cases/merkle_proof_validity.rs | 109 +++++++++++- testing/ef_tests/src/handler.rs | 47 ++++-- testing/ef_tests/tests/tests.rs | 44 ++--- 14 files changed, 490 insertions(+), 185 deletions(-) diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index ca015d0365a..e0ddd8c8826 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,25 +1,19 @@ use crate::errors::BeaconChainError; use crate::{metrics, BeaconChainTypes, BeaconStore}; -use eth2::types::light_client_update::CurrentSyncCommitteeProofLen; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; use slog::{debug, Logger}; use ssz::Decode; -use ssz_types::FixedVector; use std::num::NonZeroUsize; use std::sync::Arc; use store::DBColumn; use store::KeyValueStore; use tree_hash::TreeHash; -use types::light_client_update::{ - FinalizedRootProofLen, NextSyncCommitteeProofLen, CURRENT_SYNC_COMMITTEE_INDEX, - FINALIZED_ROOT_INDEX, NEXT_SYNC_COMMITTEE_INDEX, -}; use types::non_zero_usize::new_non_zero_usize; use types::{ BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, EthSpec, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, - LightClientUpdate, Slot, SyncAggregate, SyncCommittee, + LightClientUpdate, MerkleProof, Slot, SyncAggregate, SyncCommittee, }; /// A prev block cache miss requires to re-generate the state of the post-parent block. Items in the @@ -69,17 +63,14 @@ impl LightClientServerCache { block_post_state: &mut BeaconState, ) -> Result<(), BeaconChainError> { let _timer = metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES); - + let fork_name = spec.fork_name_at_slot::(block.slot()); // Only post-altair - if spec.fork_name_at_slot::(block.slot()) == ForkName::Base { - return Ok(()); + if fork_name.altair_enabled() { + // Persist in memory cache for a descendent block + let cached_data = LightClientCachedData::from_state(block_post_state)?; + self.prev_block_cache.lock().put(block_root, cached_data); } - // Persist in memory cache for a descendent block - - let cached_data = LightClientCachedData::from_state(block_post_state)?; - self.prev_block_cache.lock().put(block_root, cached_data); - Ok(()) } @@ -413,16 +404,12 @@ impl Default for LightClientServerCache { } } -type FinalityBranch = FixedVector; -type NextSyncCommitteeBranch = FixedVector; -type CurrentSyncCommitteeBranch = FixedVector; - #[derive(Clone)] struct LightClientCachedData { finalized_checkpoint: Checkpoint, - finality_branch: FinalityBranch, - next_sync_committee_branch: NextSyncCommitteeBranch, - current_sync_committee_branch: CurrentSyncCommitteeBranch, + finality_branch: MerkleProof, + next_sync_committee_branch: MerkleProof, + current_sync_committee_branch: MerkleProof, next_sync_committee: Arc>, current_sync_committee: Arc>, finalized_block_root: Hash256, @@ -430,17 +417,18 @@ struct LightClientCachedData { impl LightClientCachedData { fn from_state(state: &mut BeaconState) -> Result { + let (finality_branch, next_sync_committee_branch, current_sync_committee_branch) = ( + state.compute_finalized_root_proof()?, + state.compute_current_sync_committee_proof()?, + state.compute_next_sync_committee_proof()?, + ); Ok(Self { finalized_checkpoint: state.finalized_checkpoint(), - finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), 
+ finality_branch, next_sync_committee: state.next_sync_committee()?.clone(), current_sync_committee: state.current_sync_committee()?.clone(), - next_sync_committee_branch: state - .compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)? - .into(), - current_sync_committee_branch: state - .compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)? - .into(), + next_sync_committee_branch, + current_sync_committee_branch, finalized_block_root: state.finalized_checkpoint().root, }) } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 1a6b444319c..9e6760d06e3 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -206,13 +206,6 @@ async fn light_client_bootstrap_test() { .build() .expect("should build"); - let current_state = harness.get_current_state(); - - if ForkName::Electra == current_state.fork_name_unchecked() { - // TODO(electra) fix beacon state `compute_merkle_proof` - return; - } - let finalized_checkpoint = beacon_chain .canonical_head .cached_head() @@ -353,11 +346,6 @@ async fn light_client_updates_test() { let current_state = harness.get_current_state(); - if ForkName::Electra == current_state.fork_name_unchecked() { - // TODO(electra) fix beacon state `compute_merkle_proof` - return; - } - // calculate the sync period from the previous slot let sync_period = (current_state.slot() - Slot::new(1)) .epoch(E::slots_per_epoch()) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 991f215210c..5483c490dcd 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -44,7 +44,6 @@ use std::path::Path; use std::sync::Arc; use std::time::Duration; use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; -use types::light_client_update::CurrentSyncCommitteeProofLen; use types::*; /// On-disk database that stores finalized states efficiently. @@ -641,15 +640,14 @@ impl, Cold: ItemStore> HotColdDB pub fn get_sync_committee_branch( &self, block_root: &Hash256, - ) -> Result>, Error> { + ) -> Result, Error> { let column = DBColumn::SyncCommitteeBranch; if let Some(bytes) = self .hot_db .get_bytes(column.into(), &block_root.as_ssz_bytes())? { - let sync_committee_branch: FixedVector = - FixedVector::from_ssz_bytes(&bytes)?; + let sync_committee_branch = Vec::::from_ssz_bytes(&bytes)?; return Ok(Some(sync_committee_branch)); } @@ -677,7 +675,7 @@ impl, Cold: ItemStore> HotColdDB pub fn store_sync_committee_branch( &self, block_root: Hash256, - sync_committee_branch: &FixedVector, + sync_committee_branch: &MerkleProof, ) -> Result<(), Error> { let column = DBColumn::SyncCommitteeBranch; self.hot_db.put_bytes( diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index d772cb23b3d..f214991d516 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -2506,33 +2506,64 @@ impl BeaconState { Ok(()) } - pub fn compute_merkle_proof(&self, generalized_index: usize) -> Result, Error> { - // 1. Convert generalized index to field index. - let field_index = match generalized_index { + pub fn compute_current_sync_committee_proof(&self) -> Result, Error> { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. 
Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + let field_index = if self.fork_name_unchecked().electra_enabled() { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + } else { light_client_update::CURRENT_SYNC_COMMITTEE_INDEX - | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { - // Sync committees are top-level fields, subtract off the generalized indices - // for the internal nodes. Result should be 22 or 23, the field offset of the committee - // in the `BeaconState`: - // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate - generalized_index - .checked_sub(self.num_fields_pow2()) - .ok_or(Error::IndexNotSupported(generalized_index))? - } - light_client_update::FINALIZED_ROOT_INDEX => { - // Finalized root is the right child of `finalized_checkpoint`, divide by two to get - // the generalized index of `state.finalized_checkpoint`. - let finalized_checkpoint_generalized_index = generalized_index / 2; - // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches - // position of `finalized_checkpoint` in `BeaconState`. - finalized_checkpoint_generalized_index - .checked_sub(self.num_fields_pow2()) - .ok_or(Error::IndexNotSupported(generalized_index))? - } - _ => return Err(Error::IndexNotSupported(generalized_index)), }; + let leaves = self.get_beacon_state_leaves(); + self.generate_proof(field_index, &leaves) + } - // 2. Get all `BeaconState` leaves. + pub fn compute_next_sync_committee_proof(&self) -> Result, Error> { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + let field_index = if self.fork_name_unchecked().electra_enabled() { + light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + } else { + light_client_update::NEXT_SYNC_COMMITTEE_INDEX + }; + let leaves = self.get_beacon_state_leaves(); + self.generate_proof(field_index, &leaves) + } + + pub fn compute_finalized_root_proof(&self) -> Result, Error> { + // Finalized root is the right child of `finalized_checkpoint`, divide by two to get + // the generalized index of `state.finalized_checkpoint`. + let field_index = if self.fork_name_unchecked().electra_enabled() { + // Index should be 169/2 - 64 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState` + light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + } else { + // Index should be 105/2 - 32 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState` + light_client_update::FINALIZED_ROOT_INDEX + }; + let leaves = self.get_beacon_state_leaves(); + let mut proof = self.generate_proof(field_index, &leaves)?; + proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); + Ok(proof) + } + + fn generate_proof( + &self, + field_index: usize, + leaves: &[Hash256], + ) -> Result, Error> { + let depth = self.num_fields_pow2().ilog2() as usize; + let tree = merkle_proof::MerkleTree::create(leaves, depth); + let (_, proof) = tree.generate_proof(field_index, depth)?; + Ok(proof) + } + + fn get_beacon_state_leaves(&self) -> Vec { let mut leaves = vec![]; #[allow(clippy::arithmetic_side_effects)] match self { @@ -2568,18 +2599,7 @@ impl BeaconState { } }; - // 3. 
Make deposit tree. - // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). - let depth = light_client_update::CURRENT_SYNC_COMMITTEE_PROOF_LEN; - let tree = merkle_proof::MerkleTree::create(&leaves, depth); - let (_, mut proof) = tree.generate_proof(field_index, depth)?; - - // 4. If we're proving the finalized root, patch in the finalized epoch to complete the proof. - if generalized_index == light_client_update::FINALIZED_ROOT_INDEX { - proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); - } - - Ok(proof) + leaves } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index e168199b98c..eff52378342 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -200,7 +200,7 @@ pub use crate::light_client_optimistic_update::{ }; pub use crate::light_client_update::{ Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair, - LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, + LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, MerkleProof, }; pub use crate::participation_flags::ParticipationFlags; pub use crate::payload::{ diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 25f029bcc00..21a7e5416f2 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -57,7 +57,16 @@ pub struct LightClientBootstrap { /// The `SyncCommittee` used in the requested period. pub current_sync_committee: Arc>, /// Merkle proof for sync committee + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "current_sync_committee_branch_altair") + )] pub current_sync_committee_branch: FixedVector, + #[superstruct( + only(Electra), + partial_getter(rename = "current_sync_committee_branch_electra") + )] + pub current_sync_committee_branch: FixedVector, } impl LightClientBootstrap { @@ -115,7 +124,7 @@ impl LightClientBootstrap { pub fn new( block: &SignedBlindedBeaconBlock, current_sync_committee: Arc>, - current_sync_committee_branch: FixedVector, + current_sync_committee_branch: Vec, chain_spec: &ChainSpec, ) -> Result { let light_client_bootstrap = match block @@ -126,22 +135,22 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), }; @@ -155,9 +164,7 @@ impl LightClientBootstrap { ) -> Result { let mut header = beacon_state.latest_block_header().clone(); header.state_root = 
beacon_state.update_tree_hash_cache()?; - let current_sync_committee_branch = - FixedVector::new(beacon_state.compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?)?; - + let current_sync_committee_branch = beacon_state.compute_current_sync_committee_proof()?; let current_sync_committee = beacon_state.current_sync_committee()?.clone(); let light_client_bootstrap = match block @@ -168,22 +175,22 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), }; @@ -210,8 +217,28 @@ impl ForkVersionDeserialize for LightClientBootstrap { #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientBootstrapAltair, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientBootstrapCapella, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapCapella); + } - ssz_tests!(LightClientBootstrapDeneb); + #[cfg(test)] + mod deneb { + use crate::{LightClientBootstrapDeneb, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientBootstrapElectra, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapElectra); + } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 91ee58b4be6..ba2f2083cd9 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -63,8 +63,13 @@ pub struct LightClientFinalityUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. 
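    /// Electra branches are one hash longer than their pre-Electra
    /// counterparts: the Electra `BeaconState` has more than 32 top-level
    /// fields, so the proof depth grows by one (e.g. the finalized root's
    /// generalized index moves from 105 to 169, and 169 / 2 - 64 = 20 is
    /// still the field offset of `finalized_checkpoint` in the state).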
- #[test_random(default)] + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "finality_branch_altair") + )] pub finality_branch: FixedVector, + #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + pub finality_branch: FixedVector, /// current sync aggregate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -75,7 +80,7 @@ impl LightClientFinalityUpdate { pub fn new( attested_block: &SignedBlindedBeaconBlock, finalized_block: &SignedBlindedBeaconBlock, - finality_branch: FixedVector, + finality_branch: Vec, sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, @@ -92,7 +97,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderAltair::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }) @@ -104,7 +109,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderCapella::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -115,7 +120,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderDeneb::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -126,7 +131,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderElectra::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -226,8 +231,28 @@ impl ForkVersionDeserialize for LightClientFinalityUpdate { #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientFinalityUpdateAltair, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientFinalityUpdateCapella, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateCapella); + } - ssz_tests!(LightClientFinalityUpdateDeneb); + #[cfg(test)] + mod deneb { + use crate::{LightClientFinalityUpdateDeneb, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientFinalityUpdateElectra, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateElectra); + } } diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index fecdc39533f..52800f18ac2 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -307,3 +307,31 @@ impl ForkVersionDeserialize for LightClientHeader { } } } + +#[cfg(test)] +mod tests { + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientHeaderAltair, MainnetEthSpec}; + ssz_tests!(LightClientHeaderAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientHeaderCapella, MainnetEthSpec}; + ssz_tests!(LightClientHeaderCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientHeaderDeneb, MainnetEthSpec}; + ssz_tests!(LightClientHeaderDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientHeaderElectra, MainnetEthSpec}; + ssz_tests!(LightClientHeaderElectra); + } +} diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 
2f8cc034ebf..209388af87b 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -214,8 +214,28 @@ impl ForkVersionDeserialize for LightClientOptimisticUpdate { #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientOptimisticUpdateAltair, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientOptimisticUpdateCapella, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateCapella); + } - ssz_tests!(LightClientOptimisticUpdateDeneb); + #[cfg(test)] + mod deneb { + use crate::{LightClientOptimisticUpdateDeneb, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientOptimisticUpdateElectra, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateElectra); + } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 1f5592a929f..a7ddf8eb314 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -14,7 +14,7 @@ use serde_json::Value; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; -use ssz_types::typenum::{U4, U5, U6}; +use ssz_types::typenum::{U4, U5, U6, U7}; use std::sync::Arc; use superstruct::superstruct; use test_random_derive::TestRandom; @@ -25,24 +25,39 @@ pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; pub const EXECUTION_PAYLOAD_INDEX: usize = 25; +pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; +pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; +pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; + pub type FinalizedRootProofLen = U6; pub type CurrentSyncCommitteeProofLen = U5; pub type ExecutionPayloadProofLen = U4; - pub type NextSyncCommitteeProofLen = U5; +pub type FinalizedRootProofLenElectra = U7; +pub type CurrentSyncCommitteeProofLenElectra = U6; +pub type NextSyncCommitteeProofLenElectra = U6; + pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; +pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; + +pub type MerkleProof = Vec; // Max light client updates by range request limits // spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; type FinalityBranch = FixedVector; +type FinalityBranchElectra = FixedVector; type NextSyncCommitteeBranch = FixedVector; +type NextSyncCommitteeBranchElectra = FixedVector; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), @@ -124,8 +139,17 @@ pub struct LightClientUpdate { pub attested_header: LightClientHeaderElectra, /// The `SyncCommittee` used in the next period. 
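// A minimal standalone sketch (not from the patch) of how the new `*_ELECTRA` proof
// lengths relate to the generalized indices introduced above: a Merkle branch for
// generalized index `g` has floor(log2(g)) sibling nodes, which the `*_params` tests
// below pin down as 2^len <= g < 2^(len + 1). `proof_len` is a hypothetical helper;
// `usize::ilog2` requires Rust 1.67+ and panics on zero.
fn proof_len(generalized_index: usize) -> u32 {
    generalized_index.ilog2()
}

fn proof_len_examples() {
    assert_eq!(proof_len(55), 5); // NEXT_SYNC_COMMITTEE_INDEX: 32 <= 55 < 64
    assert_eq!(proof_len(87), 6); // NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: 64 <= 87 < 128
    assert_eq!(proof_len(169), 7); // FINALIZED_ROOT_INDEX_ELECTRA: 128 <= 169 < 256
}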
pub next_sync_committee: Arc>, - /// Merkle proof for next sync committee + // Merkle proof for next sync committee + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "next_sync_committee_branch_altair") + )] pub next_sync_committee_branch: NextSyncCommitteeBranch, + #[superstruct( + only(Electra), + partial_getter(rename = "next_sync_committee_branch_electra") + )] + pub next_sync_committee_branch: NextSyncCommitteeBranchElectra, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). #[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -136,7 +160,13 @@ pub struct LightClientUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "finality_branch_altair") + )] pub finality_branch: FinalityBranch, + #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + pub finality_branch: FinalityBranchElectra, /// current sync aggreggate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -165,8 +195,8 @@ impl LightClientUpdate { sync_aggregate: &SyncAggregate, block_slot: Slot, next_sync_committee: Arc>, - next_sync_committee_branch: FixedVector, - finality_branch: FixedVector, + next_sync_committee_branch: Vec, + finality_branch: Vec, attested_block: &SignedBlindedBeaconBlock, finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, @@ -189,9 +219,9 @@ impl LightClientUpdate { Self::Altair(LightClientUpdateAltair { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -209,9 +239,9 @@ impl LightClientUpdate { Self::Capella(LightClientUpdateCapella { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -229,9 +259,9 @@ impl LightClientUpdate { Self::Deneb(LightClientUpdateDeneb { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -249,9 +279,9 @@ impl LightClientUpdate { Self::Electra(LightClientUpdateElectra { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -391,22 +421,18 @@ impl LightClientUpdate { return Ok(new.signature_slot() < self.signature_slot()); } - fn is_next_sync_committee_branch_empty(&self) -> bool { - for index in self.next_sync_committee_branch().iter() { - if *index != Hash256::default() { - return false; - } - } - true + fn is_next_sync_committee_branch_empty<'a>(&'a self) -> bool { + map_light_client_update_ref!(&'a _, self.to_ref(), |update, cons| { 
+ cons(update); + is_empty_branch(update.next_sync_committee_branch.as_ref()) + }) } - pub fn is_finality_branch_empty(&self) -> bool { - for index in self.finality_branch().iter() { - if *index != Hash256::default() { - return false; - } - } - true + pub fn is_finality_branch_empty<'a>(&'a self) -> bool { + map_light_client_update_ref!(&'a _, self.to_ref(), |update, cons| { + cons(update); + is_empty_branch(update.finality_branch.as_ref()) + }) } // A `LightClientUpdate` has two `LightClientHeader`s @@ -436,6 +462,15 @@ impl LightClientUpdate { } } +fn is_empty_branch(branch: &[Hash256]) -> bool { + for index in branch.iter() { + if *index != Hash256::default() { + return false; + } + } + true +} + fn compute_sync_committee_period_at_slot( slot: Slot, chain_spec: &ChainSpec, @@ -447,16 +482,53 @@ fn compute_sync_committee_period_at_slot( #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; use ssz_types::typenum::Unsigned; - ssz_tests!(LightClientUpdateDeneb); + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateAltair); + } + + #[cfg(test)] + mod capella { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateCapella); + } + + #[cfg(test)] + mod deneb { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateElectra); + } #[test] fn finalized_root_params() { assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32) <= FINALIZED_ROOT_INDEX); assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32 + 1) > FINALIZED_ROOT_INDEX); assert_eq!(FinalizedRootProofLen::to_usize(), FINALIZED_ROOT_PROOF_LEN); + + assert!( + 2usize.pow(FINALIZED_ROOT_PROOF_LEN_ELECTRA as u32) <= FINALIZED_ROOT_INDEX_ELECTRA + ); + assert!( + 2usize.pow(FINALIZED_ROOT_PROOF_LEN_ELECTRA as u32 + 1) > FINALIZED_ROOT_INDEX_ELECTRA + ); + assert_eq!( + FinalizedRootProofLenElectra::to_usize(), + FINALIZED_ROOT_PROOF_LEN_ELECTRA + ); } #[test] @@ -471,6 +543,19 @@ mod tests { CurrentSyncCommitteeProofLen::to_usize(), CURRENT_SYNC_COMMITTEE_PROOF_LEN ); + + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32) + <= CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32 + 1) + > CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert_eq!( + CurrentSyncCommitteeProofLenElectra::to_usize(), + CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA + ); } #[test] @@ -481,5 +566,18 @@ mod tests { NextSyncCommitteeProofLen::to_usize(), NEXT_SYNC_COMMITTEE_PROOF_LEN ); + + assert!( + 2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32) + <= NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert!( + 2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32 + 1) + > NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert_eq!( + NextSyncCommitteeProofLenElectra::to_usize(), + NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA + ); } } diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 117c89a22f5..dacca204c19 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -48,11 +48,6 @@ "tests/.*/eip6110", "tests/.*/whisk", "tests/.*/eip7594", - # TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - "tests/.*/electra/ssz_static/LightClientUpdate", - 
"tests/.*/electra/ssz_static/LightClientFinalityUpdate", - "tests/.*/electra/ssz_static/LightClientBootstrap", - "tests/.*/electra/merkle_proof", ] diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index b68bbdc5d39..49c07197848 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -3,8 +3,8 @@ use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use tree_hash::Hash256; use types::{ - BeaconBlockBody, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconState, FixedVector, - FullPayload, Unsigned, + light_client_update, BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, + BeaconBlockBodyElectra, BeaconState, FixedVector, FullPayload, Unsigned, }; #[derive(Debug, Clone, Deserialize)] @@ -22,13 +22,13 @@ pub struct MerkleProof { #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] -pub struct MerkleProofValidity { +pub struct BeaconStateMerkleProofValidity { pub metadata: Option, pub state: BeaconState, pub merkle_proof: MerkleProof, } -impl LoadCase for MerkleProofValidity { +impl LoadCase for BeaconStateMerkleProofValidity { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); let state = ssz_decode_state(&path.join("object.ssz_snappy"), spec)?; @@ -49,11 +49,30 @@ impl LoadCase for MerkleProofValidity { } } -impl Case for MerkleProofValidity { +impl Case for BeaconStateMerkleProofValidity { fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let mut state = self.state.clone(); state.update_tree_hash_cache().unwrap(); - let Ok(proof) = state.compute_merkle_proof(self.merkle_proof.leaf_index) else { + + let proof = match self.merkle_proof.leaf_index { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + | light_client_update::CURRENT_SYNC_COMMITTEE_INDEX => { + state.compute_current_sync_committee_proof() + } + light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { + state.compute_next_sync_committee_proof() + } + light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + | light_client_update::FINALIZED_ROOT_INDEX => state.compute_finalized_root_proof(), + _ => { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof, invalid index".to_string(), + )); + } + }; + + let Ok(proof) = proof else { return Err(Error::FailedToParseTest( "Could not retrieve merkle proof".to_string(), )); @@ -198,3 +217,81 @@ impl Case for KzgInclusionMerkleProofValidity { } } } + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct BeaconBlockBodyMerkleProofValidity { + pub metadata: Option, + pub block_body: BeaconBlockBody>, + pub merkle_proof: MerkleProof, +} + +impl LoadCase for BeaconBlockBodyMerkleProofValidity { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let block_body: BeaconBlockBody> = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Bellatrix => { + return Err(Error::InternalError(format!( + "Beacon block body merkle proof validity test skipped for {:?}", + fork_name + ))) + } + ForkName::Capella => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? + .into() + } + ForkName::Deneb => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))?.into() + } + ForkName::Electra => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? 
+                    .into()
+            }
+        };
+        let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?;
+        // Metadata does not exist in these tests but it is left like this just in case.
+        let meta_path = path.join("meta.yaml");
+        let metadata = if meta_path.exists() {
+            Some(yaml_decode_file(&meta_path)?)
+        } else {
+            None
+        };
+        Ok(Self {
+            metadata,
+            block_body,
+            merkle_proof,
+        })
+    }
+}
+
+impl Case for BeaconBlockBodyMerkleProofValidity {
+    fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> {
+        let binding = self.block_body.clone();
+        let block_body = binding.to_ref();
+        let Ok(proof) = block_body.block_body_merkle_proof(self.merkle_proof.leaf_index) else {
+            return Err(Error::FailedToParseTest(
+                "Could not retrieve merkle proof".to_string(),
+            ));
+        };
+        let proof_len = proof.len();
+        let branch_len = self.merkle_proof.branch.len();
+        if proof_len != branch_len {
+            return Err(Error::NotEqual(format!(
+                "Branches not equal in length computed: {}, expected {}",
+                proof_len, branch_len
+            )));
+        }
+
+        for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) {
+            let expected_leaf = self.merkle_proof.branch[i];
+            if *proof_leaf != expected_leaf {
+                return Err(Error::NotEqual(format!(
+                    "Leaves not equal in merkle proof computed: {}, expected: {}",
+                    hex::encode(proof_leaf),
+                    hex::encode(expected_leaf)
+                )));
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs
index 5e928d22441..f4a09de32cb 100644
--- a/testing/ef_tests/src/handler.rs
+++ b/testing/ef_tests/src/handler.rs
@@ -921,10 +921,10 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler {
 
 #[derive(Derivative)]
 #[derivative(Default(bound = ""))]
-pub struct MerkleProofValidityHandler(PhantomData);
+pub struct BeaconStateMerkleProofValidityHandler(PhantomData);
 
-impl Handler for MerkleProofValidityHandler {
-    type Case = cases::MerkleProofValidity;
+impl Handler for BeaconStateMerkleProofValidityHandler {
+    type Case = cases::BeaconStateMerkleProofValidity;
 
     fn config_name() -> &'static str {
         E::name()
@@ -935,15 +935,11 @@ impl Handler for MerkleProofValidityHandler {
     }
 
     fn handler_name(&self) -> String {
-        "single_merkle_proof".into()
+        "single_merkle_proof/BeaconState".into()
     }
 
-    fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool {
-        // Test is skipped due to some changes in the Capella light client
-        // spec.
- // - // https://github.com/sigp/lighthouse/issues/4022 - false + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name.altair_enabled() } } @@ -967,8 +963,32 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { - // TODO(electra) re-enable for electra once merkle proof issues for electra are resolved - fork_name.deneb_enabled() && !fork_name.electra_enabled() + // Enabled in Deneb + fork_name.deneb_enabled() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct BeaconBlockBodyMerkleProofValidityHandler(PhantomData); + +impl Handler for BeaconBlockBodyMerkleProofValidityHandler { + type Case = cases::BeaconBlockBodyMerkleProofValidity; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "light_client" + } + + fn handler_name(&self) -> String { + "single_merkle_proof/BeaconBlockBody".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name.capella_enabled() } } @@ -993,8 +1013,7 @@ impl Handler for LightClientUpdateHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Enabled in Altair - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - fork_name.altair_enabled() && fork_name != ForkName::Electra + fork_name.altair_enabled() } } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index c2524c14e28..3f802d89447 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -396,11 +396,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only() - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only() - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only() + .run(); } // LightClientHeader has no internal indicator of which fork it is for, so we test it separately. @@ -476,13 +475,12 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::deneb_only( ) .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only( - // ) - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only( - // ) - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); } // LightClientUpdate has no internal indicator of which fork it is for, so we test it separately. 
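// A minimal sketch (assumed semantics, not from the patch) of the round-trip property
// that each `ssz_tests!` invocation above exercises for its per-fork type, using the
// real `ssz::Encode` / `ssz::Decode` traits; `ssz_round_trip` itself is hypothetical.
fn ssz_round_trip<T: ssz::Encode + ssz::Decode + PartialEq + std::fmt::Debug>(value: T) {
    // Encode to SSZ bytes, decode back, and require an exact match.
    let bytes = value.as_ssz_bytes();
    let decoded = T::from_ssz_bytes(&bytes).expect("round-trip decode should succeed");
    assert_eq!(decoded, value);
}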
@@ -506,13 +504,12 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only( - // ) - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only( - // ) - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); } #[test] @@ -922,8 +919,13 @@ fn kzg_recover_cells_and_proofs() { } #[test] -fn merkle_proof_validity() { - MerkleProofValidityHandler::::default().run(); +fn beacon_state_merkle_proof_validity() { + BeaconStateMerkleProofValidityHandler::::default().run(); +} + +#[test] +fn beacon_block_body_merkle_proof_validity() { + BeaconBlockBodyMerkleProofValidityHandler::::default().run(); } #[test] From 8188e036a04e46e9a916f7e82a0d930bc1c74a5a Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 25 Oct 2024 08:50:31 +0300 Subject: [PATCH 8/8] Generalize sync block lookup tests (#6498) * Generalize sync block lookup tests --- .../network/src/sync/block_lookups/mod.rs | 2 - beacon_node/network/src/sync/mod.rs | 2 + .../tests.rs => tests/lookups.rs} | 111 +++++------------- beacon_node/network/src/sync/tests/mod.rs | 67 +++++++++++ beacon_node/network/src/sync/tests/range.rs | 1 + 5 files changed, 102 insertions(+), 81 deletions(-) rename beacon_node/network/src/sync/{block_lookups/tests.rs => tests/lookups.rs} (96%) create mode 100644 beacon_node/network/src/sync/tests/mod.rs create mode 100644 beacon_node/network/src/sync/tests/range.rs diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index f5e68d1512f..5a11bca4814 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -50,8 +50,6 @@ use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; pub mod common; pub mod parent_chain; mod single_block_lookup; -#[cfg(test)] -mod tests; /// The maximum depth we will search for a parent block. In principle we should have sync'd any /// canonical chain to its head once the peer connects. 
A chain should not appear where it's depth diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 1dca6f02ac2..0f5fd6fb9f1 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -9,6 +9,8 @@ mod network_context; mod peer_sampling; mod peer_sync_info; mod range_sync; +#[cfg(test)] +mod tests; pub use lighthouse_network::service::api_types::SamplingId; pub use manager::{BatchProcessResult, SyncMessage}; diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/tests/lookups.rs similarity index 96% rename from beacon_node/network/src/sync/block_lookups/tests.rs rename to beacon_node/network/src/sync/tests/lookups.rs index 7192faa12dc..9f2c9ef66f0 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1,97 +1,50 @@ use crate::network_beacon_processor::NetworkBeaconProcessor; -use crate::sync::manager::{BlockProcessType, SyncManager}; -use crate::sync::peer_sampling::SamplingConfig; -use crate::sync::range_sync::RangeSyncType; -use crate::sync::{SamplingId, SyncMessage}; +use crate::sync::block_lookups::{ + BlockLookupSummary, PARENT_DEPTH_TOLERANCE, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, +}; +use crate::sync::{ + manager::{BlockProcessType, BlockProcessingResult, SyncManager}, + peer_sampling::SamplingConfig, + SamplingId, SyncMessage, +}; use crate::NetworkMessage; use std::sync::Arc; +use std::time::Duration; use super::*; use crate::sync::block_lookups::common::ResponseType; -use beacon_chain::blob_verification::GossipVerifiedBlob; -use beacon_chain::block_verification_types::BlockImportData; -use beacon_chain::builder::Witness; -use beacon_chain::data_availability_checker::Availability; -use beacon_chain::eth1_chain::CachingEth1Backend; -use beacon_chain::test_utils::{ - build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, - BeaconChainHarness, EphemeralHarnessType, LoggerType, NumBlobs, -}; -use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{ - AvailabilityPendingExecutedBlock, PayloadVerificationOutcome, PayloadVerificationStatus, + blob_verification::GossipVerifiedBlob, + block_verification_types::{AsBlock, BlockImportData}, + data_availability_checker::Availability, + test_utils::{ + build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, + BeaconChainHarness, EphemeralHarnessType, LoggerType, NumBlobs, + }, + validator_monitor::timestamp_now, + AvailabilityPendingExecutedBlock, AvailabilityProcessingStatus, BlockError, + PayloadVerificationOutcome, PayloadVerificationStatus, }; use beacon_processor::WorkEvent; -use lighthouse_network::rpc::{RPCError, RequestType, RpcErrorResponse}; -use lighthouse_network::service::api_types::{ - AppRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SamplingRequester, - SingleLookupReqId, SyncRequestId, +use lighthouse_network::{ + rpc::{RPCError, RequestType, RpcErrorResponse}, + service::api_types::{ + AppRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, + SamplingRequester, SingleLookupReqId, SyncRequestId, + }, + types::SyncState, + NetworkConfig, NetworkGlobals, PeerId, }; -use lighthouse_network::types::SyncState; -use lighthouse_network::NetworkConfig; -use lighthouse_network::NetworkGlobals; use slog::info; -use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; -use store::MemoryStore; +use slot_clock::{SlotClock, 
TestingSlotClock}; use tokio::sync::mpsc; -use types::data_column_sidecar::ColumnIndex; -use types::test_utils::TestRandom; use types::{ - test_utils::{SeedableRng, XorShiftRng}, - BlobSidecar, ForkName, MinimalEthSpec as E, SignedBeaconBlock, Slot, + data_column_sidecar::ColumnIndex, + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, Epoch, EthSpec, ForkName, + Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, }; -use types::{BeaconState, BeaconStateBase}; -use types::{DataColumnSidecar, Epoch}; - -type T = Witness, E, MemoryStore, MemoryStore>; - -/// This test utility enables integration testing of Lighthouse sync components. -/// -/// It covers the following: -/// 1. Sending `SyncMessage` to `SyncManager` to trigger `RangeSync`, `BackFillSync` and `BlockLookups` behaviours. -/// 2. Making assertions on `WorkEvent`s received from sync -/// 3. Making assertion on `NetworkMessage` received from sync (Outgoing RPC requests). -/// -/// The test utility covers testing the interactions from and to `SyncManager`. In diagram form: -/// +-----------------+ -/// | BeaconProcessor | -/// +---------+-------+ -/// ^ | -/// | | -/// WorkEvent | | SyncMsg -/// | | (Result) -/// | v -/// +--------+ +-----+-----------+ +----------------+ -/// | Router +----------->| SyncManager +------------>| NetworkService | -/// +--------+ SyncMsg +-----------------+ NetworkMsg +----------------+ -/// (RPC resp) | - RangeSync | (RPC req) -/// +-----------------+ -/// | - BackFillSync | -/// +-----------------+ -/// | - BlockLookups | -/// +-----------------+ -struct TestRig { - /// Receiver for `BeaconProcessor` events (e.g. block processing results). - beacon_processor_rx: mpsc::Receiver>, - beacon_processor_rx_queue: Vec>, - /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync) - network_rx: mpsc::UnboundedReceiver>, - /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) - network_rx_queue: Vec>, - /// Receiver for `SyncMessage` from the network - sync_rx: mpsc::UnboundedReceiver>, - /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. - sync_manager: SyncManager, - /// To manipulate sync state and peer connection status - network_globals: Arc>, - /// Beacon chain harness - harness: BeaconChainHarness>, - /// `rng` for generating test blocks and blobs. 
-    rng: XorShiftRng,
-    fork_name: ForkName,
-    log: Logger,
-}
 
 const D: Duration = Duration::new(0, 0);
 const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS;
diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs
new file mode 100644
index 00000000000..47666b413c5
--- /dev/null
+++ b/beacon_node/network/src/sync/tests/mod.rs
@@ -0,0 +1,67 @@
+use crate::sync::manager::SyncManager;
+use crate::sync::range_sync::RangeSyncType;
+use crate::sync::SyncMessage;
+use crate::NetworkMessage;
+use beacon_chain::builder::Witness;
+use beacon_chain::eth1_chain::CachingEth1Backend;
+use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
+use beacon_processor::WorkEvent;
+use lighthouse_network::NetworkGlobals;
+use slog::Logger;
+use slot_clock::ManualSlotClock;
+use std::sync::Arc;
+use store::MemoryStore;
+use tokio::sync::mpsc;
+use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E};
+
+mod lookups;
+mod range;
+
+type T = Witness, E, MemoryStore, MemoryStore>;
+
+/// This test utility enables integration testing of Lighthouse sync components.
+///
+/// It covers the following:
+/// 1. Sending `SyncMessage` to `SyncManager` to trigger `RangeSync`, `BackFillSync` and `BlockLookups` behaviours.
+/// 2. Making assertions on `WorkEvent`s received from sync
+/// 3. Making assertions on `NetworkMessage` received from sync (Outgoing RPC requests).
+///
+/// The test utility covers testing the interactions from and to `SyncManager`. In diagram form:
+///                                             +-----------------+
+///                                             | BeaconProcessor |
+///                                             +---------+-------+
+///                                                  ^    |
+///                                                  |    |
+///                                        WorkEvent |    | SyncMsg
+///                                                  |    | (Result)
+///                                                  |    v
+/// +--------+            +-----+-----------+             +----------------+
+/// | Router +----------->|   SyncManager   +------------>| NetworkService |
+/// +--------+  SyncMsg   +-----------------+  NetworkMsg +----------------+
+///            (RPC resp) |  - RangeSync    |  (RPC req)
+///                       +-----------------+
+///                       |  - BackFillSync |
+///                       +-----------------+
+///                       |  - BlockLookups |
+///                       +-----------------+
+struct TestRig {
+    /// Receiver for `BeaconProcessor` events (e.g. block processing results).
+    beacon_processor_rx: mpsc::Receiver>,
+    beacon_processor_rx_queue: Vec>,
+    /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync)
+    network_rx: mpsc::UnboundedReceiver>,
+    /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests)
+    network_rx_queue: Vec>,
+    /// Receiver for `SyncMessage` from the network
+    sync_rx: mpsc::UnboundedReceiver>,
+    /// To send `SyncMessage`. For sending RPC responses or block processing results to sync.
+    sync_manager: SyncManager,
+    /// To manipulate sync state and peer connection status
+    network_globals: Arc>,
+    /// Beacon chain harness
+    harness: BeaconChainHarness>,
+    /// `rng` for generating test blocks and blobs.
+    rng: XorShiftRng,
+    fork_name: ForkName,
+    log: Logger,
+}
diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/beacon_node/network/src/sync/tests/range.rs
@@ -0,0 +1 @@
+
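// A hypothetical companion to the paired `*_rx` / `*_rx_queue` fields on `TestRig`
// above (a sketch under stated assumptions, not part of the patch): drain every message
// currently buffered in a receiver into its queue, so a test can assert over all
// traffic that sync has emitted so far. `drain_into_queue` is illustrative only.
fn drain_into_queue<T>(rx: &mut tokio::sync::mpsc::UnboundedReceiver<T>, queue: &mut Vec<T>) {
    // `try_recv` returns `Err` once the channel is empty (or disconnected), ending the loop.
    while let Ok(msg) = rx.try_recv() {
        queue.push(msg);
    }
}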