From 62e4abfbff1b06192a34751cf849d1547b2d5ef6 Mon Sep 17 00:00:00 2001
From: AMIT SINGH
Date: Thu, 18 Apr 2024 12:44:56 +0530
Subject: [PATCH 01/13] Fix execution layer redundancy (#5588)

* remove execution layer url redundancy
* fix typo
* fix tests
* fix formatting
---
 beacon_node/beacon_chain/src/test_utils.rs    | 12 ++++------
 beacon_node/execution_layer/src/lib.rs        | 22 +++++++------------
 .../src/test_utils/mock_builder.rs            |  4 ++--
 .../src/test_utils/mock_execution_layer.rs    |  4 ++--
 beacon_node/src/config.rs                     |  4 ++--
 lighthouse/tests/beacon_node.rs               | 20 +++++++++++------
 .../src/test_rig.rs                           | 12 +++++-----
 testing/simulator/src/eth1_sim.rs             |  8 +++----
 testing/simulator/src/local_network.rs        | 12 +++++-----
 9 files changed, 46 insertions(+), 52 deletions(-)

diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs
index 542262487ae..debc4881a60 100644
--- a/beacon_node/beacon_chain/src/test_utils.rs
+++ b/beacon_node/beacon_chain/src/test_utils.rs
@@ -419,21 +419,17 @@ where
         self
     }
 
-    pub fn execution_layer_from_urls(mut self, urls: &[&str]) -> Self {
+    pub fn execution_layer_from_url(mut self, url: &str) -> Self {
         assert!(
             self.execution_layer.is_none(),
             "execution layer already defined"
         );
 
-        let urls: Vec<SensitiveUrl> = urls
-            .iter()
-            .map(|s| SensitiveUrl::parse(s))
-            .collect::<Result<_, _>>()
-            .unwrap();
+        let url = SensitiveUrl::parse(url).ok();
 
         let config = execution_layer::Config {
-            execution_endpoints: urls,
-            secret_files: vec![],
+            execution_endpoint: url,
+            secret_file: None,
             suggested_fee_recipient: Some(Address::repeat_byte(42)),
             ..Default::default()
         };
diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs
index 30930318eff..22410976c9d 100644
--- a/beacon_node/execution_layer/src/lib.rs
+++ b/beacon_node/execution_layer/src/lib.rs
@@ -355,14 +355,14 @@ struct Inner<E: EthSpec> {
 
 #[derive(Debug, Default, Clone, Serialize, Deserialize)]
 pub struct Config {
-    /// Endpoint urls for EL nodes that are running the engine api.
-    pub execution_endpoints: Vec<SensitiveUrl>,
+    /// Endpoint url for the EL node that is running the engine api.
+    pub execution_endpoint: Option<SensitiveUrl>,
     /// Endpoint urls for services providing the builder api.
     pub builder_url: Option<SensitiveUrl>,
     /// User agent to send with requests to the builder API.
     pub builder_user_agent: Option<String>,
-    /// JWT secrets for the above endpoints running the engine api.
-    pub secret_files: Vec<PathBuf>,
+    /// JWT secret for the above endpoint running the engine api.
+    pub secret_file: Option<PathBuf>,
     /// The default fee recipient to use on the beacon node if none if provided from
     /// the validator client during block preparation.
     pub suggested_fee_recipient: Option<Address>,
@@ -386,10 +386,10 @@ impl<E: EthSpec> ExecutionLayer<E> {
     /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP.
     pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result<Self, Error> {
         let Config {
-            execution_endpoints: urls,
+            execution_endpoint: url,
             builder_url,
             builder_user_agent,
-            secret_files,
+            secret_file,
             suggested_fee_recipient,
             jwt_id,
             jwt_version,
@@ -397,16 +397,10 @@ impl<E: EthSpec> ExecutionLayer<E> {
             execution_timeout_multiplier,
         } = config;
 
-        if urls.len() > 1 {
-            warn!(log, "Only the first execution engine url will be used");
-        }
-        let execution_url = urls.into_iter().next().ok_or(Error::NoEngine)?;
+        let execution_url = url.ok_or(Error::NoEngine)?;
 
         // Use the default jwt secret path if not provided via cli.
-        let secret_file = secret_files
-            .into_iter()
-            .next()
-            .unwrap_or_else(|| default_datadir.join(DEFAULT_JWT_FILE));
+        let secret_file = secret_file.unwrap_or_else(|| default_datadir.join(DEFAULT_JWT_FILE));
 
         let jwt_key = if secret_file.exists() {
             // Read secret from file if it already exists
diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs
index b12e26a3d6c..756e0b793f8 100644
--- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs
+++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs
@@ -229,8 +229,8 @@ impl<E: EthSpec> MockBuilder<E> {
 
         // This EL should not talk to a builder
         let config = Config {
-            execution_endpoints: vec![mock_el_url],
-            secret_files: vec![path],
+            execution_endpoint: Some(mock_el_url),
+            secret_file: Some(path),
             suggested_fee_recipient: None,
             ..Default::default()
         };
diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs
index f76edfa90b7..6717bbc2ab3 100644
--- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs
+++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs
@@ -67,8 +67,8 @@ impl<E: EthSpec> MockExecutionLayer<E> {
         std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap();
 
         let config = Config {
-            execution_endpoints: vec![url],
-            secret_files: vec![path],
+            execution_endpoint: Some(url),
+            secret_file: Some(path),
             suggested_fee_recipient: Some(Address::repeat_byte(42)),
             ..Default::default()
         };
diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs
index 5a27b148c99..fd2cf473cb3 100644
--- a/beacon_node/src/config.rs
+++ b/beacon_node/src/config.rs
@@ -348,8 +348,8 @@ pub fn get_config<E: EthSpec>(
     }
 
     // Set config values from parse values.
- el_config.secret_files = vec![secret_file.clone()]; - el_config.execution_endpoints = vec![execution_endpoint.clone()]; + el_config.secret_file = Some(secret_file.clone()); + el_config.execution_endpoint = Some(execution_endpoint.clone()); el_config.suggested_fee_recipient = clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4a50126945b..ec10ff4429d 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -445,13 +445,16 @@ fn run_merge_execution_endpoints_flag_test(flag: &str) { .run_with_zero_port() .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); - assert_eq!(config.execution_endpoints.len(), 1); + assert_eq!(config.execution_endpoint.is_some(), true); assert_eq!( - config.execution_endpoints[0], + config.execution_endpoint.as_ref().unwrap().clone(), SensitiveUrl::parse(&urls[0]).unwrap() ); // Only the first secret file should be used. - assert_eq!(config.secret_files, vec![jwts[0].clone()]); + assert_eq!( + config.secret_file.as_ref().unwrap().clone(), + jwts[0].clone() + ); }); } #[test] @@ -464,11 +467,11 @@ fn run_execution_jwt_secret_key_is_persisted() { .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); assert_eq!( - config.execution_endpoints[0].full.to_string(), + config.execution_endpoint.as_ref().unwrap().full.to_string(), "http://localhost:8551/" ); let mut file_jwt_secret_key = String::new(); - File::open(config.secret_files[0].clone()) + File::open(config.secret_file.as_ref().unwrap()) .expect("could not open jwt_secret_key file") .read_to_string(&mut file_jwt_secret_key) .expect("could not read from file"); @@ -515,10 +518,13 @@ fn merge_jwt_secrets_flag() { .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); assert_eq!( - config.execution_endpoints[0].full.to_string(), + config.execution_endpoint.as_ref().unwrap().full.to_string(), "http://localhost:8551/" ); - assert_eq!(config.secret_files[0], dir.path().join("jwt-file")); + assert_eq!( + config.secret_file.as_ref().unwrap().clone(), + dir.path().join("jwt-file") + ); }); } #[test] diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 8f782c7e4e0..0103f7074b5 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -121,11 +121,11 @@ impl TestRig { let ee_a = { let execution_engine = ExecutionEngine::new(generic_engine.clone()); - let urls = vec![execution_engine.http_auth_url()]; + let url = Some(execution_engine.http_auth_url()); let config = execution_layer::Config { - execution_endpoints: urls, - secret_files: vec![], + execution_endpoint: url, + secret_file: None, suggested_fee_recipient: Some(Address::repeat_byte(42)), default_datadir: execution_engine.datadir(), ..Default::default() @@ -140,11 +140,11 @@ impl TestRig { let ee_b = { let execution_engine = ExecutionEngine::new(generic_engine); - let urls = vec![execution_engine.http_auth_url()]; + let url = Some(execution_engine.http_auth_url()); let config = execution_layer::Config { - execution_endpoints: urls, - secret_files: vec![], + execution_endpoint: url, + secret_file: None, suggested_fee_recipient: fee_recipient, default_datadir: execution_engine.datadir(), ..Default::default() diff --git a/testing/simulator/src/eth1_sim.rs 
b/testing/simulator/src/eth1_sim.rs index 8d6ffc42ffa..20c7c9ce9ab 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -395,11 +395,9 @@ async fn create_local_network( if post_merge_sim { let el_config = execution_layer::Config { - execution_endpoints: vec![SensitiveUrl::parse(&format!( - "http://localhost:{}", - EXECUTION_PORT - )) - .unwrap()], + execution_endpoint: Some( + SensitiveUrl::parse(&format!("http://localhost:{}", EXECUTION_PORT)).unwrap(), + ), ..Default::default() }; diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index dc8bf0d27dd..018954a5d3b 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -85,9 +85,9 @@ impl LocalNetwork { mock_execution_config, ); el_config.default_datadir = execution_node.datadir.path().to_path_buf(); - el_config.secret_files = vec![execution_node.datadir.path().join("jwt.hex")]; - el_config.execution_endpoints = - vec![SensitiveUrl::parse(&execution_node.server.url()).unwrap()]; + el_config.secret_file = Some(execution_node.datadir.path().join("jwt.hex")); + el_config.execution_endpoint = + Some(SensitiveUrl::parse(&execution_node.server.url()).unwrap()); vec![execution_node] } else { vec![] @@ -180,9 +180,9 @@ impl LocalNetwork { config, ); el_config.default_datadir = execution_node.datadir.path().to_path_buf(); - el_config.secret_files = vec![execution_node.datadir.path().join("jwt.hex")]; - el_config.execution_endpoints = - vec![SensitiveUrl::parse(&execution_node.server.url()).unwrap()]; + el_config.secret_file = Some(execution_node.datadir.path().join("jwt.hex")); + el_config.execution_endpoint = + Some(SensitiveUrl::parse(&execution_node.server.url()).unwrap()); self.execution_nodes.write().push(execution_node); } From 5c30afbc7c0ef32fc70236543298a0db0ad5bd71 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Thu, 18 Apr 2024 15:14:59 +0800 Subject: [PATCH 02/13] Revise `secrets-dir` flag in the VC (#5480) * Update docs on secrets-dir * Hidden secrets-dir flag * Remove conflicts_with * Restore description * make cli * Update book/src/validator-management.md --- account_manager/src/validator/create.rs | 1 - book/src/validator-management.md | 5 ++++- validator_client/src/cli.rs | 1 - 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 8da32531a80..93b041c61c4 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -62,7 +62,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { "The path where the validator keystore passwords will be stored. \ Defaults to ~/.lighthouse/{network}/secrets", ) - .conflicts_with("datadir") .takes_value(true), ) .arg( diff --git a/book/src/validator-management.md b/book/src/validator-management.md index df7c2ac4760..bc6aba3c4f9 100644 --- a/book/src/validator-management.md +++ b/book/src/validator-management.md @@ -59,7 +59,9 @@ Each permitted field of the file is listed below for reference: - `voting_keystore_password`: The password to the EIP-2335 keystore. > **Note**: Either `voting_keystore_password_path` or `voting_keystore_password` *must* be -> supplied. If both are supplied, `voting_keystore_password_path` is ignored. +> supplied. If both are supplied, `voting_keystore_password_path` is ignored. 
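+
+For illustration, a hypothetical `validator_definitions.yml` entry that keeps the keystore password in a separate file (as the note above and the paragraph below discuss) might look like this; the public key and paths are invented for the example:
+
+```yaml
+# Hypothetical example entry; the key and paths below are placeholders.
+- enabled: true
+  voting_public_key: "0x87a5..."
+  type: local_keystore
+  voting_keystore_path: /home/validator/.lighthouse/validators/0x87a5.../voting-keystore.json
+  voting_keystore_password_path: /mnt/portable-drive/password.txt
+```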
+
+> If you do not wish to store `voting_keystore_password` in the `validator_definitions.yml` file, you can add the field `voting_keystore_password_path` and point it to a file containing the password. The file can be, e.g., on a mounted portable drive that contains the password, so that no password is stored on the validating node.
 
 ## Populating the `validator_definitions.yml` file
 
@@ -75,6 +77,7 @@ recap:
 
 ### Automatic validator discovery
 
+
 When the `--disable-auto-discover` flag is **not** provided, the validator client will search the `validator-dir` for validators and add any *new* validators to the `validator_definitions.yml` with `enabled: true`.
diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs
index f91efbdfbc5..991b621f27d 100644
--- a/validator_client/src/cli.rs
+++ b/validator_client/src/cli.rs
@@ -73,7 +73,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                     key. Defaults to ~/.lighthouse/{network}/secrets.",
                 )
                 .takes_value(true)
-                .conflicts_with("datadir")
         )
         .arg(
             Arg::with_name("init-slashing-protection")

From 5a9e973f047fd7c401339ff8bd715f43bab8a1a0 Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Fri, 19 Apr 2024 18:18:30 +1000
Subject: [PATCH 03/13] Fix on-disk consensus context format (#5598)

* Fix on-disk consensus context format
* Keep indexed attestations, thanks Sean
---
 .../state_lru_cache.rs                        | 21 ++++--
 beacon_node/store/src/consensus_context.rs    | 66 +++++++++++++++++++
 beacon_node/store/src/lib.rs                  |  2 +
 .../state_processing/src/consensus_context.rs | 31 ++++++---
 4 files changed, 104 insertions(+), 16 deletions(-)
 create mode 100644 beacon_node/store/src/consensus_context.rs

diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs
index c3492b53bda..b6dbf2b952f 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs
@@ -8,8 +8,9 @@ use crate::{
 use lru::LruCache;
 use parking_lot::RwLock;
 use ssz_derive::{Decode, Encode};
-use state_processing::{BlockReplayer, ConsensusContext, StateProcessingStrategy};
+use state_processing::{BlockReplayer, StateProcessingStrategy};
 use std::sync::Arc;
+use store::OnDiskConsensusContext;
 use types::beacon_block_body::KzgCommitments;
 use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc};
 use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock};
@@ -26,7 +27,7 @@ pub struct DietAvailabilityPendingExecutedBlock<E: EthSpec> {
     parent_block: SignedBeaconBlock<E, BlindedPayload<E>>,
     parent_eth1_finalization_data: Eth1FinalizationData,
     confirmed_state_roots: Vec<Hash256>,
-    consensus_context: ConsensusContext<E>,
+    consensus_context: OnDiskConsensusContext<E>,
     payload_verification_outcome: PayloadVerificationOutcome,
 }
 
@@ -94,7 +95,9 @@ impl<T: BeaconChainTypes> StateLRUCache<T> {
             parent_block: executed_block.import_data.parent_block,
             parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data,
             confirmed_state_roots: executed_block.import_data.confirmed_state_roots,
-            consensus_context: executed_block.import_data.consensus_context,
+            consensus_context: OnDiskConsensusContext::from_consensus_context(
+                executed_block.import_data.consensus_context,
+            ),
             payload_verification_outcome: executed_block.payload_verification_outcome,
         }
     }
@@ -119,7 +122,9 @@ impl<T: BeaconChainTypes> StateLRUCache<T> {
                     parent_eth1_finalization_data: diet_executed_block
                         .parent_eth1_finalization_data,
                     confirmed_state_roots: diet_executed_block.confirmed_state_roots,
-                    consensus_context: diet_executed_block.consensus_context,
+                    consensus_context: diet_executed_block
+                        .consensus_context
+                        .into_consensus_context(),
                 },
                 payload_verification_outcome: diet_executed_block.payload_verification_outcome,
             })
@@ -145,7 +150,9 @@ impl<T: BeaconChainTypes> StateLRUCache<T> {
                 parent_block: diet_executed_block.parent_block,
                 parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data,
                 confirmed_state_roots: diet_executed_block.confirmed_state_roots,
-                consensus_context: diet_executed_block.consensus_context,
+                consensus_context: diet_executed_block
+                    .consensus_context
+                    .into_consensus_context(),
             },
             payload_verification_outcome: diet_executed_block.payload_verification_outcome,
         })
@@ -232,7 +239,9 @@ impl<E: EthSpec> From<AvailabilityPendingExecutedBlock<E>>
             parent_block: value.import_data.parent_block,
             parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data,
             confirmed_state_roots: value.import_data.confirmed_state_roots,
-            consensus_context: value.import_data.consensus_context,
+            consensus_context: OnDiskConsensusContext::from_consensus_context(
+                value.import_data.consensus_context,
+            ),
             payload_verification_outcome: value.payload_verification_outcome,
         }
     }
diff --git a/beacon_node/store/src/consensus_context.rs b/beacon_node/store/src/consensus_context.rs
new file mode 100644
index 00000000000..08fad17b14b
--- /dev/null
+++ b/beacon_node/store/src/consensus_context.rs
@@ -0,0 +1,66 @@
+use ssz_derive::{Decode, Encode};
+use state_processing::ConsensusContext;
+use std::collections::HashMap;
+use types::{AttestationData, BitList, EthSpec, Hash256, IndexedAttestation, Slot};
+
+/// The consensus context is stored on disk as part of the data availability overflow cache.
+///
+/// We use this separate struct to keep the on-disk format stable in the presence of changes to the
+/// in-memory `ConsensusContext`. You MUST NOT change the fields of this struct without
+/// superstructing it and implementing a schema migration.
+#[derive(Debug, PartialEq, Clone, Encode, Decode)]
+pub struct OnDiskConsensusContext<E: EthSpec> {
+    /// Slot to act as an identifier/safeguard
+    slot: Slot,
+    /// Proposer index of the block at `slot`.
+    proposer_index: Option<u64>,
+    /// Block root of the block at `slot`.
+    current_block_root: Option<Hash256>,
+    /// We keep the indexed attestations in the *in-memory* version of this struct so that we don't
+    /// need to regenerate them if roundtripping via this type *without* going to disk.
+    ///
+    /// They are not part of the on-disk format.
+    #[ssz(skip_serializing, skip_deserializing)]
+    indexed_attestations:
+        HashMap<(AttestationData, BitList<E::MaxValidatorsPerCommittee>), IndexedAttestation<E>>,
+}
+
+impl<E: EthSpec> OnDiskConsensusContext<E> {
+    pub fn from_consensus_context(ctxt: ConsensusContext<E>) -> Self {
+        // Match exhaustively on fields here so we are forced to *consider* updating the on-disk
+        // format when the `ConsensusContext` fields change.
+        let ConsensusContext {
+            slot,
+            previous_epoch: _,
+            current_epoch: _,
+            proposer_index,
+            current_block_root,
+            indexed_attestations,
+        } = ctxt;
+        OnDiskConsensusContext {
+            slot,
+            proposer_index,
+            current_block_root,
+            indexed_attestations,
+        }
+    }
+
+    pub fn into_consensus_context(self) -> ConsensusContext<E> {
+        let OnDiskConsensusContext {
+            slot,
+            proposer_index,
+            current_block_root,
+            indexed_attestations,
+        } = self;
+
+        let mut ctxt = ConsensusContext::new(slot);
+
+        if let Some(proposer_index) = proposer_index {
+            ctxt = ctxt.set_proposer_index(proposer_index);
+        }
+        if let Some(block_root) = current_block_root {
+            ctxt = ctxt.set_current_block_root(block_root);
+        }
+        ctxt.set_indexed_attestations(indexed_attestations)
+    }
+}
diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs
index e86689b0cf1..c3136a910db 100644
--- a/beacon_node/store/src/lib.rs
+++ b/beacon_node/store/src/lib.rs
@@ -14,6 +14,7 @@ mod chunk_writer;
 pub mod chunked_iter;
 pub mod chunked_vector;
 pub mod config;
+pub mod consensus_context;
 pub mod errors;
 mod forwards_iter;
 mod garbage_collection;
@@ -30,6 +31,7 @@ pub mod iter;
 
 pub use self::chunk_writer::ChunkWriter;
 pub use self::config::StoreConfig;
+pub use self::consensus_context::OnDiskConsensusContext;
 pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split};
 pub use self::leveldb_store::LevelDB;
 pub use self::memory_store::MemoryStore;
diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs
index 263539fa429..073d87be85b 100644
--- a/consensus/state_processing/src/consensus_context.rs
+++ b/consensus/state_processing/src/consensus_context.rs
@@ -1,7 +1,6 @@
 use crate::common::get_indexed_attestation;
 use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};
 use crate::EpochCacheError;
-use ssz_derive::{Decode, Encode};
 use std::collections::{hash_map::Entry, HashMap};
 use tree_hash::TreeHash;
 use types::{
@@ -9,22 +8,20 @@ use types::{
     ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot,
 };
 
-#[derive(Debug, PartialEq, Clone, Encode, Decode)]
+#[derive(Debug, PartialEq, Clone)]
 pub struct ConsensusContext<E: EthSpec> {
     /// Slot to act as an identifier/safeguard
-    slot: Slot,
+    pub slot: Slot,
     /// Previous epoch of the `slot` precomputed for optimization purpose.
-    pub(crate) previous_epoch: Epoch,
+    pub previous_epoch: Epoch,
     /// Current epoch of the `slot` precomputed for optimization purpose.
-    pub(crate) current_epoch: Epoch,
+    pub current_epoch: Epoch,
     /// Proposer index of the block at `slot`.
-    proposer_index: Option<u64>,
+    pub proposer_index: Option<u64>,
     /// Block root of the block at `slot`.
-    current_block_root: Option<Hash256>,
+    pub current_block_root: Option<Hash256>,
     /// Cache of indexed attestations constructed during block processing.
- /// We can skip serializing / deserializing this as the cache will just be rebuilt - #[ssz(skip_serializing, skip_deserializing)] - indexed_attestations: + pub indexed_attestations: HashMap<(AttestationData, BitList), IndexedAttestation>, } @@ -62,6 +59,7 @@ impl ConsensusContext { } } + #[must_use] pub fn set_proposer_index(mut self, proposer_index: u64) -> Self { self.proposer_index = Some(proposer_index); self @@ -109,6 +107,7 @@ impl ConsensusContext { Ok(proposer_index) } + #[must_use] pub fn set_current_block_root(mut self, block_root: Hash256) -> Self { self.current_block_root = Some(block_root); self @@ -174,4 +173,16 @@ impl ConsensusContext { pub fn num_cached_indexed_attestations(&self) -> usize { self.indexed_attestations.len() } + + #[must_use] + pub fn set_indexed_attestations( + mut self, + attestations: HashMap< + (AttestationData, BitList), + IndexedAttestation, + >, + ) -> Self { + self.indexed_attestations = attestations; + self + } } From 9b5895ca8969a6b76e52e9d0e15445b2c6f69404 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Mon, 22 Apr 2024 14:01:06 +0100 Subject: [PATCH 04/13] Fix cargo audit RUSTSEC-2024-0336 (#5612) * replace unmaintained rust_yaml with serde_yml * update warp --- Cargo.lock | 1199 +++++++++++++++++++---------- Cargo.toml | 6 +- consensus/int_to_bytes/Cargo.toml | 2 +- consensus/int_to_bytes/src/lib.rs | 10 +- 4 files changed, 821 insertions(+), 396 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0452fa0e8c7..79be0a55f18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,7 +18,7 @@ version = "0.3.5" dependencies = [ "account_utils", "bls", - "clap", + "clap 2.34.0", "clap_utils", "directory", "environment", @@ -126,14 +126,14 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.9.4" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" +checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" dependencies = [ "aead 0.4.3", "aes 0.7.5", "cipher 0.3.0", - "ctr 0.8.0", + "ctr 0.7.0", "ghash 0.4.4", "subtle", ] @@ -148,15 +148,15 @@ dependencies = [ "aes 0.8.4", "cipher 0.4.4", "ctr 0.9.2", - "ghash 0.5.0", + "ghash 0.5.1", "subtle", ] [[package]] name = "ahash" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b79b82693f705137f8fb9b37871d99e4f9a7df12b917eed79c3d3954830a60b" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "once_cell", @@ -166,18 +166,18 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-consensus" @@ -265,7 +265,7 @@ checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -292,11 +292,59 @@ dependencies = [ "winapi", ] 
+[[package]] +name = "anstream" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" + +[[package]] +name = "anstyle-parse" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", +] + [[package]] name = "anyhow" -version = "1.0.80" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "arbitrary" @@ -309,9 +357,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "ark-ff" @@ -507,9 +555,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f97ab0c5b00a7cdbe5a371b9a782ee7be1316095885c8a4ea1daf490eb0ef65" +checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" dependencies = [ "async-lock", "cfg-if", @@ -518,7 +566,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.31", + "rustix 0.38.33", "slab", "tracing", "windows-sys 0.52.0", @@ -537,13 +585,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -589,7 +637,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" dependencies = [ - "http 0.2.11", + "http 0.2.12", "log", "url", ] @@ -613,29 +661,29 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = 
"f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "axum" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1236b4b292f6c4d6dc34604bb5120d85c3fe1d1aa596bd5cc52ca054d13e7b9e" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" dependencies = [ "async-trait", "axum-core", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.2.0", + "hyper 1.3.1", "hyper-util", "itoa", "matchit", @@ -648,7 +696,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 1.0.1", "tokio", "tower", "tower-layer", @@ -665,13 +713,13 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", "mime", "pin-project-lite", "rustversion", - "sync_wrapper", + "sync_wrapper 0.1.2", "tower-layer", "tower-service", "tracing", @@ -679,9 +727,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -722,6 +770,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + [[package]] name = "base64ct" version = "1.6.0" @@ -793,7 +847,7 @@ name = "beacon_node" version = "5.1.3" dependencies = [ "beacon_chain", - "clap", + "clap 2.34.0", "clap_utils", "client", "directory", @@ -806,7 +860,7 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper 1.2.0", + "hyper 1.3.1", "lighthouse_network", "lighthouse_version", "monitoring_api", @@ -900,9 +954,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "bitvec" @@ -1006,7 +1060,7 @@ name = "boot_node" version = "5.1.3" dependencies = [ "beacon_node", - "clap", + "clap 2.34.0", "clap_utils", "eth2_network_config", "ethereum_ssz", @@ -1034,9 +1088,9 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bs58" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ "tinyvec", ] @@ -1055,9 +1109,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.3" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byte-slice-cast" @@ -1073,9 +1127,9 @@ checksum = 
"1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" dependencies = [ "serde", ] @@ -1141,9 +1195,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "694c8807f2ae16faecc43dc17d74b3eb042482789fd0eb64b39a2e04e087053f" +checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" dependencies = [ "serde", ] @@ -1170,12 +1224,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.90" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" +checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" dependencies = [ "jobserver", "libc", + "once_cell", ] [[package]] @@ -1193,6 +1248,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chacha20" version = "0.9.1" @@ -1219,14 +1280,14 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.34" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -1275,11 +1336,38 @@ dependencies = [ "vec_map", ] +[[package]] +name = "clap" +version = "4.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim 0.11.1", +] + +[[package]] +name = "clap_lex" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" + [[package]] name = "clap_utils" version = "0.1.0" dependencies = [ - "clap", + "clap 2.34.0", "dirs", "eth2_network_config", "ethereum-types 0.14.1", @@ -1344,6 +1432,12 @@ dependencies = [ "cc", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "compare_fields" version = "0.2.0" @@ -1369,11 +1463,24 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "console" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "unicode-width", + "windows-sys 0.52.0", +] + [[package]] name = "const-hex" -version = "1.11.1" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbd12d49ab0eaf8193ba9175e45f56bbc2e4b27d57b8cfe62aa47942a46b9a9" +checksum = "5ba00838774b4ab0233e355d26710fbfc8327a05c017f6dc4873f876d1f79f78" dependencies = [ "cfg-if", "cpufeatures", @@ -1451,7 +1558,7 @@ checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", - "clap", + "clap 2.34.0", "criterion-plot", "csv", "itertools", @@ -1566,9 +1673,9 @@ dependencies = [ [[package]] name = "crypto-mac" -version = "0.11.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" +checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" dependencies = [ "generic-array", "subtle", @@ -1595,6 +1702,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "ctr" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" +dependencies = [ + "cipher 0.3.0", +] + [[package]] name = "ctr" version = "0.8.0" @@ -1615,11 +1731,11 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.2" +version = "3.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b467862cc8610ca6fc9a1532d7777cee0804e678ab45410897b9396495994a0b" +checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345" dependencies = [ - "nix 0.27.1", + "nix 0.28.0", "windows-sys 0.52.0", ] @@ -1634,7 +1750,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "platforms 3.3.0", + "platforms 3.4.0", "rustc_version 0.4.0", "subtle", "zeroize", @@ -1648,7 +1764,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -1657,8 +1773,18 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core 0.14.4", + "darling_macro 0.14.4", ] [[package]] @@ -1675,13 +1801,38 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "darling_core" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 1.0.109", +] + [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core", + "darling_core 0.13.4", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core 0.14.4", "quote", "syn 1.0.109", ] @@ -1744,7 +1895,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "beacon_node", - "clap", + "clap 2.34.0", "clap_utils", "environment", "hex", @@ -1799,9 +1950,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "pem-rfc7468", @@ -1850,7 +2001,38 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", +] + +[[package]] +name = "derive_builder" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" +dependencies = [ + "darling 0.14.4", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder_macro" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e" +dependencies = [ + "derive_builder_core", + "syn 1.0.109", ] [[package]] @@ -1866,13 +2048,26 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "dialoguer" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" +dependencies = [ + "console", + "shell-words", + "tempfile", + "thiserror", + "zeroize", +] + [[package]] name = "diesel" -version = "2.1.4" +version = "2.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8" +checksum = "ff236accb9a5069572099f0b350a92e9560e8e63a9b8d546162f4a5e03026bb2" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "byteorder", "diesel_derives", "itoa", @@ -1882,14 +2077,14 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.1.2" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" +checksum = "14701062d6bed917b5c7103bdffaee1e4609279e240488ad24e7bd979ca6866c" dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -1909,7 +2104,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -1937,7 +2132,7 @@ dependencies = [ name = "directory" version = "0.1.0" dependencies = [ - "clap", + "clap 2.34.0", "clap_utils", "eth2_network_config", ] @@ -1990,7 +2185,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bac33cb3f99889a57e56a8c6ccb77aaf0cfc7787602b7af09783f736d77314e1" dependencies = [ "aes 0.7.5", - "aes-gcm 0.9.4", + "aes-gcm 0.9.2", "arrayvec", "delay_map", "enr", @@ -2022,7 
+2217,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -2031,6 +2226,29 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" +[[package]] +name = "dtt" +version = "0.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6b2dd9ee2d76888dc4c17d6da74629fa11b3cb1e8094fdc159b7f8ff259fc88" +dependencies = [ + "regex", + "serde", + "time", +] + +[[package]] +name = "duct" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4ab5718d1224b63252cd0c6f74f6480f9ffeb117438a2e0f5cf6d9a4798929c" +dependencies = [ + "libc", + "once_cell", + "os_pipe", + "shared_child", +] + [[package]] name = "dunce" version = "1.0.4" @@ -2055,7 +2273,7 @@ version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.8", + "der 0.7.9", "digest 0.10.7", "elliptic-curve 0.13.8", "rfc6979 0.4.0", @@ -2125,9 +2343,9 @@ dependencies = [ [[package]] name = "either" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "elliptic-curve" @@ -2168,11 +2386,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] @@ -2205,7 +2429,17 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", +] + +[[package]] +name = "env_filter" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" +dependencies = [ + "log", + "regex", ] [[package]] @@ -2231,6 +2465,19 @@ dependencies = [ "termcolor", ] +[[package]] +name = "env_logger" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "humantime", + "log", +] + [[package]] name = "environment" version = "0.1.2" @@ -2418,7 +2665,7 @@ dependencies = [ "sha2 0.9.9", "tempfile", "unicode-normalization", - "uuid", + "uuid 0.8.2", "zeroize", ] @@ -2458,7 +2705,7 @@ dependencies = [ "serde_repr", "tempfile", "tiny-bip39", - "uuid", + "uuid 0.8.2", ] [[package]] @@ -2602,7 +2849,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6085d7fd3cf84bd2b8fec150d54c8467fb491d8db9c460607c5534f653a0ee38" dependencies = [ - "darling", + "darling 0.13.4", "proc-macro2", "quote", "syn 1.0.109", @@ -2712,7 +2959,7 @@ dependencies = [ "getrandom", "hashers", "hex", - "http 0.2.11", + "http 0.2.12", "once_cell", 
"parking_lot 0.11.2", "pin-project", @@ -2858,9 +3105,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" [[package]] name = "fastrlp" @@ -2901,9 +3148,9 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" +checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" [[package]] name = "field-offset" @@ -2915,6 +3162,12 @@ dependencies = [ "rustc_version 0.4.0", ] +[[package]] +name = "figlet-rs" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4742a071cd9694fc86f9fa1a08fa3e53d40cc899d7ee532295da2d085639fbc5" + [[package]] name = "filesystem" version = "0.1.0" @@ -3020,6 +3273,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "funty" version = "1.1.0" @@ -3093,9 +3352,9 @@ checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ "futures-core", "pin-project-lite", @@ -3109,7 +3368,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -3119,7 +3378,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls", + "rustls 0.21.11", ] [[package]] @@ -3212,9 +3471,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "js-sys", @@ -3235,12 +3494,12 @@ dependencies = [ [[package]] name = "ghash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ "opaque-debug", - "polyval 0.6.1", + "polyval 0.6.2", ] [[package]] @@ -3266,7 +3525,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -3339,27 +3598,8 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 0.2.11", - "indexmap 2.2.5", - "slab", - "tokio", - "tokio-util 0.7.10", - "tracing", -] - -[[package]] -name = "h2" 
-version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816ec7294445779408f36fe57bc5b7fc1cf59664059096c65f905c1c61f58069" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 1.0.0", - "indexmap 2.2.5", + "http 0.2.12", + "indexmap 2.2.6", "slab", "tokio", "tokio-util 0.7.10", @@ -3439,7 +3679,7 @@ dependencies = [ "base64 0.21.7", "bytes", "headers-core", - "http 0.2.11", + "http 0.2.12", "httpdate", "mime", "sha1", @@ -3451,7 +3691,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http 0.2.11", + "http 0.2.12", ] [[package]] @@ -3498,9 +3738,9 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hickory-proto" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "091a6fbccf4860009355e3efc52ff4acf37a63489aad7435372d44ceeb6fbbcf" +checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" dependencies = [ "async-trait", "cfg-if", @@ -3523,9 +3763,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35b8f021164e6a984c9030023544c57789c51760065cd510572fedcfb04164e8" +checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" dependencies = [ "cfg-if", "futures-util", @@ -3567,7 +3807,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ - "crypto-mac 0.11.1", + "crypto-mac 0.11.0", "digest 0.9.0", ] @@ -3604,9 +3844,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -3615,9 +3855,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -3631,7 +3871,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http 0.2.11", + "http 0.2.12", "pin-project-lite", ] @@ -3642,18 +3882,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", - "http 1.0.0", + "http 1.1.0", ] [[package]] name = "http-body-util" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" dependencies = [ "bytes", - "futures-util", - "http 1.0.0", + "futures-core", + "http 1.1.0", "http-body 1.0.0", "pin-project-lite", ] @@ -3754,8 +3994,8 @@ dependencies = [ "futures-channel", "futures-core", 
"futures-util", - "h2 0.3.26", - "http 0.2.11", + "h2", + "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", @@ -3770,15 +4010,14 @@ dependencies = [ [[package]] name = "hyper" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.4", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "httparse", "httpdate", @@ -3795,11 +4034,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.28", - "rustls", + "rustls 0.21.11", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", ] [[package]] @@ -3823,9 +4062,9 @@ checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", - "hyper 1.2.0", + "hyper 1.3.1", "pin-project-lite", "socket2 0.5.6", "tokio", @@ -3919,7 +4158,7 @@ dependencies = [ "attohttpc", "bytes", "futures", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.28", "log", "rand", @@ -4002,9 +4241,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.5" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -4037,7 +4276,7 @@ version = "0.2.0" dependencies = [ "bytes", "hex", - "yaml-rust", + "serde_yml", ] [[package]] @@ -4067,7 +4306,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ "socket2 0.5.6", - "widestring 1.0.2", + "widestring 1.1.0", "windows-sys 0.48.0", "winreg", ] @@ -4100,9 +4339,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jemalloc-ctl" @@ -4137,18 +4376,18 @@ dependencies = [ [[package]] name = "jobserver" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -4261,7 +4500,7 @@ dependencies = [ "account_utils", "beacon_chain", "bls", - "clap", + "clap 2.34.0", "clap_utils", "deposit_contract", "directory", @@ -4349,12 +4588,12 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2caa5afb8bf9f3a2652760ce7d4f62d21c4d5a423e68466fca30df82f2330164" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -4509,7 +4748,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "999ec70441b2fb35355076726a6bc466c932e9bdc66f6a11c6c0aa17c7ab9be0" dependencies = [ "asn1_der", - "bs58 0.5.0", + "bs58 0.5.1", "ed25519-dalek", "hkdf", "libsecp256k1", @@ -4640,7 +4879,7 @@ dependencies = [ "quinn", "rand", "ring 0.16.20", - "rustls", + "rustls 0.21.11", "socket2 0.5.6", "thiserror", "tokio", @@ -4679,7 +4918,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -4711,8 +4950,8 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.16.20", - "rustls", - "rustls-webpki", + "rustls 0.21.11", + "rustls-webpki 0.101.7", "thiserror", "x509-parser", "yasna", @@ -4751,13 +4990,12 @@ dependencies = [ [[package]] name = "libredox" -version = "0.0.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "libc", - "redox_syscall 0.4.1", ] [[package]] @@ -4821,9 +5059,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.15" +version = "1.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" +checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" dependencies = [ "cc", "pkg-config", @@ -4840,7 +5078,7 @@ dependencies = [ "beacon_processor", "bls", "boot_node", - "clap", + "clap 2.34.0", "clap_utils", "database_manager", "directory", @@ -5148,15 +5386,15 @@ checksum = "8878cd8d1b3c8c8ae4b2ba0a36652b7cf192f618a599a7fbdfa25cffd4ea72dd" [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memoffset" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" dependencies = [ "autocfg", ] @@ -5188,7 +5426,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37cb4045d5677b7da537f8cb5d0730d5b6414e3cc81c61e4b50e1f0cbdc73909" dependencies = [ - "darling", + "darling 0.13.4", "itertools", "proc-macro2", "quote", @@ -5417,9 +5655,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" +checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" dependencies = [ "bytes", "futures", @@ -5492,12 +5730,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +checksum = 
"ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", + "cfg_aliases", "libc", ] @@ -5701,7 +5940,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", "foreign-types", "libc", @@ -5718,7 +5957,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -5738,9 +5977,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.101" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", @@ -5772,6 +6011,16 @@ dependencies = [ "types", ] +[[package]] +name = "os_pipe" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57119c3b893986491ec9aa85056780d3a0f3cf4da7cc09dd3650dbd6c6738fb9" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "overload" version = "0.1.1" @@ -5919,7 +6168,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" dependencies = [ - "crypto-mac 0.11.1", + "crypto-mac 0.11.0", ] [[package]] @@ -5951,11 +6200,11 @@ dependencies = [ [[package]] name = "pem" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "serde", ] @@ -5976,9 +6225,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.8" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f8023d0fb78c8e03784ea1c7f3fa36e68a723138990b8d5a47d916b651e7a8" +checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" dependencies = [ "memchr", "thiserror", @@ -6015,29 +6264,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -6061,7 +6310,7 @@ version = "0.10.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.8", + "der 0.7.9", "spki 0.7.3", ] @@ -6079,9 +6328,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "platforms" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" +checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" [[package]] name = "plotters" @@ -6113,14 +6362,15 @@ dependencies = [ [[package]] name = "polling" -version = "3.5.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24f040dee2588b4963afb4e420540439d126f73fdacf4a9c486a96d840bac3c9" +checksum = "e0c976a60b2d7e99d6f229e414670a9b85d13ac305cc6d1e9c134de58c5aaaf6" dependencies = [ "cfg-if", "concurrent-queue", + "hermit-abi 0.3.9", "pin-project-lite", - "rustix 0.38.31", + "rustix 0.38.33", "tracing", "windows-sys 0.52.0", ] @@ -6145,14 +6395,14 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash 0.4.1", + "universal-hash 0.4.0", ] [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", @@ -6275,9 +6525,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] @@ -6314,9 +6564,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f87c10af16e0af74010d2a123d202e8363c04db5acfa91d8747f64a8524da3a" +checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" dependencies = [ "dtoa", "itoa", @@ -6332,7 +6582,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -6343,13 +6593,13 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.2", + "bitflags 2.5.0", "lazy_static", "num-traits", "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", "rusty-fork", "tempfile", "unarray", @@ -6468,7 +6718,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls", + "rustls 0.21.11", "thiserror", "tokio", "tracing", @@ -6484,7 +6734,7 @@ dependencies = [ "rand", "ring 0.16.20", "rustc-hash", - "rustls", + "rustls 0.21.11", "slab", "thiserror", "tinyvec", @@ -6506,9 +6756,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -6587,9 +6837,9 @@ dependencies = [ 
[[package]] name = "rayon" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -6611,7 +6861,7 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ - "pem 3.0.3", + "pem 3.0.4", "ring 0.16.20", "time", "yasna", @@ -6637,9 +6887,9 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ "getrandom", "libredox", @@ -6648,14 +6898,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -6669,13 +6919,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -6686,23 +6936,23 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "reqwest" -version = "0.11.24" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2 0.3.26", - "http 0.2.11", + "h2", + "http 0.2.12", "http-body 0.4.6", "hyper 0.14.28", "hyper-rustls", @@ -6715,16 +6965,16 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls", - "rustls-pemfile", + "rustls 0.21.11", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", "system-configuration", "tokio", "tokio-native-tls", - "tokio-rustls", + "tokio-rustls 0.24.1", "tokio-util 0.7.10", "tower-service", "url", @@ -6803,6 +7053,30 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" +[[package]] +name = "rlg" +version = "0.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c6ccf670238310d5c31a52fed1a3314620d037a64f1e5fbdc71b2c50909134dc" +dependencies = [ + "dtt", + "tokio", + "vrd 0.0.4", +] + +[[package]] +name = "rlg" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e02c717e23f67b23032a4acb01cf63534d6259938d592e6d2451c02f09fc368" +dependencies = [ + "dtt", + "hostname", + "serde_json", + "tokio", + "vrd 0.0.5", +] + [[package]] name = "rlp" version = "0.5.2" @@ -6851,9 +7125,9 @@ dependencies = [ [[package]] name = "ruint" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b1d9521f889713d1221270fdd63370feca7e5c71a18745343402fa86e4f04f" +checksum = "8f308135fef9fc398342da5472ce7c484529df23743fb7c734e0f3d472971e62" dependencies = [ "alloy-rlp", "ark-ff 0.3.0", @@ -6954,11 +7228,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "e3cc72858054fcff6d7dea32df2aeaee6a7c24227366d7ea429aada2f26b16ad" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys 0.4.13", @@ -6967,16 +7241,30 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" dependencies = [ "log", "ring 0.17.8", - "rustls-webpki", + "rustls-webpki 0.101.7", "sct", ] +[[package]] +name = "rustls" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +dependencies = [ + "log", + "ring 0.17.8", + "rustls-pki-types", + "rustls-webpki 0.102.2", + "subtle", + "zeroize", +] + [[package]] name = "rustls-pemfile" version = "1.0.4" @@ -6986,6 +7274,22 @@ dependencies = [ "base64 0.21.7", ] +[[package]] +name = "rustls-pemfile" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +dependencies = [ + "base64 0.22.0", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" + [[package]] name = "rustls-webpki" version = "0.101.7" @@ -6996,11 +7300,22 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "rustls-webpki" +version = "0.102.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +dependencies = [ + "ring 0.17.8", + "rustls-pki-types", + "untrusted 0.9.0", +] + [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" [[package]] name = "rusty-fork" @@ -7055,9 +7370,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.10.0" +version = "2.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" +checksum = "7c453e59a955f81fb62ee5d596b450383d699f152d350e9d23a0db2adb78e4c0" dependencies = [ "cfg-if", "derive_more", @@ -7067,9 +7382,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.10.0" +version = "2.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" +checksum = "18cf6c6447f813ef19eb450e985bcce6705f9ce7660db221b59093d15c79c4b7" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", @@ -7150,7 +7465,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", - "der 0.7.8", + "der 0.7.9", "generic-array", "pkcs8 0.10.2", "subtle", @@ -7159,9 +7474,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -7172,9 +7487,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -7223,9 +7538,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" dependencies = [ "serde_derive", ] @@ -7252,20 +7567,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ "itoa", "ryu", @@ -7274,9 +7589,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd154a240de39fdebcf5775d2675c204d7c13cf39a4c697be6493c8e734337c" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" dependencies = [ "itoa", "serde", @@ -7284,13 +7599,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] 
[[package]] @@ -7330,7 +7645,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling", + "darling 0.13.4", "proc-macro2", "quote", "syn 1.0.109", @@ -7338,17 +7653,38 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.32" +version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "itoa", "ryu", "serde", "unsafe-libyaml", ] +[[package]] +name = "serde_yml" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "196c3750ff0411738366b0d9534ca55fc74d04a3d4284a450039de950bd11938" +dependencies = [ + "dtt", + "env_logger 0.11.3", + "figlet-rs", + "indexmap 2.2.6", + "itoa", + "log", + "openssl", + "rlg 0.0.3", + "ryu", + "serde", + "unsafe-libyaml", + "uuid 1.8.0", + "xtasks", +] + [[package]] name = "sha1" version = "0.10.6" @@ -7425,6 +7761,22 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shared_child" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0d94659ad3c2137fef23ae75b03d5241d633f8acded53d672decfa0e6e0caef" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "shell-words" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" + [[package]] name = "shlex" version = "1.3.0" @@ -7433,9 +7785,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -7476,7 +7828,7 @@ dependencies = [ name = "simulator" version = "0.2.0" dependencies = [ - "clap", + "clap 2.34.0", "env_logger 0.9.3", "eth1", "eth1_test_rig", @@ -7686,9 +8038,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "snap" @@ -7762,7 +8114,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.8", + "der 0.7.9", ] [[package]] @@ -7875,6 +8227,12 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + [[package]] name = "strum" version = "0.24.1" @@ -7899,9 +8257,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "superstruct" @@ -7909,7 +8267,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5" dependencies = [ - "darling", + "darling 0.13.4", "itertools", "proc-macro2", "quote", @@ -7939,9 +8297,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.52" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ "proc-macro2", "quote", @@ -7954,6 +8312,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "synstructure" version = "0.12.6" @@ -8060,7 +8424,7 @@ checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", - "rustix 0.38.31", + "rustix 0.38.33", "windows-sys 0.52.0", ] @@ -8128,22 +8492,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -8167,9 +8531,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -8188,9 +8552,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -8262,15 +8626,16 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", "libc", "mio", "num_cpus", + "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", "socket2 
0.5.6", @@ -8296,7 +8661,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -8341,15 +8706,26 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls", + "rustls 0.21.11", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", "pin-project-lite", @@ -8424,7 +8800,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", @@ -8437,7 +8813,7 @@ version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "toml_datetime", "winnow", ] @@ -8502,7 +8878,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -8590,7 +8966,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84303a9c7cda5f085a3ed9cd241d1e95e04d88aab1d679b02f212e653537ba86" dependencies = [ - "darling", + "darling 0.13.4", "quote", "syn 1.0.109", ] @@ -8744,9 +9120,9 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" +checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ "generic-array", "subtle", @@ -8764,9 +9140,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" [[package]] name = "unsigned-varint" @@ -8826,6 +9202,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" version = "0.8.2" @@ -8836,6 +9218,15 @@ dependencies = [ "serde", ] +[[package]] +name = "uuid" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +dependencies = [ + "getrandom", +] + [[package]] name = "validator_client" version = "0.3.5" @@ -8843,7 +9234,7 @@ 
dependencies = [ "account_utils", "bincode", "bls", - "clap", + "clap 2.34.0", "clap_utils", "deposit_contract", "directory", @@ -8855,7 +9246,7 @@ dependencies = [ "filesystem", "futures", "hex", - "hyper 1.2.0", + "hyper 1.3.1", "itertools", "lazy_static", "libsecp256k1", @@ -8915,7 +9306,7 @@ version = "0.1.0" dependencies = [ "account_utils", "bls", - "clap", + "clap 2.34.0", "clap_utils", "environment", "eth2", @@ -8964,6 +9355,25 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +[[package]] +name = "vrd" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a81b8b5b404f3d7afa1b8142a6bc980c20cd68556c634c3db517871aa0402521" +dependencies = [ + "rand", +] + +[[package]] +name = "vrd" +version = "0.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee1067b8d17481f5be71b59d11c329e955ffe36348907e0a4a41b619682bb4af" +dependencies = [ + "rand", + "serde", +] + [[package]] name = "wait-timeout" version = "0.2.0" @@ -8994,29 +9404,28 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e92e22e03ff1230c03a1a8ee37d2f89cd489e2e541b7550d6afad96faed169" +checksum = "4378d202ff965b011c64817db11d5829506d3404edeadb61f190d111da3f231c" dependencies = [ "bytes", "futures-channel", "futures-util", "headers", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.28", "log", "mime", "mime_guess", "percent-encoding", "pin-project", - "rustls-pemfile", + "rustls-pemfile 2.1.2", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", - "tokio-stream", + "tokio-rustls 0.25.0", "tokio-util 0.7.10", "tower-service", "tracing", @@ -9056,9 +9465,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -9066,24 +9475,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -9093,9 +9502,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9103,22 +9512,22 @@ dependencies = [ 
[[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-streams" @@ -9157,14 +9566,14 @@ dependencies = [ "beacon_node", "bls", "byteorder", - "clap", + "clap 2.34.0", "diesel", "diesel_migrations", "env_logger 0.9.3", "eth2", "hex", "http_api", - "hyper 1.2.0", + "hyper 1.3.1", "log", "logging", "network", @@ -9185,9 +9594,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -9227,9 +9636,9 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "whoami" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fec781d48b41f8163426ed18e8fc2864c12937df9ce54c88ede7bd47270893e" +checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" dependencies = [ "redox_syscall 0.4.1", "wasite", @@ -9244,9 +9653,9 @@ checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" [[package]] name = "widestring" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -9316,7 +9725,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -9343,7 +9752,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -9378,17 +9787,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - "windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 
0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -9405,9 +9815,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -9423,9 +9833,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -9441,9 +9851,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -9459,9 +9875,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -9477,9 +9893,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -9495,9 +9911,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -9513,9 +9929,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" @@ -9601,9 +10017,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.19" +version = "0.8.20" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" +checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" [[package]] name = "xmltree" @@ -9615,12 +10031,23 @@ dependencies = [ ] [[package]] -name = "yaml-rust" -version = "0.4.5" +name = "xtasks" +version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +checksum = "940db5674e301470e6cd91098b2c68a1fad751a1623575d1133f7456146e6d2f" dependencies = [ - "linked-hash-map", + "anyhow", + "clap 4.5.4", + "derive_builder", + "dialoguer", + "dtt", + "duct", + "fs_extra", + "glob", + "rlg 0.0.2", + "serde", + "serde_json", + "vrd 0.0.5", ] [[package]] @@ -9679,7 +10106,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -9699,7 +10126,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -9743,9 +10170,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 24775b728de..69f6835f575 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -174,7 +174,7 @@ tree_hash = "0.5" tree_hash_derive = "0.5" url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } -warp = { version = "0.3.6", default-features = false, features = ["tls"] } +warp = { version = "0.3.7", default-features = false, features = ["tls"] } zeroize = { version = "1", features = ["zeroize_derive"] } zip = "0.6" @@ -182,7 +182,7 @@ zip = "0.6" account_utils = { path = "common/account_utils" } beacon_chain = { path = "beacon_node/beacon_chain" } beacon_node = { path = "beacon_node" } -beacon_processor = { path = "beacon_node/beacon_processor" } +beacon_processor = { path = "beacon_node/beacon_processor" } bls = { path = "crypto/bls" } cached_tree_hash = { path = "consensus/cached_tree_hash" } clap_utils = { path = "common/clap_utils" } @@ -219,7 +219,7 @@ network = { path = "beacon_node/network" } operation_pool = { path = "beacon_node/operation_pool" } pretty_reqwest_error = { path = "common/pretty_reqwest_error" } proto_array = { path = "consensus/proto_array" } -safe_arith = {path = "consensus/safe_arith"} +safe_arith = { path = "consensus/safe_arith" } sensitive_url = { path = "common/sensitive_url" } slasher = { path = "slasher" } slashing_protection = { path = "validator_client/slashing_protection" } diff --git a/consensus/int_to_bytes/Cargo.toml b/consensus/int_to_bytes/Cargo.toml index 03bec9d3801..1958fac6e56 100644 --- a/consensus/int_to_bytes/Cargo.toml +++ b/consensus/int_to_bytes/Cargo.toml @@ -8,5 +8,5 @@ edition = { workspace = true } bytes = { workspace = true } [dev-dependencies] -yaml-rust = "0.4.4" hex = { workspace = true } +serde_yml = "0.0.4" diff --git a/consensus/int_to_bytes/src/lib.rs b/consensus/int_to_bytes/src/lib.rs index 589c72d249d..076e7154647 100644 --- a/consensus/int_to_bytes/src/lib.rs +++ b/consensus/int_to_bytes/src/lib.rs @@ -78,8 +78,7 @@ pub fn 
int_to_bytes96(int: u64) -> Vec<u8> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use std::{fs::File, io::prelude::*, path::PathBuf};
-    use yaml_rust::yaml;
+    use std::{collections::HashMap, fs::File, io::prelude::*, path::PathBuf};
 
     #[test]
     fn fixed_bytes32() {
@@ -111,14 +110,13 @@ mod tests {
 
         file.read_to_string(&mut yaml_str).unwrap();
 
-        let docs = yaml::YamlLoader::load_from_str(&yaml_str).unwrap();
-        let doc = &docs[0];
-        let test_cases = doc["test_cases"].as_vec().unwrap();
+        let docs: HashMap<String, serde_yml::Value> = serde_yml::from_str(&yaml_str).unwrap();
+        let test_cases = docs["test_cases"].as_sequence().unwrap();
 
         for test_case in test_cases {
             let byte_length = test_case["byte_length"].as_i64().unwrap() as u64;
             let int = test_case["int"].as_i64().unwrap() as u64;
-            let bytes_string = test_case["bytes"].clone().into_string().unwrap();
+            let bytes_string = test_case["bytes"].as_str().unwrap();
             let bytes = hex::decode(bytes_string.replace("0x", "")).unwrap();
 
             match byte_length {

From 67f8405921750f6a21e5c21d54688ecf31d4e825 Mon Sep 17 00:00:00 2001
From: Mac L
Date: Tue, 23 Apr 2024 01:08:36 +1000
Subject: [PATCH 05/13] Update Simulator tests (#5520)

* Rewrite Simulator
* Add fallback simulator
* Try Sean's test fix
* More fixes
* Cleanup
* Merge branch 'unstable' into update-simulator
* Update cli.rs
* Add sync sim to basic sim
* Formatting
* Add fixes and new block production check
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into update-simulator
* fix compile
---
 .github/workflows/test-suite.yml              |  58 +--
 Cargo.lock                                    |   6 +-
 beacon_node/client/Cargo.toml                 |   1 +
 beacon_node/client/src/builder.rs             |  16 +
 beacon_node/client/src/config.rs              |   5 +
 .../test_utils/execution_block_generator.rs   |  34 +-
 .../src/test_utils/handle_rpc.rs              |   6 +
 .../execution_layer/src/test_utils/mod.rs     |   1 +
 testing/simulator/Cargo.toml                  |   5 +-
 .../src/{eth1_sim.rs => basic_sim.rs}         | 274 +++++------
 testing/simulator/src/checks.rs               | 174 +++++++-
 testing/simulator/src/cli.rs                  | 163 ++++----
 testing/simulator/src/fallback_sim.rs         | 261 ++++++++++++
 testing/simulator/src/local_network.rs        | 355 ++++++++++------
 testing/simulator/src/main.rs                 |  26 +-
 testing/simulator/src/no_eth1_sim.rs          | 172 --------
 testing/simulator/src/sync_sim.rs             | 390 ------------------
 validator_client/src/block_service.rs         |   1 +
 18 files changed, 928 insertions(+), 1020 deletions(-)
 rename testing/simulator/src/{eth1_sim.rs => basic_sim.rs} (51%)
 create mode 100644 testing/simulator/src/fallback_sim.rs
 delete mode 100644 testing/simulator/src/no_eth1_sim.rs
 delete mode 100644 testing/simulator/src/sync_sim.rs

diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml
index a8db6fab8fd..413dd2b95dd 100644
--- a/.github/workflows/test-suite.yml
+++ b/.github/workflows/test-suite.yml
@@ -225,8 +225,8 @@ jobs:
         run: docker build --build-arg FEATURES=portable -t lighthouse:local .
- name: Test the built image run: docker run -t lighthouse:local lighthouse --version - eth1-simulator-ubuntu: - name: eth1-simulator-ubuntu + basic-simulator-ubuntu: + name: basic-simulator-ubuntu runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -235,42 +235,10 @@ jobs: with: channel: stable cache-target: release - - name: Install Foundry (anvil) - uses: foundry-rs/foundry-toolchain@v1 - with: - version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - - name: Run the beacon chain sim that starts from an eth1 contract - run: cargo run --release --bin simulator eth1-sim - merge-transition-ubuntu: - name: merge-transition-ubuntu - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Get latest version of stable Rust - uses: moonrepo/setup-rust@v1 - with: - channel: stable - cache-target: release - - name: Install Foundry (anvil) - uses: foundry-rs/foundry-toolchain@v1 - with: - version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - - name: Run the beacon chain sim and go through the merge transition - run: cargo run --release --bin simulator eth1-sim --post-merge - no-eth1-simulator-ubuntu: - name: no-eth1-simulator-ubuntu - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Get latest version of stable Rust - uses: moonrepo/setup-rust@v1 - with: - channel: stable - cache-target: release - - name: Run the beacon chain sim without an eth1 connection - run: cargo run --release --bin simulator no-eth1-sim - syncing-simulator-ubuntu: - name: syncing-simulator-ubuntu + - name: Run a basic beacon chain sim that starts from Bellatrix + run: cargo run --release --bin simulator basic-sim + fallback-simulator-ubuntu: + name: fallback-simulator-ubuntu runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -279,12 +247,8 @@ jobs: with: channel: stable cache-target: release - - name: Install Foundry (anvil) - uses: foundry-rs/foundry-toolchain@v1 - with: - version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - - name: Run the syncing simulator - run: cargo run --release --bin simulator syncing-sim + - name: Run a beacon chain sim which tests VC fallback behaviour + run: cargo run --release --bin simulator fallback-sim doppelganger-protection-test: name: doppelganger-protection-test runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} @@ -442,10 +406,8 @@ jobs: 'state-transition-vectors-ubuntu', 'ef-tests-ubuntu', 'dockerfile-ubuntu', - 'eth1-simulator-ubuntu', - 'merge-transition-ubuntu', - 'no-eth1-simulator-ubuntu', - 'syncing-simulator-ubuntu', + 'basic-simulator-ubuntu', + 'fallback-simulator-ubuntu', 'doppelganger-protection-test', 'execution-engine-integration-ubuntu', 'check-code', diff --git a/Cargo.lock b/Cargo.lock index 79be0a55f18..4e3557c7efb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1420,6 +1420,7 @@ dependencies = [ "time", "timer", "tokio", + "tree_hash", "types", ] @@ -7831,13 +7832,16 @@ dependencies = [ "clap 2.34.0", "env_logger 0.9.3", "eth1", - "eth1_test_rig", + "eth2_network_config", + "ethereum-types 0.14.1", "execution_layer", "futures", "node_test_rig", "parking_lot 0.12.1", "rayon", "sensitive_url", + "serde_json", + "ssz_types", "tokio", "types", ] diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 03cbcc9ff7f..16c4a947a66 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -46,3 +46,4 @@ execution_layer = { workspace = true } beacon_processor = { workspace = true } 
 num_cpus = { workspace = true }
 ethereum_ssz = { workspace = true }
+tree_hash = { workspace = true }
diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs
index 6c505751542..e7f201b8521 100644
--- a/beacon_node/client/src/builder.rs
+++ b/beacon_node/client/src/builder.rs
@@ -26,6 +26,7 @@ use eth2::{
     types::{BlockId, StateId},
     BeaconNodeHttpClient, Error as ApiError, Timeouts,
 };
+use execution_layer::test_utils::generate_genesis_header;
 use execution_layer::ExecutionLayer;
 use futures::channel::mpsc::Receiver;
 use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH};
@@ -267,6 +268,21 @@ where
             )?;
             builder.genesis_state(genesis_state).map(|v| (v, None))?
         }
+        ClientGenesis::InteropMerge {
+            validator_count,
+            genesis_time,
+        } => {
+            let execution_payload_header = generate_genesis_header(&spec, true);
+            let keypairs = generate_deterministic_keypairs(validator_count);
+            let genesis_state = interop_genesis_state(
+                &keypairs,
+                genesis_time,
+                Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
+                execution_payload_header,
+                &spec,
+            )?;
+            builder.genesis_state(genesis_state).map(|v| (v, None))?
+        }
         ClientGenesis::GenesisState => {
             info!(
                 context.log(),
diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs
index 48ad77abc58..a441e2c186c 100644
--- a/beacon_node/client/src/config.rs
+++ b/beacon_node/client/src/config.rs
@@ -24,6 +24,11 @@ pub enum ClientGenesis {
         validator_count: usize,
         genesis_time: u64,
     },
+    // Creates a genesis state similar to the 2019 Canada specs, but starting post-Merge.
+    InteropMerge {
+        validator_count: usize,
+        genesis_time: u64,
+    },
     /// Reads the genesis state and other persisted data from the `Store`.
     FromStore,
     /// Connects to an eth1 node and waits until it can create the genesis state from the deposit
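For orientation, a minimal sketch of selecting the new `InteropMerge` genesis mode. The field values are invented for illustration, and the import path is assumed from the diff above; the simulator wires in its own validator count and genesis time.

    // Sketch only: hypothetical values, not the simulator's real defaults.
    use client::config::ClientGenesis;

    fn example_genesis() -> ClientGenesis {
        // Skips the pre-Merge phase entirely, unlike the plain Interop variant.
        ClientGenesis::InteropMerge {
            validator_count: 80,         // assumed count, for illustration
            genesis_time: 1_700_000_000, // assumed unix timestamp
        }
    }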
diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
index 08f8ca68435..bac2304fa85 100644
--- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
+++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
@@ -91,7 +91,14 @@ impl<T: EthSpec> Block<T> {
     pub fn as_execution_block_with_tx(&self) -> Option<ExecutionBlockWithTransactions<T>> {
         match self {
             Block::PoS(payload) => Some(payload.clone().try_into().unwrap()),
-            Block::PoW(_) => None,
+            Block::PoW(block) => Some(
+                ExecutionPayload::Merge(ExecutionPayloadMerge {
+                    block_hash: block.block_hash,
+                    ..Default::default()
+                })
+                .try_into()
+                .unwrap(),
+            ),
         }
     }
 }
@@ -190,6 +197,19 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
             .map(|block| block.as_execution_block(self.terminal_total_difficulty))
     }
 
+    pub fn genesis_block(&self) -> Option<Block<T>> {
+        if let Some(genesis_block_hash) = self.block_hashes.get(&0) {
+            self.blocks.get(genesis_block_hash.first()?).cloned()
+        } else {
+            None
+        }
+    }
+
+    pub fn genesis_execution_block(&self) -> Option<ExecutionBlock> {
+        self.genesis_block()
+            .map(|block| block.as_execution_block(self.terminal_total_difficulty))
+    }
+
     pub fn block_by_number(&self, number: u64) -> Option<Block<T>> {
         // Get the latest canonical head block
         let mut latest_block = self.latest_block()?;
@@ -502,13 +522,6 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
         let id = match payload_attributes {
             None => None,
             Some(attributes) => {
-                if !self.blocks.iter().any(|(_, block)| {
-                    block.block_hash() == self.terminal_block_hash
-                        || block.block_number() == self.terminal_block_number
-                }) {
-                    return Err("refusing to create payload id before terminal block".to_string());
-                }
-
                 let parent = self
                     .blocks
                     .get(&head_block_hash)
@@ -766,12 +779,14 @@ pub fn generate_genesis_header<T: EthSpec>(
     generate_genesis_block(spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK)
         .ok()
         .map(|block| block.block_hash);
+    let empty_transactions_root = Transactions::<T>::empty().tree_hash_root();
     match genesis_fork {
         ForkName::Base | ForkName::Altair => None,
         ForkName::Merge => {
             if post_transition_merge {
                 let mut header = ExecutionPayloadHeader::Merge(<_>::default());
                 *header.block_hash_mut() = genesis_block_hash.unwrap_or_default();
+                *header.transactions_root_mut() = empty_transactions_root;
                 Some(header)
             } else {
                 Some(ExecutionPayloadHeader::<T>::Merge(<_>::default()))
             }
         }
         ForkName::Capella => {
             let mut header = ExecutionPayloadHeader::Capella(<_>::default());
             *header.block_hash_mut() = genesis_block_hash.unwrap_or_default();
+            *header.transactions_root_mut() = empty_transactions_root;
             Some(header)
         }
         ForkName::Deneb => {
             let mut header = ExecutionPayloadHeader::Deneb(<_>::default());
             *header.block_hash_mut() = genesis_block_hash.unwrap_or_default();
+            *header.transactions_root_mut() = empty_transactions_root;
             Some(header)
         }
         ForkName::Electra => {
             let mut header = ExecutionPayloadHeader::Electra(<_>::default());
             *header.block_hash_mut() = genesis_block_hash.unwrap_or_default();
+            *header.transactions_root_mut() = empty_transactions_root;
             Some(header)
         }
     }
diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs
index 77d972ab88e..e0ca07dcc6e 100644
--- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs
+++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs
@@ -49,6 +49,12 @@ pub async fn handle_rpc<T: EthSpec>(
             .latest_execution_block(),
         )
         .unwrap()),
+        "0x0" => Ok(serde_json::to_value(
+            ctx.execution_block_generator
+                .read()
+                .genesis_execution_block(),
+        )
+        .unwrap()),
         other => Err((
             format!("The tag {} is not supported", other),
             BAD_PARAMS_ERROR_CODE,
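With the new "0x0" arm, the mock execution engine can answer `eth_getBlockByNumber` queries for the genesis block via `genesis_execution_block()`. As a hedged illustration, this is the standard Eth JSON-RPC body such a query carries (built with serde_json, which the diff adds as a simulator dependency; the mock server's transport setup is omitted):

    use serde_json::json;

    fn main() {
        // Ask for the genesis block by number; `false` requests tx hashes only.
        let request = json!({
            "jsonrpc": "2.0",
            "method": "eth_getBlockByNumber",
            "params": ["0x0", false],
            "id": 1
        });
        println!("{request}");
    }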
+#[derive(Clone)] pub struct MockExecutionConfig { pub server_config: Config, pub jwt_key: JwtKey, diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index eadcaf51b20..d7ff7b3dd85 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -14,8 +14,11 @@ types = { workspace = true } parking_lot = { workspace = true } futures = { workspace = true } tokio = { workspace = true } -eth1_test_rig = { workspace = true } env_logger = { workspace = true } clap = { workspace = true } rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } +ssz_types = { workspace = true } +ethereum-types = { workspace = true } +eth2_network_config = { workspace = true } +serde_json = { workspace = true } diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/basic_sim.rs similarity index 51% rename from testing/simulator/src/eth1_sim.rs rename to testing/simulator/src/basic_sim.rs index 20c7c9ce9ab..755bb71b430 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -1,50 +1,48 @@ -use crate::local_network::{EXECUTION_PORT, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; +use crate::local_network::LocalNetworkParams; +use crate::local_network::TERMINAL_BLOCK; use crate::{checks, LocalNetwork}; use clap::ArgMatches; -use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; -use eth1_test_rig::AnvilEth1Instance; use crate::retry::with_retry; -use execution_layer::http::deposit_methods::Eth1Id; use futures::prelude::*; -use node_test_rig::environment::RuntimeContext; use node_test_rig::{ environment::{EnvironmentBuilder, LoggerConfig}, - testing_client_config, testing_validator_config, ApiTopic, ClientConfig, ClientGenesis, - ValidatorFiles, + testing_validator_config, ApiTopic, ValidatorFiles, }; use rayon::prelude::*; -use sensitive_url::SensitiveUrl; use std::cmp::max; -use std::net::Ipv4Addr; use std::time::Duration; use tokio::time::sleep; use types::{Epoch, EthSpec, MinimalEthSpec}; const END_EPOCH: u64 = 16; -const ALTAIR_FORK_EPOCH: u64 = 1; -const BELLATRIX_FORK_EPOCH: u64 = 2; +const GENESIS_DELAY: u64 = 32; +const ALTAIR_FORK_EPOCH: u64 = 0; +const BELLATRIX_FORK_EPOCH: u64 = 0; +const CAPELLA_FORK_EPOCH: u64 = 1; +const DENEB_FORK_EPOCH: u64 = 2; +//const ELECTRA_FORK_EPOCH: u64 = 3; const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; -pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { - let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default"); - let proposer_nodes = value_t!(matches, "proposer-nodes", usize).unwrap_or(0); - println!("PROPOSER-NODES: {}", proposer_nodes); - let validators_per_node = value_t!(matches, "validators_per_node", usize) - .expect("missing validators_per_node default"); +pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { + let node_count = value_t!(matches, "nodes", usize).expect("Missing nodes default"); + let proposer_nodes = + value_t!(matches, "proposer-nodes", usize).expect("Missing proposer-nodes default"); + let validators_per_node = value_t!(matches, "validators-per-node", usize) + .expect("Missing validators-per-node default"); let speed_up_factor = - value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default"); - let continue_after_checks = matches.is_present("continue_after_checks"); - let post_merge_sim = matches.is_present("post-merge"); + value_t!(matches, "speed-up-factor", u64).expect("Missing speed-up-factor default"); + let log_level = 
value_t!(matches, "debug-level", String).expect("Missing default log-level"); + let continue_after_checks = matches.is_present("continue-after-checks"); - println!("Beacon Chain Simulator:"); - println!(" nodes:{}, proposer_nodes: {}", node_count, proposer_nodes); - - println!(" validators_per_node:{}", validators_per_node); - println!(" post merge simulation:{}", post_merge_sim); - println!(" continue_after_checks:{}", continue_after_checks); + println!("Basic Simulator:"); + println!(" nodes: {}", node_count); + println!(" proposer-nodes: {}", proposer_nodes); + println!(" validators-per-node: {}", validators_per_node); + println!(" speed-up-factor: {}", speed_up_factor); + println!(" continue-after-checks: {}", continue_after_checks); // Generate the directories and keystores required for the validator clients. let validator_files = (0..node_count) @@ -65,8 +63,8 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let mut env = EnvironmentBuilder::minimal() .initialize_logger(LoggerConfig { path: None, - debug_level: String::from("debug"), - logfile_debug_level: String::from("debug"), + debug_level: log_level.clone(), + logfile_debug_level: log_level, log_format: None, logfile_format: None, log_color: false, @@ -80,32 +78,29 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { .multi_threaded_tokio_runtime()? .build()?; - let eth1_block_time = Duration::from_millis(15_000 / speed_up_factor); - let spec = &mut env.eth2_config.spec; let total_validator_count = validators_per_node * node_count; - let altair_fork_version = spec.altair_fork_version; - let bellatrix_fork_version = spec.bellatrix_fork_version; + let genesis_delay = GENESIS_DELAY; + + // Convenience variables. Update these values when adding a newer fork. + let latest_fork_version = spec.deneb_fork_version; + let latest_fork_start_epoch = DENEB_FORK_EPOCH; spec.seconds_per_slot /= speed_up_factor; spec.seconds_per_slot = max(1, spec.seconds_per_slot); - spec.eth1_follow_distance = 16; - spec.genesis_delay = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2; + spec.genesis_delay = genesis_delay; spec.min_genesis_time = 0; spec.min_genesis_active_validator_count = total_validator_count as u64; - spec.seconds_per_eth1_block = eth1_block_time.as_secs(); spec.altair_fork_epoch = Some(Epoch::new(ALTAIR_FORK_EPOCH)); - // Set these parameters only if we are doing a merge simulation - if post_merge_sim { - spec.terminal_total_difficulty = TERMINAL_DIFFICULTY.into(); - spec.bellatrix_fork_epoch = Some(Epoch::new(BELLATRIX_FORK_EPOCH)); - } + spec.bellatrix_fork_epoch = Some(Epoch::new(BELLATRIX_FORK_EPOCH)); + spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH)); + spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH)); + //spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH)); - let seconds_per_slot = spec.seconds_per_slot; let slot_duration = Duration::from_secs(spec.seconds_per_slot); + let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); let initial_validator_count = spec.min_genesis_active_validator_count as usize; - let deposit_amount = env.eth2_config.spec.max_effective_balance; let context = env.core_context(); @@ -114,36 +109,36 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * Create a new `LocalNetwork` with one beacon node. 
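+ * Creation is wrapped in `with_retry` so that a transient start-up failure (for
+ * example, a port that is still in use) simply triggers another attempt, up to
+ * `max_retries` attempts in total.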
*/
let max_retries = 3;
- let (network, beacon_config) = with_retry(max_retries, || {
- Box::pin(create_local_network(
+ let (network, beacon_config, mock_execution_config) = with_retry(max_retries, || {
+ Box::pin(LocalNetwork::create_local_network(
+ None,
+ None,
LocalNetworkParams {
- eth1_block_time,
- total_validator_count,
- deposit_amount,
+ validator_count: total_validator_count,
node_count,
proposer_nodes,
- post_merge_sim,
+ genesis_delay,
},
context.clone(),
))
})
.await?;

- /*
- * One by one, add beacon nodes to the network.
- */
- for _ in 0..node_count - 1 {
+ // Add nodes to the network.
+ for _ in 0..node_count {
network
- .add_beacon_node(beacon_config.clone(), false)
+ .add_beacon_node(beacon_config.clone(), mock_execution_config.clone(), false)
.await?;
}

/*
* One by one, add proposer nodes to the network.
*/
- for _ in 0..proposer_nodes - 1 {
+ for _ in 0..proposer_nodes {
println!("Adding a proposer node");
- network.add_beacon_node(beacon_config.clone(), true).await?;
+ network
+ .add_beacon_node(beacon_config.clone(), mock_execution_config.clone(), true)
+ .await?;
}

/*
@@ -156,9 +151,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
executor.spawn(
async move {
let mut validator_config = testing_validator_config();
- if post_merge_sim {
- validator_config.fee_recipient = Some(SUGGESTED_FEE_RECIPIENT.into());
- }
+ validator_config.fee_recipient = Some(SUGGESTED_FEE_RECIPIENT.into());
println!("Adding validator client {}", i);

// Enable broadcast on every 4th node.
@@ -175,7 +168,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
.await
} else {
network_1
- .add_validator_client(validator_config, i, files, i % 2 == 0)
+ .add_validator_client(validator_config, i, files)
.await
}
.expect("should add validator");
@@ -184,25 +177,15 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
);
}

+ // Set all payloads as valid. This effectively assumes the EL is infallible.
+ network.execution_nodes.write().iter().for_each(|node| {
+ node.server.all_payloads_valid();
+ });
+
let duration_to_genesis = network.duration_to_genesis().await;
println!("Duration to genesis: {}", duration_to_genesis.as_secs());
sleep(duration_to_genesis).await;

- if post_merge_sim {
- let executor = executor.clone();
- let network_2 = network.clone();
- executor.spawn(
- async move {
- println!("Mining pow blocks");
- let mut interval = tokio::time::interval(Duration::from_secs(seconds_per_slot));
- for i in 1..=TERMINAL_BLOCK + 1 {
- interval.tick().await;
- let _ = network_2.mine_pow_blocks(i);
- }
- },
- "pow_mining",
- );
- }
/*
* Start the checks that ensure the network performs as expected.
*
@@ -211,6 +194,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
* tests start at the right time. Whilst this works well for now, it's subject to
* breakage by changes to the VC.
*/
+
let network_1 = network.clone();
let (
finalization,
@@ -221,13 +205,16 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
sync_aggregate,
transition,
light_client_update,
+ blobs,
+ start_node_with_delay,
+ sync,
) = futures::join!(
// Check that the chain finalizes at the first given opportunity.
checks::verify_first_finalization(network.clone(), slot_duration),
// Check that a block is produced at every slot.
checks::verify_full_block_production_up_to( network.clone(), - Epoch::new(END_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()), + Epoch::new(END_EPOCH).start_slot(slots_per_epoch), slot_duration, ), // Check that the chain starts with the expected validator count. @@ -246,41 +233,55 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { // Check that all nodes have transitioned to the required fork. checks::verify_fork_version( network.clone(), - if post_merge_sim { - Epoch::new(BELLATRIX_FORK_EPOCH) - } else { - Epoch::new(ALTAIR_FORK_EPOCH) - }, + Epoch::new(latest_fork_start_epoch), slot_duration, - if post_merge_sim { - bellatrix_fork_version - } else { - altair_fork_version - } + latest_fork_version, ), // Check that all sync aggregates are full. checks::verify_full_sync_aggregates_up_to( network.clone(), // Start checking for sync_aggregates at `FORK_EPOCH + 1` to account for // inefficiencies in finding subnet peers at the `fork_slot`. - Epoch::new(ALTAIR_FORK_EPOCH + 1).start_slot(MinimalEthSpec::slots_per_epoch()), - Epoch::new(END_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()), + Epoch::new(ALTAIR_FORK_EPOCH + 1).start_slot(slots_per_epoch), + Epoch::new(END_EPOCH).start_slot(slots_per_epoch), slot_duration, ), // Check that the transition block is finalized. checks::verify_transition_block_finalized( network.clone(), - Epoch::new(TERMINAL_BLOCK / MinimalEthSpec::slots_per_epoch()), + Epoch::new(TERMINAL_BLOCK / slots_per_epoch), slot_duration, - post_merge_sim + true, ), checks::verify_light_client_updates( network.clone(), // Sync aggregate available from slot 1 after Altair fork transition. - Epoch::new(ALTAIR_FORK_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()) + 1, - Epoch::new(END_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()), + Epoch::new(ALTAIR_FORK_EPOCH).start_slot(slots_per_epoch) + 1, + Epoch::new(END_EPOCH).start_slot(slots_per_epoch), slot_duration - ) + ), + checks::verify_full_blob_production_up_to( + network.clone(), + // Blobs should be available immediately after the Deneb fork. + Epoch::new(DENEB_FORK_EPOCH).start_slot(slots_per_epoch), + Epoch::new(END_EPOCH).start_slot(slots_per_epoch), + slot_duration + ), + network_1.add_beacon_node_with_delay( + beacon_config.clone(), + mock_execution_config.clone(), + END_EPOCH - 1, + slot_duration, + slots_per_epoch + ), + checks::ensure_node_synced_up_to_slot( + network.clone(), + // This must be set to be the node which was just created. Should be equal to + // `node_count`. + node_count, + Epoch::new(END_EPOCH).start_slot(slots_per_epoch), + slot_duration, + ), ); block_prod?; @@ -291,6 +292,9 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { sync_aggregate?; transition?; light_client_update?; + blobs?; + start_node_with_delay?; + sync?; // The `final_future` either completes immediately or never completes, depending on the value // of `continue_after_checks`. 
@@ -321,89 +325,3 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { Ok(()) } - -struct LocalNetworkParams { - eth1_block_time: Duration, - total_validator_count: usize, - deposit_amount: u64, - node_count: usize, - proposer_nodes: usize, - post_merge_sim: bool, -} - -async fn create_local_network( - LocalNetworkParams { - eth1_block_time, - total_validator_count, - deposit_amount, - node_count, - proposer_nodes, - post_merge_sim, - }: LocalNetworkParams, - context: RuntimeContext, -) -> Result<(LocalNetwork, ClientConfig), String> { - /* - * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit - * validators. - */ - let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?; - let deposit_contract = anvil_eth1_instance.deposit_contract; - let chain_id = anvil_eth1_instance.anvil.chain_id(); - let anvil = anvil_eth1_instance.anvil; - let eth1_endpoint = - SensitiveUrl::parse(anvil.endpoint().as_str()).expect("Unable to parse anvil endpoint."); - let deposit_contract_address = deposit_contract.address(); - - // Start a timer that produces eth1 blocks on an interval. - tokio::spawn(async move { - let mut interval = tokio::time::interval(eth1_block_time); - loop { - interval.tick().await; - let _ = anvil.evm_mine().await; - } - }); - - // Submit deposits to the deposit contract. - tokio::spawn(async move { - for i in 0..total_validator_count { - println!("Submitting deposit for validator {}...", i); - let _ = deposit_contract - .deposit_deterministic_async::(i, deposit_amount) - .await; - } - }); - - let mut beacon_config = testing_client_config(); - - beacon_config.genesis = ClientGenesis::DepositContract; - beacon_config.eth1.endpoint = Eth1Endpoint::NoAuth(eth1_endpoint); - beacon_config.eth1.deposit_contract_address = deposit_contract_address; - beacon_config.eth1.deposit_contract_deploy_block = 0; - beacon_config.eth1.lowest_cached_block_number = 0; - beacon_config.eth1.follow_distance = 1; - beacon_config.eth1.node_far_behind_seconds = 20; - beacon_config.dummy_eth1_backend = false; - beacon_config.sync_eth1_chain = true; - beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64; - beacon_config.eth1.chain_id = Eth1Id::from(chain_id); - beacon_config.network.target_peers = node_count + proposer_nodes - 1; - - beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); - beacon_config.network.enable_light_client_server = true; - beacon_config.chain.enable_light_client_server = true; - beacon_config.http_api.enable_light_client_server = true; - - if post_merge_sim { - let el_config = execution_layer::Config { - execution_endpoint: Some( - SensitiveUrl::parse(&format!("http://localhost:{}", EXECUTION_PORT)).unwrap(), - ), - ..Default::default() - }; - - beacon_config.execution_layer = Some(el_config); - } - - let network = LocalNetwork::new(context, beacon_config.clone()).await?; - Ok((network, beacon_config)) -} diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index d30e44a1174..03cc17fab3e 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,7 +1,7 @@ use crate::local_network::LocalNetwork; use node_test_rig::eth2::types::{BlockId, FinalityCheckpointsData, StateId}; use std::time::Duration; -use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, Slot, Unsigned}; +use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Slot, Unsigned}; /// Checks that all of the validators have on-boarded by the 
start of the second eth1 voting
/// period.
@@ -234,7 +234,7 @@ pub async fn verify_transition_block_finalized(
}

let first = block_hashes[0];
- if first.into_root() != Hash256::zero() && block_hashes.iter().all(|&item| item == first) {
+ if block_hashes.iter().all(|&item| item == first) {
Ok(())
} else {
Err(format!(
@@ -333,3 +333,173 @@ pub(crate) async fn verify_light_client_updates(

Ok(())
}
+
+/// Checks that a node is synced with the network.
+/// Useful for ensuring that a node which started after genesis is able to sync to the head.
+pub async fn ensure_node_synced_up_to_slot<E: EthSpec>(
+ network: LocalNetwork<E>,
+ node_index: usize,
+ upto_slot: Slot,
+ slot_duration: Duration,
+) -> Result<(), String> {
+ slot_delay(upto_slot, slot_duration).await;
+ let node = &network
+ .remote_nodes()?
+ .get(node_index)
+ .expect("Should get node")
+ .clone();
+
+ let head = node
+ .get_beacon_blocks::<E>(BlockId::Head)
+ .await
+ .ok()
+ .flatten()
+ .ok_or(format!("No head block exists on node {node_index}"))?
+ .data;
+
+ // Check the head block is synced with the rest of the network.
+ if head.slot() >= upto_slot {
+ Ok(())
+ } else {
+ Err(format!(
+ "Head not synced for node {node_index}. Found {}; Should be {upto_slot}",
+ head.slot()
+ ))
+ }
+}
+
+/// Verifies that blobs have been produced at every slot with a block from `blob_start_slot` up
+/// to and including `upto_slot`.
+pub async fn verify_full_blob_production_up_to<E: EthSpec>(
+ network: LocalNetwork<E>,
+ blob_start_slot: Slot,
+ upto_slot: Slot,
+ slot_duration: Duration,
+) -> Result<(), String> {
+ slot_delay(upto_slot, slot_duration).await;
+ let remote_nodes = network.remote_nodes()?;
+ let remote_node = remote_nodes.first().unwrap();
+
+ for slot in blob_start_slot.as_u64()..=upto_slot.as_u64() {
+ // Ensure block exists.
+ let block = remote_node
+ .get_beacon_blocks::<E>(BlockId::Slot(Slot::new(slot)))
+ .await
+ .ok()
+ .flatten();
+
+ // Only check blobs if the block exists. If you also want to ensure full block production, use
+ // the `verify_full_block_production_up_to` function.
+ if block.is_some() {
+ remote_node
+ .get_blobs::<E>(BlockId::Slot(Slot::new(slot)), None)
+ .await
+ .map_err(|e| format!("Failed to get blobs at slot {slot:?}: {e:?}"))?
+ .ok_or_else(|| format!("No blobs available at slot {slot:?}"))?;
+ }
+ }
+
+ Ok(())
+}
+
+/// Causes the beacon node at `node_index` to disconnect from the execution layer.
+pub async fn disconnect_from_execution_layer<E: EthSpec>(
+ network: LocalNetwork<E>,
+ node_index: usize,
+) -> Result<(), String> {
+ eprintln!("Disabling Execution Node {node_index}");
+
+ // Force the execution node to return the `syncing` status.
+ network.execution_nodes.read()[node_index]
+ .server
+ .all_payloads_syncing(false);
+ Ok(())
+}
+
+/// Causes the beacon node at `node_index` to reconnect to the execution layer.
+pub async fn reconnect_to_execution_layer<E: EthSpec>(
+ network: LocalNetwork<E>,
+ node_index: usize,
+) -> Result<(), String> {
+ network.execution_nodes.read()[node_index]
+ .server
+ .all_payloads_valid();
+
+ eprintln!("Enabling Execution Node {node_index}");
+ Ok(())
+}
+
+/// Ensure all validators have attested correctly.
+pub async fn check_attestation_correctness<E: EthSpec>(
+ network: LocalNetwork<E>,
+ start_epoch: u64,
+ upto_epoch: u64,
+ slot_duration: Duration,
+ // Select which node to query. Will use this node to determine the global network performance.
+ node_index: usize, + acceptable_attestation_performance: f64, +) -> Result<(), String> { + epoch_delay(Epoch::new(upto_epoch), slot_duration, E::slots_per_epoch()).await; + + let remote_node = &network.remote_nodes()?[node_index]; + + let results = remote_node + .get_lighthouse_analysis_attestation_performance( + Epoch::new(start_epoch), + Epoch::new(upto_epoch - 2), + "global".to_string(), + ) + .await + .map_err(|e| format!("Unable to get attestation performance: {e}"))?; + + let mut active_successes: f64 = 0.0; + let mut head_successes: f64 = 0.0; + let mut target_successes: f64 = 0.0; + let mut source_successes: f64 = 0.0; + + let mut total: f64 = 0.0; + + for result in results { + for epochs in result.epochs.values() { + total += 1.0; + + if epochs.active { + active_successes += 1.0; + } + if epochs.head { + head_successes += 1.0; + } + if epochs.target { + target_successes += 1.0; + } + if epochs.source { + source_successes += 1.0; + } + } + } + let active_percent = active_successes / total * 100.0; + let head_percent = head_successes / total * 100.0; + let target_percent = target_successes / total * 100.0; + let source_percent = source_successes / total * 100.0; + + eprintln!("Total Attestations: {}", total); + eprintln!("Active: {}: {}%", active_successes, active_percent); + eprintln!("Head: {}: {}%", head_successes, head_percent); + eprintln!("Target: {}: {}%", target_successes, target_percent); + eprintln!("Source: {}: {}%", source_successes, source_percent); + + if active_percent < acceptable_attestation_performance { + return Err("Active percent was below required level".to_string()); + } + if head_percent < acceptable_attestation_performance { + return Err("Head percent was below required level".to_string()); + } + if target_percent < acceptable_attestation_performance { + return Err("Target percent was below required level".to_string()); + } + if source_percent < acceptable_attestation_performance { + return Err("Source percent was below required level".to_string()); + } + + Ok(()) +} diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index ff80201051f..00af7e560ce 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -6,120 +6,121 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .author("Sigma Prime ") .about("Options for interacting with simulator") .subcommand( - SubCommand::with_name("eth1-sim") - .about( - "Lighthouse Beacon Chain Simulator creates `n` beacon node and validator clients, \ - each with `v` validators. A deposit contract is deployed at the start of the \ - simulation using a local `anvil` instance (you must have `anvil` \ - installed and avaliable on your path). All beacon nodes independently listen \ - for genesis from the deposit contract, then start operating. \ - \ + SubCommand::with_name("basic-sim") + .about( + "Runs a Beacon Chain simulation with `n` beacon node and validator clients, \ + each with `v` validators. \ + The simulation runs with a post-Merge Genesis using `mock-el`. \ As the simulation runs, there are checks made to ensure that all components \ are running correctly. 
If any of these checks fail, the simulation will \ exit immediately.", - ) - .arg(Arg::with_name("nodes") + ) + .arg( + Arg::with_name("nodes") .short("n") .long("nodes") .takes_value(true) - .default_value("4") - .help("Number of beacon nodes")) - .arg(Arg::with_name("proposer-nodes") + .default_value("3") + .help("Number of beacon nodes"), + ) + .arg( + Arg::with_name("proposer-nodes") .short("p") - .long("proposer_nodes") + .long("proposer-nodes") .takes_value(true) - .default_value("2") - .help("Number of proposer-only beacon nodes")) - .arg(Arg::with_name("validators_per_node") + .default_value("3") + .help("Number of proposer-only beacon nodes"), + ) + .arg( + Arg::with_name("validators-per-node") .short("v") - .long("validators_per_node") + .long("validators-per-node") .takes_value(true) .default_value("20") - .help("Number of validators")) - .arg(Arg::with_name("speed_up_factor") + .help("Number of validators"), + ) + .arg( + Arg::with_name("speed-up-factor") .short("s") - .long("speed_up_factor") + .long("speed-up-factor") .takes_value(true) .default_value("3") - .help("Speed up factor. Please use a divisor of 12.")) - .arg(Arg::with_name("post-merge") - .short("m") - .long("post-merge") - .takes_value(false) - .help("Simulate the merge transition")) - .arg(Arg::with_name("continue_after_checks") + .help("Speed up factor. Please use a divisor of 12."), + ) + .arg( + Arg::with_name("debug-level") + .short("d") + .long("debug-level") + .takes_value(true) + .default_value("debug") + .help("Set the severity level of the logs."), + ) + .arg( + Arg::with_name("continue-after-checks") .short("c") .long("continue_after_checks") .takes_value(false) - .help("Continue after checks (default false)")) + .help("Continue after checks (default false)"), + ), ) .subcommand( - SubCommand::with_name("no-eth1-sim") - .about("Runs a simulator that bypasses the eth1 chain. Useful for faster testing of - components that don't rely upon eth1") - .arg(Arg::with_name("nodes") - .short("n") - .long("nodes") + SubCommand::with_name("fallback-sim") + .about( + "Runs a Beacon Chain simulation with `c` validator clients where each VC is \ + connected to `b` beacon nodes with `v` validators. \ + During the simulation, all but the last connected BN for each VC are \ + disconnected from the execution layer, which causes the VC to fallback to the \ + single remaining BN. \ + At the end of the simulation, there are checks made to ensure that all VCs \ + efficiently performed this fallback, within a certain tolerance. 
\ + Otherwise, the simulation will exit and an error will be reported.", + ) + .arg( + Arg::with_name("vc-count") + .short("c") + .long("vc-count") .takes_value(true) - .default_value("4") - .help("Number of beacon nodes")) - .arg(Arg::with_name("proposer-nodes") - .short("p") - .long("proposer_nodes") + .default_value("3") + .help("Number of validator clients."), + ) + .arg( + Arg::with_name("bns-per-vc") + .short("b") + .long("bns-per-vc") .takes_value(true) .default_value("2") - .help("Number of proposer-only beacon nodes")) - .arg(Arg::with_name("validators_per_node") + .help("Number of beacon nodes per validator client."), + ) + .arg( + Arg::with_name("validators-per-vc") .short("v") - .long("validators_per_node") + .long("validators-per-vc") .takes_value(true) .default_value("20") - .help("Number of validators")) - .arg(Arg::with_name("speed_up_factor") - .short("s") - .long("speed_up_factor") - .takes_value(true) - .default_value("3") - .help("Speed up factor")) - .arg(Arg::with_name("continue_after_checks") - .short("c") - .long("continue_after_checks") - .takes_value(false) - .help("Continue after checks (default false)")) - ) - .subcommand( - SubCommand::with_name("syncing-sim") - .about("Run the syncing simulation") - .arg( - Arg::with_name("speedup") - .short("s") - .long("speedup") - .takes_value(true) - .default_value("15") - .help("Speed up factor for eth1 blocks and slot production"), + .help("Number of validators per client."), ) .arg( - Arg::with_name("initial_delay") - .short("i") - .long("initial_delay") + Arg::with_name("speed-up-factor") + .short("s") + .long("speed-up-factor") .takes_value(true) - .default_value("5") - .help("Epoch delay for new beacon node to start syncing"), + .default_value("3") + .help("Speed up factor. Please use a divisor of 12."), ) .arg( - Arg::with_name("sync_timeout") - .long("sync_timeout") + Arg::with_name("debug-level") + .short("d") + .long("debug-level") .takes_value(true) - .default_value("10") - .help("Number of epochs after which newly added beacon nodes must be synced"), + .default_value("debug") + .help("Set the severity level of the logs."), ) .arg( - Arg::with_name("strategy") - .long("strategy") - .takes_value(true) - .default_value("all") - .possible_values(&["one-node", "two-nodes", "mixed", "all"]) - .help("Sync verification strategy to run."), + Arg::with_name("continue-after-checks") + .short("c") + .long("continue_after_checks") + .takes_value(false) + .help("Continue after checks (default false)"), ), ) } diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs new file mode 100644 index 00000000000..c9deeba04d9 --- /dev/null +++ b/testing/simulator/src/fallback_sim.rs @@ -0,0 +1,261 @@ +use crate::local_network::LocalNetworkParams; +use crate::{checks, LocalNetwork}; +use clap::ArgMatches; + +use crate::retry::with_retry; +use futures::prelude::*; +use node_test_rig::{ + environment::{EnvironmentBuilder, LoggerConfig}, + testing_validator_config, ValidatorFiles, +}; +use rayon::prelude::*; +use std::cmp::max; +use std::time::Duration; +use tokio::time::sleep; +use types::{Epoch, EthSpec, MinimalEthSpec}; + +const END_EPOCH: u64 = 16; +const GENESIS_DELAY: u64 = 32; +const ALTAIR_FORK_EPOCH: u64 = 0; +const BELLATRIX_FORK_EPOCH: u64 = 0; +const CAPELLA_FORK_EPOCH: u64 = 1; +const DENEB_FORK_EPOCH: u64 = 2; +//const ELECTRA_FORK_EPOCH: u64 = 3; + +// Since simulator tests are non-deterministic and there is a non-zero chance of missed +// attestations, define an acceptable network-wide 
attestation performance. +// +// This has potential to block CI so it should be set conservatively enough that spurious failures +// don't become very common, but not so conservatively that regressions to the fallback mechanism +// cannot be detected. +const ACCEPTABLE_FALLBACK_ATTESTATION_HIT_PERCENTAGE: f64 = 85.0; + +const SUGGESTED_FEE_RECIPIENT: [u8; 20] = + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; + +pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { + let vc_count = value_t!(matches, "vc-count", usize).expect("Missing validator-count default"); + let validators_per_vc = + value_t!(matches, "validators-per-vc", usize).expect("Missing validators-per-vc default"); + let bns_per_vc = value_t!(matches, "bns-per-vc", usize).expect("Missing bns-per-vc default"); + assert!(bns_per_vc > 1); + let speed_up_factor = + value_t!(matches, "speed-up-factor", u64).expect("Missing speed-up-factor default"); + let log_level = value_t!(matches, "debug-level", String).expect("Missing default log-level"); + let continue_after_checks = matches.is_present("continue-after-checks"); + + println!("Fallback Simulator:"); + println!(" vc-count: {}", vc_count); + println!(" validators-per-vc: {}", validators_per_vc); + println!(" bns-per-vc: {}", bns_per_vc); + println!(" speed-up-factor: {}", speed_up_factor); + println!(" continue-after-checks: {}", continue_after_checks); + + // Generate the directories and keystores required for the validator clients. + let validator_files = (0..vc_count) + .into_par_iter() + .map(|i| { + println!( + "Generating keystores for validator {} of {}", + i + 1, + vc_count + ); + + let indices = (i * validators_per_vc..(i + 1) * validators_per_vc).collect::>(); + ValidatorFiles::with_keystores(&indices).unwrap() + }) + .collect::>(); + + let mut env = EnvironmentBuilder::minimal() + .initialize_logger(LoggerConfig { + path: None, + debug_level: log_level.clone(), + logfile_debug_level: log_level, + log_format: None, + logfile_format: None, + log_color: false, + disable_log_timestamp: false, + max_log_size: 0, + max_log_number: 0, + compression: false, + is_restricted: true, + sse_logging: false, + })? + .multi_threaded_tokio_runtime()? + .build()?; + + let spec = &mut env.eth2_config.spec; + + let total_validator_count = validators_per_vc * vc_count; + let node_count = vc_count * bns_per_vc; + + let genesis_delay = GENESIS_DELAY; + + spec.seconds_per_slot /= speed_up_factor; + spec.seconds_per_slot = max(1, spec.seconds_per_slot); + spec.genesis_delay = genesis_delay; + spec.min_genesis_time = 0; + spec.min_genesis_active_validator_count = total_validator_count as u64; + spec.altair_fork_epoch = Some(Epoch::new(ALTAIR_FORK_EPOCH)); + spec.bellatrix_fork_epoch = Some(Epoch::new(BELLATRIX_FORK_EPOCH)); + spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH)); + spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH)); + //spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH)); + + let slot_duration = Duration::from_secs(spec.seconds_per_slot); + let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); + + let disconnection_epoch = 1; + let epochs_disconnected = 14; + + let context = env.core_context(); + + let main_future = async { + /* + * Create a new `LocalNetwork` with one beacon node. 
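+ * The `beacon_config` and `mock_execution_config` returned here act as templates;
+ * each beacon node and mock EL added below gets its own clone of them.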
+ */
+ let max_retries = 3;
+ let (network, beacon_config, mock_execution_config) = with_retry(max_retries, || {
+ Box::pin(LocalNetwork::create_local_network(
+ None,
+ None,
+ LocalNetworkParams {
+ validator_count: total_validator_count,
+ node_count,
+ proposer_nodes: 0,
+ genesis_delay,
+ },
+ context.clone(),
+ ))
+ })
+ .await?;
+
+ // Add nodes to the network.
+ for _ in 0..node_count {
+ network
+ .add_beacon_node(beacon_config.clone(), mock_execution_config.clone(), false)
+ .await?;
+ }
+
+ /*
+ * One by one, add validators to the network.
+ */
+ let executor = context.executor.clone();
+ for (i, files) in validator_files.into_iter().enumerate() {
+ let network_1 = network.clone();
+
+ let mut beacon_nodes = Vec::with_capacity(vc_count * bns_per_vc);
+ // Each VC gets a unique set of BNs which are not shared with any other VC.
+ for j in 0..bns_per_vc {
+ beacon_nodes.push(bns_per_vc * i + j)
+ }
+
+ executor.spawn(
+ async move {
+ let mut validator_config = testing_validator_config();
+ validator_config.fee_recipient = Some(SUGGESTED_FEE_RECIPIENT.into());
+ println!("Adding validator client {}", i);
+ network_1
+ .add_validator_client_with_fallbacks(
+ validator_config,
+ i,
+ beacon_nodes,
+ files,
+ )
+ .await
+ .expect("should add validator");
+ },
+ "vc",
+ );
+ }
+
+ let duration_to_genesis = network.duration_to_genesis().await;
+ println!("Duration to genesis: {}", duration_to_genesis.as_secs());
+ sleep(duration_to_genesis).await;
+
+ let test_sequence = async {
+ checks::epoch_delay(
+ Epoch::new(disconnection_epoch),
+ slot_duration,
+ slots_per_epoch,
+ )
+ .await;
+ // Iterate through each VC and disconnect all BNs but the last node for each VC.
+ for i in 0..vc_count {
+ for j in 0..(bns_per_vc - 1) {
+ let node_index = bns_per_vc * i + j;
+ checks::disconnect_from_execution_layer(network.clone(), node_index).await?;
+ }
+ }
+ checks::epoch_delay(
+ Epoch::new(epochs_disconnected),
+ slot_duration,
+ slots_per_epoch,
+ )
+ .await;
+ // Enable all BNs.
+ for i in 0..node_count {
+ checks::reconnect_to_execution_layer(network.clone(), i).await?;
+ }
+ Ok::<(), String>(())
+ };
+
+ /*
+ * Start the checks that ensure the network performs as expected.
+ *
+ * We start these checks immediately after the validators have started. This means we're
+ * relying on the validator futures to all return immediately after genesis so that these
+ * tests start at the right time. Whilst this works well for now, it's subject to
+ * breakage by changes to the VC.
+ */
+
+ let (sequence, check_attestations, block_production) = futures::join!(
+ test_sequence,
+ checks::check_attestation_correctness(
+ network.clone(),
+ 0,
+ END_EPOCH,
+ slot_duration,
+ // Use the last node index as this will never have been disabled.
+ node_count - 1,
+ ACCEPTABLE_FALLBACK_ATTESTATION_HIT_PERCENTAGE,
+ ),
+ checks::verify_full_block_production_up_to(
+ network.clone(),
+ Epoch::new(END_EPOCH).start_slot(slots_per_epoch),
+ slot_duration,
+ ),
+ );
+ sequence?;
+ block_production?;
+ check_attestations?;
+
+ // The `final_future` either completes immediately or never completes, depending on the value
+ // of `continue_after_checks`.
+
+ if continue_after_checks {
+ future::pending::<()>().await;
+ }
+ /*
+ * End the simulation by dropping the network. This will kill all running beacon nodes and
+ * validator clients.
+ */
+ println!(
+ "Simulation complete.
Finished with {} beacon nodes and {} validator clients", + network.beacon_node_count(), + network.validator_client_count() + ); + + // Be explicit about dropping the network, as this kills all the nodes. This ensures + // all the checks have adequate time to pass. + drop(network); + Ok::<(), String>(()) + }; + + env.runtime().block_on(main_future).unwrap(); + + env.fire_signal(); + env.shutdown_on_idle(); + + Ok(()) +} diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 018954a5d3b..63f2ec93537 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -1,26 +1,95 @@ +use crate::checks::epoch_delay; +use eth2_network_config::TRUSTED_SETUP_BYTES; use node_test_rig::{ environment::RuntimeContext, eth2::{types::StateId, BeaconNodeHttpClient}, - ClientConfig, LocalBeaconNode, LocalExecutionNode, LocalValidatorClient, MockExecutionConfig, - MockServerConfig, ValidatorConfig, ValidatorFiles, + testing_client_config, ClientConfig, ClientGenesis, LocalBeaconNode, LocalExecutionNode, + LocalValidatorClient, MockExecutionConfig, MockServerConfig, ValidatorConfig, ValidatorFiles, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use std::{ + net::Ipv4Addr, ops::Deref, - time::{SystemTime, UNIX_EPOCH}, + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, }; -use std::{sync::Arc, time::Duration}; -use types::{Epoch, EthSpec}; +use types::{ChainSpec, Epoch, EthSpec}; const BOOTNODE_PORT: u16 = 42424; const QUIC_PORT: u16 = 43424; -pub const INVALID_ADDRESS: &str = "http://127.0.0.1:42423"; pub const EXECUTION_PORT: u16 = 4000; -pub const TERMINAL_DIFFICULTY: u64 = 6400; -pub const TERMINAL_BLOCK: u64 = 64; +pub const TERMINAL_BLOCK: u64 = 0; + +pub struct LocalNetworkParams { + pub validator_count: usize, + pub node_count: usize, + pub proposer_nodes: usize, + pub genesis_delay: u64, +} + +fn default_client_config(network_params: LocalNetworkParams, genesis_time: u64) -> ClientConfig { + let mut beacon_config = testing_client_config(); + + beacon_config.genesis = ClientGenesis::InteropMerge { + validator_count: network_params.validator_count, + genesis_time, + }; + beacon_config.network.target_peers = + network_params.node_count + network_params.proposer_nodes - 1; + beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); + beacon_config.network.enable_light_client_server = true; + beacon_config.network.discv5_config.enable_packet_filter = false; + beacon_config.chain.enable_light_client_server = true; + beacon_config.http_api.enable_light_client_server = true; + beacon_config.chain.optimistic_finalized_sync = false; + beacon_config.trusted_setup = + serde_json::from_reader(TRUSTED_SETUP_BYTES).expect("Trusted setup bytes should be valid"); + + let el_config = execution_layer::Config { + execution_endpoint: Some( + SensitiveUrl::parse(&format!("http://localhost:{}", EXECUTION_PORT)).unwrap(), + ), + ..Default::default() + }; + beacon_config.execution_layer = Some(el_config); + beacon_config +} + +fn default_mock_execution_config( + spec: &ChainSpec, + genesis_time: u64, +) -> MockExecutionConfig { + let mut mock_execution_config = MockExecutionConfig { + server_config: MockServerConfig { + listen_port: EXECUTION_PORT, + ..Default::default() + }, + ..Default::default() + }; + + if let Some(capella_fork_epoch) = spec.capella_fork_epoch { + mock_execution_config.shanghai_time = Some( + genesis_time + + spec.seconds_per_slot * E::slots_per_epoch() * capella_fork_epoch.as_u64(), + ) + } + if 
let Some(deneb_fork_epoch) = spec.deneb_fork_epoch { + mock_execution_config.cancun_time = Some( + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * deneb_fork_epoch.as_u64(), + ) + } + if let Some(electra_fork_epoch) = spec.electra_fork_epoch { + mock_execution_config.prague_time = Some( + genesis_time + + spec.seconds_per_slot * E::slots_per_epoch() * electra_fork_epoch.as_u64(), + ) + } + + mock_execution_config +} /// Helper struct to reduce `Arc` usage. pub struct Inner { @@ -55,56 +124,41 @@ impl Deref for LocalNetwork { } impl LocalNetwork { - /// Creates a new network with a single `BeaconNode` and a connected `ExecutionNode`. - pub async fn new( + pub async fn create_local_network( + client_config: Option, + mock_execution_config: Option, + network_params: LocalNetworkParams, context: RuntimeContext, - mut beacon_config: ClientConfig, - ) -> Result { - beacon_config.network.set_ipv4_listening_address( - std::net::Ipv4Addr::UNSPECIFIED, - BOOTNODE_PORT, - BOOTNODE_PORT, - QUIC_PORT, - ); - beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); - beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); - beacon_config.network.discv5_config.table_filter = |_| true; + ) -> Result<(LocalNetwork, ClientConfig, MockExecutionConfig), String> { + let genesis_time: u64 = (SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|_| "should get system time")? + + Duration::from_secs(network_params.genesis_delay)) + .as_secs(); - let execution_node = if let Some(el_config) = &mut beacon_config.execution_layer { - let mock_execution_config = MockExecutionConfig { - server_config: MockServerConfig { - listen_port: EXECUTION_PORT, - ..Default::default() - }, - terminal_block: TERMINAL_BLOCK, - terminal_difficulty: TERMINAL_DIFFICULTY.into(), - ..Default::default() - }; - let execution_node = LocalExecutionNode::new( - context.service_context("boot_node_el".into()), - mock_execution_config, - ); - el_config.default_datadir = execution_node.datadir.path().to_path_buf(); - el_config.secret_file = Some(execution_node.datadir.path().join("jwt.hex")); - el_config.execution_endpoint = - Some(SensitiveUrl::parse(&execution_node.server.url()).unwrap()); - vec![execution_node] + let beacon_config = if let Some(config) = client_config { + config } else { - vec![] + default_client_config(network_params, genesis_time) }; - let beacon_node = - LocalBeaconNode::production(context.service_context("boot_node".into()), beacon_config) - .await?; - Ok(Self { + let execution_config = if let Some(config) = mock_execution_config { + config + } else { + default_mock_execution_config::(&context.eth2_config().spec, genesis_time) + }; + + let network = Self { inner: Arc::new(Inner { context, - beacon_nodes: RwLock::new(vec![beacon_node]), + beacon_nodes: RwLock::new(vec![]), proposer_nodes: RwLock::new(vec![]), - execution_nodes: RwLock::new(execution_node), + execution_nodes: RwLock::new(vec![]), validator_clients: RwLock::new(vec![]), }), - }) + }; + + Ok((network, beacon_config, execution_config)) } /// Returns the number of beacon nodes in the network. @@ -131,77 +185,151 @@ impl LocalNetwork { self.validator_clients.read().len() } - /// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR. 
- pub async fn add_beacon_node( + async fn construct_boot_node( + &self, + mut beacon_config: ClientConfig, + mock_execution_config: MockExecutionConfig, + ) -> Result<(LocalBeaconNode, LocalExecutionNode), String> { + beacon_config.network.set_ipv4_listening_address( + std::net::Ipv4Addr::UNSPECIFIED, + BOOTNODE_PORT, + BOOTNODE_PORT, + QUIC_PORT, + ); + + beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); + beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); + beacon_config.network.discv5_config.table_filter = |_| true; + + let execution_node = LocalExecutionNode::new( + self.context.service_context("boot_node_el".into()), + mock_execution_config, + ); + + beacon_config.execution_layer = Some(execution_layer::Config { + execution_endpoint: Some(SensitiveUrl::parse(&execution_node.server.url()).unwrap()), + default_datadir: execution_node.datadir.path().to_path_buf(), + secret_file: Some(execution_node.datadir.path().join("jwt.hex")), + ..Default::default() + }); + + let beacon_node = LocalBeaconNode::production( + self.context.service_context("boot_node".into()), + beacon_config, + ) + .await?; + + Ok((beacon_node, execution_node)) + } + + async fn construct_beacon_node( &self, mut beacon_config: ClientConfig, + mut mock_execution_config: MockExecutionConfig, is_proposer: bool, - ) -> Result<(), String> { - let self_1 = self.clone(); - let count = self.beacon_node_count() as u16; - println!("Adding beacon node.."); - { - let read_lock = self.beacon_nodes.read(); + ) -> Result<(LocalBeaconNode, LocalExecutionNode), String> { + let count = (self.beacon_node_count() + self.proposer_node_count()) as u16; - let boot_node = read_lock.first().expect("should have at least one node"); + // Set config. 
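+ // Ports are derived deterministically from the node's index (`count`): libp2p and
+ // discv5 share BOOTNODE_PORT + count, QUIC uses QUIC_PORT + count, and the paired
+ // mock EL listens on EXECUTION_PORT + count, so concurrently-running nodes never
+ // collide.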
+ let libp2p_tcp_port = BOOTNODE_PORT + count; + let discv5_port = BOOTNODE_PORT + count; + beacon_config.network.set_ipv4_listening_address( + std::net::Ipv4Addr::UNSPECIFIED, + libp2p_tcp_port, + discv5_port, + QUIC_PORT + count, + ); + beacon_config.network.enr_udp4_port = Some(discv5_port.try_into().unwrap()); + beacon_config.network.enr_tcp4_port = Some(libp2p_tcp_port.try_into().unwrap()); + beacon_config.network.discv5_config.table_filter = |_| true; + beacon_config.network.proposer_only = is_proposer; - beacon_config.network.boot_nodes_enr.push( - boot_node - .client - .enr() - .expect("bootnode must have a network"), - ); - let count = (self.beacon_node_count() + self.proposer_node_count()) as u16; - let libp2p_tcp_port = BOOTNODE_PORT + count; - let discv5_port = BOOTNODE_PORT + count; - beacon_config.network.set_ipv4_listening_address( - std::net::Ipv4Addr::UNSPECIFIED, - libp2p_tcp_port, - discv5_port, - QUIC_PORT + count, - ); - beacon_config.network.enr_udp4_port = Some(discv5_port.try_into().unwrap()); - beacon_config.network.enr_tcp4_port = Some(libp2p_tcp_port.try_into().unwrap()); - beacon_config.network.discv5_config.table_filter = |_| true; - beacon_config.network.proposer_only = is_proposer; - } - if let Some(el_config) = &mut beacon_config.execution_layer { - let config = MockExecutionConfig { - server_config: MockServerConfig { - listen_port: EXECUTION_PORT + count, - ..Default::default() - }, - terminal_block: TERMINAL_BLOCK, - terminal_difficulty: TERMINAL_DIFFICULTY.into(), - ..Default::default() - }; - let execution_node = LocalExecutionNode::new( - self.context.service_context(format!("node_{}_el", count)), - config, - ); - el_config.default_datadir = execution_node.datadir.path().to_path_buf(); - el_config.secret_file = Some(execution_node.datadir.path().join("jwt.hex")); - el_config.execution_endpoint = - Some(SensitiveUrl::parse(&execution_node.server.url()).unwrap()); - self.execution_nodes.write().push(execution_node); - } + mock_execution_config.server_config.listen_port = EXECUTION_PORT + count; - // We create the beacon node without holding the lock, so that the lock isn't held - // across the await. This is only correct if this function never runs in parallel - // with itself (which at the time of writing, it does not). + // Construct execution node. + let execution_node = LocalExecutionNode::new( + self.context.service_context(format!("node_{}_el", count)), + mock_execution_config, + ); + + // Pair the beacon node and execution node. + beacon_config.execution_layer = Some(execution_layer::Config { + execution_endpoint: Some(SensitiveUrl::parse(&execution_node.server.url()).unwrap()), + default_datadir: execution_node.datadir.path().to_path_buf(), + secret_file: Some(execution_node.datadir.path().join("jwt.hex")), + ..Default::default() + }); + + // Construct beacon node using the config, let beacon_node = LocalBeaconNode::production( self.context.service_context(format!("node_{}", count)), beacon_config, ) .await?; + + Ok((beacon_node, execution_node)) + } + + /// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR. + pub async fn add_beacon_node( + &self, + mut beacon_config: ClientConfig, + mock_execution_config: MockExecutionConfig, + is_proposer: bool, + ) -> Result<(), String> { + let first_bn_exists: bool; + { + let read_lock = self.beacon_nodes.read(); + let boot_node = read_lock.first(); + first_bn_exists = boot_node.is_some(); + + if let Some(boot_node) = boot_node { + // Modify beacon_config to add boot node details. 
+ beacon_config.network.boot_nodes_enr.push( + boot_node + .client + .enr() + .expect("Bootnode must have a network."), + ); + } + } + let (beacon_node, execution_node) = if first_bn_exists { + // Network already exists. We construct a new node. + self.construct_beacon_node(beacon_config, mock_execution_config, is_proposer) + .await? + } else { + // Network does not exist. We construct a boot node. + self.construct_boot_node(beacon_config, mock_execution_config) + .await? + }; + // Add nodes to the network. + self.execution_nodes.write().push(execution_node); if is_proposer { - self_1.proposer_nodes.write().push(beacon_node); + self.proposer_nodes.write().push(beacon_node); } else { - self_1.beacon_nodes.write().push(beacon_node); + self.beacon_nodes.write().push(beacon_node); } Ok(()) } + // Add a new node with a delay. This node will not have validators and is only used to test + // sync. + pub async fn add_beacon_node_with_delay( + &self, + beacon_config: ClientConfig, + mock_execution_config: MockExecutionConfig, + wait_until_epoch: u64, + slot_duration: Duration, + slots_per_epoch: u64, + ) -> Result<(), String> { + epoch_delay(Epoch::new(wait_until_epoch), slot_duration, slots_per_epoch).await; + + self.add_beacon_node(beacon_config, mock_execution_config, false) + .await?; + + Ok(()) + } + /// Adds a validator client to the network, connecting it to the beacon node with index /// `beacon_node`. pub async fn add_validator_client( @@ -209,7 +337,6 @@ impl LocalNetwork { mut validator_config: ValidatorConfig, beacon_node: usize, validator_files: ValidatorFiles, - invalid_first_beacon_node: bool, //to test beacon node fallbacks ) -> Result<(), String> { let context = self .context @@ -240,11 +367,7 @@ impl LocalNetwork { format!("http://{}:{}", socket_addr.ip(), socket_addr.port()).as_str(), ) .unwrap(); - validator_config.beacon_nodes = if invalid_first_beacon_node { - vec![SensitiveUrl::parse(INVALID_ADDRESS).unwrap(), beacon_node] - } else { - vec![beacon_node] - }; + validator_config.beacon_nodes = vec![beacon_node]; // If we have a proposer node established, use it. if let Some(proposer_socket_addr) = proposer_socket_addr { @@ -293,11 +416,11 @@ impl LocalNetwork { .http_api_listen_addr() .expect("Must have http started") }; - let beacon_node = SensitiveUrl::parse( + let beacon_node_url = SensitiveUrl::parse( format!("http://{}:{}", socket_addr.ip(), socket_addr.port()).as_str(), ) .unwrap(); - beacon_node_urls.push(beacon_node); + beacon_node_urls.push(beacon_node_url); } validator_config.beacon_nodes = beacon_node_urls; @@ -325,7 +448,7 @@ impl LocalNetwork { } /// Return current epoch of bootnode. 
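+ /// Currently unused by the simulations, hence the leading underscore in the name.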
- pub async fn bootnode_epoch(&self) -> Result { + pub async fn _bootnode_epoch(&self) -> Result { let nodes = self.remote_nodes().expect("Failed to get remote nodes"); let bootnode = nodes.first().expect("Should contain bootnode"); bootnode @@ -335,16 +458,6 @@ impl LocalNetwork { .map(|body| body.unwrap().data.finalized.epoch) } - pub fn mine_pow_blocks(&self, block_number: u64) -> Result<(), String> { - let execution_nodes = self.execution_nodes.read(); - for execution_node in execution_nodes.iter() { - let mut block_gen = execution_node.server.ctx.execution_block_generator.write(); - block_gen.insert_pow_block(block_number)?; - println!("Mined pow block {}", block_number); - } - Ok(()) - } - pub async fn duration_to_genesis(&self) -> Duration { let nodes = self.remote_nodes().expect("Failed to get remote nodes"); let bootnode = nodes.first().expect("Should contain bootnode"); diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index e8af9c18067..d1a2d0dc672 100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -1,10 +1,8 @@ -//! This crate provides a simluation that creates `n` beacon node and validator clients, each with -//! `v` validators. A deposit contract is deployed at the start of the simulation using a local -//! `anvil` instance (you must have `anvil` installed and avaliable on your path). All -//! beacon nodes independently listen for genesis from the deposit contract, then start operating. +//! This crate provides various simulations that create both beacon nodes and validator clients, +//! each with `v` validators. //! -//! As the simulation runs, there are checks made to ensure that all components are running -//! correctly. If any of these checks fail, the simulation will exit immediately. +//! When a simulation runs, there are checks made to ensure that all components are operating +//! as expected. If any of these checks fail, the simulation will exit immediately. //! //! ## Future works //! 
@@ -16,13 +14,12 @@ #[macro_use] extern crate clap; +mod basic_sim; mod checks; mod cli; -mod eth1_sim; +mod fallback_sim; mod local_network; -mod no_eth1_sim; mod retry; -mod sync_sim; use cli::cli_app; use env_logger::{Builder, Env}; @@ -37,21 +34,14 @@ fn main() { let matches = cli_app().get_matches(); match matches.subcommand() { - ("eth1-sim", Some(matches)) => match eth1_sim::run_eth1_sim(matches) { + ("basic-sim", Some(matches)) => match basic_sim::run_basic_sim(matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e); std::process::exit(1) } }, - ("no-eth1-sim", Some(matches)) => match no_eth1_sim::run_no_eth1_sim(matches) { - Ok(()) => println!("Simulation exited successfully"), - Err(e) => { - eprintln!("Simulation exited with error: {}", e); - std::process::exit(1) - } - }, - ("syncing-sim", Some(matches)) => match sync_sim::run_syncing_sim(matches) { + ("fallback-sim", Some(matches)) => match fallback_sim::run_fallback_sim(matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e); diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs deleted file mode 100644 index fc18b1cd489..00000000000 --- a/testing/simulator/src/no_eth1_sim.rs +++ /dev/null @@ -1,172 +0,0 @@ -use crate::{checks, LocalNetwork}; -use clap::ArgMatches; -use futures::prelude::*; -use node_test_rig::{ - environment::{EnvironmentBuilder, LoggerConfig}, - testing_client_config, testing_validator_config, ClientGenesis, ValidatorFiles, -}; -use rayon::prelude::*; -use std::cmp::max; -use std::net::Ipv4Addr; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use tokio::time::sleep; -use types::{Epoch, EthSpec, MainnetEthSpec}; - -pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { - let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default"); - let validators_per_node = value_t!(matches, "validators_per_node", usize) - .expect("missing validators_per_node default"); - let speed_up_factor = - value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default"); - let continue_after_checks = matches.is_present("continue_after_checks"); - - println!("Beacon Chain Simulator:"); - println!(" nodes:{}", node_count); - println!(" validators_per_node:{}", validators_per_node); - println!(" continue_after_checks:{}", continue_after_checks); - - // Generate the directories and keystores required for the validator clients. - let validator_files = (0..node_count) - .into_par_iter() - .map(|i| { - println!( - "Generating keystores for validator {} of {}", - i + 1, - node_count - ); - - let indices = - (i * validators_per_node..(i + 1) * validators_per_node).collect::>(); - ValidatorFiles::with_keystores(&indices).unwrap() - }) - .collect::>(); - - let mut env = EnvironmentBuilder::mainnet() - .initialize_logger(LoggerConfig { - path: None, - debug_level: String::from("debug"), - logfile_debug_level: String::from("debug"), - log_format: None, - logfile_format: None, - log_color: false, - disable_log_timestamp: false, - max_log_size: 0, - max_log_number: 0, - compression: false, - is_restricted: true, - sse_logging: false, - })? - .multi_threaded_tokio_runtime()? 
- .build()?; - - let eth1_block_time = Duration::from_millis(15_000 / speed_up_factor); - - let spec = &mut env.eth2_config.spec; - - let total_validator_count = validators_per_node * node_count; - - spec.seconds_per_slot /= speed_up_factor; - spec.seconds_per_slot = max(1, spec.seconds_per_slot); - spec.eth1_follow_distance = 16; - spec.genesis_delay = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2; - spec.min_genesis_time = 0; - spec.min_genesis_active_validator_count = total_validator_count as u64; - spec.seconds_per_eth1_block = 1; - - let genesis_delay = Duration::from_secs(5); - let genesis_time = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|_| "should get system time")? - + genesis_delay; - - let slot_duration = Duration::from_secs(spec.seconds_per_slot); - - let context = env.core_context(); - - let mut beacon_config = testing_client_config(); - - beacon_config.genesis = ClientGenesis::Interop { - validator_count: total_validator_count, - genesis_time: genesis_time.as_secs(), - }; - beacon_config.dummy_eth1_backend = true; - beacon_config.sync_eth1_chain = true; - - beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); - - let main_future = async { - let network = LocalNetwork::new(context.clone(), beacon_config.clone()).await?; - /* - * One by one, add beacon nodes to the network. - */ - - for _ in 0..node_count - 1 { - network - .add_beacon_node(beacon_config.clone(), false) - .await?; - } - - /* - * Create a future that will add validator clients to the network. Each validator client is - * attached to a single corresponding beacon node. Spawn each validator in a new task. - */ - let executor = context.executor.clone(); - for (i, files) in validator_files.into_iter().enumerate() { - let network_1 = network.clone(); - executor.spawn( - async move { - println!("Adding validator client {}", i); - network_1 - .add_validator_client(testing_validator_config(), i, files, i % 2 == 0) - .await - .expect("should add validator"); - }, - "vc", - ); - } - - let duration_to_genesis = network.duration_to_genesis().await; - println!("Duration to genesis: {}", duration_to_genesis.as_secs()); - sleep(duration_to_genesis).await; - - let (finalization, block_prod) = futures::join!( - // Check that the chain finalizes at the first given opportunity. - checks::verify_first_finalization(network.clone(), slot_duration), - // Check that a block is produced at every slot. - checks::verify_full_block_production_up_to( - network.clone(), - Epoch::new(4).start_slot(MainnetEthSpec::slots_per_epoch()), - slot_duration, - ), - ); - finalization?; - block_prod?; - - // The `final_future` either completes immediately or never completes, depending on the value - // of `continue_after_checks`. - - if continue_after_checks { - future::pending::<()>().await; - } - /* - * End the simulation by dropping the network. This will kill all running beacon nodes and - * validator clients. - */ - println!( - "Simulation complete. Finished with {} beacon nodes and {} validator clients", - network.beacon_node_count() + network.proposer_node_count(), - network.validator_client_count() - ); - - // Be explicit about dropping the network, as this kills all the nodes. This ensures - // all the checks have adequate time to pass. 
- drop(network); - Ok::<(), String>(()) - }; - - env.runtime().block_on(main_future).unwrap(); - - env.fire_signal(); - env.shutdown_on_idle(); - Ok(()) -} diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs deleted file mode 100644 index ba4ea4530af..00000000000 --- a/testing/simulator/src/sync_sim.rs +++ /dev/null @@ -1,390 +0,0 @@ -use crate::checks::{epoch_delay, verify_all_finalized_at}; -use crate::local_network::LocalNetwork; -use clap::ArgMatches; -use futures::prelude::*; -use node_test_rig::{ - environment::{EnvironmentBuilder, LoggerConfig}, - testing_client_config, ClientGenesis, ValidatorFiles, -}; -use node_test_rig::{testing_validator_config, ClientConfig}; -use std::cmp::max; -use std::net::Ipv4Addr; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use types::{Epoch, EthSpec}; - -pub fn run_syncing_sim(matches: &ArgMatches) -> Result<(), String> { - let initial_delay = value_t!(matches, "initial_delay", u64).unwrap(); - let sync_timeout = value_t!(matches, "sync_timeout", u64).unwrap(); - let speed_up_factor = value_t!(matches, "speedup", u64).unwrap(); - let strategy = value_t!(matches, "strategy", String).unwrap(); - - println!("Syncing Simulator:"); - println!(" initial_delay:{}", initial_delay); - println!(" sync timeout: {}", sync_timeout); - println!(" speed up factor:{}", speed_up_factor); - println!(" strategy:{}", strategy); - - let log_level = "debug"; - let log_format = None; - - syncing_sim( - speed_up_factor, - initial_delay, - sync_timeout, - strategy, - log_level, - log_format, - ) -} - -fn syncing_sim( - speed_up_factor: u64, - initial_delay: u64, - sync_timeout: u64, - strategy: String, - log_level: &str, - log_format: Option<&str>, -) -> Result<(), String> { - let mut env = EnvironmentBuilder::minimal() - .initialize_logger(LoggerConfig { - path: None, - debug_level: String::from(log_level), - logfile_debug_level: String::from("debug"), - log_format: log_format.map(String::from), - logfile_format: None, - log_color: false, - disable_log_timestamp: false, - max_log_size: 0, - max_log_number: 0, - compression: false, - is_restricted: true, - sse_logging: false, - })? - .multi_threaded_tokio_runtime()? - .build()?; - - let spec = &mut env.eth2_config.spec; - let end_after_checks = true; - let eth1_block_time = Duration::from_millis(15_000 / speed_up_factor); - - // Set fork epochs to test syncing across fork boundaries - spec.altair_fork_epoch = Some(Epoch::new(1)); - spec.bellatrix_fork_epoch = Some(Epoch::new(2)); - spec.seconds_per_slot /= speed_up_factor; - spec.seconds_per_slot = max(1, spec.seconds_per_slot); - spec.eth1_follow_distance = 16; - spec.genesis_delay = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2; - spec.min_genesis_time = 0; - spec.min_genesis_active_validator_count = 64; - spec.seconds_per_eth1_block = 1; - - let num_validators = 8; - let slot_duration = Duration::from_secs(spec.seconds_per_slot); - let context = env.core_context(); - let mut beacon_config = testing_client_config(); - - let genesis_time = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|_| "should get system time")? - + Duration::from_secs(5); - beacon_config.genesis = ClientGenesis::Interop { - validator_count: num_validators, - genesis_time: genesis_time.as_secs(), - }; - beacon_config.dummy_eth1_backend = true; - beacon_config.sync_eth1_chain = true; - - beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); - - // Generate the directories and keystores required for the validator clients. 
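// Both deleted simulators derive validator key indices as contiguous ranges:
// one flat range for a single client (as below), or one slice per node as in
// the removed `no_eth1_sim`. A minimal sketch of the per-node partition
// (illustrative helper, not part of this patch):
fn validator_indices_for_node(node: usize, validators_per_node: usize) -> Vec<usize> {
    (node * validators_per_node..(node + 1) * validators_per_node).collect()
}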
- let validator_indices = (0..num_validators).collect::>(); - let validator_files = ValidatorFiles::with_keystores(&validator_indices).unwrap(); - - let main_future = async { - /* - * Create a new `LocalNetwork` with one beacon node. - */ - let network = LocalNetwork::new(context, beacon_config.clone()).await?; - - /* - * Add a validator client which handles all validators from the genesis state. - */ - network - .add_validator_client(testing_validator_config(), 0, validator_files, true) - .await?; - - // Check all syncing strategies one after other. - pick_strategy( - &strategy, - network.clone(), - beacon_config.clone(), - slot_duration, - initial_delay, - sync_timeout, - ) - .await?; - - // The `final_future` either completes immediately or never completes, depending on the value - // of `end_after_checks`. - - if !end_after_checks { - future::pending::<()>().await; - } - - /* - * End the simulation by dropping the network. This will kill all running beacon nodes and - * validator clients. - */ - println!( - "Simulation complete. Finished with {} beacon nodes and {} validator clients", - network.beacon_node_count(), - network.validator_client_count() - ); - - // Be explicit about dropping the network, as this kills all the nodes. This ensures - // all the checks have adequate time to pass. - drop(network); - Ok::<(), String>(()) - }; - - env.runtime().block_on(main_future).unwrap(); - - env.fire_signal(); - env.shutdown_on_idle(); - - Ok(()) -} - -pub async fn pick_strategy( - strategy: &str, - network: LocalNetwork, - beacon_config: ClientConfig, - slot_duration: Duration, - initial_delay: u64, - sync_timeout: u64, -) -> Result<(), String> { - match strategy { - "one-node" => { - verify_one_node_sync( - network, - beacon_config, - slot_duration, - initial_delay, - sync_timeout, - ) - .await - } - "two-nodes" => { - verify_two_nodes_sync( - network, - beacon_config, - slot_duration, - initial_delay, - sync_timeout, - ) - .await - } - "mixed" => { - verify_in_between_sync( - network, - beacon_config, - slot_duration, - initial_delay, - sync_timeout, - ) - .await - } - "all" => { - verify_syncing( - network, - beacon_config, - slot_duration, - initial_delay, - sync_timeout, - ) - .await - } - _ => Err("Invalid strategy".into()), - } -} - -/// Verify one node added after `initial_delay` epochs is in sync -/// after `sync_timeout` epochs. -pub async fn verify_one_node_sync( - network: LocalNetwork, - beacon_config: ClientConfig, - slot_duration: Duration, - initial_delay: u64, - sync_timeout: u64, -) -> Result<(), String> { - let epoch_duration = slot_duration * (E::slots_per_epoch() as u32); - let network_c = network.clone(); - // Delay for `initial_delay` epochs before adding another node to start syncing - epoch_delay( - Epoch::new(initial_delay), - slot_duration, - E::slots_per_epoch(), - ) - .await; - // Add a beacon node - network.add_beacon_node(beacon_config, false).await?; - // Check every `epoch_duration` if nodes are synced - // limited to at most `sync_timeout` epochs - let mut interval = tokio::time::interval(epoch_duration); - let mut count = 0; - loop { - interval.tick().await; - if count >= sync_timeout || !check_still_syncing(&network_c).await? { - break; - } - count += 1; - } - let epoch = network.bootnode_epoch().await?; - verify_all_finalized_at(network, epoch) - .map_err(|e| format!("One node sync error: {}", e)) - .await -} - -/// Verify two nodes added after `initial_delay` epochs are in sync -/// after `sync_timeout` epochs. 
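// Each strategy in this deleted module repeats the same bounded polling loop:
// tick once per epoch, stop after `sync_timeout` epochs or as soon as no node
// reports syncing, then assert finalization. A minimal standalone sketch of
// that loop (`wait_for_sync` and its signature are illustrative, not part of
// this patch):
async fn wait_for_sync<F, Fut>(
    mut still_syncing: F,
    epoch_duration: std::time::Duration,
    sync_timeout: u64,
) -> Result<(), String>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<bool, String>>,
{
    let mut interval = tokio::time::interval(epoch_duration);
    let mut count: u64 = 0;
    loop {
        interval.tick().await;
        if count >= sync_timeout || !still_syncing().await? {
            return Ok(());
        }
        count += 1;
    }
}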
-pub async fn verify_two_nodes_sync( - network: LocalNetwork, - beacon_config: ClientConfig, - slot_duration: Duration, - initial_delay: u64, - sync_timeout: u64, -) -> Result<(), String> { - let epoch_duration = slot_duration * (E::slots_per_epoch() as u32); - let network_c = network.clone(); - // Delay for `initial_delay` epochs before adding another node to start syncing - epoch_delay( - Epoch::new(initial_delay), - slot_duration, - E::slots_per_epoch(), - ) - .await; - // Add beacon nodes - network - .add_beacon_node(beacon_config.clone(), false) - .await?; - network.add_beacon_node(beacon_config, false).await?; - // Check every `epoch_duration` if nodes are synced - // limited to at most `sync_timeout` epochs - let mut interval = tokio::time::interval(epoch_duration); - let mut count = 0; - loop { - interval.tick().await; - if count >= sync_timeout || !check_still_syncing(&network_c).await? { - break; - } - count += 1; - } - let epoch = network.bootnode_epoch().await?; - verify_all_finalized_at(network, epoch) - .map_err(|e| format!("One node sync error: {}", e)) - .await -} - -/// Add 2 syncing nodes after `initial_delay` epochs, -/// Add another node after `sync_timeout - 5` epochs and verify all are -/// in sync after `sync_timeout + 5` epochs. -pub async fn verify_in_between_sync( - network: LocalNetwork, - beacon_config: ClientConfig, - slot_duration: Duration, - initial_delay: u64, - sync_timeout: u64, -) -> Result<(), String> { - let epoch_duration = slot_duration * (E::slots_per_epoch() as u32); - let network_c = network.clone(); - // Delay for `initial_delay` epochs before adding another node to start syncing - let config1 = beacon_config.clone(); - epoch_delay( - Epoch::new(initial_delay), - slot_duration, - E::slots_per_epoch(), - ) - .await; - // Add two beacon nodes - network - .add_beacon_node(beacon_config.clone(), false) - .await?; - network.add_beacon_node(beacon_config, false).await?; - // Delay before adding additional syncing nodes. - epoch_delay( - Epoch::new(sync_timeout - 5), - slot_duration, - E::slots_per_epoch(), - ) - .await; - // Add a beacon node - network.add_beacon_node(config1.clone(), false).await?; - // Check every `epoch_duration` if nodes are synced - // limited to at most `sync_timeout` epochs - let mut interval = tokio::time::interval(epoch_duration); - let mut count = 0; - loop { - interval.tick().await; - if count >= sync_timeout || !check_still_syncing(&network_c).await? { - break; - } - count += 1; - } - let epoch = network.bootnode_epoch().await?; - verify_all_finalized_at(network, epoch) - .map_err(|e| format!("One node sync error: {}", e)) - .await -} - -/// Run syncing strategies one after other. 
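// The "mixed" strategy above runs on a three-step epoch timeline. A minimal
// sketch of that arithmetic (illustrative helper, not part of the patch; note
// the deleted code computed `sync_timeout - 5` on a u64 without guarding
// against underflow when sync_timeout < 5):
fn mixed_strategy_timeline(initial_delay: u64, sync_timeout: u64) -> [(u64, &'static str); 3] {
    let two_nodes_at = initial_delay;
    let third_node_at = two_nodes_at + sync_timeout.saturating_sub(5);
    let last_poll_at = third_node_at + sync_timeout;
    [
        (two_nodes_at, "add two syncing beacon nodes"),
        (third_node_at, "add a third beacon node"),
        (last_poll_at, "final poll before the finalization check"),
    ]
}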
-pub async fn verify_syncing( - network: LocalNetwork, - beacon_config: ClientConfig, - slot_duration: Duration, - initial_delay: u64, - sync_timeout: u64, -) -> Result<(), String> { - verify_one_node_sync( - network.clone(), - beacon_config.clone(), - slot_duration, - initial_delay, - sync_timeout, - ) - .await?; - println!("Completed one node sync"); - verify_two_nodes_sync( - network.clone(), - beacon_config.clone(), - slot_duration, - initial_delay, - sync_timeout, - ) - .await?; - println!("Completed two node sync"); - verify_in_between_sync( - network, - beacon_config, - slot_duration, - initial_delay, - sync_timeout, - ) - .await?; - println!("Completed in between sync"); - Ok(()) -} - -pub async fn check_still_syncing(network: &LocalNetwork) -> Result { - // get syncing status of nodes - let mut status = Vec::new(); - for remote_node in network.remote_nodes()? { - status.push( - remote_node - .get_node_syncing() - .await - .map(|body| body.data.is_syncing) - .map_err(|e| format!("Get syncing status via http failed: {:?}", e))?, - ) - } - Ok(status.iter().any(|is_syncing| *is_syncing)) -} diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 445d4f1a5d9..06d484a52bb 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -892,6 +892,7 @@ impl UnsignedBlock { } } +#[derive(Debug)] pub enum SignedBlock { Full(PublishBlockRequest), Blinded(Arc>), From f7aca97a55fe33f51987b1b72851ff506c72f31a Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 23 Apr 2024 01:06:39 +0900 Subject: [PATCH 06/13] Handle sync lookup request streams in network context (#5583) * by-root-stream-terminator * Fix tests * Resolve merge conflicts * Log report reason * Some lints and bugfixes (#23) * fix lints * bug fixes * Fix tests * Merge branch 'unstable' of https://github.com/sigp/lighthouse into handle-sync-lookup-requests * Pr 5583 review (#24) * add bad state warn log * add rust docs to new fields in `SyncNetworkContext` * remove timestamp todo * add back lookup verify error * remove TODOs --- .../network/src/sync/block_lookups/common.rs | 156 ++------- .../network/src/sync/block_lookups/mod.rs | 317 ++++++++---------- .../src/sync/block_lookups/parent_lookup.rs | 70 +--- .../sync/block_lookups/single_block_lookup.rs | 72 ++-- .../network/src/sync/block_lookups/tests.rs | 13 +- beacon_node/network/src/sync/manager.rs | 203 ++++++----- .../network/src/sync/network_context.rs | 202 +++++++++-- .../src/sync/network_context/requests.rs | 149 ++++++++ 8 files changed, 643 insertions(+), 539 deletions(-) create mode 100644 beacon_node/network/src/sync/network_context/requests.rs diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 8f7881eea8a..3bd39301b21 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -1,22 +1,21 @@ use crate::sync::block_lookups::parent_lookup::PARENT_FAIL_TOLERANCE; use crate::sync::block_lookups::single_block_lookup::{ - LookupRequestError, LookupVerifyError, SingleBlockLookup, SingleLookupRequestState, State, + LookupRequestError, SingleBlockLookup, SingleLookupRequestState, }; use crate::sync::block_lookups::{ BlobRequestState, BlockLookups, BlockRequestState, PeerId, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, }; use crate::sync::manager::{BlockProcessType, Id, SingleLookupReqId}; -use 
crate::sync::network_context::SyncNetworkContext; +use crate::sync::network_context::{ + BlobsByRootSingleBlockRequest, BlocksByRootSingleRequest, SyncNetworkContext, +}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::data_availability_checker::ChildComponents; -use beacon_chain::{get_block_root, BeaconChainTypes}; -use lighthouse_network::rpc::methods::BlobsByRootRequest; -use lighthouse_network::rpc::BlocksByRootRequest; -use std::ops::IndexMut; +use beacon_chain::BeaconChainTypes; use std::sync::Arc; use std::time::Duration; -use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; -use types::{BlobSidecar, ChainSpec, Hash256, SignedBeaconBlock}; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{Hash256, SignedBeaconBlock}; #[derive(Debug, Copy, Clone)] pub enum ResponseType { @@ -73,9 +72,6 @@ pub trait RequestState { /// The type of the request . type RequestType; - /// A block or blob response. - type ResponseType; - /// The type created after validation. type VerifiedResponseType: Clone; @@ -85,14 +81,11 @@ pub trait RequestState { /* Request building methods */ /// Construct a new request. - fn build_request( - &mut self, - spec: &ChainSpec, - ) -> Result<(PeerId, Self::RequestType), LookupRequestError> { + fn build_request(&mut self) -> Result<(PeerId, Self::RequestType), LookupRequestError> { // Verify and construct request. self.too_many_attempts()?; let peer = self.get_peer()?; - let request = self.new_request(spec); + let request = self.new_request(); Ok((peer, request)) } @@ -100,7 +93,7 @@ pub trait RequestState { fn build_request_and_send( &mut self, id: Id, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, ) -> Result<(), LookupRequestError> { // Check if request is necessary. if !self.get_state().is_awaiting_download() { @@ -108,7 +101,7 @@ pub trait RequestState { } // Construct request. - let (peer_id, request) = self.build_request(&cx.chain.spec)?; + let (peer_id, request) = self.build_request()?; // Update request state. let req_counter = self.get_state_mut().on_download_start(peer_id); @@ -144,61 +137,18 @@ pub trait RequestState { } /// Initialize `Self::RequestType`. - fn new_request(&self, spec: &ChainSpec) -> Self::RequestType; + fn new_request(&self) -> Self::RequestType; /// Send the request to the network service. fn make_request( id: SingleLookupReqId, peer_id: PeerId, request: Self::RequestType, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, ) -> Result<(), LookupRequestError>; /* Response handling methods */ - /// Verify the response is valid based on what we requested. - fn verify_response( - &mut self, - expected_block_root: Hash256, - peer_id: PeerId, - response: Option, - ) -> Result, LookupVerifyError> { - let result = match *self.get_state().get_state() { - State::AwaitingDownload => Err(LookupVerifyError::ExtraBlocksReturned), - State::Downloading { peer_id: _ } => { - // TODO: We requested a download from Downloading { peer_id }, but the network - // injects a response from a different peer_id. What should we do? The peer_id to - // track for scoring is the one that actually sent the response, not the state's - self.verify_response_inner(expected_block_root, response) - } - State::Processing { .. } | State::Processed { .. } => match response { - // We sent the block for processing and received an extra block. 
- Some(_) => Err(LookupVerifyError::ExtraBlocksReturned), - // This is simply the stream termination and we are already processing the block - None => Ok(None), - }, - }; - - match result { - Ok(Some(response)) => { - self.get_state_mut().on_download_success(peer_id); - Ok(Some(response)) - } - Ok(None) => Ok(None), - Err(e) => { - self.get_state_mut().on_download_failure(); - Err(e) - } - } - } - - /// The response verification unique to block or blobs. - fn verify_response_inner( - &mut self, - expected_block_root: Hash256, - response: Option, - ) -> Result, LookupVerifyError>; - /// A getter for the parent root of the response. Returns an `Option` because we won't know /// the blob parent if we don't end up getting any blobs in the response. fn get_parent_root(verified_response: &Self::VerifiedResponseType) -> Option; @@ -247,49 +197,24 @@ pub trait RequestState { } impl RequestState for BlockRequestState { - type RequestType = BlocksByRootRequest; - type ResponseType = Arc>; + type RequestType = BlocksByRootSingleRequest; type VerifiedResponseType = Arc>; type ReconstructedResponseType = RpcBlock; - fn new_request(&self, spec: &ChainSpec) -> BlocksByRootRequest { - BlocksByRootRequest::new(vec![self.requested_block_root], spec) + fn new_request(&self) -> Self::RequestType { + BlocksByRootSingleRequest(self.requested_block_root) } fn make_request( id: SingleLookupReqId, peer_id: PeerId, request: Self::RequestType, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, ) -> Result<(), LookupRequestError> { cx.block_lookup_request(id, peer_id, request) .map_err(LookupRequestError::SendFailed) } - fn verify_response_inner( - &mut self, - expected_block_root: Hash256, - response: Option, - ) -> Result>>, LookupVerifyError> { - match response { - Some(block) => { - // Compute the block root using this specific function so that we can get timing - // metrics. - let block_root = get_block_root(&block); - if block_root != expected_block_root { - // return an error and drop the block - // NOTE: we take this is as a download failure to prevent counting the - // attempt as a chain failure, but simply a peer failure. - Err(LookupVerifyError::RootMismatch) - } else { - // Return the block for processing. 
- Ok(Some(block)) - } - } - None => Err(LookupVerifyError::NoBlockReturned), - } - } - fn get_parent_root(verified_response: &Arc>) -> Option { Some(verified_response.parent_root()) } @@ -340,60 +265,27 @@ impl RequestState for BlockRequestState } impl RequestState for BlobRequestState { - type RequestType = BlobsByRootRequest; - type ResponseType = Arc>; + type RequestType = BlobsByRootSingleBlockRequest; type VerifiedResponseType = FixedBlobSidecarList; type ReconstructedResponseType = FixedBlobSidecarList; - fn new_request(&self, spec: &ChainSpec) -> BlobsByRootRequest { - let blob_id_vec: Vec = self.requested_ids.clone().into(); - BlobsByRootRequest::new(blob_id_vec, spec) + fn new_request(&self) -> Self::RequestType { + BlobsByRootSingleBlockRequest { + block_root: self.block_root, + indices: self.requested_ids.indices(), + } } fn make_request( id: SingleLookupReqId, peer_id: PeerId, request: Self::RequestType, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, ) -> Result<(), LookupRequestError> { cx.blob_lookup_request(id, peer_id, request) .map_err(LookupRequestError::SendFailed) } - fn verify_response_inner( - &mut self, - expected_block_root: Hash256, - blob: Option, - ) -> Result>, LookupVerifyError> { - match blob { - Some(blob) => { - let received_id = blob.id(); - - if !self.requested_ids.contains(&received_id) { - return Err(LookupVerifyError::UnrequestedBlobId(received_id)); - } - if !blob.verify_blob_sidecar_inclusion_proof().unwrap_or(false) { - return Err(LookupVerifyError::InvalidInclusionProof); - } - if blob.block_root() != expected_block_root { - return Err(LookupVerifyError::UnrequestedHeader); - } - - // State should remain downloading until we receive the stream terminator. - self.requested_ids.remove(&received_id); - - // The inclusion proof check above ensures `blob.index` is < MAX_BLOBS_PER_BLOCK - let blob_index = blob.index; - *self.blob_download_queue.index_mut(blob_index as usize) = Some(blob); - Ok(None) - } - None => { - let blobs = std::mem::take(&mut self.blob_download_queue); - Ok(Some(blobs)) - } - } - } - fn get_parent_root(verified_response: &FixedBlobSidecarList) -> Option { verified_response .into_iter() diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index a5826bcb3d8..fa2683fb0f0 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,6 +1,6 @@ -use self::parent_lookup::ParentVerifyError; use self::single_block_lookup::SingleBlockLookup; use super::manager::BlockProcessingResult; +use super::network_context::{LookupFailure, LookupVerifyError}; use super::BatchProcessResult; use super::{manager::BlockProcessType, network_context::SyncNetworkContext}; use crate::metrics; @@ -21,7 +21,6 @@ pub use common::Lookup; pub use common::Parent; pub use common::RequestState; use fnv::FnvHashMap; -use lighthouse_network::rpc::RPCError; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; pub use single_block_lookup::{BlobRequestState, BlockRequestState}; @@ -133,7 +132,7 @@ impl BlockLookups { pub fn trigger_single_lookup( &mut self, mut single_block_lookup: SingleBlockLookup, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, ) { let block_root = single_block_lookup.block_root(); match single_block_lookup.request_block_and_blobs(cx) { @@ -319,40 +318,41 @@ impl BlockLookups { &mut self, lookup_id: SingleLookupReqId, peer_id: PeerId, - response: Option, + response: R::VerifiedResponseType, 
seen_timestamp: Duration, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, ) { let id = lookup_id.id; let response_type = R::response_type(); - let Some(lookup) = self.get_single_lookup::(lookup_id) else { - if response.is_some() { - // We don't have the ability to cancel in-flight RPC requests. So this can happen - // if we started this RPC request, and later saw the block/blobs via gossip. - debug!( - self.log, - "Block returned for single block lookup not present"; - "response_type" => ?response_type, - ); - } + let Some(mut lookup) = self.get_single_lookup::(lookup_id) else { + // We don't have the ability to cancel in-flight RPC requests. So this can happen + // if we started this RPC request, and later saw the block/blobs via gossip. + debug!( + self.log, + "Block returned for single block lookup not present"; + "response_type" => ?response_type, + ); return; }; let expected_block_root = lookup.block_root(); - if response.is_some() { - debug!(self.log, - "Peer returned response for single lookup"; - "peer_id" => %peer_id , - "id" => ?id, - "block_root" => ?expected_block_root, - "response_type" => ?response_type, - ); - } + debug!(self.log, + "Peer returned response for single lookup"; + "peer_id" => %peer_id , + "id" => ?id, + "block_root" => ?expected_block_root, + "response_type" => ?response_type, + ); - match self.single_lookup_response_inner::(peer_id, response, seen_timestamp, cx, lookup) - { - Ok(lookup) => { + match self.handle_verified_response::( + seen_timestamp, + cx, + BlockProcessType::SingleBlock { id: lookup.id }, + response, + &mut lookup, + ) { + Ok(_) => { self.single_block_lookups.insert(id, lookup); } Err(e) => { @@ -372,53 +372,10 @@ impl BlockLookups { /// Consolidates error handling for `single_lookup_response`. An `Err` here should always mean /// the lookup is dropped. 
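// From this patch on, by-root responses reach the lookup code pre-verified:
// the stream accounting moves into `SyncNetworkContext`, which folds each raw
// RPC event into at most one verified result per request. A self-contained
// miniature of that folding for the single-block case (all types here are
// simplified stand-ins, not the patch's real types):
use std::time::Duration;

enum RpcEvent<T> {
    Response(T, Duration),
    StreamTermination,
    RPCError(String),
}

fn resolve<T>(event: RpcEvent<T>, resolved: bool) -> Option<Result<T, String>> {
    match event {
        // First response resolves the request.
        RpcEvent::Response(item, _seen_timestamp) if !resolved => Some(Ok(item)),
        // Any further response is a peer fault.
        RpcEvent::Response(..) => Some(Err("too many responses".into())),
        // Terminator after a response: clean end of stream, nothing to report.
        RpcEvent::StreamTermination if resolved => None,
        // Terminator with no response: the peer sent an empty stream.
        RpcEvent::StreamTermination => Some(Err("no response returned".into())),
        // Transport-level failure always ends the request.
        RpcEvent::RPCError(e) => Some(Err(e)),
    }
}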
- fn single_lookup_response_inner>( - &self, - peer_id: PeerId, - response: Option, - seen_timestamp: Duration, - cx: &SyncNetworkContext, - mut lookup: SingleBlockLookup, - ) -> Result, LookupRequestError> { - let response_type = R::response_type(); - let log = self.log.clone(); - let expected_block_root = lookup.block_root(); - let request_state = R::request_state_mut(&mut lookup); - - match request_state.verify_response(expected_block_root, peer_id, response) { - Ok(Some(verified_response)) => { - self.handle_verified_response::( - seen_timestamp, - cx, - BlockProcessType::SingleBlock { id: lookup.id }, - verified_response, - &mut lookup, - )?; - } - Ok(None) => {} - Err(e) => { - debug!( - log, - "Single lookup response verification failed, retrying"; - "block_root" => ?expected_block_root, - "peer_id" => %peer_id, - "response_type" => ?response_type, - "error" => ?e - ); - let msg = e.into(); - cx.report_peer(peer_id, PeerAction::LowToleranceError, msg); - - request_state.register_failure_downloading(); - lookup.request_block_and_blobs(cx)?; - } - } - Ok(lookup) - } - fn handle_verified_response>( &self, seen_timestamp: Duration, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, process_type: BlockProcessType, verified_response: R::VerifiedResponseType, lookup: &mut SingleBlockLookup, @@ -438,6 +395,10 @@ impl BlockLookups { }; if !delay_send { + R::request_state_mut(lookup) + .get_state_mut() + .on_download_success() + .map_err(LookupRequestError::BadState)?; self.send_block_for_processing( block_root, block, @@ -448,6 +409,10 @@ impl BlockLookups { } } CachedChild::DownloadIncomplete => { + R::request_state_mut(lookup) + .get_state_mut() + .on_download_success() + .map_err(LookupRequestError::BadState)?; // If this was the result of a block request, we can't determine if the block peer // did anything wrong. If we already had both a block and blobs response processed, // we should penalize the blobs peer because they did not provide all blobs on the @@ -458,14 +423,21 @@ impl BlockLookups { } lookup.request_block_and_blobs(cx)?; } - CachedChild::NotRequired => R::send_reconstructed_for_processing( - id, - self, - block_root, - R::verified_to_reconstructed(block_root, verified_response), - seen_timestamp, - cx, - )?, + CachedChild::NotRequired => { + R::request_state_mut(lookup) + .get_state_mut() + .on_download_success() + .map_err(LookupRequestError::BadState)?; + + R::send_reconstructed_for_processing( + id, + self, + block_root, + R::verified_to_reconstructed(block_root, verified_response), + seen_timestamp, + cx, + )? 
+ } CachedChild::Err(e) => { warn!(self.log, "Consistency error in cached block"; "error" => ?e, @@ -511,26 +483,22 @@ impl BlockLookups { &mut self, id: SingleLookupReqId, peer_id: PeerId, - response: Option, + response: R::VerifiedResponseType, seen_timestamp: Duration, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, ) { let Some(mut parent_lookup) = self.get_parent_lookup::(id) else { - if response.is_some() { - debug!(self.log, "Response for a parent lookup request that was not found"; "peer_id" => %peer_id); - } + debug!(self.log, "Response for a parent lookup request that was not found"; "peer_id" => %peer_id); return; }; - if response.is_some() { - debug!(self.log, - "Peer returned response for parent lookup"; - "peer_id" => %peer_id , - "id" => ?id, - "block_root" => ?parent_lookup.current_parent_request.block_request_state.requested_block_root, - "response_type" => ?R::response_type(), - ); - } + debug!(self.log, + "Peer returned response for parent lookup"; + "peer_id" => %peer_id , + "id" => ?id, + "block_root" => ?parent_lookup.current_parent_request.block_request_state.requested_block_root, + "response_type" => ?R::response_type(), + ); match self.parent_lookup_response_inner::( peer_id, @@ -558,59 +526,17 @@ impl BlockLookups { fn parent_lookup_response_inner>( &mut self, peer_id: PeerId, - response: Option, + response: R::VerifiedResponseType, seen_timestamp: Duration, - cx: &SyncNetworkContext, - parent_lookup: &mut ParentLookup, - ) -> Result<(), RequestError> { - match parent_lookup.verify_response::(peer_id, response, &mut self.failed_chains) { - Ok(Some(verified_response)) => { - self.handle_verified_response::( - seen_timestamp, - cx, - BlockProcessType::ParentLookup { - chain_hash: parent_lookup.chain_hash(), - }, - verified_response, - &mut parent_lookup.current_parent_request, - )?; - } - Ok(None) => {} - Err(e) => self.handle_parent_verify_error::(peer_id, parent_lookup, e, cx)?, - }; - Ok(()) - } - - /// Handle logging and peer scoring for `ParentVerifyError`s during parent lookup requests. - fn handle_parent_verify_error>( - &mut self, - peer_id: PeerId, + cx: &mut SyncNetworkContext, parent_lookup: &mut ParentLookup, - e: ParentVerifyError, - cx: &SyncNetworkContext, ) -> Result<(), RequestError> { - match e { - ParentVerifyError::RootMismatch - | ParentVerifyError::NoBlockReturned - | ParentVerifyError::NotEnoughBlobsReturned - | ParentVerifyError::ExtraBlocksReturned - | ParentVerifyError::UnrequestedBlobId(_) - | ParentVerifyError::InvalidInclusionProof - | ParentVerifyError::UnrequestedHeader - | ParentVerifyError::ExtraBlobsReturned - | ParentVerifyError::InvalidIndex(_) => { - let e = e.into(); - warn!(self.log, "Peer sent invalid response to parent request"; - "peer_id" => %peer_id, "reason" => %e); - - // We do not tolerate these kinds of errors. We will accept a few but these are signs - // of a faulty peer. - cx.report_peer(peer_id, PeerAction::LowToleranceError, e); - - // We try again if possible. - parent_lookup.request_parent(cx)?; - } - ParentVerifyError::PreviousFailure { parent_root } => { + // check if the parent of this block isn't in the failed cache. If it is, this chain should + // be dropped and the peer downscored. 
+ if let Some(parent_root) = R::get_parent_root(&response) { + if self.failed_chains.contains(&parent_root) { + let request_state = R::request_state_mut(&mut parent_lookup.current_parent_request); + request_state.register_failure_downloading(); debug!( self.log, "Parent chain ignored due to past failure"; @@ -624,8 +550,20 @@ impl BlockLookups { PeerAction::MidToleranceError, "bbroot_failed_chains", ); + return Ok(()); } } + + self.handle_verified_response::( + seen_timestamp, + cx, + BlockProcessType::ParentLookup { + chain_hash: parent_lookup.chain_hash(), + }, + response, + &mut parent_lookup.current_parent_request, + )?; + Ok(()) } @@ -665,8 +603,8 @@ impl BlockLookups { // This happens if the peer disconnects while the block is being // processed. Drop the request without extra penalty } - RequestError::BadState(_) => { - // Should never happen + RequestError::BadState(..) => { + warn!(self.log, "Failed to request parent"; "error" => e.as_static()); } } } @@ -675,12 +613,9 @@ impl BlockLookups { pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext) { /* Check disconnection for single lookups */ - self.single_block_lookups.retain(|id, req| { + self.single_block_lookups.retain(|_, req| { let should_drop_lookup = req.should_drop_lookup_on_disconnected_peer(peer_id, cx, &self.log); - if should_drop_lookup { - debug!(self.log, "Dropping lookup after peer disconnected"; "id" => id, "block_root" => %req.block_root()); - } !should_drop_lookup }); @@ -702,21 +637,28 @@ impl BlockLookups { &mut self, id: SingleLookupReqId, peer_id: &PeerId, - cx: &SyncNetworkContext, - error: RPCError, + cx: &mut SyncNetworkContext, + error: LookupFailure, ) { - let msg = error.as_static_str(); + // Only downscore lookup verify errors. RPC errors are downscored in the network handler. + if let LookupFailure::LookupVerifyError(e) = &error { + // Downscore peer even if lookup is not known + self.downscore_on_rpc_error(peer_id, e, cx); + } + let Some(mut parent_lookup) = self.get_parent_lookup::(id) else { debug!(self.log, "RPC failure for a block parent lookup request that was not found"; "peer_id" => %peer_id, - "error" => msg + "error" => %error ); return; }; R::request_state_mut(&mut parent_lookup.current_parent_request) .register_failure_downloading(); - trace!(self.log, "Parent lookup block request failed"; &parent_lookup, "error" => msg); + debug!(self.log, "Parent lookup block request failed"; + "chain_hash" => %parent_lookup.chain_hash(), "id" => ?id, "error" => %error + ); self.request_parent(parent_lookup, cx); @@ -731,13 +673,18 @@ impl BlockLookups { &mut self, id: SingleLookupReqId, peer_id: &PeerId, - cx: &SyncNetworkContext, - error: RPCError, + cx: &mut SyncNetworkContext, + error: LookupFailure, ) { - let msg = error.as_static_str(); + // Only downscore lookup verify errors. RPC errors are downscored in the network handler. 
+ if let LookupFailure::LookupVerifyError(e) = &error { + // Downscore peer even if lookup is not known + self.downscore_on_rpc_error(peer_id, e, cx); + } + let log = self.log.clone(); let Some(mut lookup) = self.get_single_lookup::(id) else { - debug!(log, "Error response to dropped lookup"; "error" => ?error); + debug!(log, "Error response to dropped lookup"; "error" => %error); return; }; let block_root = lookup.block_root(); @@ -746,7 +693,7 @@ impl BlockLookups { trace!(log, "Single lookup failed"; "block_root" => ?block_root, - "error" => msg, + "error" => %error, "peer_id" => %peer_id, "response_type" => ?response_type ); @@ -758,7 +705,8 @@ impl BlockLookups { "error" => ?e, "block_root" => ?block_root, ); - self.single_block_lookups.remove(&id); + } else { + self.single_block_lookups.insert(id, lookup); } metrics::set_gauge( @@ -1006,20 +954,21 @@ impl BlockLookups { } BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown(_)) => { + let (chain_hash, blocks, hashes, block_request) = + parent_lookup.parts_for_processing(); + + let blocks = self.add_child_block_to_chain(chain_hash, blocks, cx).into(); + + let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); + // Check if the beacon processor is available let Some(beacon_processor) = cx.beacon_processor_if_enabled() else { return trace!( self.log, "Dropping parent chain segment that was ready for processing."; - parent_lookup + "chain_hash" => %chain_hash, ); }; - let (chain_hash, blocks, hashes, block_request) = - parent_lookup.parts_for_processing(); - - let blocks = self.add_child_block_to_chain(chain_hash, blocks, cx).into(); - - let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); match beacon_processor.send_chain_segment(process_id, blocks) { Ok(_) => { @@ -1073,7 +1022,7 @@ impl BlockLookups { &mut self, chain_hash: Hash256, mut blocks: VecDeque>, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, ) -> VecDeque> { // Find the child block that spawned the parent lookup request and add it to the chain // to send for processing. @@ -1126,12 +1075,16 @@ impl BlockLookups { fn handle_parent_block_error( &mut self, outcome: BlockError<::EthSpec>, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, mut parent_lookup: ParentLookup, ) { // We should always have a block peer. - let Ok(block_peer_id) = parent_lookup.block_processing_peer() else { - return; + let block_peer_id = match parent_lookup.block_processing_peer() { + Ok(peer_id) => peer_id, + Err(e) => { + warn!(self.log, "Parent lookup in bad state"; "chain_hash" => %parent_lookup.chain_hash(), "error" => e); + return; + } }; // We may not have a blob peer, if there were no blobs required for this block. @@ -1178,7 +1131,7 @@ impl BlockLookups { &mut self, chain_hash: Hash256, result: BatchProcessResult, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, ) { let Some((_hashes, request)) = self.processing_parent_lookups.remove(&chain_hash) else { return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result); @@ -1339,7 +1292,11 @@ impl BlockLookups { /// Attempts to request the next unknown parent. This method handles peer scoring and dropping /// the lookup in the event of failure. 
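// Scoring now keys off the failure kind: only lookup-verify failures are
// reported here, since plain RPC errors were already penalized by the network
// handler when they surfaced. A miniature of that split (stand-in types,
// illustrative only):
enum LookupFailureKind {
    RpcError(String),
    LookupVerifyError(&'static str),
}

fn downscore_reason(failure: &LookupFailureKind) -> Option<&'static str> {
    match failure {
        // Already handled where the RPC layer reported the error.
        LookupFailureKind::RpcError(_) => None,
        // The peer answered, but not with what we asked for: report it.
        LookupFailureKind::LookupVerifyError(reason) => Some(reason),
    }
}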
- fn request_parent(&mut self, mut parent_lookup: ParentLookup, cx: &SyncNetworkContext) { + fn request_parent( + &mut self, + mut parent_lookup: ParentLookup, + cx: &mut SyncNetworkContext, + ) { let response = parent_lookup.request_parent(cx); match response { @@ -1368,6 +1325,20 @@ impl BlockLookups { self.parent_lookups.drain(..).len() } + pub fn downscore_on_rpc_error( + &self, + peer_id: &PeerId, + error: &LookupVerifyError, + cx: &SyncNetworkContext, + ) { + // Note: logging the report event here with the full error display. The log inside + // `report_peer` only includes a smaller string, like "invalid_data" + let error_str: &'static str = error.into(); + + debug!(self.log, "reporting peer for sync lookup error"; "error" => error_str); + cx.report_peer(*peer_id, PeerAction::LowToleranceError, error_str); + } + pub fn update_metrics(&self) { metrics::set_gauge( &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index 55dd26b661e..b7a71860bff 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,7 +1,6 @@ -use super::single_block_lookup::{LookupRequestError, LookupVerifyError, SingleBlockLookup}; +use super::single_block_lookup::{LookupRequestError, SingleBlockLookup}; use super::{DownloadedBlock, PeerId}; use crate::sync::block_lookups::common::Parent; -use crate::sync::block_lookups::common::RequestState; use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::block_verification_types::RpcBlock; @@ -10,8 +9,6 @@ use beacon_chain::BeaconChainTypes; use std::collections::VecDeque; use std::sync::Arc; use store::Hash256; -use strum::IntoStaticStr; -use types::blob_sidecar::BlobIdentifier; /// How many attempts we try to find a parent of a block before we give up trying. pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; @@ -30,22 +27,8 @@ pub(crate) struct ParentLookup { pub current_parent_request: SingleBlockLookup, } -#[derive(Debug, PartialEq, Eq, IntoStaticStr)] -pub enum ParentVerifyError { - RootMismatch, - NoBlockReturned, - NotEnoughBlobsReturned, - ExtraBlocksReturned, - UnrequestedBlobId(BlobIdentifier), - InvalidInclusionProof, - UnrequestedHeader, - ExtraBlobsReturned, - InvalidIndex(u64), - PreviousFailure { parent_root: Hash256 }, -} - #[derive(Debug, PartialEq, Eq)] -pub enum RequestError { +pub(crate) enum RequestError { SendFailed(&'static str), ChainTooLong, /// We witnessed too many failures trying to complete this parent lookup. @@ -92,7 +75,7 @@ impl ParentLookup { } /// Attempts to request the next unknown parent. If the request fails, it should be removed. - pub fn request_parent(&mut self, cx: &SyncNetworkContext) -> Result<(), RequestError> { + pub fn request_parent(&mut self, cx: &mut SyncNetworkContext) -> Result<(), RequestError> { // check to make sure this request hasn't failed if self.downloaded_blocks.len() + 1 >= PARENT_DEPTH_TOLERANCE { return Err(RequestError::ChainTooLong); @@ -186,34 +169,6 @@ impl ParentLookup { } } - /// Verifies that the received block is what we requested. If so, parent lookup now waits for - /// the processing result of the block. 
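// The failed-chain gate that used to live in `verify_response` (deleted below)
// now runs in `parent_lookup_response_inner` on an already-verified response.
// A miniature of the gate, with `LRUTimeCache` stood in by a `HashSet` and
// roots by byte arrays (illustrative only):
use std::collections::HashSet;

type Root = [u8; 32];

fn gate_on_failed_chains(parent_root: Option<Root>, failed_chains: &HashSet<Root>) -> Result<(), Root> {
    match parent_root {
        // A blob-only response may not reveal a parent root; let it through.
        None => Ok(()),
        // A parent on a known-bad chain: drop the chain, penalize the peer.
        Some(root) if failed_chains.contains(&root) => Err(root),
        Some(_) => Ok(()),
    }
}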
- pub fn verify_response>( - &mut self, - peer_id: PeerId, - block: Option, - failed_chains: &mut lru_cache::LRUTimeCache, - ) -> Result, ParentVerifyError> { - let expected_block_root = self.current_parent_request.block_root(); - let request_state = R::request_state_mut(&mut self.current_parent_request); - let root_and_verified = - request_state.verify_response(expected_block_root, peer_id, block)?; - - // check if the parent of this block isn't in the failed cache. If it is, this chain should - // be dropped and the peer downscored. - if let Some(parent_root) = root_and_verified - .as_ref() - .and_then(|block| R::get_parent_root(block)) - { - if failed_chains.contains(&parent_root) { - request_state.register_failure_downloading(); - return Err(ParentVerifyError::PreviousFailure { parent_root }); - } - } - - Ok(root_and_verified) - } - pub fn add_peer(&mut self, peer: PeerId) { self.current_parent_request.add_peer(peer) } @@ -228,23 +183,6 @@ impl ParentLookup { } } -impl From for ParentVerifyError { - fn from(e: LookupVerifyError) -> Self { - use LookupVerifyError as E; - match e { - E::RootMismatch => ParentVerifyError::RootMismatch, - E::NoBlockReturned => ParentVerifyError::NoBlockReturned, - E::ExtraBlocksReturned => ParentVerifyError::ExtraBlocksReturned, - E::UnrequestedBlobId(blob_id) => ParentVerifyError::UnrequestedBlobId(blob_id), - E::InvalidInclusionProof => ParentVerifyError::InvalidInclusionProof, - E::UnrequestedHeader => ParentVerifyError::UnrequestedHeader, - E::ExtraBlobsReturned => ParentVerifyError::ExtraBlobsReturned, - E::InvalidIndex(index) => ParentVerifyError::InvalidIndex(index), - E::NotEnoughBlobsReturned => ParentVerifyError::NotEnoughBlobsReturned, - } - } -} - impl From for RequestError { fn from(e: LookupRequestError) -> Self { use LookupRequestError as E; @@ -282,7 +220,7 @@ impl RequestError { } RequestError::TooManyAttempts { cannot_process: _ } => "too_many_downloading_attempts", RequestError::NoPeers => "no_peers", - RequestError::BadState(_) => "bad_state", + RequestError::BadState(..) => "bad_state", } } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 15d10c77c24..5bb663967d7 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -11,29 +11,16 @@ use beacon_chain::BeaconChainTypes; use itertools::Itertools; use lighthouse_network::PeerAction; use rand::seq::IteratorRandom; -use slog::{trace, Logger}; +use slog::{debug, Logger}; use std::collections::HashSet; use std::fmt::Debug; use std::marker::PhantomData; use std::sync::Arc; use store::Hash256; use strum::IntoStaticStr; -use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; +use types::blob_sidecar::FixedBlobSidecarList; use types::EthSpec; -#[derive(Debug, PartialEq, Eq, IntoStaticStr)] -pub enum LookupVerifyError { - RootMismatch, - NoBlockReturned, - ExtraBlocksReturned, - UnrequestedBlobId(BlobIdentifier), - InvalidInclusionProof, - UnrequestedHeader, - ExtraBlobsReturned, - NotEnoughBlobsReturned, - InvalidIndex(u64), -} - #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupRequestError { /// Too many failed attempts @@ -88,6 +75,7 @@ impl SingleBlockLookup { /// the next parent. 
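// `update_requested_parent_block` below now retargets *both* request states;
// keeping the blob request's `block_root` (a field new in this patch) in
// lockstep with the block request's avoids mis-keyed blob lookups. A
// miniature of the invariant (stand-in types, illustrative only):
#[derive(Clone, Copy, PartialEq, Debug)]
struct LookupRoot(u64);

struct LookupRoots {
    block_request_root: LookupRoot,
    blob_request_root: LookupRoot,
}

impl LookupRoots {
    fn retarget(&mut self, new_root: LookupRoot) {
        self.block_request_root = new_root;
        self.blob_request_root = new_root;
        debug_assert_eq!(self.block_request_root, self.blob_request_root);
    }
}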
pub fn update_requested_parent_block(&mut self, block_root: Hash256) { self.block_request_state.requested_block_root = block_root; + self.blob_request_state.block_root = block_root; self.block_request_state.state.state = State::AwaitingDownload; self.blob_request_state.state.state = State::AwaitingDownload; self.child_components = Some(ChildComponents::empty(block_root)); @@ -108,7 +96,7 @@ impl SingleBlockLookup { /// downloading the block and/or blobs. pub fn request_block_and_blobs( &mut self, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, ) -> Result<(), LookupRequestError> { let block_already_downloaded = self.block_already_downloaded(); let blobs_already_downloaded = self.blobs_already_downloaded(); @@ -216,7 +204,7 @@ impl SingleBlockLookup { pub fn should_drop_lookup_on_disconnected_peer( &mut self, peer_id: &PeerId, - cx: &SyncNetworkContext, + cx: &mut SyncNetworkContext, log: &Logger, ) -> bool { let block_root = self.block_root(); @@ -233,7 +221,7 @@ impl SingleBlockLookup { if block_peer_disconnected || blob_peer_disconnected { if let Err(e) = self.request_block_and_blobs(cx) { - trace!(log, "Single lookup failed on peer disconnection"; "block_root" => ?block_root, "error" => ?e); + debug!(log, "Single lookup failed on peer disconnection"; "block_root" => ?block_root, "error" => ?e); return true; } } @@ -318,6 +306,7 @@ pub struct BlobRequestState { /// from both block/blobs downloaded in the network layer and any blocks/blobs that exist in /// the data availability checker. pub requested_ids: MissingBlobs, + pub block_root: Hash256, /// Where we store blobs until we receive the stream terminator. pub blob_download_queue: FixedBlobSidecarList, pub state: SingleLookupRequestState, @@ -328,6 +317,7 @@ impl BlobRequestState { pub fn new(block_root: Hash256, peer_source: &[PeerId], is_deneb: bool) -> Self { let default_ids = MissingBlobs::new_without_block(block_root, is_deneb); Self { + block_root, requested_ids: default_ids, blob_download_queue: <_>::default(), state: SingleLookupRequestState::new(peer_source), @@ -416,11 +406,6 @@ impl SingleLookupRequestState { } } - // TODO: Should not leak the enum state - pub fn get_state(&self) -> &State { - &self.state - } - pub fn is_current_req_counter(&self, req_counter: u32) -> bool { self.req_counter == req_counter } @@ -460,8 +445,16 @@ impl SingleLookupRequestState { self.state = State::AwaitingDownload; } - pub fn on_download_success(&mut self, peer_id: PeerId) { - self.state = State::Processing { peer_id }; + pub fn on_download_success(&mut self) -> Result<(), String> { + match &self.state { + State::Downloading { peer_id } => { + self.state = State::Processing { peer_id: *peer_id }; + Ok(()) + } + other => Err(format!( + "request bad state, expected downloading got {other}" + )), + } } /// Registers a failure in processing a block. 
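// `on_download_success` above no longer fabricates a `Processing` state out of
// anything but `Downloading`; a bad transition surfaces as an error instead of
// silently overwriting state. A compilable miniature of the checked
// transition (stand-in `MiniState`; the real `State` carries a `PeerId`):
#[derive(Debug)]
enum MiniState {
    AwaitingDownload,
    Downloading { peer: u64 },
    Processing { peer: u64 },
}

impl MiniState {
    fn on_download_success(self) -> Result<MiniState, String> {
        match self {
            MiniState::Downloading { peer } => Ok(MiniState::Processing { peer }),
            other => Err(format!("request bad state, expected downloading got {other:?}")),
        }
    }
}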
@@ -669,19 +662,9 @@ mod tests { ); as RequestState>::build_request( &mut sl.block_request_state, - &spec, ) .unwrap(); sl.block_request_state.state.state = State::Downloading { peer_id }; - - as RequestState>::verify_response( - &mut sl.block_request_state, - block.canonical_root(), - peer_id, - Some(block.into()), - ) - .unwrap() - .unwrap(); } #[test] @@ -714,7 +697,6 @@ mod tests { for _ in 1..TestLookup2::MAX_ATTEMPTS { as RequestState>::build_request( &mut sl.block_request_state, - &spec, ) .unwrap(); sl.block_request_state.state.on_download_failure(); @@ -723,30 +705,20 @@ mod tests { // Now we receive the block and send it for processing as RequestState>::build_request( &mut sl.block_request_state, - &spec, ) .unwrap(); sl.block_request_state.state.state = State::Downloading { peer_id }; - as RequestState>::verify_response( - &mut sl.block_request_state, - block.canonical_root(), - peer_id, - Some(block.into()), - ) - .unwrap() - .unwrap(); - // One processing failure maxes the available attempts sl.block_request_state.state.on_processing_failure(); assert_eq!( as RequestState>::build_request( &mut sl.block_request_state, - &spec - ), - Err(LookupRequestError::TooManyAttempts { + ) + .unwrap_err(), + LookupRequestError::TooManyAttempts { cannot_process: false - }) + } ) } } diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 6d50fe63200..8e3b35ee5d3 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -15,9 +15,10 @@ use beacon_chain::test_utils::{ build_log, generate_rand_block_and_blobs, BeaconChainHarness, EphemeralHarnessType, NumBlobs, }; use beacon_processor::WorkEvent; -use lighthouse_network::rpc::RPCResponseErrorCode; +use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; use lighthouse_network::types::SyncState; use lighthouse_network::{NetworkGlobals, Request}; +use slog::info; use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; use store::MemoryStore; use tokio::sync::mpsc; @@ -67,6 +68,7 @@ struct TestRig { /// `rng` for generating test blocks and blobs. 
rng: XorShiftRng, fork_name: ForkName, + log: Logger, } const D: Duration = Duration::new(0, 0); @@ -124,6 +126,7 @@ impl TestRig { log.clone(), ), fork_name, + log, } } @@ -136,6 +139,10 @@ impl TestRig { } } + fn log(&self, msg: &str) { + info!(self.log, "TEST_RIG"; "msg" => msg); + } + fn after_deneb(&self) -> bool { matches!(self.fork_name, ForkName::Deneb | ForkName::Electra) } @@ -914,7 +921,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); - // Fail downloading the block + rig.log("Fail downloading the block"); for i in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { let id = rig.expect_block_parent_request(parent_root); // Blobs are only requested in the first iteration as this test only retries blocks @@ -925,7 +932,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { rig.parent_lookup_failed_unavailable(id, peer_id); } - // Now fail processing a block in the parent request + rig.log("Now fail processing a block in the parent request"); for i in 0..PROCESSING_FAILURES { let id = rig.expect_block_parent_request(parent_root); // Blobs are only requested in the first iteration as this test only retries blocks diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index a868a092d3d..23bd1010bfe 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -36,7 +36,7 @@ use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; use super::block_lookups::common::LookupType; use super::block_lookups::BlockLookups; -use super::network_context::{BlockOrBlob, RangeRequestId, SyncNetworkContext}; +use super::network_context::{BlockOrBlob, RangeRequestId, RpcEvent, SyncNetworkContext}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; @@ -320,42 +320,12 @@ impl SyncManager { fn inject_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { trace!(self.log, "Sync manager received a failed RPC"); match request_id { - RequestId::SingleBlock { id } => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_block_lookup_failed::>( - id, - &peer_id, - &self.network, - error, - ), - LookupType::Parent => self - .block_lookups - .parent_lookup_failed::>( - id, - &peer_id, - &self.network, - error, - ), - }, - RequestId::SingleBlob { id } => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_block_lookup_failed::>( - id, - &peer_id, - &self.network, - error, - ), - LookupType::Parent => self - .block_lookups - .parent_lookup_failed::>( - id, - &peer_id, - &self.network, - error, - ), - }, + RequestId::SingleBlock { id } => { + self.on_single_block_response(id, peer_id, RpcEvent::RPCError(error)) + } + RequestId::SingleBlob { id } => { + self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) + } RequestId::RangeBlockAndBlobs { id } => { if let Some(sender_id) = self.network.range_request_failed(id) { match sender_id { @@ -694,7 +664,7 @@ impl SyncManager { } ChainSegmentProcessId::ParentLookup(chain_hash) => self .block_lookups - .parent_chain_processed(chain_hash, result, &self.network), + .parent_chain_processed(chain_hash, result, &mut self.network), }, } } @@ -836,26 +806,14 @@ impl SyncManager { seen_timestamp: 
Duration, ) { match request_id { - RequestId::SingleBlock { id } => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_lookup_response::>( - id, - peer_id, - block, - seen_timestamp, - &self.network, - ), - LookupType::Parent => self - .block_lookups - .parent_lookup_response::>( - id, - peer_id, - block, - seen_timestamp, - &self.network, - ), - }, + RequestId::SingleBlock { id } => self.on_single_block_response( + id, + peer_id, + match block { + Some(block) => RpcEvent::Response(block, seen_timestamp), + None => RpcEvent::StreamTermination, + }, + ), RequestId::SingleBlob { .. } => { crit!(self.log, "Block received during blob request"; "peer_id" => %peer_id ); } @@ -865,6 +823,56 @@ impl SyncManager { } } + fn on_single_block_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + block: RpcEvent>>, + ) { + if let Some(resp) = self.network.on_single_block_response(id, block) { + match resp { + Ok((block, seen_timestamp)) => match id.lookup_type { + LookupType::Current => self + .block_lookups + .single_lookup_response::>( + id, + peer_id, + block, + seen_timestamp, + &mut self.network, + ), + LookupType::Parent => self + .block_lookups + .parent_lookup_response::>( + id, + peer_id, + block, + seen_timestamp, + &mut self.network, + ), + }, + Err(error) => match id.lookup_type { + LookupType::Current => self + .block_lookups + .single_block_lookup_failed::>( + id, + &peer_id, + &mut self.network, + error, + ), + LookupType::Parent => self + .block_lookups + .parent_lookup_failed::>( + id, + &peer_id, + &mut self.network, + error, + ), + }, + } + } + } + fn rpc_blob_received( &mut self, request_id: RequestId, @@ -876,32 +884,71 @@ impl SyncManager { RequestId::SingleBlock { .. } => { crit!(self.log, "Single blob received during block request"; "peer_id" => %peer_id ); } - RequestId::SingleBlob { id } => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_lookup_response::>( - id, - peer_id, - blob, - seen_timestamp, - &self.network, - ), - LookupType::Parent => self - .block_lookups - .parent_lookup_response::>( - id, - peer_id, - blob, - seen_timestamp, - &self.network, - ), - }, + RequestId::SingleBlob { id } => self.on_single_blob_response( + id, + peer_id, + match blob { + Some(blob) => RpcEvent::Response(blob, seen_timestamp), + None => RpcEvent::StreamTermination, + }, + ), RequestId::RangeBlockAndBlobs { id } => { self.range_block_and_blobs_response(id, peer_id, blob.into()) } } } + fn on_single_blob_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + blob: RpcEvent>>, + ) { + if let Some(resp) = self.network.on_single_blob_response(id, blob) { + match resp { + Ok((blobs, seen_timestamp)) => match id.lookup_type { + LookupType::Current => self + .block_lookups + .single_lookup_response::>( + id, + peer_id, + blobs, + seen_timestamp, + &mut self.network, + ), + LookupType::Parent => self + .block_lookups + .parent_lookup_response::>( + id, + peer_id, + blobs, + seen_timestamp, + &mut self.network, + ), + }, + + Err(error) => match id.lookup_type { + LookupType::Current => self + .block_lookups + .single_block_lookup_failed::>( + id, + &peer_id, + &mut self.network, + error, + ), + LookupType::Parent => self + .block_lookups + .parent_lookup_failed::>( + id, + &peer_id, + &mut self.network, + error, + ), + }, + } + } + } + /// Handles receiving a response for a range sync request that should have both blocks and /// blobs. 
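// Verified blobs are folded into a fixed-capacity, index-addressed list, and
// an out-of-range index is treated as a peer fault, mirroring
// `to_fixed_blob_sidecar_list` later in this patch. A miniature with plain
// vectors (illustrative only):
fn to_fixed_list<T>(items: Vec<(usize, T)>, capacity: usize) -> Result<Vec<Option<T>>, String> {
    let mut fixed: Vec<Option<T>> = (0..capacity).map(|_| None).collect();
    for (index, item) in items {
        let slot = fixed
            .get_mut(index)
            .ok_or_else(|| format!("unrequested blob index {index}"))?;
        *slot = Some(item);
    }
    Ok(fixed)
}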
fn range_block_and_blobs_response( diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 96f8de46fb7..fc91270c1dc 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1,6 +1,8 @@ //! Provides network functionality for the Syncing thread. This fundamentally wraps a network //! channel and stores a global RPC ID to perform requests. +use self::requests::{ActiveBlobsByRootRequest, ActiveBlocksByRootRequest}; +pub use self::requests::{BlobsByRootSingleBlockRequest, BlocksByRootSingleRequest}; use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo; use super::manager::{Id, RequestId as SyncRequestId}; use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; @@ -9,17 +11,23 @@ use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; use crate::sync::manager::SingleLookupReqId; use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState}; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; -use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason}; +use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError}; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; +pub use requests::LookupVerifyError; use slog::{debug, trace, warn}; use std::collections::hash_map::Entry; use std::sync::Arc; +use std::time::Duration; use tokio::sync::mpsc; +use types::blob_sidecar::FixedBlobSidecarList; use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; +mod requests; + pub struct BlocksAndBlobsByRangeResponse { pub sender_id: RangeRequestId, pub responses: Result>, String>, @@ -37,6 +45,41 @@ pub enum RangeRequestId { }, } +#[derive(Debug)] +pub enum RpcEvent { + StreamTermination, + Response(T, Duration), + RPCError(RPCError), +} + +pub type RpcProcessingResult = Option>; + +pub enum LookupFailure { + RpcError(RPCError), + LookupVerifyError(LookupVerifyError), +} + +impl std::fmt::Display for LookupFailure { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + LookupFailure::RpcError(e) => write!(f, "RPC Error: {:?}", e), + LookupFailure::LookupVerifyError(e) => write!(f, "Lookup Verify Error: {:?}", e), + } + } +} + +impl From for LookupFailure { + fn from(e: RPCError) -> Self { + LookupFailure::RpcError(e) + } +} + +impl From for LookupFailure { + fn from(e: LookupVerifyError) -> Self { + LookupFailure::LookupVerifyError(e) + } +} + /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id. pub struct SyncNetworkContext { /// The network channel to relay messages to the Network service. @@ -45,6 +88,12 @@ pub struct SyncNetworkContext { /// A sequential ID for all RPC requests. request_id: Id, + /// A mapping of active BlocksByRoot requests, including both current slot and parent lookups. + blocks_by_root_requests: FnvHashMap, + + /// A mapping of active BlobsByRoot requests, including both current slot and parent lookups. 
+ blobs_by_root_requests: FnvHashMap>, + /// BlocksByRange requests paired with BlobsByRange range_blocks_and_blobs_requests: FnvHashMap)>, @@ -91,6 +140,8 @@ impl SyncNetworkContext { network_send, execution_engine_state: EngineState::Online, // always assume `Online` at the start request_id: 1, + blocks_by_root_requests: <_>::default(), + blobs_by_root_requests: <_>::default(), range_blocks_and_blobs_requests: FnvHashMap::default(), network_beacon_processor, chain, @@ -245,62 +296,57 @@ impl SyncNetworkContext { } pub fn block_lookup_request( - &self, + &mut self, id: SingleLookupReqId, peer_id: PeerId, - request: BlocksByRootRequest, + request: BlocksByRootSingleRequest, ) -> Result<(), &'static str> { debug!( self.log, "Sending BlocksByRoot Request"; "method" => "BlocksByRoot", - "block_roots" => ?request.block_roots().to_vec(), + "block_root" => ?request.0, "peer" => %peer_id, "id" => ?id ); self.send_network_msg(NetworkMessage::SendRequest { peer_id, - request: Request::BlocksByRoot(request), + request: Request::BlocksByRoot(request.into_request(&self.chain.spec)), request_id: RequestId::Sync(SyncRequestId::SingleBlock { id }), })?; + + self.blocks_by_root_requests + .insert(id, ActiveBlocksByRootRequest::new(request)); + Ok(()) } pub fn blob_lookup_request( - &self, + &mut self, id: SingleLookupReqId, - blob_peer_id: PeerId, - blob_request: BlobsByRootRequest, + peer_id: PeerId, + request: BlobsByRootSingleBlockRequest, ) -> Result<(), &'static str> { - if let Some(block_root) = blob_request - .blob_ids - .as_slice() - .first() - .map(|id| id.block_root) - { - let indices = blob_request - .blob_ids - .as_slice() - .iter() - .map(|id| id.index) - .collect::>(); - debug!( - self.log, - "Sending BlobsByRoot Request"; - "method" => "BlobsByRoot", - "block_root" => ?block_root, - "blob_indices" => ?indices, - "peer" => %blob_peer_id, - "id" => ?id - ); + debug!( + self.log, + "Sending BlobsByRoot Request"; + "method" => "BlobsByRoot", + "block_root" => ?request.block_root, + "blob_indices" => ?request.indices, + "peer" => %peer_id, + "id" => ?id + ); + + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: Request::BlobsByRoot(request.clone().into_request(&self.chain.spec)), + request_id: RequestId::Sync(SyncRequestId::SingleBlob { id }), + })?; + + self.blobs_by_root_requests + .insert(id, ActiveBlobsByRootRequest::new(request)); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id: blob_peer_id, - request: Request::BlobsByRoot(blob_request), - request_id: RequestId::Sync(SyncRequestId::SingleBlob { id }), - })?; - } Ok(()) } @@ -329,7 +375,7 @@ impl SyncNetworkContext { /// Reports to the scoring algorithm the behaviour of a peer. 
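// Aside: a self-contained sketch (illustrative names, not part of this diff)
// of the bookkeeping pattern used by `block_lookup_request` and
// `blob_lookup_request` above: register the request only after the network
// send succeeds, keyed by its id, so that later chunks can be checked against
// what was actually requested and events for unknown ids resolve to `None`.
use std::collections::HashMap;

#[derive(Default)]
struct Registry {
    // Stand-ins: `u32` for `SingleLookupReqId`, `u64` for `Hash256`.
    active: HashMap<u32, u64>,
}

impl Registry {
    fn on_send_success(&mut self, id: u32, requested_root: u64) {
        self.active.insert(id, requested_root);
    }

    fn on_response(&mut self, id: u32, root: u64) -> Option<Result<u64, &'static str>> {
        let requested = *self.active.get(&id)?; // unknown id => None
        if requested != root {
            self.active.remove(&id); // drop the request on a faulty response
            return Some(Err("unrequested block root"));
        }
        Some(Ok(root))
    }
}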
     pub fn report_peer(&self, peer_id: PeerId, action: PeerAction, msg: &'static str) {
-        debug!(self.log, "Sync reporting peer"; "peer_id" => %peer_id, "action" => %action);
+        debug!(self.log, "Sync reporting peer"; "peer_id" => %peer_id, "action" => %action, "msg" => %msg);
         self.network_send
             .send(NetworkMessage::ReportPeer {
                 peer_id,
@@ -405,4 +451,86 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         self.range_blocks_and_blobs_requests
             .insert(id, (sender_id, info));
     }
+
+    // Request handlers
+
+    pub fn on_single_block_response(
+        &mut self,
+        request_id: SingleLookupReqId,
+        block: RpcEvent<Arc<SignedBeaconBlock<T::EthSpec>>>,
+    ) -> RpcProcessingResult<Arc<SignedBeaconBlock<T::EthSpec>>> {
+        let Entry::Occupied(mut request) = self.blocks_by_root_requests.entry(request_id) else {
+            return None;
+        };
+
+        Some(match block {
+            RpcEvent::Response(block, seen_timestamp) => {
+                match request.get_mut().add_response(block) {
+                    Ok(block) => Ok((block, seen_timestamp)),
+                    Err(e) => {
+                        // The request must be dropped after receiving an error.
+                        request.remove();
+                        Err(e.into())
+                    }
+                }
+            }
+            RpcEvent::StreamTermination => match request.remove().terminate() {
+                Ok(_) => return None,
+                Err(e) => Err(e.into()),
+            },
+            RpcEvent::RPCError(e) => {
+                request.remove();
+                Err(e.into())
+            }
+        })
+    }
+
+    pub fn on_single_blob_response(
+        &mut self,
+        request_id: SingleLookupReqId,
+        blob: RpcEvent<Arc<BlobSidecar<T::EthSpec>>>,
+    ) -> RpcProcessingResult<FixedBlobSidecarList<T::EthSpec>> {
+        let Entry::Occupied(mut request) = self.blobs_by_root_requests.entry(request_id) else {
+            return None;
+        };
+
+        Some(match blob {
+            RpcEvent::Response(blob, _) => match request.get_mut().add_response(blob) {
+                Ok(Some(blobs)) => to_fixed_blob_sidecar_list(blobs)
+                    .map(|blobs| (blobs, timestamp_now()))
+                    .map_err(Into::into),
+                Ok(None) => return None,
+                Err(e) => {
+                    request.remove();
+                    Err(e.into())
+                }
+            },
+            RpcEvent::StreamTermination => {
+                // Stream terminator
+                match request.remove().terminate() {
+                    Some(blobs) => to_fixed_blob_sidecar_list(blobs)
+                        .map(|blobs| (blobs, timestamp_now()))
+                        .map_err(Into::into),
+                    None => return None,
+                }
+            }
+            RpcEvent::RPCError(e) => {
+                request.remove();
+                Err(e.into())
+            }
+        })
+    }
+}
+
+fn to_fixed_blob_sidecar_list<E: EthSpec>(
+    blobs: Vec<Arc<BlobSidecar<E>>>,
+) -> Result<FixedBlobSidecarList<E>, LookupVerifyError> {
+    let mut fixed_list = FixedBlobSidecarList::default();
+    for blob in blobs.into_iter() {
+        let index = blob.index as usize;
+        *fixed_list
+            .get_mut(index)
+            .ok_or(LookupVerifyError::UnrequestedBlobIndex(index as u64))? = Some(blob)
+    }
+    Ok(fixed_list)
+}
diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs
new file mode 100644
index 00000000000..0522b7fa384
--- /dev/null
+++ b/beacon_node/network/src/sync/network_context/requests.rs
@@ -0,0 +1,149 @@
+use beacon_chain::get_block_root;
+use lighthouse_network::rpc::{methods::BlobsByRootRequest, BlocksByRootRequest};
+use std::sync::Arc;
+use strum::IntoStaticStr;
+use types::{
+    blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256, SignedBeaconBlock,
+};
+
+#[derive(Debug, PartialEq, Eq, IntoStaticStr)]
+pub enum LookupVerifyError {
+    NoResponseReturned,
+    TooManyResponses,
+    UnrequestedBlockRoot(Hash256),
+    UnrequestedBlobIndex(u64),
+    InvalidInclusionProof,
+    DuplicateData,
+}
+
+pub struct ActiveBlocksByRootRequest {
+    request: BlocksByRootSingleRequest,
+    resolved: bool,
+}
+
+impl ActiveBlocksByRootRequest {
+    pub fn new(request: BlocksByRootSingleRequest) -> Self {
+        Self {
+            request,
+            resolved: false,
+        }
+    }
+
+    /// Append a response to the single chunk request. If the chunk is valid, the request is
+    /// resolved immediately.
+    /// The active request SHOULD be dropped after `add_response` returns an error.
+    pub fn add_response<E: EthSpec>(
+        &mut self,
+        block: Arc<SignedBeaconBlock<E>>,
+    ) -> Result<Arc<SignedBeaconBlock<E>>, LookupVerifyError> {
+        if self.resolved {
+            return Err(LookupVerifyError::TooManyResponses);
+        }
+
+        let block_root = get_block_root(&block);
+        if self.request.0 != block_root {
+            return Err(LookupVerifyError::UnrequestedBlockRoot(block_root));
+        }
+
+        // Valid data, blocks by root expects a single response
+        self.resolved = true;
+        Ok(block)
+    }
+
+    pub fn terminate(self) -> Result<(), LookupVerifyError> {
+        if self.resolved {
+            Ok(())
+        } else {
+            Err(LookupVerifyError::NoResponseReturned)
+        }
+    }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct BlocksByRootSingleRequest(pub Hash256);
+
+impl BlocksByRootSingleRequest {
+    pub fn into_request(self, spec: &ChainSpec) -> BlocksByRootRequest {
+        BlocksByRootRequest::new(vec![self.0], spec)
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct BlobsByRootSingleBlockRequest {
+    pub block_root: Hash256,
+    pub indices: Vec<u64>,
+}
+
+impl BlobsByRootSingleBlockRequest {
+    pub fn into_request(self, spec: &ChainSpec) -> BlobsByRootRequest {
+        BlobsByRootRequest::new(
+            self.indices
+                .into_iter()
+                .map(|index| BlobIdentifier {
+                    block_root: self.block_root,
+                    index,
+                })
+                .collect(),
+            spec,
+        )
+    }
+}
+
+pub struct ActiveBlobsByRootRequest<E: EthSpec> {
+    request: BlobsByRootSingleBlockRequest,
+    blobs: Vec<Arc<BlobSidecar<E>>>,
+    resolved: bool,
+}
+
+impl<E: EthSpec> ActiveBlobsByRootRequest<E> {
+    pub fn new(request: BlobsByRootSingleBlockRequest) -> Self {
+        Self {
+            request,
+            blobs: vec![],
+            resolved: false,
+        }
+    }
+
+    /// Appends a chunk to this multi-item request. If all expected chunks are received, this
+    /// method returns `Some`, resolving the request before the stream terminator.
+    /// The active request SHOULD be dropped after `add_response` returns an error.
+    pub fn add_response(
+        &mut self,
+        blob: Arc<BlobSidecar<E>>,
+    ) -> Result<Option<Vec<Arc<BlobSidecar<E>>>>, LookupVerifyError> {
+        if self.resolved {
+            return Err(LookupVerifyError::TooManyResponses);
+        }
+
+        let block_root = blob.block_root();
+        if self.request.block_root != block_root {
+            return Err(LookupVerifyError::UnrequestedBlockRoot(block_root));
+        }
+        if !blob.verify_blob_sidecar_inclusion_proof().unwrap_or(false) {
+            return Err(LookupVerifyError::InvalidInclusionProof);
+        }
+        if !self.request.indices.contains(&blob.index) {
+            return Err(LookupVerifyError::UnrequestedBlobIndex(blob.index));
+        }
+        if self.blobs.iter().any(|b| b.index == blob.index) {
+            return Err(LookupVerifyError::DuplicateData);
+        }
+
+        self.blobs.push(blob);
+        if self.blobs.len() >= self.request.indices.len() {
+            // All expected chunks received, return result early
+            self.resolved = true;
+            Ok(Some(std::mem::take(&mut self.blobs)))
+        } else {
+            Ok(None)
+        }
+    }
+
+    pub fn terminate(self) -> Option<Vec<Arc<BlobSidecar<E>>>> {
+        if self.resolved {
+            None
+        } else {
+            Some(self.blobs)
+        }
+    }
+}

From 532206e008d47a702e4a699bcf0c26041cf72cb6 Mon Sep 17 00:00:00 2001
From: Jimmy Chen
Date: Tue, 23 Apr 2024 02:06:46 +1000
Subject: [PATCH 07/13] Fix stuck backfill when scheduled work queue is at
 capacity (#5575)

* Fix stuck backfill and add regression test.

* Remove unnecessary `yield_now`

* Merge branch 'unstable' into fix-stuck-backfill

* Revert previous change and add extra comment.

* Merge branch 'unstable' into fix-stuck-backfill

* Update tests to use configured event schedule instead of hard coded values.
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into fix-stuck-backfill --- beacon_node/beacon_processor/Cargo.toml | 5 +- beacon_node/beacon_processor/src/lib.rs | 2 +- .../src/work_reprocessing_queue.rs | 163 ++++++++++++++---- common/slot_clock/src/manual_slot_clock.rs | 6 + 4 files changed, 142 insertions(+), 34 deletions(-) diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 723b09b581c..6c49a28ec87 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -23,4 +23,7 @@ lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } parking_lot = { workspace = true } num_cpus = { workspace = true } -serde = { workspace = true } \ No newline at end of file +serde = { workspace = true } + +[dev-dependencies] +tokio = { workspace = true, features = ["test-util"] } diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 9b83e9cacbc..fee55b39adc 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -851,7 +851,7 @@ impl BeaconProcessor { ready_work_tx, work_reprocessing_rx, &self.executor, - slot_clock, + Arc::new(slot_clock), self.log.clone(), maximum_gossip_clock_disparity, )?; diff --git a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs index c9be28444c8..496fa683d2c 100644 --- a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs @@ -22,6 +22,7 @@ use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::future::Future; use std::pin::Pin; +use std::sync::Arc; use std::task::Context; use std::time::Duration; use strum::AsRefStr; @@ -243,7 +244,7 @@ struct ReprocessQueue { attestation_delay_debounce: TimeLatch, lc_update_delay_debounce: TimeLatch, next_backfill_batch_event: Option>>, - slot_clock: Pin>, + slot_clock: Arc, } pub type QueuedLightClientUpdateId = usize; @@ -362,7 +363,7 @@ pub fn spawn_reprocess_scheduler( ready_work_tx: Sender, work_reprocessing_rx: Receiver, executor: &TaskExecutor, - slot_clock: S, + slot_clock: Arc, log: Logger, maximum_gossip_clock_disparity: Duration, ) -> Result<(), String> { @@ -370,34 +371,12 @@ pub fn spawn_reprocess_scheduler( if ADDITIONAL_QUEUED_BLOCK_DELAY >= maximum_gossip_clock_disparity { return Err("The block delay and gossip disparity don't match.".to_string()); } - let mut queue = ReprocessQueue { - work_reprocessing_rx, - ready_work_tx, - gossip_block_delay_queue: DelayQueue::new(), - rpc_block_delay_queue: DelayQueue::new(), - attestations_delay_queue: DelayQueue::new(), - lc_updates_delay_queue: DelayQueue::new(), - queued_gossip_block_roots: HashSet::new(), - queued_lc_updates: FnvHashMap::default(), - queued_aggregates: FnvHashMap::default(), - queued_unaggregates: FnvHashMap::default(), - awaiting_attestations_per_root: HashMap::new(), - awaiting_lc_updates_per_parent_root: HashMap::new(), - queued_backfill_batches: Vec::new(), - next_attestation: 0, - next_lc_update: 0, - early_block_debounce: TimeLatch::default(), - rpc_block_debounce: TimeLatch::default(), - attestation_delay_debounce: TimeLatch::default(), - lc_update_delay_debounce: TimeLatch::default(), - next_backfill_batch_event: None, - slot_clock: Box::pin(slot_clock.clone()), - }; + let mut queue = ReprocessQueue::new(ready_work_tx, work_reprocessing_rx, slot_clock); executor.spawn( async move 
{ while let Some(msg) = queue.next().await { - queue.handle_message(msg, &slot_clock, &log); + queue.handle_message(msg, &log); } debug!( @@ -412,7 +391,37 @@ pub fn spawn_reprocess_scheduler( } impl ReprocessQueue { - fn handle_message(&mut self, msg: InboundEvent, slot_clock: &S, log: &Logger) { + fn new( + ready_work_tx: Sender, + work_reprocessing_rx: Receiver, + slot_clock: Arc, + ) -> Self { + ReprocessQueue { + work_reprocessing_rx, + ready_work_tx, + gossip_block_delay_queue: DelayQueue::new(), + rpc_block_delay_queue: DelayQueue::new(), + attestations_delay_queue: DelayQueue::new(), + lc_updates_delay_queue: DelayQueue::new(), + queued_gossip_block_roots: HashSet::new(), + queued_lc_updates: FnvHashMap::default(), + queued_aggregates: FnvHashMap::default(), + queued_unaggregates: FnvHashMap::default(), + awaiting_attestations_per_root: HashMap::new(), + awaiting_lc_updates_per_parent_root: HashMap::new(), + queued_backfill_batches: Vec::new(), + next_attestation: 0, + next_lc_update: 0, + early_block_debounce: TimeLatch::default(), + rpc_block_debounce: TimeLatch::default(), + attestation_delay_debounce: TimeLatch::default(), + lc_update_delay_debounce: TimeLatch::default(), + next_backfill_batch_event: None, + slot_clock, + } + } + + fn handle_message(&mut self, msg: InboundEvent, log: &Logger) { use ReprocessQueueMessage::*; match msg { // Some block has been indicated as "early" and should be processed when the @@ -426,7 +435,7 @@ impl ReprocessQueue { return; } - if let Some(duration_till_slot) = slot_clock.duration_to_slot(block_slot) { + if let Some(duration_till_slot) = self.slot_clock.duration_to_slot(block_slot) { // Check to ensure this won't over-fill the queue. if self.queued_gossip_block_roots.len() >= MAXIMUM_QUEUED_BLOCKS { if self.early_block_debounce.elapsed() { @@ -459,7 +468,7 @@ impl ReprocessQueue { // This logic is slightly awkward since `SlotClock::duration_to_slot` // doesn't distinguish between a slot that has already arrived and an // error reading the slot clock. - if let Some(now) = slot_clock.now() { + if let Some(now) = self.slot_clock.now() { if block_slot <= now && self .ready_work_tx @@ -860,7 +869,8 @@ impl ReprocessQueue { } } InboundEvent::ReadyBackfillSync(queued_backfill_batch) => { - let millis_from_slot_start = slot_clock + let millis_from_slot_start = self + .slot_clock .millis_from_current_slot_start() .map_or("null".to_string(), |duration| { duration.as_millis().to_string() @@ -886,7 +896,12 @@ impl ReprocessQueue { "Failed to send scheduled backfill work"; "info" => "sending work back to queue" ); - self.queued_backfill_batches.insert(0, batch) + self.queued_backfill_batches.insert(0, batch); + + // only recompute if there is no `next_backfill_batch_event` already scheduled + if self.next_backfill_batch_event.is_none() { + self.recompute_next_backfill_batch_event(); + } } // The message was not sent and we didn't get the correct // return result. This is a logic error. @@ -963,7 +978,11 @@ impl ReprocessQueue { #[cfg(test)] mod tests { use super::*; - use slot_clock::TestingSlotClock; + use logging::test_logger; + use slot_clock::{ManualSlotClock, TestingSlotClock}; + use std::ops::Add; + use std::sync::Arc; + use task_executor::test_utils::TestRuntime; #[test] fn backfill_processing_schedule_calculation() { @@ -1002,4 +1021,84 @@ mod tests { duration_to_next_slot + event_times[0] ); } + + // Regression test for issue #5504. 
+ // See: https://github.com/sigp/lighthouse/issues/5504#issuecomment-2050930045 + #[tokio::test] + async fn backfill_schedule_failed_should_reschedule() { + let runtime = TestRuntime::default(); + let log = test_logger(); + let (work_reprocessing_tx, work_reprocessing_rx) = mpsc::channel(1); + let (ready_work_tx, mut ready_work_rx) = mpsc::channel(1); + let slot_duration = 12; + let slot_clock = Arc::new(testing_slot_clock(slot_duration)); + + spawn_reprocess_scheduler( + ready_work_tx.clone(), + work_reprocessing_rx, + &runtime.task_executor, + slot_clock.clone(), + log, + Duration::from_millis(500), + ) + .unwrap(); + + // Pause time so it only advances manually + tokio::time::pause(); + + // Send some random work to `ready_work_tx` to fill up the capacity first. + ready_work_tx + .try_send(ReadyWork::IgnoredRpcBlock(IgnoredRpcBlock { + process_fn: Box::new(|| {}), + })) + .unwrap(); + + // Now queue a backfill sync batch. + work_reprocessing_tx + .try_send(ReprocessQueueMessage::BackfillSync(QueuedBackfillBatch( + Box::pin(async {}), + ))) + .unwrap(); + tokio::task::yield_now().await; + + // Advance the time by more than 1/2 the slot to trigger a scheduled backfill batch to be sent. + // This should fail as the `ready_work` channel is at capacity, and it should be rescheduled. + let duration_to_next_event = + ReprocessQueue::duration_until_next_backfill_batch_event(slot_clock.as_ref()); + let one_ms = Duration::from_millis(1); + advance_time(&slot_clock, duration_to_next_event.add(one_ms)).await; + + // Now drain the `ready_work` channel. + assert!(matches!( + ready_work_rx.try_recv(), + Ok(ReadyWork::IgnoredRpcBlock { .. }) + )); + assert!(ready_work_rx.try_recv().is_err()); + + // Advance time again, and assert that the re-scheduled batch is successfully sent. + let duration_to_next_event = + ReprocessQueue::duration_until_next_backfill_batch_event(slot_clock.as_ref()); + advance_time(&slot_clock, duration_to_next_event.add(one_ms)).await; + assert!(matches!( + ready_work_rx.try_recv(), + Ok(ReadyWork::BackfillSync { .. }) + )); + } + + /// Advances slot clock and test clock time by the same duration. + async fn advance_time(slot_clock: &ManualSlotClock, duration: Duration) { + slot_clock.advance_time(duration); + tokio::time::advance(duration).await; + // NOTE: The `tokio::time::advance` fn actually calls `yield_now()` after advancing the + // clock. Why do we need an extra `yield_now`? 
+ tokio::task::yield_now().await; + } + + fn testing_slot_clock(slot_duration: u64) -> ManualSlotClock { + TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_secs(slot_duration), + ) + } } diff --git a/common/slot_clock/src/manual_slot_clock.rs b/common/slot_clock/src/manual_slot_clock.rs index 7b42fa9062d..1d71533de15 100644 --- a/common/slot_clock/src/manual_slot_clock.rs +++ b/common/slot_clock/src/manual_slot_clock.rs @@ -1,5 +1,6 @@ use super::SlotClock; use parking_lot::RwLock; +use std::ops::Add; use std::sync::Arc; use std::time::Duration; use types::Slot; @@ -41,6 +42,11 @@ impl ManualSlotClock { *self.current_time.write() = duration; } + pub fn advance_time(&self, duration: Duration) { + let current_time = *self.current_time.read(); + *self.current_time.write() = current_time.add(duration); + } + pub fn advance_slot(&self) { self.set_slot(self.now().unwrap().as_u64() + 1) } From ad7f0e0cdb471749d21ce978383e18c8db6ad4bd Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Tue, 23 Apr 2024 00:07:43 +0800 Subject: [PATCH 08/13] Delete repetitive execute command in local testnet scripts (#5611) * Delete repetitive execute * Merge branch 'unstable' of https://github.com/sigp/lighthouse into local-testnet --- scripts/local_testnet/start_local_testnet.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index 77422095130..be91d069985 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -112,9 +112,6 @@ sleeping 3 execute_command_add_PID el_bootnode.log ./el_bootnode.sh sleeping 3 -execute_command_add_PID el_bootnode.log ./el_bootnode.sh -sleeping 1 - # Start beacon nodes BN_udp_tcp_base=9000 BN_http_port_base=8000 From 82b131d37fef84b93c4a3477ee8093a0b39f81e7 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Mon, 22 Apr 2024 19:21:21 -0500 Subject: [PATCH 09/13] Electra: Add New Containers (#5607) * Electra: Add New Containers --- consensus/types/src/consolidation.rs | 35 ++++++++++++++++++ consensus/types/src/deposit_receipt.rs | 37 +++++++++++++++++++ .../src/execution_layer_withdrawal_request.rs | 34 +++++++++++++++++ consensus/types/src/lib.rs | 14 +++++++ .../types/src/pending_balance_deposit.rs | 33 +++++++++++++++++ consensus/types/src/pending_consolidation.rs | 33 +++++++++++++++++ .../types/src/pending_partial_withdrawal.rs | 35 ++++++++++++++++++ consensus/types/src/signed_consolidation.rs | 32 ++++++++++++++++ 8 files changed, 253 insertions(+) create mode 100644 consensus/types/src/consolidation.rs create mode 100644 consensus/types/src/deposit_receipt.rs create mode 100644 consensus/types/src/execution_layer_withdrawal_request.rs create mode 100644 consensus/types/src/pending_balance_deposit.rs create mode 100644 consensus/types/src/pending_consolidation.rs create mode 100644 consensus/types/src/pending_partial_withdrawal.rs create mode 100644 consensus/types/src/signed_consolidation.rs diff --git a/consensus/types/src/consolidation.rs b/consensus/types/src/consolidation.rs new file mode 100644 index 00000000000..09a2d4bb0c3 --- /dev/null +++ b/consensus/types/src/consolidation.rs @@ -0,0 +1,35 @@ +use crate::test_utils::TestRandom; +use crate::Epoch; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + 
arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct Consolidation { + #[serde(with = "serde_utils::quoted_u64")] + pub source_index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub target_index: u64, + pub epoch: Epoch, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(Consolidation); +} diff --git a/consensus/types/src/deposit_receipt.rs b/consensus/types/src/deposit_receipt.rs new file mode 100644 index 00000000000..6a08f717f3d --- /dev/null +++ b/consensus/types/src/deposit_receipt.rs @@ -0,0 +1,37 @@ +use crate::test_utils::TestRandom; +use crate::{Hash256, PublicKeyBytes, Signature}; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct DepositReceipt { + pub pubkey: PublicKeyBytes, + pub withdrawal_credentials: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub amount: u64, + pub signature: Signature, + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(DepositReceipt); +} diff --git a/consensus/types/src/execution_layer_withdrawal_request.rs b/consensus/types/src/execution_layer_withdrawal_request.rs new file mode 100644 index 00000000000..b1d814c2834 --- /dev/null +++ b/consensus/types/src/execution_layer_withdrawal_request.rs @@ -0,0 +1,34 @@ +use crate::test_utils::TestRandom; +use crate::{Address, PublicKeyBytes}; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct ExecutionLayerWithdrawalRequest { + pub source_address: Address, + pub validator_pubkey: PublicKeyBytes, + #[serde(with = "serde_utils::quoted_u64")] + pub amount: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(ExecutionLayerWithdrawalRequest); +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 6551ebc1dda..dee55789398 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -29,16 +29,19 @@ pub mod bls_to_execution_change; pub mod builder_bid; pub mod chain_spec; pub mod checkpoint; +pub mod consolidation; pub mod consts; pub mod contribution_and_proof; pub mod deposit; pub mod deposit_data; pub mod deposit_message; +pub mod deposit_receipt; pub mod deposit_tree_snapshot; pub mod enr_fork_id; pub mod eth1_data; pub mod eth_spec; pub mod execution_block_hash; +pub mod execution_layer_withdrawal_request; pub mod execution_payload; pub mod execution_payload_header; pub mod fork; @@ -54,6 +57,9 @@ pub mod light_client_finality_update; pub mod light_client_optimistic_update; pub mod light_client_update; pub mod pending_attestation; +pub mod pending_balance_deposit; +pub mod pending_consolidation; +pub mod pending_partial_withdrawal; pub mod proposer_preparation_data; pub mod proposer_slashing; pub mod relative_epoch; @@ -63,6 +69,7 @@ pub mod signed_aggregate_and_proof; pub mod signed_beacon_block; pub mod signed_beacon_block_header; pub mod signed_bls_to_execution_change; +pub mod 
signed_consolidation; pub mod signed_contribution_and_proof; pub mod signed_voluntary_exit; pub mod signing_data; @@ -133,10 +140,12 @@ pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ ConfigAndPreset, ConfigAndPresetCapella, ConfigAndPresetDeneb, ConfigAndPresetElectra, }; +pub use crate::consolidation::Consolidation; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; pub use crate::deposit_message::DepositMessage; +pub use crate::deposit_receipt::DepositReceipt; pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; pub use crate::enr_fork_id::EnrForkId; pub use crate::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; @@ -144,6 +153,7 @@ pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::ExecutionBlockHeader; +pub use crate::execution_layer_withdrawal_request::ExecutionLayerWithdrawalRequest; pub use crate::execution_payload::{ ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadMerge, ExecutionPayloadRef, Transaction, Transactions, Withdrawals, @@ -189,6 +199,9 @@ pub use crate::payload::{ FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; +pub use crate::pending_balance_deposit::PendingBalanceDeposit; +pub use crate::pending_consolidation::PendingConsolidation; +pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; pub use crate::preset::{ AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, }; @@ -207,6 +220,7 @@ pub use crate::signed_beacon_block::{ }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; +pub use crate::signed_consolidation::SignedConsolidation; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_data::{SignedRoot, SigningData}; diff --git a/consensus/types/src/pending_balance_deposit.rs b/consensus/types/src/pending_balance_deposit.rs new file mode 100644 index 00000000000..a2bce577f87 --- /dev/null +++ b/consensus/types/src/pending_balance_deposit.rs @@ -0,0 +1,33 @@ +use crate::test_utils::TestRandom; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct PendingBalanceDeposit { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub amount: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(PendingBalanceDeposit); +} diff --git a/consensus/types/src/pending_consolidation.rs b/consensus/types/src/pending_consolidation.rs new file mode 100644 index 00000000000..6e0b74a7383 --- /dev/null +++ b/consensus/types/src/pending_consolidation.rs @@ -0,0 +1,33 @@ +use crate::test_utils::TestRandom; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + 
Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct PendingConsolidation { + #[serde(with = "serde_utils::quoted_u64")] + pub source_index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub target_index: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(PendingConsolidation); +} diff --git a/consensus/types/src/pending_partial_withdrawal.rs b/consensus/types/src/pending_partial_withdrawal.rs new file mode 100644 index 00000000000..e5ace7b2736 --- /dev/null +++ b/consensus/types/src/pending_partial_withdrawal.rs @@ -0,0 +1,35 @@ +use crate::test_utils::TestRandom; +use crate::Epoch; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct PendingPartialWithdrawal { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub amount: u64, + pub withdrawable_epoch: Epoch, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(PendingPartialWithdrawal); +} diff --git a/consensus/types/src/signed_consolidation.rs b/consensus/types/src/signed_consolidation.rs new file mode 100644 index 00000000000..f004ec23bd4 --- /dev/null +++ b/consensus/types/src/signed_consolidation.rs @@ -0,0 +1,32 @@ +use crate::test_utils::TestRandom; +use crate::{Consolidation, Signature}; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct SignedConsolidation { + pub message: Consolidation, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(SignedConsolidation); +} From 72a33604b3f1bd5ba9e23812ae53b1df4f29e405 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 23 Apr 2024 23:13:34 +1000 Subject: [PATCH 10/13] Add timing for block availability (#5510) * Add timing for block availability * Attestation metrics analysis * Prettier printing * Add some metrics and timings to track late blocks * Update to latest unstable * fmt * Merge latest unstable * Small tweaks * Try pushing blob timing down into verification * Simplify for clippy --- beacon_node/beacon_chain/src/beacon_chain.rs | 65 +++++----- .../beacon_chain/src/blob_verification.rs | 42 +++++-- .../beacon_chain/src/block_times_cache.rs | 89 +++++++++++++- .../beacon_chain/src/block_verification.rs | 3 +- .../beacon_chain/src/canonical_head.rs | 115 +++++++++++++++--- .../src/data_availability_checker.rs | 23 +++- .../src/data_availability_checker/error.rs | 4 +- .../overflow_lru_cache.rs | 7 ++ beacon_node/beacon_chain/src/metrics.rs | 72 ++++++----- beacon_node/execution_layer/src/lib.rs | 24 ++-- beacon_node/network/src/metrics.rs | 63 ++++------ .../gossip_methods.rs | 40 +++--- 12 files changed, 391 insertions(+), 156 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b3790024f81..c59c5e8ed10 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2953,7 +2953,7 @@ impl BeaconChain { } /// Wraps `process_block` 
in logic to cache the block's commitments in the processing cache - /// and evict if the block was imported or erred. + /// and evict if the block was imported or errored. pub async fn process_block_with_early_caching>( self: &Arc, block_root: Hash256, @@ -2998,22 +2998,20 @@ impl BeaconChain { // Increment the Prometheus counter for block processing requests. metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); + let block_slot = unverified_block.block().slot(); + // Set observed time if not already set. Usually this should be set by gossip or RPC, // but just in case we set it again here (useful for tests). - if let (Some(seen_timestamp), Some(current_slot)) = - (self.slot_clock.now_duration(), self.slot_clock.now()) - { + if let Some(seen_timestamp) = self.slot_clock.now_duration() { self.block_times_cache.write().set_time_observed( block_root, - current_slot, + block_slot, seen_timestamp, None, None, ); } - let block_slot = unverified_block.block().slot(); - // A small closure to group the verification and import errors. let chain = self.clone(); let import_block = async move { @@ -3024,6 +3022,15 @@ impl BeaconChain { )?; publish_fn()?; let executed_block = chain.into_executed_block(execution_pending).await?; + // Record the time it took to ask the execution layer. + if let Some(seen_timestamp) = self.slot_clock.now_duration() { + self.block_times_cache.write().set_execution_time( + block_root, + block_slot, + seen_timestamp, + ) + } + match executed_block { ExecutedBlock::Available(block) => { self.import_available_block(Box::new(block)).await @@ -3090,8 +3097,8 @@ impl BeaconChain { } } - /// Accepts a fully-verified block and awaits on it's payload verification handle to - /// get a fully `ExecutedBlock` + /// Accepts a fully-verified block and awaits on its payload verification handle to + /// get a fully `ExecutedBlock`. /// /// An error is returned if the verification handle couldn't be awaited. pub async fn into_executed_block( @@ -3224,10 +3231,6 @@ impl BeaconChain { ) -> Result> { match availability { Availability::Available(block) => { - // This is the time since start of the slot where all the components of the block have become available - let delay = - get_slot_delay_ms(timestamp_now(), block.block.slot(), &self.slot_clock); - metrics::observe_duration(&metrics::BLOCK_AVAILABILITY_DELAY, delay); // Block is fully available, import into fork choice self.import_available_block(block).await } @@ -3256,6 +3259,15 @@ impl BeaconChain { consensus_context, } = import_data; + // Record the time at which this block's blobs became available. + if let Some(blobs_available) = block.blobs_available_timestamp() { + self.block_times_cache.write().set_time_blob_observed( + block_root, + block.slot(), + blobs_available, + ); + } + // import let chain = self.clone(); let block_root = self @@ -3396,6 +3408,14 @@ impl BeaconChain { "Early attester cache insert failed"; "error" => ?e ); + } else { + let attestable_timestamp = + self.slot_clock.now_duration().unwrap_or_default(); + self.block_times_cache.write().set_time_attestable( + block_root, + signed_block.slot(), + attestable_timestamp, + ) } } else { warn!( @@ -3885,25 +3905,6 @@ impl BeaconChain { ); } - // Do not store metrics if the block was > 4 slots old, this helps prevent noise during - // sync. - if block_delay_total < self.slot_clock.slot_duration() * 4 { - // Observe the delay between when we observed the block and when we imported it. 
- let block_delays = self.block_times_cache.read().get_block_delays( - block_root, - self.slot_clock - .start_of(current_slot) - .unwrap_or_else(|| Duration::from_secs(0)), - ); - - metrics::observe_duration( - &metrics::BEACON_BLOCK_IMPORTED_OBSERVED_DELAY_TIME, - block_delays - .imported - .unwrap_or_else(|| Duration::from_secs(0)), - ); - } - if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_block_subscribers() { event_handler.register(EventKind::Block(SseBlock { diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 1fb61702006..a1ae260d930 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -14,6 +14,7 @@ use merkle_proof::MerkleTreeError; use slog::{debug, warn}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; +use std::time::Duration; use tree_hash::TreeHash; use types::blob_sidecar::BlobIdentifier; use types::{ @@ -214,7 +215,10 @@ impl GossipVerifiedBlob { pub fn __assumed_valid(blob: Arc>) -> Self { Self { block_root: blob.block_root(), - blob: KzgVerifiedBlob { blob }, + blob: KzgVerifiedBlob { + blob, + seen_timestamp: Duration::from_secs(0), + }, } } pub fn id(&self) -> BlobIdentifier { @@ -260,6 +264,8 @@ impl GossipVerifiedBlob { #[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedBlob { blob: Arc>, + #[ssz(skip_serializing, skip_deserializing)] + seen_timestamp: Duration, } impl PartialOrd for KzgVerifiedBlob { @@ -275,8 +281,12 @@ impl Ord for KzgVerifiedBlob { } impl KzgVerifiedBlob { - pub fn new(blob: Arc>, kzg: &Kzg) -> Result { - verify_kzg_for_blob(blob, kzg) + pub fn new( + blob: Arc>, + kzg: &Kzg, + seen_timestamp: Duration, + ) -> Result { + verify_kzg_for_blob(blob, kzg, seen_timestamp) } pub fn to_blob(self) -> Arc> { self.blob @@ -294,12 +304,18 @@ impl KzgVerifiedBlob { pub fn blob_index(&self) -> u64 { self.blob.index } + pub fn seen_timestamp(&self) -> Duration { + self.seen_timestamp + } /// Construct a `KzgVerifiedBlob` that is assumed to be valid. /// /// This should ONLY be used for testing. #[cfg(test)] pub fn __assumed_valid(blob: Arc>) -> Self { - Self { blob } + Self { + blob, + seen_timestamp: Duration::from_secs(0), + } } } @@ -309,9 +325,13 @@ impl KzgVerifiedBlob { pub fn verify_kzg_for_blob( blob: Arc>, kzg: &Kzg, + seen_timestamp: Duration, ) -> Result, KzgError> { validate_blob::(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof)?; - Ok(KzgVerifiedBlob { blob }) + Ok(KzgVerifiedBlob { + blob, + seen_timestamp, + }) } pub struct KzgVerifiedBlobList { @@ -322,13 +342,17 @@ impl KzgVerifiedBlobList { pub fn new>>>( blob_list: I, kzg: &Kzg, + seen_timestamp: Duration, ) -> Result { let blobs = blob_list.into_iter().collect::>(); verify_kzg_for_blob_list(blobs.iter(), kzg)?; Ok(Self { verified_blobs: blobs .into_iter() - .map(|blob| KzgVerifiedBlob { blob }) + .map(|blob| KzgVerifiedBlob { + blob, + seen_timestamp, + }) .collect(), }) } @@ -374,6 +398,8 @@ pub fn validate_blob_sidecar_for_gossip( let blob_epoch = blob_slot.epoch(T::EthSpec::slots_per_epoch()); let signed_block_header = &blob_sidecar.signed_block_header; + let seen_timestamp = chain.slot_clock.now_duration().unwrap_or_default(); + // This condition is not possible if we have received the blob from the network // since we only subscribe to `MaxBlobsPerBlock` subnets over gossip network. // We include this check only for completeness. 
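// Aside: what the `seen_timestamp` plumbing above enables, as a minimal
// sketch. Both inputs are durations since the UNIX epoch, as returned by
// `SlotClock::now_duration()`; the helper name is illustrative, not from
// this diff.
use std::time::Duration;

fn delay_from_slot_start(seen: Duration, slot_start: Duration) -> Option<Duration> {
    // `checked_sub` yields `None` for inconsistent clocks (a blob "seen"
    // before its slot started), the same defensive style used by
    // `BlockDelays::new` below.
    seen.checked_sub(slot_start)
}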
@@ -641,8 +667,8 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
         .kzg
         .as_ref()
         .ok_or(GossipBlobError::KzgNotInitialized)?;
-    let kzg_verified_blob =
-        KzgVerifiedBlob::new(blob_sidecar, kzg).map_err(GossipBlobError::KzgError)?;
+    let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar, kzg, seen_timestamp)
+        .map_err(GossipBlobError::KzgError)?;
 
     Ok(GossipVerifiedBlob {
         block_root,
diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs
index c5293bcb0ee..db547a1186c 100644
--- a/beacon_node/beacon_chain/src/block_times_cache.rs
+++ b/beacon_node/beacon_chain/src/block_times_cache.rs
@@ -18,6 +18,9 @@ type BlockRoot = Hash256;
 #[derive(Clone, Default)]
 pub struct Timestamps {
     pub observed: Option<Duration>,
+    pub all_blobs_observed: Option<Duration>,
+    pub execution_time: Option<Duration>,
+    pub attestable: Option<Duration>,
     pub imported: Option<Duration>,
     pub set_as_head: Option<Duration>,
 }
 
 // Helps arrange delay data so it is more relevant to metrics.
 #[derive(Debug, Default)]
 pub struct BlockDelays {
+    /// Time after start of slot we saw the block.
     pub observed: Option<Duration>,
+    /// The time after the start of the slot we saw all blobs.
+    pub all_blobs_observed: Option<Duration>,
+    /// The time it took to get verification from the EL for the block.
+    pub execution_time: Option<Duration>,
+    /// The delay from the start of the slot before the block became available.
+    ///
+    /// Equal to max(`observed + execution_time`, `all_blobs_observed`).
+    pub available: Option<Duration>,
+    /// Time after `available`.
+    pub attestable: Option<Duration>,
+    /// Also time after `available`.
+    ///
+    /// We need to use `available` again rather than `attestable` to handle the case where the block
+    /// does not get added to the early-attester cache.
     pub imported: Option<Duration>,
+    /// Time after `imported`.
     pub set_as_head: Option<Duration>,
 }
 
@@ -35,14 +55,34 @@ impl BlockDelays {
     fn new(times: Timestamps, slot_start_time: Duration) -> Self {
         let observed = times
             .observed
             .and_then(|observed_time| observed_time.checked_sub(slot_start_time));
+        let all_blobs_observed = times
+            .all_blobs_observed
+            .and_then(|all_blobs_observed| all_blobs_observed.checked_sub(slot_start_time));
+        let execution_time = times
+            .execution_time
+            .and_then(|execution_time| execution_time.checked_sub(times.observed?));
+        // Duration since UNIX epoch at which block became available.
+        let available_time = times.execution_time.map(|execution_time| {
+            std::cmp::max(execution_time, times.all_blobs_observed.unwrap_or_default())
+        });
+        // Duration from the start of the slot until the block became available.
+ let available_delay = + available_time.and_then(|available_time| available_time.checked_sub(slot_start_time)); + let attestable = times + .attestable + .and_then(|attestable_time| attestable_time.checked_sub(slot_start_time)); let imported = times .imported - .and_then(|imported_time| imported_time.checked_sub(times.observed?)); + .and_then(|imported_time| imported_time.checked_sub(available_time?)); let set_as_head = times .set_as_head .and_then(|set_as_head_time| set_as_head_time.checked_sub(times.imported?)); BlockDelays { observed, + all_blobs_observed, + execution_time, + available: available_delay, + attestable, imported, set_as_head, } @@ -109,6 +149,53 @@ impl BlockTimesCache { } } + pub fn set_time_blob_observed( + &mut self, + block_root: BlockRoot, + slot: Slot, + timestamp: Duration, + ) { + let block_times = self + .cache + .entry(block_root) + .or_insert_with(|| BlockTimesCacheValue::new(slot)); + if block_times + .timestamps + .all_blobs_observed + .map_or(true, |prev| timestamp > prev) + { + block_times.timestamps.all_blobs_observed = Some(timestamp); + } + } + + pub fn set_execution_time(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { + let block_times = self + .cache + .entry(block_root) + .or_insert_with(|| BlockTimesCacheValue::new(slot)); + if block_times + .timestamps + .execution_time + .map_or(true, |prev| timestamp < prev) + { + block_times.timestamps.execution_time = Some(timestamp); + } + } + + pub fn set_time_attestable(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { + let block_times = self + .cache + .entry(block_root) + .or_insert_with(|| BlockTimesCacheValue::new(slot)); + if block_times + .timestamps + .attestable + .map_or(true, |prev| timestamp < prev) + { + block_times.timestamps.attestable = Some(timestamp); + } + } + pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { let block_times = self .cache diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 3cd8a7f259b..38648949f9c 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -666,8 +666,7 @@ type PayloadVerificationHandle = /// - Parent is known /// - Signatures /// - State root check -/// - Per block processing -/// - Blobs sidecar has been validated if present +/// - Block processing /// /// Note: a `ExecutionPendingBlock` is not _forever_ valid to be imported, it may later become invalid /// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index ced4eda05cf..734575d2c0d 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -1405,13 +1405,6 @@ fn observe_head_block_delays( // Do not store metrics if the block was > 4 slots old, this helps prevent noise during // sync. if !block_from_sync { - // Observe the total block delay. This is the delay between the time the slot started - // and when the block was set as head. - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME, - block_delay_total, - ); - // Observe the delay between when we imported the block and when we set the block as // head. 
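// Aside: a worked example (made-up values, not from this diff) of the
// availability rule above: available_time = max(execution_time,
// all_blobs_observed) on absolute timestamps, then converted to a delay
// from the slot start.
use std::time::Duration;

fn main() {
    let slot_start = Duration::from_secs(1_700_000_000);
    let execution_done = slot_start + Duration::from_millis(1_200); // EL verdict
    let all_blobs_seen = slot_start + Duration::from_millis(1_500); // last blob

    let available_time = std::cmp::max(execution_done, all_blobs_seen);
    let available_delay = available_time.checked_sub(slot_start);

    // The blobs arrived after EL verification finished, so they bound
    // availability at 1500ms after the slot start.
    assert_eq!(available_delay, Some(Duration::from_millis(1_500)));
}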
let block_delays = block_times_cache.get_block_delays( @@ -1421,34 +1414,120 @@ fn observe_head_block_delays( .unwrap_or_else(|| Duration::from_secs(0)), ); - metrics::observe_duration( - &metrics::BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME, + // Update all the metrics + + // Convention here is to use "Time" to indicate the duration of the event and "Delay" + // to indicate the time since the start of the slot. + // + // Observe the total block delay. This is the delay between the time the slot started + // and when the block was set as head. + metrics::set_gauge( + &metrics::BEACON_BLOCK_DELAY_TOTAL, + block_delay_total.as_millis() as i64, + ); + + // The time at which the beacon block was first observed to be processed + metrics::set_gauge( + &metrics::BEACON_BLOCK_DELAY_OBSERVED_SLOT_START, block_delays .observed - .unwrap_or_else(|| Duration::from_secs(0)), + .unwrap_or_else(|| Duration::from_secs(0)) + .as_millis() as i64, + ); + + // The time from the start of the slot when all blobs have been observed. Technically this + // is the time we last saw a blob related to this block/slot. + metrics::set_gauge( + &metrics::BEACON_BLOB_DELAY_ALL_OBSERVED_SLOT_START, + block_delays + .all_blobs_observed + .unwrap_or_else(|| Duration::from_secs(0)) + .as_millis() as i64, ); - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME, + // The time it took to check the validity with the EL + metrics::set_gauge( + &metrics::BEACON_BLOCK_DELAY_EXECUTION_TIME, + block_delays + .execution_time + .unwrap_or_else(|| Duration::from_secs(0)) + .as_millis() as i64, + ); + + // The time the block became available after the start of the slot. Available here means + // that all the blobs have arrived and the block has been verified by the execution layer. + metrics::set_gauge( + &metrics::BEACON_BLOCK_DELAY_AVAILABLE_SLOT_START, + block_delays + .available + .unwrap_or_else(|| Duration::from_secs(0)) + .as_millis() as i64, + ); + + // The time the block became attestable after the start of the slot. + metrics::set_gauge( + &metrics::BEACON_BLOCK_DELAY_ATTESTABLE_SLOT_START, + block_delays + .attestable + .unwrap_or_else(|| Duration::from_secs(0)) + .as_millis() as i64, + ); + + // The time the block was imported since becoming available. + metrics::set_gauge( + &metrics::BEACON_BLOCK_DELAY_IMPORTED_TIME, + block_delays + .imported + .unwrap_or_else(|| Duration::from_secs(0)) + .as_millis() as i64, + ); + + // The time the block was imported and setting it as head + metrics::set_gauge( + &metrics::BEACON_BLOCK_DELAY_HEAD_IMPORTED_TIME, block_delays .set_as_head - .unwrap_or_else(|| Duration::from_secs(0)), + .unwrap_or_else(|| Duration::from_secs(0)) + .as_millis() as i64, ); // If the block was enshrined as head too late for attestations to be created for it, // log a debug warning and increment a metric. 
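// Aside: the per-stage values above are exported as `IntGauge`s in
// milliseconds rather than histograms. A minimal sketch using the raw
// `prometheus` crate (Lighthouse wraps this in `lighthouse_metrics`; the
// helper name is assumed):
use prometheus::IntGauge;
use std::time::Duration;

fn set_delay_gauge(gauge: &IntGauge, delay: Option<Duration>) {
    // Missing stages are reported as zero, mirroring the
    // `unwrap_or_else(|| Duration::from_secs(0))` pattern above.
    gauge.set(delay.unwrap_or_default().as_millis() as i64);
}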
+ let format_delay = |delay: &Option| { + delay.map_or("unknown".to_string(), |d| format!("{}", d.as_millis())) + }; if late_head { - metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL); + metrics::inc_counter(&metrics::BEACON_BLOCK_DELAY_HEAD_SLOT_START_EXCEEDED_TOTAL); debug!( log, "Delayed head block"; "block_root" => ?head_block_root, "proposer_index" => head_block_proposer_index, "slot" => head_block_slot, - "block_delay" => ?block_delay_total, - "observed_delay" => ?block_delays.observed, - "imported_delay" => ?block_delays.imported, - "set_as_head_delay" => ?block_delays.set_as_head, + "total_delay_ms" => block_delay_total.as_millis(), + "observed_delay_ms" => format_delay(&block_delays.observed), + "blob_delay_ms" => format_delay(&block_delays.all_blobs_observed), + "execution_time_ms" => format_delay(&block_delays.execution_time), + "available_delay_ms" => format_delay(&block_delays.available), + "attestable_delay_ms" => format_delay(&block_delays.attestable), + "imported_time_ms" => format_delay(&block_delays.imported), + "set_as_head_time_ms" => format_delay(&block_delays.set_as_head), + ); + } else { + debug!( + log, + "On-time head block"; + "block_root" => ?head_block_root, + "proposer_index" => head_block_proposer_index, + "slot" => head_block_slot, + "total_delay_ms" => block_delay_total.as_millis(), + "observed_delay_ms" => format_delay(&block_delays.observed), + "blob_delay_ms" => format_delay(&block_delays.all_blobs_observed), + "execution_time_ms" => format_delay(&block_delays.execution_time), + "available_delay_ms" => format_delay(&block_delays.available), + "attestable_delay_ms" => format_delay(&block_delays.attestable), + "imported_time_ms" => format_delay(&block_delays.imported), + "set_as_head_time_ms" => format_delay(&block_delays.set_as_head), ); } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 3ef105c6d34..dd0d97b1dae 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -14,6 +14,7 @@ use std::fmt; use std::fmt::Debug; use std::num::NonZeroUsize; use std::sync::Arc; +use std::time::Duration; use task_executor::TaskExecutor; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; @@ -176,8 +177,14 @@ impl DataAvailabilityChecker { return Err(AvailabilityCheckError::KzgNotInitialized); }; - let verified_blobs = KzgVerifiedBlobList::new(Vec::from(blobs).into_iter().flatten(), kzg) - .map_err(AvailabilityCheckError::Kzg)?; + let seen_timestamp = self + .slot_clock + .now_duration() + .ok_or(AvailabilityCheckError::SlotClockError)?; + + let verified_blobs = + KzgVerifiedBlobList::new(Vec::from(blobs).into_iter().flatten(), kzg, seen_timestamp) + .map_err(AvailabilityCheckError::Kzg)?; self.availability_cache .put_kzg_verified_blobs(block_root, verified_blobs) @@ -225,6 +232,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: None, + blobs_available_timestamp: None, })) } } @@ -244,6 +252,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: verified_blobs, + blobs_available_timestamp: None, })) } } @@ -289,6 +298,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: None, + blobs_available_timestamp: None, })) } } @@ -303,6 +313,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: verified_blobs, + blobs_available_timestamp: 
None, })) } } @@ -462,6 +473,8 @@ pub struct AvailableBlock { block_root: Hash256, block: Arc>, blobs: Option>, + /// Timestamp at which this block first became available (UNIX timestamp, time since 1970). + blobs_available_timestamp: Option, } impl AvailableBlock { @@ -474,6 +487,7 @@ impl AvailableBlock { block_root, block, blobs, + blobs_available_timestamp: None, } } @@ -488,6 +502,10 @@ impl AvailableBlock { self.blobs.as_ref() } + pub fn blobs_available_timestamp(&self) -> Option { + self.blobs_available_timestamp + } + pub fn deconstruct( self, ) -> ( @@ -499,6 +517,7 @@ impl AvailableBlock { block_root, block, blobs, + blobs_available_timestamp: _, } = self; (block_root, block, blobs) } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index 0804fe3b9ab..6c524786bfa 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -19,6 +19,7 @@ pub enum Error { ParentStateMissing(Hash256), BlockReplayError(state_processing::BlockReplayError), RebuildingStateCaches(BeaconStateError), + SlotClockError, } pub enum ErrorCategory { @@ -39,7 +40,8 @@ impl Error { | Error::Unexpected | Error::ParentStateMissing(_) | Error::BlockReplayError(_) - | Error::RebuildingStateCaches(_) => ErrorCategory::Internal, + | Error::RebuildingStateCaches(_) + | Error::SlotClockError => ErrorCategory::Internal, Error::Kzg(_) | Error::BlobIndexInvalid(_) | Error::KzgCommitmentMismatch { .. } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index edd981e6ddb..f4c1bc308c0 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -204,6 +204,12 @@ impl PendingComponents { executed_block, } = self; + let blobs_available_timestamp = verified_blobs + .iter() + .flatten() + .map(|blob| blob.seen_timestamp()) + .max(); + let Some(diet_executed_block) = executed_block else { return Err(AvailabilityCheckError::Unexpected); }; @@ -231,6 +237,7 @@ impl PendingComponents { block_root, block, blobs: Some(verified_blobs), + blobs_available_timestamp, }; Ok(Availability::Available(Box::new( AvailableExecutedBlock::new(available_block, import_data, payload_verification_outcome), diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 6cb0b6fd766..df718413cc0 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -847,37 +847,55 @@ lazy_static! { "Number of attester slashings seen", &["src", "validator"] ); +} + +// Prevent recursion limit +lazy_static! 
{ /* * Block Delay Metrics */ - pub static ref BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( - "beacon_block_observed_slot_start_delay_time", - "Duration between the start of the block's slot and the time the block was observed.", - // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] - decimal_buckets(-1,2) - ); - pub static ref BEACON_BLOCK_IMPORTED_OBSERVED_DELAY_TIME: Result = try_create_histogram_with_buckets( - "beacon_block_imported_observed_delay_time", - "Duration between the time the block was observed and the time when it was imported.", - // [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5] - decimal_buckets(-2,0) - ); - pub static ref BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME: Result = try_create_histogram_with_buckets( - "beacon_block_head_imported_delay_time", - "Duration between the time the block was imported and the time when it was set as head.", - // [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5] - decimal_buckets(-2,-1) - ); - pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( - "beacon_block_head_slot_start_delay_time", + pub static ref BEACON_BLOCK_DELAY_TOTAL: Result = try_create_int_gauge( + "beacon_block_delay_total", "Duration between the start of the block's slot and the time when it was set as head.", - // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] - decimal_buckets(-1,2) ); - pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL: Result = try_create_int_counter( - "beacon_block_head_slot_start_delay_exceeded_total", - "Triggered when the duration between the start of the block's slot and the current time \ + + pub static ref BEACON_BLOCK_DELAY_OBSERVED_SLOT_START: Result = try_create_int_gauge( + "beacon_block_delay_observed_slot_start", + "Duration between the start of the block's slot and the time the block was observed.", + ); + + pub static ref BEACON_BLOB_DELAY_ALL_OBSERVED_SLOT_START: Result = try_create_int_gauge( + "beacon_blob_delay_all_observed_slot_start", + "Duration between the start of the block's slot and the time the block was observed.", + ); + + pub static ref BEACON_BLOCK_DELAY_EXECUTION_TIME: Result = try_create_int_gauge( + "beacon_block_delay_execution_time", + "The duration in verifying the block with the execution layer.", + ); + + pub static ref BEACON_BLOCK_DELAY_AVAILABLE_SLOT_START: Result = try_create_int_gauge( + "beacon_block_delay_available_slot_start", + "Duration between the time that block became available and the start of the slot.", + ); + pub static ref BEACON_BLOCK_DELAY_ATTESTABLE_SLOT_START: Result = try_create_int_gauge( + "beacon_block_delay_attestable_slot_start", + "Duration between the time that block became attestable and the start of the slot.", + ); + + pub static ref BEACON_BLOCK_DELAY_IMPORTED_TIME: Result = try_create_int_gauge( + "beacon_block_delay_imported_time", + "Duration between the time the block became available and the time when it was imported.", + ); + + pub static ref BEACON_BLOCK_DELAY_HEAD_IMPORTED_TIME: Result = try_create_int_gauge( + "beacon_block_delay_head_imported_time", + "Duration between the time that block was imported and the time when it was set as head.", + ); + pub static ref BEACON_BLOCK_DELAY_HEAD_SLOT_START_EXCEEDED_TOTAL: Result = try_create_int_counter( + "beacon_block_delay_head_slot_start_exceeded_total", + "A counter that is triggered when the duration between the start of the block's slot and the current time \ will result in failed attestations.", ); @@ -1130,11 +1148,9 @@ lazy_static! 
@@ -1130,11 +1148,9 @@ lazy_static! {
     /*
      * Availability related metrics
      */
-    pub static ref BLOCK_AVAILABILITY_DELAY: Result<Histogram> = try_create_histogram_with_buckets(
+    pub static ref BLOCK_AVAILABILITY_DELAY: Result<IntGauge> = try_create_int_gauge(
         "block_availability_delay",
         "Duration between start of the slot and the time at which all components of the block are available.",
-        // Create a custom bucket list for greater granularity in block delay
-        Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0])
     );

     /*
diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs
index 22410976c9d..3e7bf7f561d 100644
--- a/beacon_node/execution_layer/src/lib.rs
+++ b/beacon_node/execution_layer/src/lib.rs
@@ -24,7 +24,7 @@ use payload_status::process_payload_status;
 pub use payload_status::PayloadStatus;
 use sensitive_url::SensitiveUrl;
 use serde::{Deserialize, Serialize};
-use slog::{crit, debug, error, info, trace, warn, Logger};
+use slog::{crit, debug, error, info, warn, Logger};
 use slot_clock::SlotClock;
 use std::collections::HashMap;
 use std::fmt;
@@ -1331,15 +1331,11 @@ impl<E: EthSpec> ExecutionLayer<E> {
             &metrics::EXECUTION_LAYER_REQUEST_TIMES,
             &[metrics::NEW_PAYLOAD],
         );
+        let timer = std::time::Instant::now();
+
+        let block_number = new_payload_request.block_number();
         let block_hash = new_payload_request.block_hash();
-        trace!(
-            self.log(),
-            "Issuing engine_newPayload";
-            "parent_hash" => ?new_payload_request.parent_hash(),
-            "block_hash" => ?block_hash,
-            "block_number" => ?new_payload_request.block_number(),
-        );
+        let parent_hash = new_payload_request.parent_hash();

         let result = self
             .engine()
             .await;

         if let Ok(status) = &result {
+            let status_str = <&'static str>::from(status.status);
             metrics::inc_counter_vec(
                 &metrics::EXECUTION_LAYER_PAYLOAD_STATUS,
-                &["new_payload", status.status.into()],
+                &["new_payload", status_str],
+            );
+            debug!(
+                self.log(),
+                "Processed engine_newPayload";
+                "status" => status_str,
+                "parent_hash" => ?parent_hash,
+                "block_hash" => ?block_hash,
+                "block_number" => block_number,
+                "response_time_ms" => timer.elapsed().as_millis()
             );
         }
         *self.inner.last_new_payload_errored.write().await = result.is_err();
diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs
index d19a41a28fc..d3804fbed8d 100644
--- a/beacon_node/network/src/metrics.rs
+++ b/beacon_node/network/src/metrics.rs
@@ -248,50 +248,41 @@ lazy_static! {
     /*
      * Block Delay Metrics
      */
-    pub static ref BEACON_BLOCK_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
-        "beacon_block_gossip_propagation_verification_delay_time",
-        "Duration between when the block is received and when it is verified for propagation.",
-        // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5]
-        decimal_buckets(-3,-1)
-    );
-    pub static ref BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
-        "beacon_block_gossip_slot_start_delay_time",
-        "Duration between when the block is received and the start of the slot it belongs to.",
-        // Create a custom bucket list for greater granularity in block delay
-        Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0])
-        // NOTE: Previous values, which we may want to switch back to.
-        // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50]
-        //decimal_buckets(-1,2)
-
+    pub static ref BEACON_BLOCK_DELAY_GOSSIP: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_gossip",
+        "Delay between the start of the slot and the time the block was first seen on gossip."
+    );
+    pub static ref BEACON_BLOCK_DELAY_GOSSIP_VERIFICATION: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_gossip_verification",
+        "Duration between the time the block was received on gossip and the time it was verified for propagation."
     );
-    pub static ref BEACON_BLOCK_LAST_DELAY: Result<IntGauge> = try_create_int_gauge(
-        "beacon_block_last_delay",
-        "Keeps track of the last block's delay from the start of the slot"
+    pub static ref BEACON_BLOCK_DELAY_FULL_VERIFICATION: Result<IntGauge> = try_create_int_gauge(
+        "beacon_block_delay_full_verification",
+        "The time taken to fully verify and import a beacon block."
     );
-    pub static ref BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL: Result<IntCounter> = try_create_int_counter(
-        "beacon_block_gossip_arrived_late_total",
+    pub static ref BEACON_BLOCK_DELAY_GOSSIP_ARRIVED_LATE_TOTAL: Result<IntCounter> = try_create_int_counter(
+        "beacon_block_delay_gossip_arrived_late_total",
         "Count of times when a gossip block arrived from the network later than the attestation deadline.",
     );

     /*
      * Blob Delay Metrics
      */
-    pub static ref BEACON_BLOB_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
-        "beacon_blob_gossip_propagation_verification_delay_time",
-        "Duration between when the blob is received over gossip and when it is verified for propagation.",
-        // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5]
-        decimal_buckets(-3,-1)
-    );
-    pub static ref BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
-        "beacon_blob_gossip_slot_start_delay_time",
-        "Duration between when the blob is received over gossip and the start of the slot it belongs to.",
-        // Create a custom bucket list for greater granularity in block delay
-        Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0])
-        // NOTE: Previous values, which we may want to switch back to.
-        // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50]
-        //decimal_buckets(-1,2)
+    pub static ref BEACON_BLOB_DELAY_GOSSIP: Result<IntGauge> = try_create_int_gauge(
+        "beacon_blob_delay_gossip",
+        "Delay between the start of the slot and the time the blob was first seen on gossip."
+    );
+
+    pub static ref BEACON_BLOB_DELAY_GOSSIP_VERIFICATION: Result<IntGauge> = try_create_int_gauge(
+        "beacon_blob_delay_gossip_verification",
+        "Duration between the time the blob was received on gossip and the time it was verified for propagation."
+    );
+    pub static ref BEACON_BLOB_DELAY_FULL_VERIFICATION: Result<IntGauge> = try_create_int_gauge(
+        "beacon_blob_delay_full_verification",
+        "The time taken to fully verify a beacon blob."
     );
+
     pub static ref BEACON_BLOB_RPC_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
         "beacon_blob_rpc_slot_start_delay_time",
         "Duration between when a blob is received over rpc and the start of the slot it belongs to.",
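The gossip-delay gauges above are fed from durations measured relative to the UNIX epoch, so values are comparable across nodes on the network. A minimal sketch of that arithmetic (the helper name is illustrative, not from the patch):

    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    /// Elapsed time since `seen_duration` (itself a duration since the UNIX epoch),
    /// or `None` if the system clock is unavailable or has gone backwards.
    fn delay_since(seen_duration: Duration) -> Option<Duration> {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .ok()
            .and_then(|now| now.checked_sub(seen_duration))
    }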
@@ -302,10 +293,6 @@ lazy_static! {
         //decimal_buckets(-1,2)
     );

-    pub static ref BEACON_BLOB_LAST_DELAY: Result<IntGauge> = try_create_int_gauge(
-        "beacon_blob_last_delay",
-        "Keeps track of the last blob's delay from the start of the slot"
-    );

     pub static ref BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL: Result<IntCounter> = try_create_int_counter(
         "beacon_blob_gossip_arrived_late_total",
diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs
index f7bba900372..7b8826bd853 100644
--- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs
+++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs
@@ -27,7 +27,7 @@ use std::fs;
 use std::io::Write;
 use std::path::PathBuf;
 use std::sync::Arc;
-use std::time::{Duration, SystemTime, UNIX_EPOCH};
+use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
 use store::hot_cold_store::HotColdDBError;
 use tokio::sync::mpsc;
 use types::{
@@ -615,8 +615,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         let commitment = blob_sidecar.kzg_commitment;
         let delay = get_slot_delay_ms(seen_duration, slot, &self.chain.slot_clock);
         // Log metrics to track delay from other nodes on the network.
-        metrics::observe_duration(&metrics::BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME, delay);
-        metrics::set_gauge(&metrics::BEACON_BLOB_LAST_DELAY, delay.as_millis() as i64);
+        metrics::set_gauge(&metrics::BEACON_BLOB_DELAY_GOSSIP, delay.as_millis() as i64);
         match self
             .chain
             .verify_blob_sidecar_for_gossip(blob_sidecar, blob_index)
@@ -654,9 +653,9 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
                     .ok()
                     .and_then(|now| now.checked_sub(seen_duration))
                 {
-                    metrics::observe_duration(
-                        &metrics::BEACON_BLOB_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME,
-                        duration,
+                    metrics::set_gauge(
+                        &metrics::BEACON_BLOB_DELAY_GOSSIP_VERIFICATION,
+                        duration.as_millis() as i64,
                     );
                 }
                 self.process_gossip_verified_blob(peer_id, gossip_verified_blob, seen_duration)
@@ -747,9 +746,9 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         self: &Arc<Self>,
         peer_id: PeerId,
         verified_blob: GossipVerifiedBlob<T>,
-        // This value is not used presently, but it might come in handy for debugging.
         _seen_duration: Duration,
     ) {
+        let processing_start_time = Instant::now();
         let block_root = verified_blob.block_root();
         let blob_slot = verified_blob.slot();
         let blob_index = verified_blob.id().index;
@@ -764,6 +763,11 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
                     "block_root" => %block_root
                 );
                 self.chain.recompute_head_at_current_slot().await;
+
+                metrics::set_gauge(
+                    &metrics::BEACON_BLOB_DELAY_FULL_VERIFICATION,
+                    processing_start_time.elapsed().as_millis() as i64,
+                );
             }
             Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => {
                 trace!(
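By contrast, the `*_DELAY_FULL_VERIFICATION` gauges set above use a monotonic `Instant`, which is the right clock for measuring elapsed processing time on a single machine. A standalone sketch of the pattern (illustrative helper, not from the patch):

    use std::time::Instant;

    /// Run `f` and return its result together with the elapsed time in
    /// milliseconds, as the gauges above expect.
    fn timed<T>(f: impl FnOnce() -> T) -> (T, i64) {
        let start = Instant::now();
        let out = f();
        (out, start.elapsed().as_millis() as i64)
    }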
@@ -865,12 +869,9 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         let block_delay =
             get_block_delay_ms(seen_duration, block.message(), &self.chain.slot_clock);
         // Log metrics to track delay from other nodes on the network.
-        metrics::observe_duration(
-            &metrics::BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME,
-            block_delay,
-        );
+
         metrics::set_gauge(
-            &metrics::BEACON_BLOCK_LAST_DELAY,
+            &metrics::BEACON_BLOCK_DELAY_GOSSIP,
             block_delay.as_millis() as i64,
         );

@@ -898,7 +899,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         let verified_block = match verification_result {
             Ok(verified_block) => {
                 if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() {
-                    metrics::inc_counter(&metrics::BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL);
+                    metrics::inc_counter(&metrics::BEACON_BLOCK_DELAY_GOSSIP_ARRIVED_LATE_TOTAL);
                     debug!(
                         self.log,
                         "Gossip block arrived late";
@@ -923,9 +924,9 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
                     .ok()
                     .and_then(|now| now.checked_sub(seen_duration))
                 {
-                    metrics::observe_duration(
-                        &metrics::BEACON_BLOCK_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME,
-                        duration,
+                    metrics::set_gauge(
+                        &metrics::BEACON_BLOCK_DELAY_GOSSIP_VERIFICATION,
+                        duration.as_millis() as i64,
                     );
                 }

@@ -1130,9 +1131,9 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         verified_block: GossipVerifiedBlock<T>,
         reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
         invalid_block_storage: InvalidBlockStorage,
-        // This value is not used presently, but it might come in handy for debugging.
         _seen_duration: Duration,
     ) {
+        let processing_start_time = Instant::now();
         let block = verified_block.block.block_cloned();
         let block_root = verified_block.block_root;

@@ -1168,6 +1169,11 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
                 );

                 self.chain.recompute_head_at_current_slot().await;
+
+                metrics::set_gauge(
+                    &metrics::BEACON_BLOCK_DELAY_FULL_VERIFICATION,
+                    processing_start_time.elapsed().as_millis() as i64,
+                );
             }
             Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => {
                 trace!(

From 05fbbdd840304e34a34216ed8970cc32fc971f1e Mon Sep 17 00:00:00 2001
From: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
Date: Tue, 23 Apr 2024 09:46:49 -0500
Subject: [PATCH 11/13] Electra: Add Presets, Constants, & Config (#5606)

* Electra: Add Presets, Constants, & Config
---
 consensus/types/src/chain_spec.rs | 93 ++++++++++++++++++++++++++++++-
 consensus/types/src/eth_spec.rs   | 81 ++++++++++++++++++++++++---
 2 files changed, 165 insertions(+), 9 deletions(-)

diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index e9345ab14ea..e4f27d6873c 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -25,6 +25,7 @@ pub enum Domain {
     SyncCommittee,
     ContributionAndProof,
     SyncCommitteeSelectionProof,
+    Consolidation,
     ApplicationMask(ApplicationDomain),
 }

@@ -76,6 +77,7 @@ pub struct ChainSpec {
     pub genesis_fork_version: [u8; 4],
     pub bls_withdrawal_prefix_byte: u8,
     pub eth1_address_withdrawal_prefix_byte: u8,
+    pub compounding_withdrawal_prefix_byte: u8,

     /*
      * Time parameters
@@ -108,6 +110,7 @@ pub struct ChainSpec {
     pub(crate) domain_voluntary_exit: u32,
     pub(crate) domain_selection_proof: u32,
     pub(crate) domain_aggregate_and_proof: u32,
+    pub(crate) domain_consolidation: u32,

     /*
      * Fork choice
      */
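For context, a small sketch of how the three withdrawal-credential prefix bytes configured here are distinguished, per the consensus specs (the function itself is illustrative, not part of the patch):

    // First byte of a validator's 32-byte withdrawal credentials.
    const BLS_WITHDRAWAL_PREFIX_BYTE: u8 = 0x00;
    const ETH1_ADDRESS_WITHDRAWAL_PREFIX_BYTE: u8 = 0x01;
    const COMPOUNDING_WITHDRAWAL_PREFIX_BYTE: u8 = 0x02; // new in Electra

    fn classify(credentials: &[u8; 32]) -> &'static str {
        match credentials[0] {
            BLS_WITHDRAWAL_PREFIX_BYTE => "BLS withdrawal credentials",
            ETH1_ADDRESS_WITHDRAWAL_PREFIX_BYTE => "eth1 address withdrawal credentials",
            COMPOUNDING_WITHDRAWAL_PREFIX_BYTE => "compounding withdrawal credentials",
            _ => "unknown prefix",
        }
    }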
@@ -177,6 +180,15 @@ pub struct ChainSpec {
     pub electra_fork_version: [u8; 4],
     /// The Electra fork epoch is optional, with `None` representing "Electra never happens".
     pub electra_fork_epoch: Option<Epoch>,
+    pub unset_deposit_receipts_start_index: u64,
+    pub full_exit_request_amount: u64,
+    pub min_activation_balance: u64,
+    pub max_effective_balance_electra: u64,
+    pub min_slashing_penalty_quotient_electra: u64,
+    pub whistleblower_reward_quotient_electra: u64,
+    pub max_pending_partials_per_withdrawals_sweep: u64,
+    pub min_per_epoch_churn_limit_electra: u64,
+    pub max_per_epoch_activation_exit_churn_limit: u64,

     /*
      * Networking
@@ -364,7 +376,9 @@ impl ChainSpec {
         state: &BeaconState<E>,
     ) -> u64 {
         let fork_name = state.fork_name_unchecked();
-        if fork_name >= ForkName::Merge {
+        if fork_name >= ForkName::Electra {
+            self.min_slashing_penalty_quotient_electra
+        } else if fork_name >= ForkName::Merge {
             self.min_slashing_penalty_quotient_bellatrix
         } else if fork_name >= ForkName::Altair {
             self.min_slashing_penalty_quotient_altair
@@ -418,6 +432,7 @@ impl ChainSpec {
             Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof,
             Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(),
             Domain::BlsToExecutionChange => self.domain_bls_to_execution_change,
+            Domain::Consolidation => self.domain_consolidation,
         }
     }

@@ -602,6 +617,7 @@ impl ChainSpec {
             genesis_fork_version: [0; 4],
             bls_withdrawal_prefix_byte: 0x00,
             eth1_address_withdrawal_prefix_byte: 0x01,
+            compounding_withdrawal_prefix_byte: 0x02,

             /*
              * Time parameters
@@ -635,6 +651,7 @@ impl ChainSpec {
             domain_voluntary_exit: 4,
             domain_selection_proof: 5,
             domain_aggregate_and_proof: 6,
+            domain_consolidation: 0x0B,

             /*
              * Fork choice
@@ -709,6 +726,30 @@ impl ChainSpec {
              */
             electra_fork_version: [0x05, 0x00, 0x00, 0x00],
             electra_fork_epoch: None,
+            unset_deposit_receipts_start_index: u64::MAX,
+            full_exit_request_amount: 0,
+            min_activation_balance: option_wrapper(|| {
+                u64::checked_pow(2, 5)?.checked_mul(u64::checked_pow(10, 9)?)
+            })
+            .expect("calculation does not overflow"),
+            max_effective_balance_electra: option_wrapper(|| {
+                u64::checked_pow(2, 11)?.checked_mul(u64::checked_pow(10, 9)?)
+            })
+            .expect("calculation does not overflow"),
+            min_slashing_penalty_quotient_electra: u64::checked_pow(2, 12)
+                .expect("pow does not overflow"),
+            whistleblower_reward_quotient_electra: u64::checked_pow(2, 12)
+                .expect("pow does not overflow"),
+            max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 3)
+                .expect("pow does not overflow"),
+            min_per_epoch_churn_limit_electra: option_wrapper(|| {
+                u64::checked_pow(2, 7)?.checked_mul(u64::checked_pow(10, 9)?)
+            })
+            .expect("calculation does not overflow"),
+            max_per_epoch_activation_exit_churn_limit: option_wrapper(|| {
+                u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?)
+            })
+            .expect("calculation does not overflow"),

             /*
              * Network specific
@@ -874,6 +915,7 @@ impl ChainSpec {
             genesis_fork_version: [0x00, 0x00, 0x00, 0x64],
             bls_withdrawal_prefix_byte: 0x00,
             eth1_address_withdrawal_prefix_byte: 0x01,
+            compounding_withdrawal_prefix_byte: 0x02,

             /*
              * Time parameters
@@ -907,6 +949,7 @@ impl ChainSpec {
             domain_voluntary_exit: 4,
             domain_selection_proof: 5,
             domain_aggregate_and_proof: 6,
+            domain_consolidation: 0x0B,

             /*
              * Fork choice
@@ -983,6 +1026,30 @@ impl ChainSpec {
              */
             electra_fork_version: [0x05, 0x00, 0x00, 0x64],
             electra_fork_epoch: None,
+            unset_deposit_receipts_start_index: u64::MAX,
+            full_exit_request_amount: 0,
+            min_activation_balance: option_wrapper(|| {
+                u64::checked_pow(2, 5)?.checked_mul(u64::checked_pow(10, 9)?)
+            })
+            .expect("calculation does not overflow"),
+            max_effective_balance_electra: option_wrapper(|| {
+                u64::checked_pow(2, 11)?.checked_mul(u64::checked_pow(10, 9)?)
+            })
+            .expect("calculation does not overflow"),
+            min_slashing_penalty_quotient_electra: u64::checked_pow(2, 12)
+                .expect("pow does not overflow"),
+            whistleblower_reward_quotient_electra: u64::checked_pow(2, 12)
+                .expect("pow does not overflow"),
+            max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 3)
+                .expect("pow does not overflow"),
+            min_per_epoch_churn_limit_electra: option_wrapper(|| {
+                u64::checked_pow(2, 7)?.checked_mul(u64::checked_pow(10, 9)?)
+            })
+            .expect("calculation does not overflow"),
+            max_per_epoch_activation_exit_churn_limit: option_wrapper(|| {
+                u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?)
+            })
+            .expect("calculation does not overflow"),

             /*
              * Network specific
@@ -1206,6 +1273,13 @@ pub struct Config {
     #[serde(default = "default_blob_sidecar_subnet_count")]
     #[serde(with = "serde_utils::quoted_u64")]
     blob_sidecar_subnet_count: u64,
+
+    #[serde(default = "default_min_per_epoch_churn_limit_electra")]
+    #[serde(with = "serde_utils::quoted_u64")]
+    min_per_epoch_churn_limit_electra: u64,
+    #[serde(default = "default_max_per_epoch_activation_exit_churn_limit")]
+    #[serde(with = "serde_utils::quoted_u64")]
+    max_per_epoch_activation_exit_churn_limit: u64,
 }

 fn default_bellatrix_fork_version() -> [u8; 4] {
@@ -1320,6 +1394,14 @@ const fn default_blob_sidecar_subnet_count() -> u64 {
     6
 }

+const fn default_min_per_epoch_churn_limit_electra() -> u64 {
+    128_000_000_000
+}
+
+const fn default_max_per_epoch_activation_exit_churn_limit() -> u64 {
+    256_000_000_000
+}
+
 const fn default_epochs_per_subnet_subscription() -> u64 {
     256
 }
@@ -1496,6 +1578,10 @@ impl Config {
             max_request_blob_sidecars: spec.max_request_blob_sidecars,
             min_epochs_for_blob_sidecars_requests: spec.min_epochs_for_blob_sidecars_requests,
             blob_sidecar_subnet_count: spec.blob_sidecar_subnet_count,
+
+            min_per_epoch_churn_limit_electra: spec.min_per_epoch_churn_limit_electra,
+            max_per_epoch_activation_exit_churn_limit: spec
+                .max_per_epoch_activation_exit_churn_limit,
         }
     }

@@ -1563,6 +1649,8 @@ impl Config {
             max_request_blob_sidecars,
             min_epochs_for_blob_sidecars_requests,
             blob_sidecar_subnet_count,
+            min_per_epoch_churn_limit_electra,
+            max_per_epoch_activation_exit_churn_limit,
         } = self;

         if preset_base != E::spec_name().to_string().as_str() {
@@ -1623,6 +1711,8 @@ impl Config {
             max_request_blob_sidecars,
             min_epochs_for_blob_sidecars_requests,
             blob_sidecar_subnet_count,
+            min_per_epoch_churn_limit_electra,
+            max_per_epoch_activation_exit_churn_limit,
             // We need to re-derive any values that might have changed in the config.
             max_blocks_by_root_request: max_blocks_by_root_request_common(max_request_blocks),
@@ -1695,6 +1785,7 @@ mod tests {
             &spec,
         );
         test_domain(Domain::SyncCommittee, spec.domain_sync_committee, &spec);
+        test_domain(Domain::Consolidation, spec.domain_consolidation, &spec);

         // The builder domain index is zero
         let builder_domain_pre_mask = [0; 4];
diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs
index a2972b722a2..a700c5e9abb 100644
--- a/consensus/types/src/eth_spec.rs
+++ b/consensus/types/src/eth_spec.rs
@@ -3,8 +3,9 @@ use crate::*;
 use safe_arith::SafeArith;
 use serde::{Deserialize, Serialize};
 use ssz_types::typenum::{
-    bit::B0, UInt, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U16, U16777216,
-    U2, U2048, U256, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, U8192,
+    bit::B0, UInt, U0, U1, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U134217728,
+    U16, U16777216, U2, U2048, U256, U262144, U32, U4, U4096, U512, U6, U625, U64, U65536, U8,
+    U8192,
 };
 use ssz_types::typenum::{U17, U9};
 use std::fmt::{self, Debug};
@@ -137,7 +138,14 @@ pub trait EthSpec:
     /*
      * New in Electra
      */
-    type ElectraPlaceholder: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type PendingBalanceDepositsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type PendingPartialWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type PendingConsolidationsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type MaxConsolidations: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type MaxDepositReceiptsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type MaxAttesterSlashingsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type MaxAttestationsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type MaxWithdrawalRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq;

     fn default_spec() -> ChainSpec;

@@ -284,8 +292,44 @@ pub trait EthSpec:
         Self::KzgCommitmentInclusionProofDepth::to_usize()
     }

-    fn electra_placeholder() -> usize {
-        Self::ElectraPlaceholder::to_usize()
+    /// Returns the `PENDING_BALANCE_DEPOSITS_LIMIT` constant for this specification.
+    fn pending_balance_deposits_limit() -> usize {
+        Self::PendingBalanceDepositsLimit::to_usize()
+    }
+
+    /// Returns the `PENDING_PARTIAL_WITHDRAWALS_LIMIT` constant for this specification.
+    fn pending_partial_withdrawals_limit() -> usize {
+        Self::PendingPartialWithdrawalsLimit::to_usize()
+    }
+
+    /// Returns the `PENDING_CONSOLIDATIONS_LIMIT` constant for this specification.
+    fn pending_consolidations_limit() -> usize {
+        Self::PendingConsolidationsLimit::to_usize()
+    }
+
+    /// Returns the `MAX_CONSOLIDATIONS` constant for this specification.
+    fn max_consolidations() -> usize {
+        Self::MaxConsolidations::to_usize()
+    }
+
+    /// Returns the `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` constant for this specification.
+    fn max_deposit_receipts_per_payload() -> usize {
+        Self::MaxDepositReceiptsPerPayload::to_usize()
+    }
+
+    /// Returns the `MAX_ATTESTER_SLASHINGS_ELECTRA` constant for this specification.
+    fn max_attester_slashings_electra() -> usize {
+        Self::MaxAttesterSlashingsElectra::to_usize()
+    }
+
+    /// Returns the `MAX_ATTESTATIONS_ELECTRA` constant for this specification.
+    fn max_attestations_electra() -> usize {
+        Self::MaxAttestationsElectra::to_usize()
+    }
+
+    /// Returns the `MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD` constant for this specification.
+    fn max_withdrawal_requests_per_payload() -> usize {
+        Self::MaxWithdrawalRequestsPerPayload::to_usize()
+    }
 }
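As a sanity check on the Gwei presets used in this patch, the same checked arithmetic as `chain_spec.rs` can be exercised standalone (the helper name is illustrative):

    /// 2^pow_of_two ETH expressed in Gwei, or `None` on overflow.
    fn gwei(pow_of_two: u32) -> Option<u64> {
        u64::checked_pow(2, pow_of_two)?.checked_mul(u64::checked_pow(10, 9)?)
    }

    fn main() {
        // MIN_ACTIVATION_BALANCE: 2^5 ETH = 32 ETH
        assert_eq!(gwei(5), Some(32_000_000_000));
        // MAX_EFFECTIVE_BALANCE_ELECTRA: 2^11 ETH = 2048 ETH
        assert_eq!(gwei(11), Some(2_048_000_000_000));
    }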
@@ -337,7 +381,14 @@ impl EthSpec for MainnetEthSpec {
     type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch
     type MaxBlsToExecutionChanges = U16;
     type MaxWithdrawalsPerPayload = U16;
-    type ElectraPlaceholder = U16;
+    type PendingBalanceDepositsLimit = U134217728;
+    type PendingPartialWithdrawalsLimit = U134217728;
+    type PendingConsolidationsLimit = U262144;
+    type MaxConsolidations = U1;
+    type MaxDepositReceiptsPerPayload = U8192;
+    type MaxAttesterSlashingsElectra = U1;
+    type MaxAttestationsElectra = U8;
+    type MaxWithdrawalRequestsPerPayload = U16;

     fn default_spec() -> ChainSpec {
         ChainSpec::mainnet()
@@ -390,7 +441,14 @@ impl EthSpec for MinimalEthSpec {
         MaxBlsToExecutionChanges,
         MaxBlobsPerBlock,
         BytesPerFieldElement,
-        ElectraPlaceholder
+        PendingBalanceDepositsLimit,
+        PendingPartialWithdrawalsLimit,
+        PendingConsolidationsLimit,
+        MaxConsolidations,
+        MaxDepositReceiptsPerPayload,
+        MaxAttesterSlashingsElectra,
+        MaxAttestationsElectra,
+        MaxWithdrawalRequestsPerPayload
     });

     fn default_spec() -> ChainSpec {
@@ -442,7 +500,14 @@ impl EthSpec for GnosisEthSpec {
     type BytesPerFieldElement = U32;
     type BytesPerBlob = U131072;
     type KzgCommitmentInclusionProofDepth = U17;
-    type ElectraPlaceholder = U16;
+    type PendingBalanceDepositsLimit = U134217728;
+    type PendingPartialWithdrawalsLimit = U134217728;
+    type PendingConsolidationsLimit = U262144;
+    type MaxConsolidations = U1;
+    type MaxDepositReceiptsPerPayload = U8192;
+    type MaxAttesterSlashingsElectra = U1;
+    type MaxAttestationsElectra = U8;
+    type MaxWithdrawalRequestsPerPayload = U16;

     fn default_spec() -> ChainSpec {
         ChainSpec::gnosis()

From 76460ba838cf806613cb788d489020ae71c11ea0 Mon Sep 17 00:00:00 2001
From: antondlr
Date: Tue, 23 Apr 2024 19:58:47 +0200
Subject: [PATCH 12/13] Only `portable` builds (docker) (#5614)

* portable builds by default, build multiarch lcli
---
 .github/workflows/docker.yml | 113 ++++++++++++++++++-----------------
 lcli/Dockerfile.cross        |   6 ++
 2 files changed, 65 insertions(+), 54 deletions(-)
 create mode 100644 lcli/Dockerfile.cross

diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 54b355e631d..d1a8c9f6144 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -15,8 +15,6 @@ concurrency:
 env:
     DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
     DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
-    IMAGE_NAME: ${{ github.repository_owner }}/lighthouse
-    LCLI_IMAGE_NAME: ${{ github.repository_owner }}/lcli
     # Enable self-hosted runners for the sigp repo only.
     SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}

@@ -49,19 +47,15 @@ jobs:
       VERSION: ${{ env.VERSION }}
       VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }}
   build-docker-single-arch:
-    name: build-docker-${{ matrix.binary }}${{ matrix.features.version_suffix }}
+    name: build-docker-${{ matrix.binary }}-${{ matrix.cpu_arch }}
     # Use self-hosted runners only on the sigp repo.
    runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release"]') || 'ubuntu-22.04' }}
    strategy:
      matrix:
-        binary: [aarch64,
-                 aarch64-portable,
-                 x86_64,
-                 x86_64-portable]
-        features: [
-            {version_suffix: "", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc"},
-            {version_suffix: "-dev", env: "jemalloc,spec-minimal"}
-        ]
+        binary: [lighthouse,
+                 lcli]
+        cpu_arch: [aarch64,
+                   x86_64]
        include:
          - profile: maxperf
    env:
      VERSION: ${{ needs.extract-version.outputs.VERSION }}
      VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
-      FEATURE_SUFFIX: ${{ matrix.features.version_suffix }}
    steps:
      - uses: actions/checkout@v4
      - name: Update Rust
        if: env.SELF_HOSTED_RUNNERS == 'false'
        run: rustup update stable
      - name: Dockerhub login
        run: |
          echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
-      - name: Cross build Lighthouse binary
+
+      - name: Set env vars for Lighthouse
+        if: startsWith(matrix.binary, 'lighthouse')
+        run: |
+          echo "CROSS_FEATURES=gnosis,spec-minimal,slasher-lmdb,jemalloc" >> $GITHUB_ENV
+
+      - name: Set `make` command for Lighthouse
+        if: startsWith(matrix.binary, 'lighthouse')
+        run: |
+          echo "MAKE_CMD=build-${{ matrix.cpu_arch }}-portable" >> $GITHUB_ENV
+
+      - name: Set `make` command for lcli
+        if: startsWith(matrix.binary, 'lcli')
+        run: |
+          echo "MAKE_CMD=build-lcli-${{ matrix.cpu_arch }}" >> $GITHUB_ENV
+
+      - name: Cross build binaries
        run: |
          cargo install cross
-          env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ matrix.features.env }} make build-${{ matrix.binary }}
+          env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ env.CROSS_FEATURES }} make ${{ env.MAKE_CMD }}
+
      - name: Make bin dir
        run: mkdir ./bin
-      - name: Move cross-built binary into Docker scope (if ARM)
-        if: startsWith(matrix.binary, 'aarch64')
-        run: mv ./target/aarch64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin
-      - name: Move cross-built binary into Docker scope (if x86_64)
-        if: startsWith(matrix.binary, 'x86_64')
-        run: mv ./target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin
+
+      - name: Move cross-built binary into Docker scope
+        run: mv ./target/${{ matrix.cpu_arch }}-unknown-linux-gnu/${{ matrix.profile }}/${{ matrix.binary }} ./bin
+
      - name: Map aarch64 to arm64 short arch
-        if: startsWith(matrix.binary, 'aarch64')
+        if: startsWith(matrix.cpu_arch, 'aarch64')
        run: echo "SHORT_ARCH=arm64" >> $GITHUB_ENV
+
      - name: Map x86_64 to amd64 short arch
-        if: startsWith(matrix.binary, 'x86_64')
+        if: startsWith(matrix.cpu_arch, 'x86_64')
        run: echo "SHORT_ARCH=amd64" >> $GITHUB_ENV;
-      - name: Set modernity suffix
-        if: endsWith(matrix.binary, '-portable') != true
-        run: echo "MODERNITY_SUFFIX=-modern" >> $GITHUB_ENV;

      - name: Install QEMU
        if: env.SELF_HOSTED_RUNNERS == 'false'
        run:

      - name: Set up Docker Buildx
        if: env.SELF_HOSTED_RUNNERS == 'false'
        uses: docker/setup-buildx-action@v3

-      - name: Build and push
+      - name: Build and push (Lighthouse)
+        if: startsWith(matrix.binary, 'lighthouse')
        uses: docker/build-push-action@v5
        with:
          file: ./Dockerfile.cross
          context: .
          platforms: linux/${{ env.SHORT_ARCH }}
          push: true
-          tags: ${{ env.IMAGE_NAME }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}${{ env.MODERNITY_SUFFIX }}${{ env.FEATURE_SUFFIX }}
+          tags: |
+            ${{ github.repository_owner }}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}
+            ${{ github.repository_owner }}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-dev
+            ${{ github.repository_owner }}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-modern
+            ${{ github.repository_owner }}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-modern-dev
+
+      - name: Build and push (lcli)
+        if: startsWith(matrix.binary, 'lcli')
+        uses: docker/build-push-action@v5
+        with:
+          file: ./lcli/Dockerfile.cross
+          context: .
+          platforms: linux/${{ env.SHORT_ARCH }}
+          push: true
+          tags: |
+            ${{ github.repository_owner }}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}

  build-docker-multiarch:
-    name: build-docker-multiarch${{ matrix.modernity }}
+    name: build-docker-${{ matrix.binary }}-multiarch
    runs-on: ubuntu-22.04
-    needs: [build-docker-single-arch, extract-version]
    strategy:
      matrix:
-        modernity: ["", "-modern"]
+        binary: [lighthouse,
+                 lcli]
+    needs: [build-docker-single-arch, extract-version]
    env:
      VERSION: ${{ needs.extract-version.outputs.VERSION }}
      VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
    steps:
      - uses: actions/checkout@v4
      - name: Dockerhub login
        run: |
          echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin

-      - name: Create and push multiarch manifest
+      - name: Create and push multiarch manifests
        run: |
-          docker buildx imagetools create -t ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} \
-            ${IMAGE_NAME}:${VERSION}-arm64${VERSION_SUFFIX}${{ matrix.modernity }} \
-            ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }};
+          docker buildx imagetools create -t ${{ github.repository_owner }}/${{ matrix.binary }}:${VERSION}${VERSION_SUFFIX} \
+            ${{ github.repository_owner }}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \
+            ${{ github.repository_owner }}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX};

-  build-docker-lcli:
-    runs-on: ubuntu-22.04
-    needs: [extract-version]
-    env:
-      VERSION: ${{ needs.extract-version.outputs.VERSION }}
-      VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
-    steps:
-      - uses: actions/checkout@v4
-      - name: Dockerhub login
-        run: |
-          echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
-      - name: Build lcli and push
-        uses: docker/build-push-action@v5
-        with:
-          build-args: |
-            FEATURES=portable
-          context: .
-          push: true
-          file: ./lcli/Dockerfile
-          tags: ${{ env.LCLI_IMAGE_NAME }}:${{ env.VERSION }}${{ env.VERSION_SUFFIX }}
diff --git a/lcli/Dockerfile.cross b/lcli/Dockerfile.cross
new file mode 100644
index 00000000000..979688c9cf6
--- /dev/null
+++ b/lcli/Dockerfile.cross
@@ -0,0 +1,6 @@
+# This image is meant to enable cross-architecture builds.
+# It assumes the lcli binary has already been
+# compiled for `$TARGETPLATFORM` and moved to `./bin`.
+FROM --platform=$TARGETPLATFORM ubuntu:22.04
+RUN apt update && apt -y upgrade && apt clean && rm -rf /var/lib/apt/lists/*
+COPY ./bin/lcli /usr/local/bin/lcli

From 4cad1fcbbe1315e2d7d6171a797db7ce4e806f72 Mon Sep 17 00:00:00 2001
From: antondlr
Date: Tue, 23 Apr 2024 22:12:55 +0200
Subject: [PATCH 13/13] Add missing lcli targets in Makefile (#5633)

* Add missing lcli targets to Makefile
---
 Makefile | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/Makefile b/Makefile
index 4b2d0f6c5d5..4072ab1e6d8 100644
--- a/Makefile
+++ b/Makefile
@@ -82,6 +82,11 @@ build-aarch64:
 build-aarch64-portable:
	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked

+build-lcli-x86_64:
+	cross build --bin lcli --target x86_64-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked
+build-lcli-aarch64:
+	cross build --bin lcli --target aarch64-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked
+
 # Create a `.tar.gz` containing a binary for a specific target.
 define tarball_release_binary
	cp $(1)/lighthouse $(BIN_DIR)/lighthouse
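For reference, the new targets would presumably be driven the same way the Docker workflow above drives the existing ones. A hypothetical invocation (not part of the patch):

    # Cross-compile lcli for aarch64 with the maxperf profile.
    CROSS_PROFILE=maxperf make build-lcli-aarch64
    # The binary lands where the workflow's "move into Docker scope" step expects it.
    ls target/aarch64-unknown-linux-gnu/maxperf/lcli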