diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 81421844157..30e4211b88c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -79,15 +79,6 @@ jobs: if: startsWith(matrix.arch, 'x86_64-windows') run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - # ============================== - # Windows & Mac dependencies - # ============================== - - name: Install Protoc - if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows') - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - # ============================== # Builds # ============================== diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index ff7a9cf2f10..ab31b3a92bc 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -60,10 +60,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in release @@ -83,7 +79,7 @@ jobs: node-version: '14' - name: Install windows build tools run: | - choco install python protoc visualstudio2019-workload-vctools -y + choco install python visualstudio2019-workload-vctools -y npm config set msvs_version 2019 - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 @@ -108,10 +104,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run beacon_chain tests for all known forks run: make test-beacon-chain op-pool-tests: @@ -122,10 +114,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run operation_pool tests for all known forks run: make test-op-pool slasher-tests: @@ -148,10 +136,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in debug @@ -164,10 +148,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run state_transition_vectors in release. 
run: make run-state-transition-tests ef-tests-ubuntu: @@ -180,10 +160,6 @@ jobs: - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == false run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run consensus-spec-tests with blst, milagro and fake_crypto run: make test-ef dockerfile-ubuntu: @@ -206,10 +182,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim that starts from an eth1 contract @@ -222,10 +194,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim and go through the merge transition @@ -238,10 +206,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run the beacon chain sim without an eth1 connection run: cargo run --release --bin simulator no-eth1-sim syncing-simulator-ubuntu: @@ -252,10 +216,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 - name: Run the syncing simulator @@ -268,10 +228,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install geth run: | sudo add-apt-repository -y ppa:ethereum/ethereum @@ -303,10 +259,6 @@ jobs: dotnet-version: '6.0.201' - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run exec engine integration tests in release run: make test-exec-engine check-benchmarks: @@ -317,10 +269,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Typecheck benchmark code without running it run: make check-benches clippy: @@ -331,10 +279,6 @@ jobs: - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Lint code for quality and style with Clippy run: make lint - name: Certify Cargo.lock freshness @@ -347,10 +291,6 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust @ MSRV (${{ 
needs.extract-msrv.outputs.MSRV }}) run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run cargo check run: cargo check --workspace arbitrary-check: @@ -389,10 +329,6 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust (${{ env.PINNED_NIGHTLY }}) run: rustup toolchain install $PINNED_NIGHTLY - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install cargo-udeps run: cargo install cargo-udeps --locked --force - name: Create Cargo config dir @@ -410,7 +346,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install dependencies - run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler + run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang - name: Use Rust beta run: rustup override set beta - name: Run make diff --git a/Cross.toml b/Cross.toml index 9c3e441cba5..d5f7a5d5068 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,5 +1,5 @@ [target.x86_64-unknown-linux-gnu] -dockerfile = './scripts/cross/Dockerfile' +pre-build = ["apt-get install -y cmake clang-3.9"] [target.aarch64-unknown-linux-gnu] -dockerfile = './scripts/cross/Dockerfile' +pre-build = ["apt-get install -y cmake clang-3.9"] diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 2371096e530..386de008186 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -7,12 +7,9 @@ use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, - libp2p::{ - core::connection::ConnectionId, - swarm::{ - behaviour::{ConnectionEstablished, FromSwarm}, - NetworkBehaviour, - }, + libp2p::swarm::{ + behaviour::{ConnectionEstablished, FromSwarm}, + ConnectionId, NetworkBehaviour, }, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, @@ -167,7 +164,7 @@ pub async fn create_api_server_on_port( local_addr: EXTERNAL_ADDR.parse().unwrap(), send_back_addr: EXTERNAL_ADDR.parse().unwrap(), }; - let connection_id = ConnectionId::new(1); + let connection_id = ConnectionId::new_unchecked(1); pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, connection_id, diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index 9b156942112..785206b757b 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,6 +1,6 @@ use crate::Context; use beacon_chain::BeaconChainTypes; -use lighthouse_metrics::{Encoder, TextEncoder}; +use lighthouse_metrics::TextEncoder; use lighthouse_network::prometheus_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; @@ -9,7 +9,7 @@ pub use lighthouse_metrics::*; pub fn gather_prometheus_metrics( ctx: &Context, ) -> std::result::Result { - let mut buffer = vec![]; + let mut buffer = String::new(); let encoder = TextEncoder::new(); // There are two categories of metrics: @@ -50,7 +50,7 @@ pub fn gather_prometheus_metrics( } encoder - .encode(&lighthouse_metrics::gather(), &mut buffer) + .encode_utf8(&lighthouse_metrics::gather(), &mut buffer) .unwrap(); // encode gossipsub metrics also if they exist if let Some(registry) = 
ctx.gossipsub_registry.as_ref() { @@ -59,5 +59,5 @@ pub fn gather_prometheus_metrics( } } - String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) + Ok(buffer) } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 6d056d83505..f71845fed25 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -discv5 = { version = "0.3.0", features = ["libp2p"]} +discv5 = { version = "0.3.1", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } ssz_types = "0.5.3" @@ -40,15 +40,15 @@ directory = { path = "../../common/directory" } regex = "1.5.5" strum = { version = "0.24.0", features = ["derive"] } superstruct = "0.5.0" -prometheus-client = "0.18.0" +prometheus-client = "0.21.0" unused_port = { path = "../../common/unused_port" } delay_map = "0.3.0" void = "1" [dependencies.libp2p] -version = "0.50.0" +version = "0.52" default-features = false -features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"] +features = ["websocket", "identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"] [dev-dependencies] slog-term = "2.6.0" diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index eb29369c531..3b88932a369 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -6,10 +6,7 @@ use directory::{ DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, }; use discv5::{Discv5Config, Discv5ConfigBuilder}; -use libp2p::gossipsub::{ - FastMessageId, GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId, - RawGossipsubMessage, ValidationMode, -}; +use libp2p::gossipsub; use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -83,7 +80,7 @@ pub struct Config { /// Gossipsub configuration parameters. #[serde(skip)] - pub gs_config: GossipsubConfig, + pub gs_config: gossipsub::Config, /// Discv5 configuration parameters. #[serde(skip)] @@ -265,7 +262,7 @@ impl Default for Config { // Note: Using the default config here. Use `gossipsub_config` function for getting // Lighthouse specific configuration for gossipsub. - let gs_config = GossipsubConfigBuilder::default() + let gs_config = gossipsub::ConfigBuilder::default() .build() .expect("valid gossipsub configuration"); @@ -416,16 +413,16 @@ impl From for NetworkLoad { } /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. 
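Annotation (not part of the diff): the `config.rs` hunks above fold the old flat gossipsub names (`GossipsubConfig`, `GossipsubConfigBuilder`, `GossipsubMessage`, `RawGossipsubMessage`, `ValidationMode`) into the `gossipsub` module path of libp2p 0.52. A minimal sketch of the new spelling, using only calls that appear in this diff; the function name is illustrative:

```rust
use libp2p::gossipsub;

// Roughly: GossipsubConfigBuilder -> gossipsub::ConfigBuilder,
// GossipsubConfig -> gossipsub::Config, ValidationMode -> gossipsub::ValidationMode.
fn anonymous_config() -> gossipsub::Config {
    gossipsub::ConfigBuilder::default()
        .validate_messages() // require validation before propagation
        .validation_mode(gossipsub::ValidationMode::Anonymous)
        .build()
        .expect("valid gossipsub configuration")
}
```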
-pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> GossipsubConfig { +pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> gossipsub::Config { // The function used to generate a gossipsub message id // We use the first 8 bytes of SHA256(topic, data) for content addressing - let fast_gossip_message_id = |message: &RawGossipsubMessage| { + let fast_gossip_message_id = |message: &gossipsub::RawMessage| { let data = [message.topic.as_str().as_bytes(), &message.data].concat(); - FastMessageId::from(&Sha256::digest(data)[..8]) + gossipsub::FastMessageId::from(&Sha256::digest(data)[..8]) }; fn prefix( prefix: [u8; 4], - message: &GossipsubMessage, + message: &gossipsub::Message, fork_context: Arc, ) -> Vec { let topic_bytes = message.topic.as_str().as_bytes(); @@ -454,8 +451,8 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos } let is_merge_enabled = fork_context.fork_exists(ForkName::Merge); - let gossip_message_id = move |message: &GossipsubMessage| { - MessageId::from( + let gossip_message_id = move |message: &gossipsub::Message| { + gossipsub::MessageId::from( &Sha256::digest( prefix(MESSAGE_DOMAIN_VALID_SNAPPY, message, fork_context.clone()).as_slice(), )[..20], @@ -464,7 +461,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos let load = NetworkLoad::from(network_load); - GossipsubConfigBuilder::default() + gossipsub::ConfigBuilder::default() .max_transmit_size(gossip_max_size(is_merge_enabled)) .heartbeat_interval(load.heartbeat_interval) .mesh_n(load.mesh_n) @@ -477,7 +474,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large .history_gossip(load.history_gossip) .validate_messages() // require validation before propagation - .validation_mode(ValidationMode::Anonymous) + .validation_mode(gossipsub::ValidationMode::Anonymous) .duplicate_cache_time(DUPLICATE_CACHE_TIME) .message_id_fn(gossip_message_id) .fast_message_id_fn(fast_gossip_message_id) diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index f85c4b3e5cb..ef22f816a77 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -7,7 +7,7 @@ use super::ENR_FILENAME; use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::NetworkConfig; use discv5::enr::EnrKey; -use libp2p::core::identity::Keypair; +use libp2p::identity::Keypair; use slog::{debug, warn}; use ssz::{Decode, Encode}; use ssz_types::BitVector; @@ -133,7 +133,7 @@ pub fn build_or_load_enr( // Build the local ENR. // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers, if the CLI doesn't expressly forbid it. - let enr_key = CombinedKey::from_libp2p(&local_key)?; + let enr_key = CombinedKey::from_libp2p(local_key)?; let mut local_enr = build_enr::(&enr_key, config, enr_fork_id)?; use_or_load_enr(&enr_key, &mut local_enr, config, log)?; diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index 5ce0c55cacd..753da6292ca 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -1,10 +1,9 @@ //! ENR extension trait to support libp2p integration. 
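Annotation (not part of the diff): throughout these hunks, identity types move from `libp2p::core::identity` to `libp2p::identity`, and the enum-style constructors such as `Keypair::Secp256k1(..)` give way to `Into` conversions. A small sketch of that pattern under libp2p 0.52; the function name is made up for illustration:

```rust
use libp2p::identity::{secp256k1, Keypair};

// Previously: Keypair::Secp256k1(secp256k1::Keypair::generate())
fn generate_local_keypair() -> Keypair {
    let kp = secp256k1::Keypair::generate();
    kp.into()
}
```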
+ use crate::{Enr, Multiaddr, PeerId}; use discv5::enr::{CombinedKey, CombinedPublicKey}; -use libp2p::{ - core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol}, - identity::secp256k1, -}; +use libp2p::core::multiaddr::Protocol; +use libp2p::identity::{ed25519, secp256k1, KeyType, Keypair, PublicKey}; use tiny_keccak::{Hasher, Keccak}; /// Extend ENR for libp2p types. @@ -38,7 +37,8 @@ pub trait CombinedKeyPublicExt { /// Extend ENR CombinedKey for conversion to libp2p keys. pub trait CombinedKeyExt { /// Converts a libp2p key into an ENR combined key. - fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result; + fn from_libp2p(key: Keypair) -> Result; + /// Converts a [`secp256k1::Keypair`] into and Enr [`CombinedKey`]. fn from_secp256k1(key: &secp256k1::Keypair) -> CombinedKey; } @@ -93,14 +93,14 @@ impl EnrExt for Enr { if let Some(udp) = self.udp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -108,14 +108,14 @@ impl EnrExt for Enr { if let Some(udp6) = self.udp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Udp(udp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } if let Some(tcp6) = self.tcp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Tcp(tcp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -133,7 +133,7 @@ impl EnrExt for Enr { if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Tcp(tcp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -141,7 +141,7 @@ impl EnrExt for Enr { if let Some(tcp6) = self.tcp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Tcp(tcp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -159,7 +159,7 @@ impl EnrExt for Enr { if let Some(udp) = self.udp4() { let mut multiaddr: Multiaddr = ip.into(); multiaddr.push(Protocol::Udp(udp)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -167,7 +167,7 @@ impl EnrExt for Enr { if let Some(udp6) = self.udp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Udp(udp6)); - multiaddr.push(Protocol::P2p(peer_id.into())); + multiaddr.push(Protocol::P2p(peer_id)); multiaddrs.push(multiaddr); } } @@ -204,18 +204,16 @@ impl CombinedKeyPublicExt for CombinedPublicKey { match self { Self::Secp256k1(pk) => { let pk_bytes = pk.to_sec1_bytes(); - let libp2p_pk = libp2p::core::PublicKey::Secp256k1( - libp2p::core::identity::secp256k1::PublicKey::decode(&pk_bytes) - .expect("valid public key"), - ); + let libp2p_pk: PublicKey = secp256k1::PublicKey::try_from_bytes(&pk_bytes) + .expect("valid public key") + .into(); PeerId::from_public_key(&libp2p_pk) } Self::Ed25519(pk) => { let pk_bytes = pk.to_bytes(); - let libp2p_pk = libp2p::core::PublicKey::Ed25519( - 
libp2p::core::identity::ed25519::PublicKey::decode(&pk_bytes) - .expect("valid public key"), - ); + let libp2p_pk: PublicKey = ed25519::PublicKey::try_from_bytes(&pk_bytes) + .expect("valid public key") + .into(); PeerId::from_public_key(&libp2p_pk) } } @@ -223,18 +221,25 @@ impl CombinedKeyPublicExt for CombinedPublicKey { } impl CombinedKeyExt for CombinedKey { - fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result { - match key { - Keypair::Secp256k1(key) => Ok(CombinedKey::from_secp256k1(key)), - Keypair::Ed25519(key) => { + fn from_libp2p(key: Keypair) -> Result { + match key.key_type() { + KeyType::Secp256k1 => { + let key = key.try_into_secp256k1().expect("right key type"); + let secret = + discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes()) + .expect("libp2p key must be valid"); + Ok(CombinedKey::Secp256k1(secret)) + } + KeyType::Ed25519 => { + let key = key.try_into_ed25519().expect("right key type"); let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes( - &(key.encode()[..32]) + &(key.to_bytes()[..32]) .try_into() .expect("libp2p key must be valid"), ); Ok(CombinedKey::from(ed_keypair)) } - Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"), + _ => Err("Unsupported keypair kind"), } } fn from_secp256k1(key: &secp256k1::Keypair) -> Self { @@ -251,37 +256,46 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result { - let uncompressed_key_bytes = &pk.encode_uncompressed()[1..]; + })?; + + match public_key.key_type() { + KeyType::Secp256k1 => { + let pk = public_key + .clone() + .try_into_secp256k1() + .expect("right key type"); + let uncompressed_key_bytes = &pk.to_bytes_uncompressed()[1..]; let mut output = [0_u8; 32]; let mut hasher = Keccak::v256(); hasher.update(uncompressed_key_bytes); hasher.finalize(&mut output); Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length")) } - PublicKey::Ed25519(pk) => { - let uncompressed_key_bytes = pk.encode(); + KeyType::Ed25519 => { + let pk = public_key + .clone() + .try_into_ed25519() + .expect("right key type"); + let uncompressed_key_bytes = pk.to_bytes(); let mut output = [0_u8; 32]; let mut hasher = Keccak::v256(); hasher.update(&uncompressed_key_bytes); hasher.finalize(&mut output); Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length")) } - PublicKey::Ecdsa(_) => Err(format!( - "Unsupported public key (Ecdsa) from peer {}", - peer_id - )), + + _ => Err(format!("Unsupported public key from peer {}", peer_id)), } } #[cfg(test)] mod tests { + use super::*; #[test] @@ -290,9 +304,9 @@ mod tests { let sk_bytes = hex::decode(sk_hex).unwrap(); let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_slice(&sk_bytes).unwrap(); - let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap(); - let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into(); - let libp2p_kp = Keypair::Secp256k1(secp256k1_kp); + let libp2p_sk = secp256k1::SecretKey::try_from_bytes(sk_bytes).unwrap(); + let secp256k1_kp: secp256k1::Keypair = libp2p_sk.into(); + let libp2p_kp: Keypair = secp256k1_kp.into(); let peer_id = libp2p_kp.public().to_peer_id(); let enr = discv5::enr::EnrBuilder::new("v4") @@ -311,9 +325,9 @@ mod tests { &sk_bytes.clone().try_into().unwrap(), ); - let libp2p_sk = libp2p::identity::ed25519::SecretKey::from_bytes(sk_bytes).unwrap(); - let secp256k1_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into(); - let libp2p_kp = Keypair::Ed25519(secp256k1_kp); + let libp2p_sk = 
ed25519::SecretKey::try_from_bytes(sk_bytes).unwrap(); + let secp256k1_kp: ed25519::Keypair = libp2p_sk.into(); + let libp2p_kp: Keypair = secp256k1_kp.into(); let peer_id = libp2p_kp.public().to_peer_id(); let enr = discv5::enr::EnrBuilder::new("v4") diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index d4d0baef6b7..0f8ddc53c1b 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -16,19 +16,20 @@ pub use enr::{ Eth2Enr, }; pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; -pub use libp2p::core::identity::{Keypair, PublicKey}; +pub use libp2p::identity::{Keypair, PublicKey}; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; use libp2p::multiaddr::Protocol; use libp2p::swarm::behaviour::{DialFailure, FromSwarm}; -use libp2p::swarm::AddressScore; +use libp2p::swarm::THandlerInEvent; pub use libp2p::{ - core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, + core::{ConnectedPoint, Multiaddr}, + identity::PeerId, swarm::{ - dummy::ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction as NBAction, - NotifyHandler, PollParameters, SubstreamProtocol, + dummy::ConnectionHandler, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, + PollParameters, SubstreamProtocol, ToSwarm, }, }; use lru::LruCache; @@ -191,7 +192,7 @@ pub struct Discovery { impl Discovery { /// NOTE: Creating discovery requires running within a tokio execution environment. pub async fn new( - local_key: &Keypair, + local_key: Keypair, config: &NetworkConfig, network_globals: Arc>, log: &slog::Logger, @@ -925,22 +926,51 @@ impl Discovery { impl NetworkBehaviour for Discovery { // Discovery is not a real NetworkBehaviour... type ConnectionHandler = ConnectionHandler; - type OutEvent = DiscoveredPeers; + type ToSwarm = DiscoveredPeers; - fn new_handler(&mut self) -> Self::ConnectionHandler { - ConnectionHandler + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _local_addr: &Multiaddr, + _remote_addr: &Multiaddr, + ) -> Result, libp2p::swarm::ConnectionDenied> { + // TODO: we might want to check discovery's banned ips here in the future. + Ok(ConnectionHandler) + } + + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _addr: &Multiaddr, + _role_override: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + Ok(ConnectionHandler) + } + + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: ConnectionId, + _event: void::Void, + ) { } - // Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them. - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - if let Some(enr) = self.enr_of_peer(peer_id) { + fn handle_pending_outbound_connection( + &mut self, + _connection_id: ConnectionId, + maybe_peer: Option, + _addresses: &[Multiaddr], + _effective_role: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + if let Some(enr) = maybe_peer.and_then(|peer_id| self.enr_of_peer(&peer_id)) { // ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP // port is removed, which is assumed to be associated with the discv5 protocol (and // therefore irrelevant for other libp2p components). 
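Annotation (not part of the diff): the `CombinedKeyExt::from_libp2p` rewrite above reflects that `Keypair` is now opaque in libp2p 0.52. Instead of matching on `Keypair::Secp256k1(..)` / `Keypair::Ed25519(..)`, callers inspect `key_type()` and convert explicitly. A condensed sketch of that pattern; the helper name is illustrative:

```rust
use libp2p::identity::{KeyType, Keypair};

// Extract the raw secp256k1 secret, mirroring how the diff feeds it into
// discv5's `SigningKey::from_slice`.
fn secp256k1_secret_bytes(key: Keypair) -> Result<[u8; 32], &'static str> {
    match key.key_type() {
        KeyType::Secp256k1 => {
            let key = key.try_into_secp256k1().expect("right key type");
            Ok(key.secret().to_bytes())
        }
        _ => Err("Unsupported keypair kind"),
    }
}
```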
- enr.multiaddr_tcp() + Ok(enr.multiaddr_tcp()) } else { - // PeerId is not known - Vec::new() + Ok(vec![]) } } @@ -949,7 +979,7 @@ impl NetworkBehaviour for Discovery { &mut self, cx: &mut Context, _: &mut impl PollParameters, - ) -> Poll> { + ) -> Poll>> { if !self.started { return Poll::Pending; } @@ -960,7 +990,7 @@ impl NetworkBehaviour for Discovery { // Drive the queries and return any results from completed queries if let Some(peers) = self.poll_queries(cx) { // return the result to the peer manager - return Poll::Ready(NBAction::GenerateEvent(DiscoveredPeers { peers })); + return Poll::Ready(ToSwarm::GenerateEvent(DiscoveredPeers { peers })); } // Process the server event stream @@ -1034,10 +1064,7 @@ impl NetworkBehaviour for Discovery { if let Some(address) = addr { // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling // should handle this. - return Poll::Ready(NBAction::ReportObservedAddr { - address, - score: AddressScore::Finite(1), - }); + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(address)); } } Discv5Event::EnrAdded { .. } @@ -1065,8 +1092,9 @@ impl NetworkBehaviour for Discovery { | FromSwarm::ExpiredListenAddr(_) | FromSwarm::ListenerError(_) | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => { + | FromSwarm::NewExternalAddrCandidate(_) + | FromSwarm::ExternalAddrExpired(_) + | FromSwarm::ExternalAddrConfirmed(_) => { // Ignore events not relevant to discovery } } @@ -1077,10 +1105,8 @@ impl Discovery { fn on_dial_failure(&mut self, peer_id: Option, error: &DialError) { if let Some(peer_id) = peer_id { match error { - DialError::Banned - | DialError::LocalPeerId - | DialError::InvalidPeerId(_) - | DialError::ConnectionIo(_) + DialError::LocalPeerId { .. } + | DialError::Denied { .. } | DialError::NoAddresses | DialError::Transport(_) | DialError::WrongPeerId { .. } => { @@ -1088,9 +1114,7 @@ impl Discovery { debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); self.disconnect_peer(&peer_id); } - DialError::ConnectionLimit(_) - | DialError::DialPeerConditionFalse(_) - | DialError::Aborted => {} + DialError::DialPeerConditionFalse(_) | DialError::Aborted => {} } } } @@ -1139,8 +1163,8 @@ mod tests { false, &log, ); - let keypair = Keypair::Secp256k1(keypair); - Discovery::new(&keypair, &config, Arc::new(globals), &log) + let keypair = keypair.into(); + Discovery::new(keypair, &config, Arc::new(globals), &log) .await .unwrap() } diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 50991044522..99832b00432 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -21,7 +21,8 @@ use std::{ use strum::IntoEnumIterator; use types::{EthSpec, SyncSubnetId}; -pub use libp2p::core::{identity::Keypair, Multiaddr}; +pub use libp2p::core::Multiaddr; +pub use libp2p::identity::Keypair; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod peerdb; diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 24de83a61da..ce374bb9ab4 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -1,12 +1,14 @@ +//! Implementation of [`NetworkBehaviour`] for the [`PeerManager`]. 
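Annotation (not part of the diff): the `on_dial_failure` change further down this hunk is driven by the reshaped `DialError` enum in libp2p 0.52 (`Banned`, `ConnectionLimit`, `InvalidPeerId` and `ConnectionIo` are gone; denials now surface as `Denied`). A standalone sketch of the same classification, with an illustrative helper name:

```rust
use libp2p::swarm::DialError;

/// Returns true for failures after which the peer should be marked
/// disconnected in the DHT, mirroring the match in `Discovery::on_dial_failure`.
fn marks_peer_disconnected(error: &DialError) -> bool {
    match error {
        DialError::LocalPeerId { .. }
        | DialError::Denied { .. }
        | DialError::NoAddresses
        | DialError::Transport(_)
        | DialError::WrongPeerId { .. } => true,
        DialError::DialPeerConditionFalse(_) | DialError::Aborted => false,
    }
}
```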
+ use std::task::{Context, Poll}; use futures::StreamExt; use libp2p::core::ConnectedPoint; +use libp2p::identity::PeerId; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dummy::ConnectionHandler; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use libp2p::PeerId; +use libp2p::swarm::{ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; use slog::{debug, error}; use types::EthSpec; @@ -19,20 +21,24 @@ use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource}; impl NetworkBehaviour for PeerManager { type ConnectionHandler = ConnectionHandler; - - type OutEvent = PeerManagerEvent; + type ToSwarm = PeerManagerEvent; /* Required trait members */ - fn new_handler(&mut self) -> Self::ConnectionHandler { - ConnectionHandler + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: ConnectionId, + _event: libp2p::swarm::THandlerOutEvent, + ) { + // no events from the dummy handler } fn poll( &mut self, cx: &mut Context<'_>, _params: &mut impl PollParameters, - ) -> Poll> { + ) -> Poll> { // perform the heartbeat when necessary while self.heartbeat.poll_tick(cx).is_ready() { self.heartbeat(); @@ -84,19 +90,17 @@ impl NetworkBehaviour for PeerManager { } if !self.events.is_empty() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); + return Poll::Ready(ToSwarm::GenerateEvent(self.events.remove(0))); } else { self.events.shrink_to_fit(); } if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_first() { self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr); - let handler = self.new_handler(); - return Poll::Ready(NetworkBehaviourAction::Dial { + return Poll::Ready(ToSwarm::Dial { opts: DialOpts::peer_id(peer_id) .condition(PeerCondition::Disconnected) .build(), - handler, }); } @@ -110,13 +114,31 @@ impl NetworkBehaviour for PeerManager { endpoint, other_established, .. - }) => self.on_connection_established(peer_id, endpoint, other_established), + }) => { + // NOTE: We still need to handle the [`ConnectionEstablished`] because the + // [`NetworkBehaviour::handle_established_inbound_connection`] and + // [`NetworkBehaviour::handle_established_outbound_connection`] are fallible. This + // means another behaviour can kill the connection early, and we can't assume a + // peer as connected until this event is received. + self.on_connection_established(peer_id, endpoint, other_established) + } FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, remaining_established, .. }) => self.on_connection_closed(peer_id, remaining_established), - FromSwarm::DialFailure(DialFailure { peer_id, .. 
}) => self.on_dial_failure(peer_id), + FromSwarm::DialFailure(DialFailure { + peer_id, + error, + connection_id: _, + }) => { + debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %error); + self.on_dial_failure(peer_id); + } + FromSwarm::ExternalAddrConfirmed(_) => { + // TODO: we likely want to check this against our assumed external tcp + // address + } FromSwarm::AddressChange(_) | FromSwarm::ListenFailure(_) | FromSwarm::NewListener(_) @@ -124,13 +146,35 @@ impl NetworkBehaviour for PeerManager { | FromSwarm::ExpiredListenAddr(_) | FromSwarm::ListenerError(_) | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => { + | FromSwarm::NewExternalAddrCandidate(_) + | FromSwarm::ExternalAddrExpired(_) => { // The rest of the events we ignore since they are handled in their associated // `SwarmEvent` } } } + + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _local_addr: &libp2p::Multiaddr, + _remote_addr: &libp2p::Multiaddr, + ) -> Result, libp2p::swarm::ConnectionDenied> { + // TODO: we might want to check if we accept this peer or not in the future. + Ok(ConnectionHandler) + } + + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _addr: &libp2p::Multiaddr, + _role_override: libp2p::core::Endpoint, + ) -> Result, libp2p::swarm::ConnectionDenied> { + // TODO: we might want to check if we accept this peer or not in the future. + Ok(ConnectionHandler) + } } impl PeerManager { diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 8f2338ed15f..3c885920b73 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -7,7 +7,7 @@ use crate::rpc::{ use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, StreamExt}; -use libp2p::core::{InboundUpgrade, ProtocolName, UpgradeInfo}; +use libp2p::core::{InboundUpgrade, UpgradeInfo}; use ssz::Encode; use ssz_types::VariableList; use std::io; @@ -305,6 +305,12 @@ pub struct ProtocolId { protocol_id: String, } +impl AsRef for ProtocolId { + fn as_ref(&self) -> &str { + self.protocol_id.as_ref() + } +} + impl ProtocolId { /// Returns min and max size for messages of given protocol id requests. pub fn rpc_request_limits(&self) -> RpcLimits { @@ -395,12 +401,6 @@ impl ProtocolId { } } -impl ProtocolName for ProtocolId { - fn protocol_name(&self) -> &[u8] { - self.protocol_id.as_bytes() - } -} - /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index b02f6037fe0..4d167e46739 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -87,7 +87,7 @@ impl SelfRateLimiter { } /// Checks if the rate limiter allows the request. If it's allowed, returns the - /// [`NetworkBehaviourAction`] that should be emitted. When not allowed, the request is delayed + /// [`ToSwarm`] that should be emitted. When not allowed, the request is delayed /// until it can be sent. pub fn allows( &mut self, @@ -118,7 +118,7 @@ impl SelfRateLimiter { } /// Auxiliary function to deal with self rate limiting outcomes. 
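Annotation (not part of the diff): the `rpc/protocol.rs` change above is the libp2p 0.52 replacement for the removed `ProtocolName` trait. Protocol identifiers are plain strings now, so an upgrade's `Info` type only needs `AsRef<str>`. A self-contained sketch of the before/after mapping, with the struct trimmed down for illustration:

```rust
// Old: impl ProtocolName for ProtocolId { fn protocol_name(&self) -> &[u8] { .. } }
// New: the upgrade machinery only asks for a &str.
struct ProtocolId {
    protocol_id: String,
}

impl AsRef<str> for ProtocolId {
    fn as_ref(&self) -> &str {
        self.protocol_id.as_ref()
    }
}
```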
If the rate limiter allows the - /// request, the [`NetworkBehaviourAction`] that should be emitted is returned. If the request + /// request, the [`ToSwarm`] that should be emitted is returned. If the request /// should be delayed, it's returned with the duration to wait. fn try_send_request( limiter: &mut RateLimiter, diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 7d20b87ad1c..6c52a07c14a 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -3,21 +3,27 @@ use crate::peer_manager::PeerManager; use crate::rpc::{ReqId, RPC}; use crate::types::SnappyTransform; -use libp2p::gossipsub::subscription_filter::{ - MaxCountSubscriptionFilter, WhitelistSubscriptionFilter, -}; -use libp2p::gossipsub::Gossipsub as BaseGossipsub; -use libp2p::identify::Behaviour as Identify; +use libp2p::gossipsub; +use libp2p::identify; use libp2p::swarm::NetworkBehaviour; use types::EthSpec; use super::api_types::RequestId; -pub type SubscriptionFilter = MaxCountSubscriptionFilter; -pub type Gossipsub = BaseGossipsub; +pub type SubscriptionFilter = + gossipsub::MaxCountSubscriptionFilter; +pub type Gossipsub = gossipsub::Behaviour; #[derive(NetworkBehaviour)] -pub(crate) struct Behaviour { +pub(crate) struct Behaviour +where + AppReqId: ReqId, + TSpec: EthSpec, +{ + /// Peers banned. + pub banned_peers: libp2p::allow_block_list::Behaviour, + /// Keep track of active and pending connections to enforce hard limits. + pub connection_limits: libp2p::connection_limits::Behaviour, /// The routing pub-sub mechanism for eth2. pub gossipsub: Gossipsub, /// The Eth2 RPC specified in the wire-0 protocol. @@ -27,7 +33,7 @@ pub(crate) struct Behaviour { /// Keep regular connection to peers and disconnect if absent. // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. /// Provides IP addresses and peer information. - pub identify: Identify, + pub identify: identify::Behaviour, /// The peer manager that keeps track of peer's reputation and status. 
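Annotation (not part of the diff): the `Behaviour` struct above gains two stock libp2p 0.52 behaviours, `allow_block_list` for banning and `connection_limits` for limits that used to be configured on the swarm builder. A minimal sketch of composing just those two; the struct name is illustrative, not Lighthouse's:

```rust
use libp2p::swarm::NetworkBehaviour;
use libp2p::{allow_block_list, connection_limits};

// Both sub-behaviours emit `void::Void` events, which is why the main diff
// discards them with `void::unreachable`.
#[derive(NetworkBehaviour)]
struct LimitsAndBans {
    banned_peers: allow_block_list::Behaviour<allow_block_list::BlockedPeers>,
    connection_limits: connection_limits::Behaviour,
}
```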
pub peer_manager: PeerManager, } diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index 88becd686e5..b058fc0ff13 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ -1,7 +1,8 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::{error, TopicHash}; use libp2p::gossipsub::{ - GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams, + Config as GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, + TopicScoreParams, }; use std::cmp::max; use std::collections::HashMap; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index f1c47eb69a9..133aa646db6 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -22,15 +22,12 @@ use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use libp2p::bandwidth::BandwidthSinks; -use libp2p::gossipsub::error::PublishError; -use libp2p::gossipsub::metrics::Config as GossipsubMetricsConfig; -use libp2p::gossipsub::subscription_filter::MaxCountSubscriptionFilter; use libp2p::gossipsub::{ - GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, + self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, }; -use libp2p::identify::{Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent}; +use libp2p::identify; use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; -use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p::swarm::{Swarm, SwarmBuilder, SwarmEvent}; use libp2p::PeerId; use slog::{crit, debug, info, o, trace, warn}; @@ -68,10 +65,6 @@ pub enum NetworkEvent { PeerConnectedIncoming(PeerId), /// A peer has disconnected. PeerDisconnected(PeerId), - /// The peer needs to be banned. - PeerBanned(PeerId), - /// The peer has been unbanned. - PeerUnbanned(PeerId), /// An RPC Request that was sent failed. RPCFailed { /// The id of the failed request. 
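Annotation (not part of the diff): in `service/mod.rs` above, the aliased imports (`Behaviour as Identify`, `Config as IdentifyConfig`, `Event as IdentifyEvent`) are replaced by the plain `identify` module path. A short sketch of constructing the behaviour under the new names, using only calls shown in this diff; the agent string is a placeholder:

```rust
use libp2p::identify;
use libp2p::identity::Keypair;

fn build_identify(local_key: &Keypair) -> identify::Behaviour {
    // Was: Identify::new(IdentifyConfig::new(..))
    let config = identify::Config::new("eth2/1.0.0".into(), local_key.public())
        .with_agent_version("agent/0.1.0".into()) // placeholder agent version
        .with_cache_size(0);
    identify::Behaviour::new(config)
}
```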
@@ -233,7 +226,7 @@ impl Network { let update_gossipsub_scores = tokio::time::interval(params.decay_interval); let possible_fork_digests = ctx.fork_context.all_fork_digests(); - let filter = MaxCountSubscriptionFilter { + let filter = gossipsub::MaxCountSubscriptionFilter { filter: utils::create_whitelist_filter( possible_fork_digests, ctx.chain_spec.attestation_subnet_count, @@ -248,7 +241,7 @@ impl Network { // If metrics are enabled for gossipsub build the configuration let gossipsub_metrics = ctx .gossipsub_registry - .map(|registry| (registry, GossipsubMetricsConfig::default())); + .map(|registry| (registry, Default::default())); let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( @@ -277,26 +270,32 @@ impl Network { let discovery = { // Build and start the discovery sub-behaviour - let mut discovery = - Discovery::new(&local_keypair, &config, network_globals.clone(), &log).await?; + let mut discovery = Discovery::new( + local_keypair.clone(), + &config, + network_globals.clone(), + &log, + ) + .await?; // start searching for peers discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS); discovery }; let identify = { + let local_public_key = local_keypair.public(); let identify_config = if config.private { - IdentifyConfig::new( + identify::Config::new( "".into(), - local_keypair.public(), // Still send legitimate public key + local_public_key, // Still send legitimate public key ) .with_cache_size(0) } else { - IdentifyConfig::new("eth2/1.0.0".into(), local_keypair.public()) + identify::Config::new("eth2/1.0.0".into(), local_public_key) .with_agent_version(lighthouse_version::version_with_platform()) .with_cache_size(0) }; - Identify::new(identify_config) + identify::Behaviour::new(identify_config) }; let peer_manager = { @@ -309,13 +308,38 @@ impl Network { PeerManager::new(peer_manager_cfg, network_globals.clone(), &log)? 
}; + let connection_limits = { + let limits = libp2p::connection_limits::ConnectionLimits::default() + .with_max_pending_incoming(Some(5)) + .with_max_pending_outgoing(Some(16)) + .with_max_established_incoming(Some( + (config.target_peers as f32 + * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) + .ceil() as u32, + )) + .with_max_established_outgoing(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, + )) + .with_max_established(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) + .ceil() as u32, + )) + .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); + + libp2p::connection_limits::Behaviour::new(limits) + }; + + let banned_peers = libp2p::allow_block_list::Behaviour::default(); + let behaviour = { Behaviour { + banned_peers, gossipsub, eth2_rpc, discovery, identify, peer_manager, + connection_limits, } }; @@ -333,22 +357,6 @@ impl Network { } // sets up the libp2p connection limits - let limits = ConnectionLimits::default() - .with_max_pending_incoming(Some(5)) - .with_max_pending_outgoing(Some(16)) - .with_max_established_incoming(Some( - (config.target_peers as f32 - * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) - .ceil() as u32, - )) - .with_max_established_outgoing(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, - )) - .with_max_established(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) - .ceil() as u32, - )) - .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); ( SwarmBuilder::with_executor( @@ -358,8 +366,7 @@ impl Network { Executor(executor), ) .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) - .connection_event_buffer_size(64) - .connection_limits(limits) + .per_connection_event_buffer_size(4) .build(), bandwidth, ) @@ -400,7 +407,7 @@ impl Network { match self.swarm.listen_on(listen_multiaddr.clone()) { Ok(_) => { let mut log_address = listen_multiaddr; - log_address.push(MProtocol::P2p(enr.peer_id().into())); + log_address.push(MProtocol::P2p(enr.peer_id())); info!(self.log, "Listening established"; "address" => %log_address); } Err(err) => { @@ -497,7 +504,7 @@ impl Network { &mut self.swarm.behaviour_mut().discovery } /// Provides IP addresses and peer information. - pub fn identify_mut(&mut self) -> &mut Identify { + pub fn identify_mut(&mut self) -> &mut identify::Behaviour { &mut self.swarm.behaviour_mut().identify } /// The peer manager that keeps track of peer's reputation and status. @@ -518,7 +525,7 @@ impl Network { &self.swarm.behaviour().discovery } /// Provides IP addresses and peer information. - pub fn identify(&self) -> &Identify { + pub fn identify(&self) -> &identify::Behaviour { &self.swarm.behaviour().identify } /// The peer manager that keeps track of peer's reputation and status. @@ -1036,9 +1043,12 @@ impl Network { /* Sub-behaviour event handling functions */ /// Handle a gossipsub event. 
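Annotation (not part of the diff): the block above moves connection limits off the `SwarmBuilder` (whose `.connection_limits(limits)` call no longer exists) and into the dedicated `libp2p::connection_limits` behaviour. A condensed sketch of that move; the concrete numbers are placeholders rather than Lighthouse's derived values:

```rust
use libp2p::connection_limits::{Behaviour, ConnectionLimits};

fn limits_behaviour(target_peers: u32) -> Behaviour {
    let limits = ConnectionLimits::default()
        .with_max_pending_incoming(Some(5))
        .with_max_pending_outgoing(Some(16))
        .with_max_established(Some(target_peers + target_peers / 10)); // placeholder headroom
    Behaviour::new(limits)
}
```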
- fn inject_gs_event(&mut self, event: GossipsubEvent) -> Option> { + fn inject_gs_event( + &mut self, + event: gossipsub::Event, + ) -> Option> { match event { - GossipsubEvent::Message { + gossipsub::Event::Message { propagation_source, message_id: id, message: gs_msg, @@ -1068,7 +1078,7 @@ impl Network { } } } - GossipsubEvent::Subscribed { peer_id, topic } => { + gossipsub::Event::Subscribed { peer_id, topic } => { if let Ok(topic) = GossipTopic::decode(topic.as_str()) { if let Some(subnet_id) = topic.subnet_id() { self.network_globals @@ -1109,7 +1119,7 @@ impl Network { } } } - GossipsubEvent::Unsubscribed { peer_id, topic } => { + gossipsub::Event::Unsubscribed { peer_id, topic } => { if let Some(subnet_id) = subnet_from_topic_hash(&topic) { self.network_globals .peers @@ -1117,7 +1127,7 @@ impl Network { .remove_subscription(&peer_id, &subnet_id); } } - GossipsubEvent::GossipsubNotSupported { peer_id } => { + gossipsub::Event::GossipsubNotSupported { peer_id } => { debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id); self.peer_manager_mut().report_peer( &peer_id, @@ -1335,10 +1345,10 @@ impl Network { /// Handle an identify event. fn inject_identify_event( &mut self, - event: IdentifyEvent, + event: identify::Event, ) -> Option> { match event { - IdentifyEvent::Received { peer_id, mut info } => { + identify::Event::Received { peer_id, mut info } => { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { debug!( self.log, @@ -1349,9 +1359,9 @@ impl Network { // send peer info to the peer manager. self.peer_manager_mut().identify(&peer_id, &info); } - IdentifyEvent::Sent { .. } => {} - IdentifyEvent::Error { .. } => {} - IdentifyEvent::Pushed { .. } => {} + identify::Event::Sent { .. } => {} + identify::Event::Error { .. } => {} + identify::Event::Pushed { .. } => {} } None } @@ -1372,14 +1382,17 @@ impl Network { Some(NetworkEvent::PeerDisconnected(peer_id)) } PeerManagerEvent::Banned(peer_id, associated_ips) => { - self.swarm.ban_peer_id(peer_id); + self.swarm.behaviour_mut().banned_peers.block_peer(peer_id); self.discovery_mut().ban_peer(&peer_id, associated_ips); - Some(NetworkEvent::PeerBanned(peer_id)) + None } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { - self.swarm.unban_peer_id(peer_id); + self.swarm + .behaviour_mut() + .banned_peers + .unblock_peer(peer_id); self.discovery_mut().unban_peer(&peer_id, associated_ips); - Some(NetworkEvent::PeerUnbanned(peer_id)) + None } PeerManagerEvent::Status(peer_id) => { // it's time to status. We don't keep a beacon chain reference here, so we inform @@ -1426,17 +1439,20 @@ impl Network { let maybe_event = match swarm_event { SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { // Handle sub-behaviour events. + BehaviourEvent::BannedPeers(void) => void::unreachable(void), BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), }, SwarmEvent::ConnectionEstablished { .. } => None, SwarmEvent::ConnectionClosed { .. 
} => None, SwarmEvent::IncomingConnection { local_addr, send_back_addr, + connection_id: _, } => { trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); None @@ -1445,19 +1461,41 @@ impl Network { local_addr, send_back_addr, error, + connection_id: _, } => { - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); - None - } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); + let error_repr = match error { + libp2p::swarm::ListenError::Aborted => { + "Incoming connection aborted".to_string() + } + libp2p::swarm::ListenError::WrongPeerId { obtained, endpoint } => { + format!("Wrong peer id, obtained {obtained}, endpoint {endpoint:?}") + } + libp2p::swarm::ListenError::LocalPeerId { endpoint } => { + format!("Dialing local peer id {endpoint:?}") + } + libp2p::swarm::ListenError::Denied { cause } => { + format!("Connection was denied with cause {cause}") + } + libp2p::swarm::ListenError::Transport(t) => match t { + libp2p::TransportError::MultiaddrNotSupported(m) => { + format!("Transport error: Multiaddr not supported: {m}") + } + libp2p::TransportError::Other(e) => { + format!("Transport error: other: {e}") + } + }, + }; + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => error_repr); None } - SwarmEvent::BannedPeer { - peer_id, - endpoint: _, + SwarmEvent::OutgoingConnectionError { + peer_id: _, + error: _, + connection_id: _, } => { - debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); + // The Behaviour event is more general than the swarm event here. It includes + // connection failures. So we use that log for now, in the peer manager + // behaviour implementation. None } SwarmEvent::NewListenAddr { address, .. 
} => { @@ -1486,7 +1524,13 @@ impl Network { None } } - SwarmEvent::Dialing(_) => None, + SwarmEvent::Dialing { + peer_id, + connection_id: _, + } => { + debug!(self.log, "Swarm Dialing"; "peer_id" => ?peer_id); + None + } }; if let Some(ev) = maybe_event { diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index c54f4fdb5aa..9775f6028ff 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -4,13 +4,11 @@ use crate::types::{ error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind, }; use crate::{GossipTopic, NetworkConfig}; -use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks}; -use libp2p::core::{ - identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, -}; -use libp2p::gossipsub::subscription_filter::WhitelistSubscriptionFilter; -use libp2p::gossipsub::IdentTopic as Topic; -use libp2p::{core, noise, PeerId, Transport}; +use libp2p::bandwidth::BandwidthSinks; +use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed}; +use libp2p::gossipsub; +use libp2p::identity::{secp256k1, Keypair}; +use libp2p::{core, noise, yamux, PeerId, Transport, TransportExt}; use prometheus_client::registry::Registry; use slog::{debug, warn}; use ssz::Decode; @@ -52,30 +50,19 @@ pub fn build_transport( transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) }; - let (transport, bandwidth) = BandwidthLogging::new(transport); - - // mplex config - let mut mplex_config = libp2p::mplex::MplexConfig::new(); - mplex_config.set_max_buffer_size(256); - mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block); - // yamux config - let mut yamux_config = libp2p::yamux::YamuxConfig::default(); - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); + let mut yamux_config = yamux::Config::default(); + yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read()); + let (transport, bandwidth) = transport + .upgrade(core::upgrade::Version::V1) + .authenticate(generate_noise_config(&local_private_key)) + .multiplex(yamux_config) + .timeout(Duration::from_secs(10)) + .boxed() + .with_bandwidth_logging(); // Authentication - Ok(( - transport - .upgrade(core::upgrade::Version::V1) - .authenticate(generate_noise_config(&local_private_key)) - .multiplex(core::upgrade::SelectUpgrade::new( - yamux_config, - mplex_config, - )) - .timeout(Duration::from_secs(10)) - .boxed(), - bandwidth, - )) + Ok((transport, bandwidth)) } // Useful helper functions for debugging. Currently not used in the client. 
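Annotation (not part of the diff): the `service/utils.rs` hunk above drops mplex, builds the noise config directly from the identity key, and wraps the boxed transport with `TransportExt::with_bandwidth_logging`. A sketch of the resulting transport assembly, assuming the usual libp2p 0.52 TCP constructor (the diff does not show that part of `build_transport`):

```rust
use std::sync::Arc;
use std::time::Duration;

use libp2p::bandwidth::BandwidthSinks;
use libp2p::core::{muxing::StreamMuxerBox, transport::Boxed};
use libp2p::identity::Keypair;
use libp2p::{core, noise, yamux, PeerId, Transport, TransportExt};

fn build_tcp_transport(
    local_key: &Keypair,
) -> (Boxed<(PeerId, StreamMuxerBox)>, Arc<BandwidthSinks>) {
    // Assumed TCP base transport; not part of this diff.
    let tcp = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::default());

    // yamux is now the only muxer; mplex support was removed upstream.
    let mut yamux_config = yamux::Config::default();
    yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read());

    tcp.upgrade(core::upgrade::Version::V1)
        .authenticate(noise::Config::new(local_key).expect("noise keys"))
        .multiplex(yamux_config)
        .timeout(Duration::from_secs(10))
        .boxed()
        .with_bandwidth_logging()
}
```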
@@ -94,10 +81,10 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result { #[allow(dead_code)] fn keypair_from_bytes(mut bytes: Vec) -> error::Result { - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) + secp256k1::SecretKey::try_from_bytes(&mut bytes) .map(|secret| { - let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into(); - Keypair::Secp256k1(keypair) + let keypair: secp256k1::Keypair = secret.into(); + keypair.into() }) .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) } @@ -115,12 +102,10 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { Err(_) => debug!(log, "Could not read network key file"), Ok(_) => { // only accept secp256k1 keys for now - if let Ok(secret_key) = - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes) - { - let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into(); + if let Ok(secret_key) = secp256k1::SecretKey::try_from_bytes(&mut key_bytes) { + let kp: secp256k1::Keypair = secret_key.into(); debug!(log, "Loaded network key from disk."); - return Keypair::Secp256k1(kp); + return kp.into(); } else { debug!(log, "Network key file is not a valid secp256k1 key"); } @@ -129,34 +114,27 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { } // if a key could not be loaded from disk, generate a new one and save it - let local_private_key = Keypair::generate_secp256k1(); - if let Keypair::Secp256k1(key) = local_private_key.clone() { - let _ = std::fs::create_dir_all(&config.network_dir); - match File::create(network_key_f.clone()) - .and_then(|mut f| f.write_all(&key.secret().to_bytes())) - { - Ok(_) => { - debug!(log, "New network key generated and written to disk"); - } - Err(e) => { - warn!( - log, - "Could not write node key to file: {:?}. error: {}", network_key_f, e - ); - } + let local_private_key = secp256k1::Keypair::generate(); + let _ = std::fs::create_dir_all(&config.network_dir); + match File::create(network_key_f.clone()) + .and_then(|mut f| f.write_all(&local_private_key.secret().to_bytes())) + { + Ok(_) => { + debug!(log, "New network key generated and written to disk"); + } + Err(e) => { + warn!( + log, + "Could not write node key to file: {:?}. error: {}", network_key_f, e + ); } } - local_private_key + local_private_key.into() } /// Generate authenticated XX Noise config from identity keys -fn generate_noise_config( - identity_keypair: &Keypair, -) -> noise::NoiseAuthenticated { - let static_dh_keys = noise::Keypair::::new() - .into_authentic(identity_keypair) - .expect("signing can fail only once during starting a node"); - noise::NoiseConfig::xx(static_dh_keys).into_authenticated() +fn generate_noise_config(identity_keypair: &Keypair) -> noise::Config { + noise::Config::new(identity_keypair).expect("signing can fail only once during starting a node") } /// For a multiaddr that ends with a peer id, this strips this suffix. 
@@ -236,11 +214,11 @@ pub(crate) fn create_whitelist_filter(
     possible_fork_digests: Vec<[u8; 4]>,
     attestation_subnet_count: u64,
     sync_committee_subnet_count: u64,
-) -> WhitelistSubscriptionFilter {
+) -> gossipsub::WhitelistSubscriptionFilter {
     let mut possible_hashes = HashSet::new();
     for fork_digest in possible_fork_digests {
         let mut add = |kind| {
-            let topic: Topic =
+            let topic: gossipsub::IdentTopic =
                 GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into();
             possible_hashes.insert(topic.hash());
         };
@@ -260,7 +238,7 @@ pub(crate) fn create_whitelist_filter(
             add(SyncCommitteeMessage(SyncSubnetId::new(id)));
         }
     }
-    WhitelistSubscriptionFilter(possible_hashes)
+    gossipsub::WhitelistSubscriptionFilter(possible_hashes)
 }

 /// Persist metadata to disk
diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs
index 3892971bfd7..59a00acaf00 100644
--- a/beacon_node/lighthouse_network/src/types/pubsub.rs
+++ b/beacon_node/lighthouse_network/src/types/pubsub.rs
@@ -2,7 +2,7 @@
 use crate::types::{GossipEncoding, GossipKind, GossipTopic};
 use crate::TopicHash;
-use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage};
+use libp2p::gossipsub;
 use snap::raw::{decompress_len, Decoder, Encoder};
 use ssz::{Decode, Encode};
 use std::boxed::Box;
@@ -49,12 +49,12 @@ impl SnappyTransform {
     }
 }

-impl DataTransform for SnappyTransform {
+impl gossipsub::DataTransform for SnappyTransform {
     // Provides the snappy decompression from RawGossipsubMessages
     fn inbound_transform(
         &self,
-        raw_message: RawGossipsubMessage,
-    ) -> Result<GossipsubMessage, std::io::Error> {
+        raw_message: gossipsub::RawMessage,
+    ) -> Result<gossipsub::Message, std::io::Error> {
         // check the length of the raw bytes
         let len = decompress_len(&raw_message.data)?;
         if len > self.max_size_per_message {
@@ -68,7 +68,7 @@ impl DataTransform for SnappyTransform {
         let decompressed_data = decoder.decompress_vec(&raw_message.data)?;

         // Build the GossipsubMessage struct
-        Ok(GossipsubMessage {
+        Ok(gossipsub::Message {
             source: raw_message.source,
             data: decompressed_data,
             sequence_number: raw_message.sequence_number,
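For reference only, and not part of the diff: a minimal `gossipsub::DataTransform` implementation under the renamed types (`gossipsub::RawMessage` in, `gossipsub::Message` out), shaped like the `SnappyTransform` above but without the snappy step. It assumes libp2p 0.52 with the `gossipsub` feature; the pass-through transform, the struct name, and the topic string are all illustrative.

```rust
use libp2p::gossipsub::{self, DataTransform, TopicHash};

/// A pass-through transform with the same shape as Lighthouse's SnappyTransform.
struct IdentityTransform;

impl DataTransform for IdentityTransform {
    fn inbound_transform(
        &self,
        raw_message: gossipsub::RawMessage,
    ) -> Result<gossipsub::Message, std::io::Error> {
        // GossipsubMessage is now gossipsub::Message; the fields carry over unchanged.
        Ok(gossipsub::Message {
            source: raw_message.source,
            data: raw_message.data,
            sequence_number: raw_message.sequence_number,
            topic: raw_message.topic,
        })
    }

    fn outbound_transform(
        &self,
        _topic: &TopicHash,
        data: Vec<u8>,
    ) -> Result<Vec<u8>, std::io::Error> {
        Ok(data)
    }
}

fn main() {
    let transform = IdentityTransform;
    let topic = gossipsub::IdentTopic::new("example-topic").hash();
    let out = transform
        .outbound_transform(&topic, b"hello gossip".to_vec())
        .expect("the identity transform cannot fail");
    println!("outbound payload is {} bytes", out.len());
}
```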
diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs
index d74684e2156..c33d8bf4fdc 100644
--- a/beacon_node/lighthouse_network/tests/common.rs
+++ b/beacon_node/lighthouse_network/tests/common.rs
@@ -1,5 +1,5 @@
 #![cfg(test)]
-use libp2p::gossipsub::GossipsubConfigBuilder;
+use libp2p::gossipsub;
 use lighthouse_network::service::Network as LibP2PService;
 use lighthouse_network::Enr;
 use lighthouse_network::EnrExt;
@@ -84,7 +84,7 @@ pub fn build_config(port: u16, mut boot_nodes: Vec<Enr>) -> NetworkConfig {
     config.boot_nodes_enr.append(&mut boot_nodes);
     config.network_dir = path.into_path();
     // Reduce gossipsub heartbeat parameters
-    config.gs_config = GossipsubConfigBuilder::from(config.gs_config)
+    config.gs_config = gossipsub::ConfigBuilder::from(config.gs_config)
         .heartbeat_initial_delay(Duration::from_millis(500))
         .heartbeat_interval(Duration::from_millis(500))
         .build()
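For reference only, and not part of the diff: a short sketch of the flattened `gossipsub::` naming used in the test helper above and in `create_whitelist_filter`. `ConfigBuilder`, `IdentTopic`, and `WhitelistSubscriptionFilter` replace the old `Gossipsub*`-prefixed paths; the example assumes libp2p 0.52 with the `gossipsub` feature, and the topic string and heartbeat values are illustrative.

```rust
use std::collections::HashSet;
use std::time::Duration;

use libp2p::gossipsub;

fn main() {
    // GossipsubConfigBuilder is now gossipsub::ConfigBuilder.
    let config = gossipsub::ConfigBuilder::default()
        .heartbeat_initial_delay(Duration::from_millis(500))
        .heartbeat_interval(Duration::from_millis(500))
        .build()
        .expect("valid gossipsub config");

    // IdentTopic and WhitelistSubscriptionFilter keep their behaviour; only the paths changed.
    let topic = gossipsub::IdentTopic::new("example-topic");
    let filter = gossipsub::WhitelistSubscriptionFilter(HashSet::from([topic.hash()]));

    println!(
        "heartbeat every {:?}, whitelisting {} topic(s)",
        config.heartbeat_interval(),
        filter.0.len()
    );
}
```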
diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs
index 0556e7fd7e1..193955ee53f 100644
--- a/beacon_node/network/src/service.rs
+++ b/beacon_node/network/src/service.rs
@@ -493,10 +489,8 @@ impl<T: BeaconChainTypes> NetworkService<T> {
             NetworkEvent::PeerConnectedOutgoing(peer_id) => {
                 self.send_to_router(RouterMessage::StatusPeer(peer_id));
             }
-            NetworkEvent::PeerConnectedIncoming(_)
-            | NetworkEvent::PeerBanned(_)
-            | NetworkEvent::PeerUnbanned(_) => {
-                // No action required for these events.
+            NetworkEvent::PeerConnectedIncoming(_) => {
+                // No action required for this event.
             }
             NetworkEvent::PeerDisconnected(peer_id) => {
                 self.send_to_router(RouterMessage::PeerDisconnected(peer_id));
diff --git a/book/src/installation-source.md b/book/src/installation-source.md
index 1504b7ff0fe..58e6917eca9 100644
--- a/book/src/installation-source.md
+++ b/book/src/installation-source.md
@@ -28,7 +28,7 @@ operating system.
 Install the following packages:

 ```bash
-sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
+sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
 ```

 > Tips:
@@ -51,10 +51,6 @@ After this, you are ready to [build Lighthouse](#build-lighthouse).
 brew install cmake
 ```

-1. Install protoc using Homebrew:
-```
-brew install protobuf
-```
-
 [Homebrew]: https://brew.sh/
@@ -71,7 +67,7 @@ After this, you are ready to [build Lighthouse](#build-lighthouse).
 Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
 ```
 > - To verify that Chocolatey is ready, run `choco` and it should return the version.
-1. Install Make, CMake, LLVM and protoc using Chocolatey:
+1. Install Make, CMake and LLVM using Chocolatey:
 ```
 choco install make
 ```
@@ -85,10 +81,6 @@ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System'
 choco install llvm
 ```

-```
-choco install protoc
-```
-
 These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run
 successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you
 should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section.
@@ -217,4 +209,3 @@ look into [cross compilation](./cross-compiling.md), or use a
 [pre-built binary](https://github.com/sigp/lighthouse/releases).

 If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`.
-
diff --git a/book/src/pi.md b/book/src/pi.md
index d8d154d765a..550415240b4 100644
--- a/book/src/pi.md
+++ b/book/src/pi.md
@@ -22,7 +22,7 @@ terminal and an Internet connection are necessary.
 Install the Ubuntu dependencies:

 ```bash
-sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
+sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
 ```

 > Tips:
diff --git a/book/src/setup.md b/book/src/setup.md
index ea3c5664ac6..533e1d463d3 100644
--- a/book/src/setup.md
+++ b/book/src/setup.md
@@ -14,8 +14,6 @@ The additional requirements for developers are:
   don't have `anvil` available on your `PATH`.
 - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by
   some dependencies. See [`Installation Guide`](./installation.md) for more info.
-- [`protoc`](https://github.com/protocolbuffers/protobuf/releases) required for
-  the networking stack.
 - [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum,
   used by web3signer_tests.
 - [`libpq-dev`](https://www.postgresql.org/docs/devel/libpq.html). Also know as
diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs
index c4e36022a82..d006156bf9d 100644
--- a/boot_node/src/config.rs
+++ b/boot_node/src/config.rs
@@ -80,7 +80,7 @@ impl<T: EthSpec> BootNodeConfig<T> {
         }

         let private_key = load_private_key(&network_config, &logger);
-        let local_key = CombinedKey::from_libp2p(&private_key)?;
+        let local_key = CombinedKey::from_libp2p(private_key)?;

         let local_enr = if let Some(dir) = matches.value_of("network-dir") {
             let network_dir: PathBuf = dir.into();
diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml
index 2387d05f3c9..f817b1c3864 100644
--- a/common/eth2_network_config/Cargo.toml
+++ b/common/eth2_network_config/Cargo.toml
@@ -20,4 +20,4 @@ types = { path = "../../consensus/types"}
 kzg = { path = "../../crypto/kzg" }
 eth2_ssz = "0.4.1"
 eth2_config = { path = "../eth2_config"}
-discv5 = "0.3.0"
\ No newline at end of file
+discv5 = "0.3.1"
\ No newline at end of file