From b2978f339ccf7a3f37e4640dd459fffcc8a20c0f Mon Sep 17 00:00:00 2001
From: Raj Raorane <41839716+Raj-RR1@users.noreply.github.com>
Date: Sat, 19 Aug 2023 22:34:15 +0530
Subject: [PATCH] partial changes in cli, consensus-transition and precompiles
---
node/cli/src/cli.rs | 173 ++++++++----------
node/cli/src/command.rs | 43 ++---
node/cli/src/lib.rs | 12 +-
.../manual-seal/src/consensus/aura.rs | 15 +-
.../manual-seal/src/consensus/babe.rs | 93 +++++-----
.../manual-seal/src/consensus/timestamp.rs | 19 +-
node/runtime/src/precompiles.rs | 77 ++++----
7 files changed, 205 insertions(+), 227 deletions(-)
diff --git a/node/cli/src/cli.rs b/node/cli/src/cli.rs
index e04de0f1..662236dc 100644
--- a/node/cli/src/cli.rs
+++ b/node/cli/src/cli.rs
@@ -1,103 +1,98 @@
-// Copyright 2018-2020 Commonwealth Labs, Inc.
+// Copyright 2018-2020 Commonwealth Labs, Inc.
// This file is part of Edgeware.
-// Edgeware is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
+// Edgeware is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
-// Edgeware is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// Edgeware is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
-// You should have received a copy of the GNU General Public License
-// along with Edgeware. If not, see <http://www.gnu.org/licenses/>.
+// You should have received a copy of the GNU General Public License
+// along with Edgeware. If not, see <http://www.gnu.org/licenses/>.
// 2022 rewrite by flipchan @ edgeware
-use sc_cli::{KeySubcommand, SignCmd, VanityCmd, VerifyCmd};
-//use structopt::StructOpt;
-use edgeware_cli_opt::EthApi;
-use clap::Parser;
-
-//use ethereum_types::{H160, H256, U256};
-
-#[allow(missing_docs)]
-#[derive(Debug, Parser)]
-pub struct RunCmd {
- #[allow(missing_docs)]
- #[clap(flatten)]
- pub base: sc_cli::RunCmd,
-
- #[clap(long = "enable-dev-signer")]
- pub enable_dev_signer: bool,
-
- /// The dynamic-fee pallet target gas price set by block author
- #[clap(long, default_value = "1")]
- pub target_gas_price: u64,
-
- /// Enable EVM tracing module on a non-authority node.
- #[clap(
- long,
- conflicts_with = "validator",
- use_value_delimiter = true,
- require_value_delimiter = true,
- multiple_values = true
- )]
- pub ethapi: Vec<EthApi>,
-
- /// Number of concurrent tracing tasks. Meant to be shared by both "debug"
- /// and "trace" modules.
- #[clap(long, default_value = "10")]
+use sc_cli::{KeySubcommand, SignCmd, VanityCmd, VerifyCmd};
+//use structopt::StructOpt;
+use edgeware_cli_opt::EthApi;
+use clap::Parser;
+
+//use ethereum_types::{H160, H256, U256};
+
+#[allow(missing_docs)]
+#[derive(Debug, Parser)]
+pub struct RunCmd {
+ #[allow(missing_docs)]
+ #[clap(flatten)]
+ pub base: sc_cli::RunCmd,
+
+ #[clap(long = "enable-dev-signer")]
+ pub enable_dev_signer: bool,
+
+ /// The dynamic-fee pallet target gas price set by block author
+ #[clap(long, default_value = "1")]
+ pub target_gas_price: u64,
+
+ /// Enable EVM tracing module on a non-authority node.
+ #[clap(
+ long,
+ conflicts_with = "validator",
+ use_value_delimiter = true,
+ require_value_delimiter = true,
+ multiple_values = true
+ )]
+ pub ethapi: Vec<EthApi>,
+
+ /// Number of concurrent tracing tasks. Meant to be shared by both "debug"
+ /// and "trace" modules.
+ #[clap(long, default_value = "10")]
pub ethapi_max_permits: u32,
- /// Maximum number of trace entries a single request of `trace_filter` is
- /// allowed to return. A request asking for more or an unbounded one going
- /// over this limit will both return an error.
- #[clap(long, default_value = "500")]
- pub ethapi_trace_max_count: u32,
+ /// Maximum number of trace entries a single request of `trace_filter` is
+ /// allowed to return. A request asking for more or an unbounded one going
+ /// over this limit will both return an error.
+ #[clap(long, default_value = "500")]
+ pub ethapi_trace_max_count: u32,
- /// Duration (in seconds) after which the cache of `trace_filter` for a
- /// given block will be discarded.
- #[clap(long, default_value = "300")]
+ /// Duration (in seconds) after which the cache of `trace_filter` for a
+ /// given block will be discarded.
+ #[clap(long, default_value = "300")]
pub ethapi_trace_cache_duration: u64,
- /// Size in bytes of the LRU cache for block data.
- #[clap(long, default_value = "300000000")]
- pub eth_log_block_cache: usize,
-
- /// Size in bytes of the LRU cache for transactions statuses data.
- #[clap(long, default_value = "300000000")]
- pub eth_statuses_cache: usize,
+ /// Size in bytes of the LRU cache for block data.
+ #[clap(long, default_value = "300000000")]
+ pub eth_log_block_cache: usize,
- /// Size in bytes of data a raw tracing request is allowed to use.
- /// Bound the size of memory, stack and storage data.
- #[clap(long, default_value = "20000000")]
- pub tracing_raw_max_memory_usage: usize,
+ /// Size in bytes of the LRU cache for transactions statuses data.
+ #[clap(long, default_value = "300000000")]
+ pub eth_statuses_cache: usize,
- /// Maximum number of logs in a query.
- #[clap(long, default_value = "10000")]
- pub max_past_logs: u32,
+ /// Maximum number of logs in a query.
+ #[clap(long, default_value = "10000")]
+ pub max_past_logs: u32,
- /// Maximum fee history cache size.
- #[clap(long, default_value = "2048")]
- pub fee_history_limit: u64,
+ /// Maximum fee history cache size.
+ #[clap(long, default_value = "2048")]
+ pub fee_history_limit: u64,
}
/// An overarching CLI command definition.
-#[derive(Debug, Parser)]
-#[clap(
- propagate_version = true,
- args_conflicts_with_subcommands = true,
- subcommand_negates_reqs = true
+#[derive(Debug, Parser)]
+#[clap(
+ propagate_version = true,
+ args_conflicts_with_subcommands = true,
+ subcommand_negates_reqs = true
)]
pub struct Cli {
/// Possible subcommand with parameters.
- #[clap(subcommand)]
+ #[command(subcommand)]
pub subcommand: Option<Subcommand>,
- #[allow(missing_docs)]
+ #[allow(missing_docs)]
#[clap(flatten)]
pub run: RunCmd,
@@ -119,33 +114,17 @@ pub struct Cli {
/// Possible subcommands of the main binary.
#[derive(Debug, clap::Subcommand)]
pub enum Subcommand {
-
- /// The custom inspect subcommmand for decoding blocks and extrinsics.
- #[command(
- name = "inspect",
- about = "Decode given block or extrinsic using current native runtime."
- )]
- Inspect(node_inspect::cli::InspectCmd),
- /// Key management cli utilities
- #[clap(subcommand)]
+ /// Key management cli utilities
+ #[command(subcommand)]
Key(KeySubcommand),
- /// The custom benchmark subcommmand benchmarking runtime pallets.
+ /// The custom benchmark subcommand benchmarking runtime pallets.
// #[clap(name = "benchmark", about = "Benchmark runtime pallets.")]
- #[clap(subcommand)]
+ #[command(subcommand)]
Benchmark(frame_benchmarking_cli::BenchmarkCmd),
-
- /// Try some command against runtime state.
- #[cfg(feature = "try-runtime")]
- TryRuntime(try_runtime_cli::TryRuntimeCmd),
-
- /// Try some command against runtime state. Note: `try-runtime` feature must be enabled.
- #[cfg(not(feature = "try-runtime"))]
- TryRuntime,
-
- /// Verify a signature for a message, provided on STDIN, with a given
- /// (public or secret) key.
+ /// Verify a signature for a message, provided on STDIN, with a given
+ /// (public or secret) key.
Verify(VerifyCmd),
/// Generate a seed that provides a vanity address.
diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs
index 4242b412..b8e596bd 100644
--- a/node/cli/src/command.rs
+++ b/node/cli/src/command.rs
@@ -88,64 +88,52 @@ pub fn run() -> Result<()> {
Some(Subcommand::BuildSpec(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
- }
+ },
Some(Subcommand::CheckBlock(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
- let PartialComponents {
- client,
- task_manager,
- import_queue,
- ..
- } = new_partial(&config, &cli)?;
+ let PartialComponents { client, task_manager, import_queue, .. } =
+ new_partial(&config, &cli)?;
Ok((cmd.run(client, import_queue), task_manager))
})
- }
+ },
Some(Subcommand::ExportBlocks(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
- let PartialComponents {
- client, task_manager, ..
- } = new_partial(&config, &cli)?;
+ let PartialComponents { client, task_manager, .. } = new_partial(&config, &cli)?;
Ok((cmd.run(client, config.database), task_manager))
})
- }
+ },
Some(Subcommand::ExportState(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
- let PartialComponents {
- client, task_manager, ..
- } = new_partial(&config, &cli)?;
+ let PartialComponents { client, task_manager, .. } = new_partial(&config, &cli)?;
Ok((cmd.run(client, config.chain_spec), task_manager))
})
- }
+ },
Some(Subcommand::ImportBlocks(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
- let PartialComponents {
- client,
- task_manager,
- import_queue,
- ..
- } = new_partial(&config, &cli)?;
+ let PartialComponents { client, task_manager, import_queue, .. } =
+ new_partial(&config, &cli)?;
Ok((cmd.run(client, import_queue), task_manager))
})
- }
+ },
Some(Subcommand::PurgeChain(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run(config.database))
- }
+ },
Some(Subcommand::Revert(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
- let PartialComponents { client, task_manager, backend, .. } = new_partial(&config,&cli)?;
- let aux_revert = Box::new(move |client,_, blocks| {
+ let PartialComponents { client, task_manager, backend, .. } = new_partial(&config, &cli)?;
+ let aux_revert = Box::new(|client,_, blocks| {
sc_finality_grandpa::revert(client, blocks)?;
Ok(())
});
Ok((cmd.run(client, backend, Some(aux_revert)), task_manager))
})
- }
+ },
None => {
let runner = cli.create_runner(&cli.run.base)?;
@@ -159,7 +147,6 @@ pub fn run() -> Result<()> {
fee_history_limit: cli.run.fee_history_limit,
max_past_logs: cli.run.max_past_logs,
relay_chain_rpc_url: None,
- tracing_raw_max_memory_usage: cli.run.tracing_raw_max_memory_usage
};
runner.run_node_until_exit(|config| async move {
diff --git a/node/cli/src/lib.rs b/node/cli/src/lib.rs
index d981662f..e2ac8945 100644
--- a/node/cli/src/lib.rs
+++ b/node/cli/src/lib.rs
@@ -35,6 +35,8 @@ pub mod chain_spec;
#[macro_use]
mod service;
#[cfg(feature = "cli")]
+mod benchmarking;
+#[cfg(feature = "cli")]
mod cli;
#[cfg(feature = "cli")]
mod command;
@@ -43,9 +45,9 @@ mod command;
pub use cli::*;
#[cfg(feature = "cli")]
pub use command::*;
+
+pub use cli::*;
+pub use command::*;
-pub use cli::*;
-pub use command::*;
-
-pub mod mainnet_fixtures;
-pub mod testnet_fixtures;
+pub mod mainnet_fixtures;
+pub mod testnet_fixtures;
\ No newline at end of file
diff --git a/node/consensus-transition/manual-seal/src/consensus/aura.rs b/node/consensus-transition/manual-seal/src/consensus/aura.rs
index 7b5d6720..900973b8 100644
--- a/node/consensus-transition/manual-seal/src/consensus/aura.rs
+++ b/node/consensus-transition/manual-seal/src/consensus/aura.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.
-// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
@@ -35,14 +35,14 @@ use sp_timestamp::TimestampInherentData;
use std::{marker::PhantomData, sync::Arc};
/// Consensus data provider for Aura.
-pub struct AuraConsensusDataProvider<B, C> {
+pub struct AuraConsensusDataProvider<B, C, P> {
// slot duration
slot_duration: SlotDuration,
// phantom data for required generics
- _phantom: PhantomData<(B, C)>,
+ _phantom: PhantomData<(B, C, P)>,
}
-impl<B, C> AuraConsensusDataProvider<B, C>
+impl<B, C, P> AuraConsensusDataProvider<B, C, P>
where
B: BlockT,
C: AuxStore + ProvideRuntimeApi + UsageProvider,
@@ -58,7 +58,7 @@ where
}
}
-impl<B, C> ConsensusDataProvider<B> for AuraConsensusDataProvider<B, C>
+impl<B, C, P> ConsensusDataProvider<B> for AuraConsensusDataProvider<B, C, P>
where
B: BlockT,
C: AuxStore
@@ -67,8 +67,10 @@ where
+ UsageProvider
+ ProvideRuntimeApi,
C::Api: AuraApi,
+ P: Send + Sync,
{
type Transaction = TransactionFor<C, B>;
+ type Proof = P;
fn create_digest(
&self,
@@ -92,7 +94,8 @@ where
_parent: &B::Header,
_params: &mut BlockImportParams,
_inherents: &InherentData,
+ _proof: Self::Proof,
) -> Result<(), Error> {
Ok(())
}
-}
+}
\ No newline at end of file
diff --git a/node/consensus-transition/manual-seal/src/consensus/babe.rs b/node/consensus-transition/manual-seal/src/consensus/babe.rs
index 9f47d5f9..c5f0c925 100644
--- a/node/consensus-transition/manual-seal/src/consensus/babe.rs
+++ b/node/consensus-transition/manual-seal/src/consensus/babe.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.
-// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd.
+// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
@@ -20,32 +20,30 @@
//! that expect babe-specific digests.
use super::ConsensusDataProvider;
-use crate::Error;
+use crate::{Error, LOG_TARGET};
use parity_scale_codec::Encode;
use sc_client_api::{AuxStore, UsageProvider};
use sc_consensus_babe::{
- authorship, find_pre_digest, BabeIntermediate, CompatibleDigestItem, Config, Epoch,
- INTERMEDIATE_KEY,
+ authorship, find_pre_digest, BabeIntermediate, CompatibleDigestItem, Epoch, INTERMEDIATE_KEY,
};
use sc_consensus_epochs::{
descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor,
};
-use sp_keystore::SyncCryptoStorePtr;
-use std::{borrow::Cow, sync::Arc};
+use sp_keystore::KeystorePtr;
+use std::{marker::PhantomData, sync::Arc};
use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier};
use sp_api::{ProvideRuntimeApi, TransactionFor};
use sp_blockchain::{HeaderBackend, HeaderMetadata};
-use sp_consensus::CacheKeyId;
use sp_consensus_babe::{
digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest},
inherents::BabeInherentData,
- AuthorityId, BabeApi, BabeAuthorityWeight, ConsensusLog, BABE_ENGINE_ID,
+ AuthorityId, BabeApi, BabeAuthorityWeight, BabeConfiguration, ConsensusLog, BABE_ENGINE_ID,
};
use sp_consensus_slots::Slot;
use sp_inherents::InherentData;
use sp_runtime::{
- generic::{BlockId, Digest},
+ generic::Digest,
traits::{Block as BlockT, Header},
DigestItem,
};
@@ -53,9 +51,9 @@ use sp_timestamp::TimestampInherentData;
/// Provides BABE-compatible predigests and BlockImportParams.
/// Intended for use with BABE runtimes.
-pub struct BabeConsensusDataProvider<B: BlockT, C> {
+pub struct BabeConsensusDataProvider<B: BlockT, C, P> {
/// shared reference to keystore
- keystore: SyncCryptoStorePtr,
+ keystore: KeystorePtr,
/// Shared reference to the client.
client: Arc,
@@ -64,10 +62,14 @@ pub struct BabeConsensusDataProvider {
epoch_changes: SharedEpochChanges,
/// BABE config, gotten from the runtime.
- config: Config,
+ /// NOTE: This is used to fetch `slot_duration` and `epoch_length` in the
+ /// `ConsensusDataProvider` implementation. Correct as far as these values
+ /// are not changed during an epoch change.
+ config: BabeConfiguration,
/// Authorities to be used for this babe chain.
authorities: Vec<(AuthorityId, BabeAuthorityWeight)>,
+ _phantom: PhantomData<P>,
}
/// Verifier to be used for babe chains
@@ -96,7 +98,7 @@ where
async fn verify(
&mut self,
mut import_params: BlockImportParams<B, ()>,
- ) -> Result<(BlockImportParams<B, ()>, Option<Vec<(CacheKeyId, Vec<u8>)>>), String> {
+ ) -> Result<BlockImportParams<B, ()>, String> {
import_params.finalized = false;
import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
@@ -105,7 +107,7 @@ where
let parent_hash = import_params.header.parent_hash();
let parent = self
.client
- .header(BlockId::Hash(*parent_hash))
+ .header(*parent_hash)
.ok()
.flatten()
.ok_or_else(|| format!("header for block {} not found", parent_hash))?;
@@ -114,7 +116,7 @@ where
.epoch_descriptor_for_child_of(
descendent_query(&*self.client),
&parent.hash(),
- parent.number().clone(),
+ *parent.number(),
pre_digest.slot(),
)
.map_err(|e| format!("failed to fetch epoch_descriptor: {}", e))?
@@ -122,16 +124,14 @@ where
// drop the lock
drop(epoch_changes);
- import_params.intermediates.insert(
- Cow::from(INTERMEDIATE_KEY),
- Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>,
- );
+ import_params
+ .insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor });
- Ok((import_params, None))
+ Ok(import_params)
}
}
-impl<B, C> BabeConsensusDataProvider<B, C>
+impl<B, C, P> BabeConsensusDataProvider<B, C, P>
where
B: BlockT,
C: AuxStore
@@ -143,7 +143,7 @@ where
{
pub fn new(
client: Arc,
- keystore: SyncCryptoStorePtr,
+ keystore: KeystorePtr,
epoch_changes: SharedEpochChanges,
authorities: Vec<(AuthorityId, BabeAuthorityWeight)>,
) -> Result {
@@ -151,9 +151,16 @@ where
return Err(Error::StringError("Cannot supply empty authority set!".into()))
}
- let config = Config::get(&*client)?;
+ let config = sc_consensus_babe::configuration(&*client)?;
- Ok(Self { config, client, keystore, epoch_changes, authorities })
+ Ok(Self {
+ config,
+ client,
+ keystore,
+ epoch_changes,
+ authorities,
+ _phantom: Default::default(),
+ })
}
fn epoch(&self, parent: &B::Header, slot: Slot) -> Result {
@@ -162,18 +169,16 @@ where
.epoch_descriptor_for_child_of(
descendent_query(&*self.client),
&parent.hash(),
- parent.number().clone(),
+ *parent.number(),
slot,
)
.map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))?
- .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?;
+ .ok_or(sp_consensus::Error::InvalidAuthoritiesSet)?;
let epoch = epoch_changes
- .viable_epoch(&epoch_descriptor, |slot| {
- Epoch::genesis(self.config.genesis_config(), slot)
- })
+ .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot))
.ok_or_else(|| {
- log::info!(target: "babe", "create_digest: no viable_epoch :(");
+ log::info!(target: LOG_TARGET, "create_digest: no viable_epoch :(");
sp_consensus::Error::InvalidAuthoritiesSet
})?;
@@ -181,7 +186,7 @@ where
}
}
-impl<B, C> ConsensusDataProvider<B> for BabeConsensusDataProvider<B, C>
+impl<B, C, P> ConsensusDataProvider<B> for BabeConsensusDataProvider<B, C, P>
where
B: BlockT,
C: AuxStore
@@ -190,8 +195,10 @@ where
+ UsageProvider
+ ProvideRuntimeApi,
C::Api: BabeApi,
+ P: Send + Sync,
{
type Transaction = TransactionFor<C, B>;
+ type Proof = P;
fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result {
let slot = inherents
@@ -216,19 +223,19 @@ where
.epoch_descriptor_for_child_of(
descendent_query(&*self.client),
&parent.hash(),
- parent.number().clone(),
+ *parent.number(),
slot,
)
.map_err(|e| {
Error::StringError(format!("failed to fetch epoch_descriptor: {}", e))
})?
- .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?;
+ .ok_or(sp_consensus::Error::InvalidAuthoritiesSet)?;
match epoch_descriptor {
ViableEpochDescriptor::Signaled(identifier, _epoch_header) => {
let epoch_mut = epoch_changes
.epoch_mut(&identifier)
- .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?;
+ .ok_or(sp_consensus::Error::InvalidAuthoritiesSet)?;
// mutate the current epoch
epoch_mut.authorities = self.authorities.clone();
@@ -236,7 +243,7 @@ where
let next_epoch = ConsensusLog::NextEpochData(NextEpochDescriptor {
authorities: self.authorities.clone(),
// copy the old randomness
- randomness: epoch_mut.randomness.clone(),
+ randomness: epoch_mut.randomness,
});
vec![
@@ -259,6 +266,7 @@ where
parent: &B::Header,
params: &mut BlockImportParams,
inherents: &InherentData,
+ _proof: Self::Proof,
) -> Result<(), Error> {
let slot = inherents
.babe_inherent_data()?
@@ -268,11 +276,11 @@ where
.epoch_descriptor_for_child_of(
descendent_query(&*self.client),
&parent.hash(),
- parent.number().clone(),
+ *parent.number(),
slot,
)
.map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))?
- .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?;
+ .ok_or(sp_consensus::Error::InvalidAuthoritiesSet)?;
// drop the lock
drop(epoch_changes);
// a quick check to see if we're in the authorities
@@ -281,7 +289,7 @@ where
let has_authority = epoch.authorities.iter().any(|(id, _)| *id == *authority);
if !has_authority {
- log::info!(target: "manual-seal", "authority not found");
+ log::info!(target: LOG_TARGET, "authority not found");
let timestamp = inherents
.timestamp_inherent_data()?
.ok_or_else(|| Error::StringError("No timestamp inherent data".into()))?;
@@ -295,7 +303,7 @@ where
identifier,
EpochHeader {
start_slot: slot,
- end_slot: (*slot * self.config.genesis_config().epoch_length).into(),
+ end_slot: (*slot * self.config.epoch_length).into(),
},
),
_ => unreachable!(
@@ -304,11 +312,8 @@ where
};
}
- params.intermediates.insert(
- Cow::from(INTERMEDIATE_KEY),
- Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>,
- );
+ params.insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor });
Ok(())
}
-}
+}
\ No newline at end of file
diff --git a/node/consensus-transition/manual-seal/src/consensus/timestamp.rs b/node/consensus-transition/manual-seal/src/consensus/timestamp.rs
index e7f4e709..aa5c5bf9 100644
--- a/node/consensus-transition/manual-seal/src/consensus/timestamp.rs
+++ b/node/consensus-transition/manual-seal/src/consensus/timestamp.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.
-// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
@@ -30,10 +30,7 @@ use sp_consensus_aura::{
use sp_consensus_babe::BabeApi;
use sp_consensus_slots::{Slot, SlotDuration};
use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier};
-use sp_runtime::{
- generic::BlockId,
- traits::{Block as BlockT, Zero},
-};
+use sp_runtime::traits::{Block as BlockT, Zero};
use sp_timestamp::{InherentType, INHERENT_IDENTIFIER};
use std::{
sync::{atomic, Arc},
@@ -46,10 +43,10 @@ use std::{
/// This works by either fetching the `slot_number` from the most recent header and dividing
/// that value by `slot_duration` in order to fork chains that expect this inherent.
///
-/// It produces timestamp inherents that are increaed by `slot_duraation` whenever
+/// It produces timestamp inherents that are increased by `slot_duration` whenever
/// `provide_inherent_data` is called.
pub struct SlotTimestampProvider {
- // holds the unix millisecnd timestamp for the most recent block
+ // holds the unix millisecond timestamp for the most recent block
unix_millis: atomic::AtomicU64,
// configured slot_duration in the runtime
slot_duration: SlotDuration,
@@ -63,7 +60,7 @@ impl SlotTimestampProvider {
C: AuxStore + HeaderBackend + ProvideRuntimeApi + UsageProvider,
C::Api: BabeApi,
{
- let slot_duration = sc_consensus_babe::Config::get(&*client)?.slot_duration();
+ let slot_duration = sc_consensus_babe::configuration(&*client)?.slot_duration();
let time = Self::with_header(&client, slot_duration, |header| {
let slot_number = *sc_consensus_babe::find_pre_digest::(&header)
@@ -109,7 +106,7 @@ impl SlotTimestampProvider {
// otherwise we'd be producing blocks for older slots.
let time = if info.best_number != Zero::zero() {
let header = client
- .header(BlockId::Hash(info.best_hash))?
+ .header(info.best_hash)?
.ok_or_else(|| "best header not found in the db!".to_string())?;
let slot = func(header)?;
// add the slot duration so there's no collision of slots
@@ -141,7 +138,7 @@ impl SlotTimestampProvider {
#[async_trait::async_trait]
impl InherentDataProvider for SlotTimestampProvider {
- fn provide_inherent_data(
+ async fn provide_inherent_data(
&self,
inherent_data: &mut InherentData,
) -> Result<(), sp_inherents::Error> {
@@ -161,4 +158,4 @@ impl InherentDataProvider for SlotTimestampProvider {
) -> Option> {
None
}
-}
+}
\ No newline at end of file
diff --git a/node/runtime/src/precompiles.rs b/node/runtime/src/precompiles.rs
index d65806c6..59390048 100644
--- a/node/runtime/src/precompiles.rs
+++ b/node/runtime/src/precompiles.rs
@@ -1,10 +1,12 @@
-use pallet_evm::{Context, Precompile, PrecompileResult, PrecompileSet};
+use pallet_evm::{
+ IsPrecompileResult, Precompile, PrecompileHandle, PrecompileResult, PrecompileSet,
+};
use sp_core::H160;
use sp_std::marker::PhantomData;
-use pallet_evm_precompile_blake2::Blake2F;
-use pallet_evm_precompile_bn128::{Bn128Add, Bn128Mul, Bn128Pairing};
-use pallet_evm_precompile_curve25519::{Curve25519Add, Curve25519ScalarMul};
+use pallet_evm_precompile_blake2::Blake2F;
+use pallet_evm_precompile_bn128::{Bn128Add, Bn128Mul, Bn128Pairing};
+use pallet_evm_precompile_curve25519::{Curve25519Add, Curve25519ScalarMul};
use pallet_evm_precompile_ed25519::Ed25519Verify;
use pallet_evm_precompile_modexp::Modexp;
use pallet_evm_precompile_sha3fips::Sha3FIPS256;
@@ -19,52 +21,55 @@ where
pub fn new() -> Self {
Self(Default::default())
}
-
- pub fn used_addresses() -> sp_std::vec::Vec {
- sp_std::vec![1, 2, 3, 4, 5, 1024, 1025, 1026, 1027, 1028]
- .into_iter()
- .map(|x| hash(x))
- .collect()
+ pub fn used_addresses() -> [H160; 10] {
+ [
+ hash(1),
+ hash(2),
+ hash(3),
+ hash(4),
+ hash(5),
+ hash(1024),
+ hash(1025),
+ hash(1026),
+ hash(1027),
+ hash(1028),
+ ]
}
}
impl PrecompileSet for EdgewarePrecompiles
where
R: pallet_evm::Config,
{
- fn execute(
- &self,
- address: H160,
- input: &[u8],
- target_gas: Option,
- context: &Context,
- is_static: bool,
- ) -> Option {
- match address {
+ fn execute(&self, handle: &mut impl PrecompileHandle) -> Option {
+ match handle.code_address() {
// Ethereum precompiles :
- a if a == hash(1) => Some(ECRecover::execute(input, target_gas, context, is_static)),
- a if a == hash(2) => Some(Sha256::execute(input, target_gas, context, is_static)),
- a if a == hash(3) => Some(Ripemd160::execute(input, target_gas, context, is_static)),
- a if a == hash(4) => Some(Identity::execute(input, target_gas, context, is_static)),
- a if a == hash(5) => Some(Modexp::execute(input, target_gas, context, is_static)),
- a if a == hash(6) => Some(Bn128Add::execute(input, target_gas, context, is_static)),
- a if a == hash(7) => Some(Bn128Mul::execute(input, target_gas, context, is_static)),
- a if a == hash(8) => Some(Bn128Pairing::execute(input, target_gas, context, is_static)),
- a if a == hash(9) => Some(Blake2F::execute(input, target_gas, context, is_static)),
+ a if a == hash(1) => Some(ECRecover::execute(handle)),
+ a if a == hash(2) => Some(Sha256::execute(handle)),
+ a if a == hash(3) => Some(Ripemd160::execute(handle)),
+ a if a == hash(4) => Some(Identity::execute(handle)),
+ a if a == hash(5) => Some(Modexp::execute(handle)),
+ a if a == hash(6) => Some(Bn128Add::execute(handle)),
+ a if a == hash(7) => Some(Bn128Mul::execute(handle)),
+ a if a == hash(8) => Some(Bn128Pairing::execute(handle)),
+ a if a == hash(9) => Some(Blake2F::execute(handle)),
// Non-Frontier specific nor Ethereum precompiles :
- a if a == hash(1024) => Some(Sha3FIPS256::execute(input, target_gas, context, is_static)),
- a if a == hash(1025) => Some(ECRecoverPublicKey::execute(input, target_gas, context, is_static)),
- a if a == hash(1026) => Some(Ed25519Verify::execute(input, target_gas, context, is_static)),
- a if a == hash(1027) => Some(Curve25519Add::execute(input, target_gas, context, is_static)),
- a if a == hash(1028) => Some(Curve25519ScalarMul::execute(input, target_gas, context, is_static)),
+ a if a == hash(1024) => Some(Sha3FIPS256::execute(handle)),
+ a if a == hash(1025) => Some(ECRecoverPublicKey::execute(handle)),
+ a if a == hash(1026) => Some(Ed25519Verify::execute(handle)),
+ a if a == hash(1027) => Some(Curve25519Add::execute(handle)),
+ a if a == hash(1028) => Some(Curve25519ScalarMul::execute(handle)),
_ => None,
}
}
- fn is_precompile(&self, address: H160) -> bool {
- Self::used_addresses().contains(&address)
+ fn is_precompile(&self, address: H160, _gas: u64) -> IsPrecompileResult {
+ IsPrecompileResult::Answer {
+ is_precompile: Self::used_addresses().contains(&address),
+ extra_cost: 0,
+ }
}
}
fn hash(a: u64) -> H160 {
H160::from_low_u64_be(a)
-}
+}
\ No newline at end of file