diff --git a/api/bin/chainflip-ingress-egress-tracker/src/witnessing/state_chain.rs b/api/bin/chainflip-ingress-egress-tracker/src/witnessing/state_chain.rs index fafbfff21b..51c0d4b7b4 100644 --- a/api/bin/chainflip-ingress-egress-tracker/src/witnessing/state_chain.rs +++ b/api/bin/chainflip-ingress-egress-tracker/src/witnessing/state_chain.rs @@ -370,7 +370,8 @@ where Swapping(_) | LiquidityProvider(_) | LiquidityPools(_) | - SolanaElections(_) => {}, + SolanaElections(_) | + BitcoinElections(_) => {}, }; Ok(()) diff --git a/state-chain/cf-integration-tests/src/mock_runtime.rs b/state-chain/cf-integration-tests/src/mock_runtime.rs index 82a3e1d365..6fe08fb734 100644 --- a/state-chain/cf-integration-tests/src/mock_runtime.rs +++ b/state-chain/cf-integration-tests/src/mock_runtime.rs @@ -26,8 +26,8 @@ use state_chain_runtime::{ opaque::SessionKeys, test_runner::*, AccountId, AccountRolesConfig, ArbitrumChainTrackingConfig, BitcoinChainTrackingConfig, - EmissionsConfig, EnvironmentConfig, EthereumChainTrackingConfig, EthereumVaultConfig, - EvmThresholdSignerConfig, FlipConfig, FundingConfig, GovernanceConfig, + BitcoinElectionsConfig, EmissionsConfig, EnvironmentConfig, EthereumChainTrackingConfig, + EthereumVaultConfig, EvmThresholdSignerConfig, FlipConfig, FundingConfig, GovernanceConfig, PolkadotChainTrackingConfig, ReputationConfig, SessionConfig, SolanaChainTrackingConfig, SolanaElectionsConfig, ValidatorConfig, }; @@ -340,6 +340,7 @@ impl ExtBuilder { ), }), }, + bitcoin_elections: BitcoinElectionsConfig { option_initial_state: None }, ethereum_broadcaster: state_chain_runtime::EthereumBroadcasterConfig { broadcast_timeout: 5 * BLOCKS_PER_MINUTE_ETHEREUM, }, diff --git a/state-chain/chains/src/lib.rs b/state-chain/chains/src/lib.rs index a063e1982b..d91788f3c1 100644 --- a/state-chain/chains/src/lib.rs +++ b/state-chain/chains/src/lib.rs @@ -71,7 +71,41 @@ pub mod mocks; pub mod witness_period { use core::ops::{Rem, Sub}; + use sp_std::ops::RangeInclusive; + + use codec::{Decode, Encode}; use frame_support::sp_runtime::traits::{One, Saturating}; + use scale_info::TypeInfo; + use serde::{Deserialize, Serialize}; + + // So we can store a range-like object in storage, since this has encode and decode. 
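+ // Construct one via the From impl on RangeInclusive, and read it back with into_range_inclusive() (see the impls below).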
+ #[derive( + Debug, Clone, PartialEq, Eq, Encode, Decode, TypeInfo, Deserialize, Serialize, Default, + )] + pub struct BlockWitnessRange { + start: I, + end: I, + } + + impl From> for BlockWitnessRange { + fn from(range: RangeInclusive) -> Self { + Self { start: *range.start(), end: *range.end() } + } + } + + impl BlockWitnessRange { + pub fn into_range_inclusive(range: BlockWitnessRange) -> RangeInclusive { + range.start..=range.end + } + + pub fn start(&self) -> &I { + &self.start + } + + pub fn end(&self) -> &I { + &self.end + } + } fn block_witness_floor< I: Copy + Saturating + Sub + Rem + Eq + One, diff --git a/state-chain/node/src/chain_spec.rs b/state-chain/node/src/chain_spec.rs index a9801dc356..5a59092a4b 100644 --- a/state-chain/node/src/chain_spec.rs +++ b/state-chain/node/src/chain_spec.rs @@ -27,8 +27,8 @@ use state_chain_runtime::{ BLOCKS_PER_MINUTE_SOLANA, }, opaque::SessionKeys, - AccountId, BlockNumber, FlipBalance, SetSizeParameters, Signature, SolanaElectionsConfig, - WASM_BINARY, + AccountId, BitcoinElectionsConfig, BlockNumber, FlipBalance, SetSizeParameters, Signature, + SolanaElectionsConfig, WASM_BINARY, }; use cf_utilities::clean_hex_address; @@ -849,6 +849,9 @@ fn testnet_genesis( ..Default::default() }, solana_elections, + + // TODO: Set correct initial state + bitcoin_elections: BitcoinElectionsConfig { option_initial_state: None }, // We can't use ..Default::default() here because chain tracking panics on default (by // design). And the way ..Default::default() syntax works is that it generates the default // value for the whole struct, not just the fields that are missing. diff --git a/state-chain/pallets/cf-elections/src/electoral_systems.rs b/state-chain/pallets/cf-elections/src/electoral_systems.rs index c7a51f6fc8..d7b6b68961 100644 --- a/state-chain/pallets/cf-elections/src/electoral_systems.rs +++ b/state-chain/pallets/cf-elections/src/electoral_systems.rs @@ -1,3 +1,4 @@ +pub mod block_witnesser; pub mod blockchain; pub mod composite; pub mod egress_success; diff --git a/state-chain/pallets/cf-elections/src/electoral_systems/block_witnesser.rs b/state-chain/pallets/cf-elections/src/electoral_systems/block_witnesser.rs new file mode 100644 index 0000000000..ded933bbaf --- /dev/null +++ b/state-chain/pallets/cf-elections/src/electoral_systems/block_witnesser.rs @@ -0,0 +1,355 @@ +use crate::{ + electoral_system::{ + AuthorityVoteOf, ConsensusVotes, ElectionIdentifierOf, ElectionReadAccess, + ElectionWriteAccess, ElectoralSystem, ElectoralWriteAccess, VotePropertiesOf, + }, + vote_storage::{self, VoteStorage}, + CorruptStorageError, SharedDataHash, +}; +use cf_chains::witness_period::BlockWitnessRange; +use cf_utilities::success_threshold_from_share_count; +use codec::{Decode, Encode}; +use frame_support::{ + ensure, + pallet_prelude::{MaybeSerializeDeserialize, Member}, + sp_runtime::Saturating, + Parameter, +}; +use log::info; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; +use sp_std::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + vec::Vec, +}; + +// Rather than push processing outside, we could provide an evaluation function that is called +// to determine whether to process or not. This keeps things encapsulated a little better. + +// We create an election with all the channels for a particular block. Then when everyone votes +// there is nothing to witness for that election i.e. for that block then it closes the election, so +// we don't duplicate that much state at all... unless on recovery. 
+ +// How do we create elections for channels that only existed at passed state? - We manage channel +// lifetimes in the ES. Then we don't prematurely expire when we're in safe mode. The channels +// themselves can live outside the ES, but their lifetimes is managed form within the ES. We just +// need to know the id to lookup the channel and its lifetime (opened_at, closed_at). + +// If there are no channels, we don't have any elections. + +// safety margin??? +// Double witnessing??? - should be handled by the downstream. E.g. dispatching a second boost to +// the ingress egress should be handled by ingress egress, same way it is now. + +// NB: We only worry about safety margins in the on-consensus hook. Chain tracking pushes the latest +// block number, potentially with gaps which we fill. The safety is determined by the dispatching +// action, this is how we can achieve dynamic, amount based safety margins. +pub struct BlockWitnesser +{ + _phantom: core::marker::PhantomData<( + Chain, + BlockData, + Properties, + ValidatorId, + OnConsensus, + ElectionGenerator, + )>, +} + +pub trait ProcessBlockData { + // Returns the unprocessed state to write back to the ES. To clear the state, we return an + // explicit None from this function. + fn process_block_data>( + chain_block_number: ChainBlockNumber, + block_data: It, + ) -> impl Iterator)>; +} + +/// Allows external/runtime/implementation to return the properties that the election should use. +/// This means each instantiation of the block witnesser can control how the properties are +/// generated, and allows for easier testing of this hook externally vs. actually creating the new +/// election inside this hook. +pub trait BlockElectionPropertiesGenerator { + fn generate_election_properties(root_block_to_witness: ChainBlockNumber) -> Properties; +} + +pub type ElectionCount = u16; + +#[derive( + Debug, Clone, PartialEq, Eq, Encode, Decode, TypeInfo, Deserialize, Serialize, Default, +)] +pub struct BlockWitnesserSettings { + // We don't want to start too many elections at once, as this could overload the engines. + // e.g. If we entered safe mode for a long time and then missed 1000 blocks, without this, we + // would start 1000 elections at the same time. Instead, we throttle the recovery. + pub max_concurrent_elections: ElectionCount, +} + +#[derive( + Debug, Clone, PartialEq, Eq, Encode, Decode, TypeInfo, Deserialize, Serialize, Default, +)] +pub struct BlockWitnesserState { + // The last block where we know that we have processed everything from.... + // what about a reorg?????? + pub last_block_election_emitted_for: ChainBlockNumber, + + // Last block we received from context. + pub last_block_received: ChainBlockNumber, + + // The block roots (of a block range) that we received non empty block data for, but still + // requires processing. + // NOTE: It is possible for block data to arrive and then be partially processed. In this case, + // the block will still be here until there is no more block data for this block root to + // process. 
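+ // Only the roots are kept here; the block data itself lives in the unsynchronised state map, keyed by root.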
+ pub unprocessed_block_roots: BTreeSet, + + pub open_elections: ElectionCount, +} + +impl< + Chain: cf_chains::Chain, + BlockData: Member + Parameter + Eq, + Properties: Parameter + Member, + ValidatorId: Member + Parameter + Ord + MaybeSerializeDeserialize, + BlockDataProcessor: ProcessBlockData<::ChainBlockNumber, BlockData> + 'static, + ElectionGenerator: BlockElectionPropertiesGenerator< + ::ChainBlockNumber, + Properties, + > + 'static, + > ElectoralSystem + for BlockWitnesser< + Chain, + BlockData, + Properties, + ValidatorId, + BlockDataProcessor, + ElectionGenerator, + > +{ + type ValidatorId = ValidatorId; + // Store the last processed block number, number of, and the number of open elections. + type ElectoralUnsynchronisedState = + BlockWitnesserState<::ChainBlockNumber>; + + // We store all the unprocessed block data here, including the most recently added block data, + // so it can be used in the OnBlockConsensus + type ElectoralUnsynchronisedStateMapKey = ::ChainBlockNumber; + type ElectoralUnsynchronisedStateMapValue = BlockData; + + type ElectoralUnsynchronisedSettings = BlockWitnesserSettings; + type ElectoralSettings = (); + type ElectionIdentifierExtra = (); + // The first item is the block number we wish to witness, the second is something else about + // that block we want to witness. e.g. all the deposit channel addresses that are active at + // that block. + type ElectionProperties = + (BlockWitnessRange<::ChainBlockNumber>, Properties); + type ElectionState = (); + type Vote = vote_storage::bitmap::Bitmap; + type Consensus = BlockData; + type OnFinalizeContext = ::ChainBlockNumber; + type OnFinalizeReturn = (); + + fn generate_vote_properties( + _election_identifier: ElectionIdentifierOf, + _previous_vote: Option<(VotePropertiesOf, AuthorityVoteOf)>, + _vote: &::PartialVote, + ) -> Result, CorruptStorageError> { + Ok(()) + } + + fn is_vote_desired>( + _election_access: &ElectionAccess, + _current_vote: Option<(VotePropertiesOf, AuthorityVoteOf)>, + ) -> Result { + Ok(true) + } + + fn on_finalize + 'static>( + election_identifiers: Vec>, + current_chain_block_number: &Self::OnFinalizeContext, + ) -> Result { + ensure!(::is_block_witness_root(*current_chain_block_number), { + log::error!( + "Block number must be a block witness root: {:?}", + *current_chain_block_number + ); + CorruptStorageError {} + }); + + let BlockWitnesserState { + mut last_block_election_emitted_for, + last_block_received, + mut open_elections, + mut unprocessed_block_roots, + } = ElectoralAccess::unsynchronised_state()?; + + info!("Last received: {:?}", last_block_received); + info!("current_chain_block_number: {:?}", *current_chain_block_number); + + // no two elections should have the same state + let (last_block_election_emitted_for, open_elections) = if *current_chain_block_number < + last_block_received + { + info!("Starting new elction because current less than last received"); + // There has been a reorg, we need to reprocess everything. + // But what about things we've already processed? How do we know if we've already + // processed, we could double witness. + // Current we include the block number in the hashed data, which means the hash would be + // different if we witnessed the same tx in a different block. This would result in + // double witnessing, but we use safety to protect against this. + + // We need to clear the states, and we assume the chain tracking will provide with the + // subsequent block numbers. 
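+ // Clear any block data stored for the re-orged range so those blocks can be re-witnessed from scratch.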
+ (*current_chain_block_number..=last_block_election_emitted_for).for_each( + |block_number| { + ElectoralAccess::set_unsynchronised_state_map(block_number, None); + }, + ); + + // All ongoing elections are now invalid, we will recreate the elections so engines can + // recast votes. + election_identifiers.into_iter().for_each(|election_identifier| { + ElectoralAccess::election_mut(election_identifier).delete(); + }); + + ElectoralAccess::new_election( + (), + ( + Chain::block_witness_range(*current_chain_block_number).into(), + ElectionGenerator::generate_election_properties(*current_chain_block_number), + ), + (), + )?; + + // We need to use the channels that were created at the old block height - this can only + // be realistic up to a certain point - this should be fine? as they'll stay open until + // we close them. As long as the reorg isn't absurdly long. We should be able to + // assert on this as a clear assumption. + (*current_chain_block_number, 1) + } else { + // ==== No reorg case ==== + + // We could have multiple elections going, for different block/ranges. + for election_identifier in election_identifiers { + let election_access = ElectoralAccess::election_mut(election_identifier); + if let Some(block_data) = election_access.check_consensus()?.has_consensus() { + let (root_block_number, _extra_properties) = election_access.properties()?; + + election_access.delete(); + + open_elections = open_elections.saturating_sub(1); + + ElectoralAccess::set_unsynchronised_state_map( + *root_block_number.start(), + Some(block_data), + ); + unprocessed_block_roots.insert(*root_block_number.start()); + } + } + + // If we haven't done any new elections, since the last run, there's not really any + // reason to run this again, so we could probably optimise this. + + let unprocessed_state = unprocessed_block_roots + .clone() + .into_iter() + .step_by(Into::::into(Chain::WITNESS_PERIOD) as usize) + .into_iter() + // Note: We don't implement iter on the underlying trait here, because this would be + // iterating over the storage of *all* electoral systems, and then we'd have to + // filter it out. This wouldn't be very efficient, so we shortcut this by + // storing the blocks we still need to process here and then iterating over them. + .map(|block_number| { + ElectoralAccess::unsynchronised_state_map(&block_number) + .map(|r| (block_number, r)) + }) + .collect::, _>>()? + .into_iter() + .filter_map(|(block_number, block_data)| { + block_data.map(|block_data| (block_number, block_data)) + }); + + let remaining_states = BlockDataProcessor::process_block_data( + *current_chain_block_number, + unprocessed_state, + ); + + // Write back any state remaining for use in future runs. 
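+ // A None value means the processor fully consumed that block's data, so its root is also dropped from the unprocessed set.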
+ remaining_states.for_each(|(block_number, block_data)| { + if block_data.is_none() { + unprocessed_block_roots.remove(&block_number); + } + info!("Setting unsynced state for: {:?}", block_number); + ElectoralAccess::set_unsynchronised_state_map(block_number, block_data); + }); + + debug_assert!( + ::is_block_witness_root(last_block_election_emitted_for), + "We only store this if it passes the original block witness root check" + ); + + let settings = ElectoralAccess::unsynchronised_settings()?; + + // println!("max concurrent elections: {:?}", settings.max_concurrent_elections); + + for range_root in (last_block_election_emitted_for + .saturating_add(Chain::WITNESS_PERIOD)..= + *current_chain_block_number) + .step_by(Into::::into(Chain::WITNESS_PERIOD) as usize) + .take(settings.max_concurrent_elections.saturating_sub(open_elections) as usize) + { + ElectoralAccess::new_election( + (), + ( + Chain::block_witness_range(range_root).into(), + ElectionGenerator::generate_election_properties(range_root), + ), + (), + )?; + last_block_election_emitted_for = range_root; + open_elections = open_elections.saturating_add(1); + } + (last_block_election_emitted_for, open_elections) + }; + + ElectoralAccess::set_unsynchronised_state(BlockWitnesserState { + last_block_received: *current_chain_block_number, + open_elections, + last_block_election_emitted_for, + unprocessed_block_roots, + })?; + + Ok(()) + } + + fn check_consensus>( + _election_access: &ElectionAccess, + _previous_consensus: Option<&Self::Consensus>, + consensus_votes: ConsensusVotes, + ) -> Result, CorruptStorageError> { + let num_authorities = consensus_votes.num_authorities(); + let active_votes = consensus_votes.active_votes(); + let num_active_votes = active_votes.len() as u32; + let success_threshold = success_threshold_from_share_count(num_authorities); + Ok(if num_active_votes >= success_threshold { + let mut hash_to_block_data = BTreeMap::::new(); + + let mut counts = BTreeMap::::new(); + for vote in active_votes { + let vote_hash = SharedDataHash::of(&vote); + hash_to_block_data.insert(vote_hash.clone(), vote.clone()); + counts.entry(vote_hash).and_modify(|count| *count += 1).or_insert(1); + } + counts.iter().find_map(|(vote, count)| { + if *count >= success_threshold { + Some(hash_to_block_data.get(vote).expect("We must insert it above").clone()) + } else { + None + } + }) + } else { + None + }) + } +} diff --git a/state-chain/pallets/cf-elections/src/electoral_systems/composite.rs b/state-chain/pallets/cf-elections/src/electoral_systems/composite.rs index 1c0f49e18f..dbc79a0105 100644 --- a/state-chain/pallets/cf-elections/src/electoral_systems/composite.rs +++ b/state-chain/pallets/cf-elections/src/electoral_systems/composite.rs @@ -146,11 +146,15 @@ macro_rules! generate_electoral_system_tuple_impls { Ok(( match properties { CompositeVoteProperties::$electoral_system(properties) => properties, + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => return Err(CorruptStorageError::new()), }, match vote { AuthorityVote::PartialVote(CompositePartialVote::$electoral_system(partial_vote)) => AuthorityVote::PartialVote(partial_vote), AuthorityVote::Vote(CompositeVote::$electoral_system(vote)) => AuthorityVote::Vote(vote), + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => return Err(CorruptStorageError::new()), }, )) @@ -191,6 +195,8 @@ macro_rules! 
generate_electoral_system_tuple_impls { ) }, )* + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => true, } } @@ -211,11 +217,15 @@ macro_rules! generate_electoral_system_tuple_impls { Ok(( match previous_properties { CompositeVoteProperties::$electoral_system(previous_properties) => previous_properties, + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => return Err(CorruptStorageError::new()), }, match previous_vote { AuthorityVote::PartialVote(CompositePartialVote::$electoral_system(partial_vote)) => AuthorityVote::PartialVote(partial_vote), AuthorityVote::Vote(CompositeVote::$electoral_system(vote)) => AuthorityVote::Vote(vote), + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => return Err(CorruptStorageError::new()), }, )) @@ -223,6 +233,8 @@ macro_rules! generate_electoral_system_tuple_impls { partial_vote, ).map(CompositeVoteProperties::$electoral_system) },)* + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()), } } @@ -249,6 +261,8 @@ macro_rules! generate_electoral_system_tuple_impls { previous_consensus.map(|previous_consensus| { match previous_consensus { CompositeConsensus::$electoral_system(previous_consensus) => Ok(previous_consensus), + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()), } }).transpose()?, @@ -263,6 +277,8 @@ macro_rules! generate_electoral_system_tuple_impls { vote: Some((properties, vote)), validator_id }), + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()), } } else { @@ -318,6 +334,8 @@ macro_rules! generate_electoral_system_tuple_impls { CompositeElectionProperties::$current(properties) => { Ok(properties) }, + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()) } } @@ -326,6 +344,8 @@ macro_rules! generate_electoral_system_tuple_impls { CompositeElectionState::$current(state) => { Ok(state) }, + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()) } } @@ -367,6 +387,8 @@ macro_rules! generate_electoral_system_tuple_impls { consensus_status.try_map(|consensus| { match consensus { CompositeConsensus::$current(consensus) => Ok(consensus), + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()), } }) @@ -399,6 +421,8 @@ macro_rules! generate_electoral_system_tuple_impls { match StorageAccess::unsynchronised_state_map(&CompositeElectoralUnsynchronisedStateMapKey::$current(key.clone())) { Some(CompositeElectoralUnsynchronisedStateMapValue::$current(value)) => Ok(Some(value)), None => Ok(None), + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()), } } @@ -459,4 +483,5 @@ macro_rules! 
generate_electoral_system_tuple_impls { }; } +generate_electoral_system_tuple_impls!(tuple_1_impls: ((A, A0))); generate_electoral_system_tuple_impls!(tuple_7_impls: ((A, A0), (B, B0), (C, C0), (D, D0), (EE, E0), (FF, F0), (GG, G0))); diff --git a/state-chain/pallets/cf-elections/src/electoral_systems/mocks.rs b/state-chain/pallets/cf-elections/src/electoral_systems/mocks.rs index 0d51d2812f..a5c66a01ea 100644 --- a/state-chain/pallets/cf-elections/src/electoral_systems/mocks.rs +++ b/state-chain/pallets/cf-elections/src/electoral_systems/mocks.rs @@ -57,7 +57,6 @@ where Self { unsynchronised_state, ..self } } - #[allow(dead_code)] pub fn with_unsynchronised_settings( self, unsynchronised_settings: ES::ElectoralUnsynchronisedSettings, @@ -65,12 +64,10 @@ where Self { unsynchronised_settings, ..self } } - #[allow(dead_code)] pub fn with_electoral_settings(self, electoral_settings: ES::ElectoralSettings) -> Self { Self { electoral_settings, ..self } } - #[allow(dead_code)] pub fn with_initial_election_state( self, extra: ES::ElectionIdentifierExtra, @@ -163,6 +160,47 @@ impl TestContext { ) } + // TODO: factor out with above. + // Note: it's important that these expectations are executed in order, as some tests rely on + // testing that the order several elections are processed does not matter. + pub fn expect_consensus_multi( + self, + votes_and_expectations: Vec<(ConsensusVotes, Option)>, + ) -> Self { + let mut active_election_ids = self.all_election_ids().into_iter(); + + let mut next_self = self.clone(); + + for (mut consensus_votes, expected_consensus) in votes_and_expectations { + assert!(consensus_votes.num_authorities() > 0, "Cannot have zero authorities."); + + use rand::seq::SliceRandom; + consensus_votes.votes.shuffle(&mut rand::thread_rng()); + + let current_election_id = &active_election_ids + .next() + .expect("More expected elections than active.") + .clone(); + + let new_consensus = MockAccess::::election(*current_election_id) + .check_consensus(None, consensus_votes) + .unwrap(); + + // Should assert on some condition about the consensus. + assert_eq!(new_consensus.clone(), expected_consensus); + + next_self = next_self.inner_force_consensus_update( + *current_election_id, + if let Some(consensus) = new_consensus { + ConsensusStatus::Gained { most_recent: None, new: consensus } + } else { + ConsensusStatus::None + }, + ); + } + self + } + pub fn only_election_id(&self) -> ElectionIdentifierOf { self.all_election_ids() .into_iter() @@ -233,6 +271,12 @@ impl TestContext { } self } + + /// For running some code in between updates. 
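+ /// For example, adjusting a mock hook's return value between two test_on_finalize calls.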
+ pub fn then(self, f: impl FnOnce()) -> Self { + f(); + self + } } type CheckFnParam = diff --git a/state-chain/pallets/cf-elections/src/electoral_systems/mocks/access.rs b/state-chain/pallets/cf-elections/src/electoral_systems/mocks/access.rs index af32e582ee..ab06f8e9df 100644 --- a/state-chain/pallets/cf-elections/src/electoral_systems/mocks/access.rs +++ b/state-chain/pallets/cf-elections/src/electoral_systems/mocks/access.rs @@ -291,7 +291,6 @@ impl MockStorageAccess { identifier: ElectionIdentifierOf, state: ES::ElectionState, ) { - println!("Setting election state for identifier: {:?}", identifier); ELECTION_STATE.with(|old_state| { let mut state_ref = old_state.borrow_mut(); state_ref.insert(identifier.encode(), state.encode()); @@ -390,6 +389,7 @@ impl MockStorageAccess { key: ES::ElectoralUnsynchronisedStateMapKey, value: Option, ) { + println!("Setting unsynced state map for key: {:?}", key); ELECTORAL_UNSYNCHRONISED_STATE_MAP.with(|old_state_map| { let mut state_map_ref = old_state_map.borrow_mut(); state_map_ref.insert(key.encode(), value.map(|v| v.encode())); diff --git a/state-chain/pallets/cf-elections/src/electoral_systems/tests.rs b/state-chain/pallets/cf-elections/src/electoral_systems/tests.rs index 7fffd97d21..6e865a6a9d 100644 --- a/state-chain/pallets/cf-elections/src/electoral_systems/tests.rs +++ b/state-chain/pallets/cf-elections/src/electoral_systems/tests.rs @@ -2,6 +2,7 @@ pub(crate) use super::mocks; pub(crate) use crate::register_checks; +pub mod block_witnesser; pub mod egress_success; pub mod liveness; pub mod monotonic_change; diff --git a/state-chain/pallets/cf-elections/src/electoral_systems/tests/block_witnesser.rs b/state-chain/pallets/cf-elections/src/electoral_systems/tests/block_witnesser.rs new file mode 100644 index 0000000000..a9e4088d04 --- /dev/null +++ b/state-chain/pallets/cf-elections/src/electoral_systems/tests/block_witnesser.rs @@ -0,0 +1,611 @@ +// 1. We come to consensus when all data is the same. +// 2. We execute hooks when coming to consensus +// 3. On finalize we start as many elections as possible within the new range, but no more than the +// maximum => skipping received blocks is fine, even ranges +// 4. First time the ES is run, we should only spawn the last election. Last procsesed will not +// exist, so we have to ensure we don't generate +// all elections until the beginning of time. +// 5. Ranges are consistent, and if we're mid range we don't emit a new election, we wait for next +// time we can election for a whole range. +// 6. State updates. When channel is opened, what happens? new election? +// Out of order consensus - when catching up this is possible. We need to ensure everything is still +// handled correctly. +// Testing with a chain with range > 1 +// State partially processed, how do we test that the state still gets processed until all the state +// is processed. + +use super::{ + mocks::{Check, TestSetup}, + register_checks, +}; +use crate::{ + electoral_system::{ConsensusVote, ConsensusVotes, ElectoralSystem}, + electoral_systems::block_witnesser::*, +}; +use cf_chains::{mocks::MockEthereum, Chain}; +use codec::Encode; +use sp_std::collections::btree_set::BTreeSet; + +thread_local! 
{ + pub static PROPERTIES_TO_RETURN: std::cell::RefCell = const { std::cell::RefCell::new(BTreeSet::new()) }; + pub static GENERATE_ELECTION_HOOK_CALLED: std::cell::Cell = const { std::cell::Cell::new(0) }; + pub static PROCESS_BLOCK_DATA_HOOK_CALLED: std::cell::Cell = const { std::cell::Cell::new(0) }; + // the actual block data that process_block_data was called with. + pub static PROCESS_BLOCK_DATA_CALLED_WITH: std::cell::RefCell> = const { std::cell::RefCell::new(vec![]) }; + pub static PROCESS_BLOCK_DATA_TO_RETURN: std::cell::RefCell)>> = const { std::cell::RefCell::new(vec![]) }; +} + +pub type ChainBlockNumber = ::ChainBlockNumber; +pub type ValidatorId = u16; + +pub type BlockData = Vec; + +struct MockGenerateElectionHook { + _phantom: core::marker::PhantomData<(ChainBlockNumber, Properties)>, +} + +pub type Properties = BTreeSet; + +impl BlockElectionPropertiesGenerator + for MockGenerateElectionHook +{ + fn generate_election_properties(_root_to_witness: ChainBlockNumber) -> Properties { + GENERATE_ELECTION_HOOK_CALLED.with(|hook_called| hook_called.set(hook_called.get() + 1)); + // The properties are not important to the logic of the electoral system itself, so we can + // return empty. + BTreeSet::new() + } +} + +struct MockBlockProcessor { + _phantom: core::marker::PhantomData<(ChainBlockNumber, BlockData)>, +} + +impl MockBlockProcessor { + pub fn set_block_data_to_return(block_data: Vec<(ChainBlockNumber, Option)>) { + PROCESS_BLOCK_DATA_TO_RETURN + .with(|block_data_to_return| *block_data_to_return.borrow_mut() = block_data); + } +} + +impl ProcessBlockData + for MockBlockProcessor +{ + // We need to do more here, like store some state and push back. + fn process_block_data>( + // This isn't so important, in these tests, it's important for the implemenation of the + // hooks. e.g. to determine a safety margin. + _chain_block_number: ChainBlockNumber, + block_data: BlockDataIter, + ) -> impl Iterator)> { + let block_data_vec = block_data.into_iter().collect::>(); + // We can only return a subset of the block data. + PROCESS_BLOCK_DATA_CALLED_WITH + .with(|old_block_data| *old_block_data.borrow_mut() = block_data_vec.clone()); + PROCESS_BLOCK_DATA_HOOK_CALLED.with(|hook_called| hook_called.set(hook_called.get() + 1)); + let block_data_to_return = PROCESS_BLOCK_DATA_TO_RETURN + .with(|block_data_to_return| block_data_to_return.borrow().clone()); + + // TODO: Think about if we need this check. It's not currently enforced in the traits, so + // perhaps instead we should handle cases where the hook returns any set of properties. It + // would usually be wrong to do so, but this ES doens't have to break as a result. + // check that all blocks in block_data_to_retun are in block_data to ensure test consistency + // block_data_to_return + // .clone() + // .into_iter() + // .for_each(|(block_number, block_data_return)| { + // if let Some(data) = block_data_return { + // assert!(block_data_vec.contains(&(block_number, data))); + // } else { + // assert!(!block_data_vec.iter().any(|(number, _)| number == &block_number)); + // } + // }); + + block_data_to_return.into_iter() + } +} + +// We need to provide a mock chain here... MockEthereum might be what we're after +type SimpleBlockWitnesser = BlockWitnesser< + MockEthereum, + BlockData, + Properties, + ValidatorId, + MockBlockProcessor, + MockGenerateElectionHook, +>; + +register_checks! 
{ + SimpleBlockWitnesser { + generate_election_properties_called_n_times(_pre, _post, n: u8) { + assert_eq!(GENERATE_ELECTION_HOOK_CALLED.with(|hook_called| hook_called.get()), n, "generate_election_properties should have been called {} times so far!", n); + }, + number_of_open_elections_is(_pre, post, n: ElectionCount) { + assert_eq!(post.unsynchronised_state.open_elections, n, "Number of open elections should be {}", n); + }, + process_block_data_called_n_times(_pre, _post, n: u8) { + assert_eq!(PROCESS_BLOCK_DATA_HOOK_CALLED.with(|hook_called| hook_called.get()), n, "process_block_data should have been called {} times so far!", n); + }, + process_block_data_called_last_with(_pre, _post, block_data: Vec<(ChainBlockNumber, BlockData)>) { + assert_eq!(PROCESS_BLOCK_DATA_CALLED_WITH.with(|old_block_data| old_block_data.borrow().clone()), block_data, "process_block_data should have been called with {:?}", block_data); + }, + unsynchronised_state_map_contains(_pre, post, root_block_number: ChainBlockNumber) { + assert!(post.unsynchronised_state_map.get(&root_block_number.encode()[..]).is_some(), "Root block number {} should be in the state map", root_block_number); + }, + } +} + +fn generate_votes( + correct_voters: BTreeSet, + incorrect_voters: BTreeSet, + did_not_vote: BTreeSet, + correct_data: BlockData, +) -> ConsensusVotes { + let incorrect_data = vec![1u8, 2, 3]; + assert_ne!(incorrect_data, correct_data); + ConsensusVotes { + votes: correct_voters + .into_iter() + .map(|v| ConsensusVote { vote: Some(((), correct_data.clone())), validator_id: v }) + .chain(incorrect_voters.into_iter().map(|v| ConsensusVote { + vote: Some(((), incorrect_data.clone())), + validator_id: v, + })) + .chain(did_not_vote.into_iter().map(|v| ConsensusVote { vote: None, validator_id: v })) + .collect(), + } +} + +// Util to create a successful set of votes, along with the consensus expectation. +fn create_votes_expectation( + consensus: BlockData, +) -> ( + ConsensusVotes, + Option<::Consensus>, +) { + ( + generate_votes( + (0..20).into_iter().collect(), + Default::default(), + Default::default(), + consensus.clone(), + ), + Some(consensus), + ) +} + +const MAX_CONCURRENT_ELECTIONS: ElectionCount = 5; + +// We start an election for a block and there is nothing there. The base case. 
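+// The single election should reach consensus on empty block data; process_block_data runs once with nothing to do.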
+#[test] +fn no_block_data_success() { + const INIT_LAST_BLOCK_RECEIVED: ChainBlockNumber = 0; + TestSetup::::default() + .with_unsynchronised_state(BlockWitnesserState { + last_block_received: INIT_LAST_BLOCK_RECEIVED, + ..BlockWitnesserState::default() + }) + .with_unsynchronised_settings(BlockWitnesserSettings { + max_concurrent_elections: MAX_CONCURRENT_ELECTIONS, + }) + .build() + .test_on_finalize( + &(INIT_LAST_BLOCK_RECEIVED + 1), + |_| {}, + vec![ + Check::::generate_election_properties_called_n_times(1), + Check::::number_of_open_elections_is(1), + Check::::process_block_data_called_n_times(1), + ], + ) + .expect_consensus( + generate_votes( + (0..20).into_iter().collect(), + Default::default(), + Default::default(), + vec![], + ), + Some(vec![]), + ); +} + +#[test] +fn creates_multiple_elections_below_maximum_when_required() { + const INIT_LAST_BLOCK_RECEIVED: ChainBlockNumber = 0; + const NUMBER_OF_ELECTIONS: ElectionCount = MAX_CONCURRENT_ELECTIONS - 1; + TestSetup::::default() + .with_unsynchronised_state(BlockWitnesserState { + last_block_received: INIT_LAST_BLOCK_RECEIVED, + ..BlockWitnesserState::default() + }) + .with_unsynchronised_settings(BlockWitnesserSettings { + max_concurrent_elections: MAX_CONCURRENT_ELECTIONS, + }) + .build() + .test_on_finalize( + // Process multiple elections, but still elss than the maximum concurrent + &(INIT_LAST_BLOCK_RECEIVED + (NUMBER_OF_ELECTIONS as u64)), + |pre_state| { + assert_eq!(pre_state.unsynchronised_state.open_elections, 0); + }, + vec![ + Check::::generate_election_properties_called_n_times(4), + Check::::number_of_open_elections_is(NUMBER_OF_ELECTIONS), + ], + ) + .expect_consensus_multi(vec![ + ( + generate_votes( + (0..20).into_iter().collect(), + Default::default(), + Default::default(), + vec![], + ), + Some(vec![]), + ), + ( + generate_votes( + (0..20).into_iter().collect(), + Default::default(), + Default::default(), + vec![1, 3, 4], + ), + Some(vec![1, 3, 4]), + ), + // no progress on external chain but on finalize called again + ]) + .test_on_finalize( + &(INIT_LAST_BLOCK_RECEIVED + (NUMBER_OF_ELECTIONS as u64)), + |pre_state| { + assert_eq!(pre_state.unsynchronised_state.open_elections, NUMBER_OF_ELECTIONS); + }, + vec![ + // Still no extra elections created. 
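+ // (The external chain has not advanced, so there is no new range to start an election for.)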
+ Check::::generate_election_properties_called_n_times( + NUMBER_OF_ELECTIONS as u8, + ), + // we should have resolved two elections + Check::::number_of_open_elections_is(2), + ], + ); +} + +#[test] +fn creates_multiple_elections_limited_by_maximum() { + const INIT_LAST_BLOCK_RECEIVED: ChainBlockNumber = 0; + const NUMBER_OF_ELECTIONS_REQUIRED: ElectionCount = MAX_CONCURRENT_ELECTIONS * 2; + let consensus_resolutions: Vec<( + ConsensusVotes, + Option<::Consensus>, + )> = vec![ + create_votes_expectation(vec![]), + create_votes_expectation(vec![1, 3, 4]), + // no progress on external chain but on finalize called again + ]; + let number_of_resolved_elections = consensus_resolutions.len(); + TestSetup::::default() + .with_unsynchronised_state(BlockWitnesserState { + last_block_received: INIT_LAST_BLOCK_RECEIVED, + ..BlockWitnesserState::default() + }) + .with_unsynchronised_settings(BlockWitnesserSettings { + max_concurrent_elections: MAX_CONCURRENT_ELECTIONS, + }) + .build() + .test_on_finalize( + // Process multiple elections, but still elss than the maximum concurrent + &(INIT_LAST_BLOCK_RECEIVED + (NUMBER_OF_ELECTIONS_REQUIRED as u64)), + |pre_state| { + assert_eq!(pre_state.unsynchronised_state.open_elections, 0); + }, + vec![ + Check::::generate_election_properties_called_n_times( + MAX_CONCURRENT_ELECTIONS as u8, + ), + Check::::number_of_open_elections_is( + MAX_CONCURRENT_ELECTIONS, + ), + ], + ) + // Only resolve two of the elections. The last 3 are unresolved at this point. But + // we now have space to start new elections. + .expect_consensus_multi(consensus_resolutions) + .test_on_finalize( + &(INIT_LAST_BLOCK_RECEIVED + (NUMBER_OF_ELECTIONS_REQUIRED as u64)), + |pre_state| { + assert_eq!(pre_state.unsynchronised_state.open_elections, MAX_CONCURRENT_ELECTIONS); + }, + vec![ + // Still no extra elections created. + Check::::generate_election_properties_called_n_times( + MAX_CONCURRENT_ELECTIONS as u8 + number_of_resolved_elections as u8, + ), + // we should have resolved two elections + Check::::number_of_open_elections_is( + MAX_CONCURRENT_ELECTIONS, + ), + ], + ); +} + +#[test] +fn reorg_removes_state_and_continues() { + const INIT_LAST_BLOCK_RECEIVED: ChainBlockNumber = 10; + const NEXT_BLOCK_NUMBER: ChainBlockNumber = INIT_LAST_BLOCK_RECEIVED + 10; + const REORG_LENGTH: ChainBlockNumber = 5; + + let all_votes = (0..MAX_CONCURRENT_ELECTIONS) + .into_iter() + .map(|_| create_votes_expectation(vec![5, 6, 7])) + .collect::>(); + TestSetup::::default() + .with_unsynchronised_state(BlockWitnesserState { + last_block_received: INIT_LAST_BLOCK_RECEIVED, + ..BlockWitnesserState::default() + }) + .with_unsynchronised_settings(BlockWitnesserSettings { + max_concurrent_elections: MAX_CONCURRENT_ELECTIONS, + }) + .build() + .test_on_finalize( + &NEXT_BLOCK_NUMBER, + |_| {}, + vec![ + Check::::generate_election_properties_called_n_times( + MAX_CONCURRENT_ELECTIONS as u8, + ), + Check::::number_of_open_elections_is( + MAX_CONCURRENT_ELECTIONS, + ), + // No reorg, so we try processing any unprocessed state (there would be none at + // this point though, since no elections have resolved). + Check::::process_block_data_called_n_times(1), + ], + ) + .expect_consensus_multi(all_votes) + .test_on_finalize( + &(NEXT_BLOCK_NUMBER - REORG_LENGTH), + |_| {}, + // We remove the actives ones and open one for the first block that we detected a + // reorg for. 
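+ // (Here that block is NEXT_BLOCK_NUMBER - REORG_LENGTH, the height passed to this on_finalize call.)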
+ vec![ + Check::::generate_election_properties_called_n_times( + MAX_CONCURRENT_ELECTIONS as u8 + 1, + ), + Check::::number_of_open_elections_is(1), + // There was a reorg, so there's definitely nothing to process since we're deleting + // all the data and just starting a new election, no extra calls here. + Check::::process_block_data_called_n_times(1), + ], + ); +} + +#[test] +fn partially_processed_block_data_processed_next_on_finalize() { + let first_block_consensus: BlockData = vec![5, 6, 7]; + + let first_block_data_after_processing: Vec<_> = + first_block_consensus.clone().into_iter().take(2).collect(); + + const INIT_LAST_BLOCK_RECEIVED: ChainBlockNumber = 0; + TestSetup::::default() + .with_unsynchronised_state(BlockWitnesserState { + last_block_received: INIT_LAST_BLOCK_RECEIVED, + ..BlockWitnesserState::default() + }) + .with_unsynchronised_settings(BlockWitnesserSettings { + max_concurrent_elections: MAX_CONCURRENT_ELECTIONS, + }) + .build() + .test_on_finalize( + &(INIT_LAST_BLOCK_RECEIVED + 1), + |_| {}, + vec![ + Check::::generate_election_properties_called_n_times(1), + Check::::number_of_open_elections_is(1), + Check::::process_block_data_called_n_times(1), + // We haven't come to consensus on any elections, so there's no unprocessed data. + Check::::process_block_data_called_last_with(vec![]), + ], + ) + .expect_consensus_multi(vec![create_votes_expectation(first_block_consensus.clone())]) + .then(|| { + // We process one of the items, so we return only 2 of 3. + MockBlockProcessor::set_block_data_to_return(vec![( + INIT_LAST_BLOCK_RECEIVED + 1, + Some(first_block_data_after_processing.clone()), + )]); + }) + .test_on_finalize( + &(INIT_LAST_BLOCK_RECEIVED + 2), + |_| {}, + vec![ + Check::::generate_election_properties_called_n_times(2), + // One opened, one closed. + Check::::number_of_open_elections_is(1), + // We call it again. + Check::::process_block_data_called_n_times(2), + // We have the election data for the election we emitted before now. We try to + // process it. + Check::::process_block_data_called_last_with(vec![( + INIT_LAST_BLOCK_RECEIVED + 1, + first_block_consensus, + )]), + ], + ) + // No progress on external chain, so state should be the same as above, except that we + // processed one of the items last time. + .test_on_finalize( + &(INIT_LAST_BLOCK_RECEIVED + 2), + |_| {}, + vec![ + Check::::generate_election_properties_called_n_times(2), + Check::::number_of_open_elections_is(1), + // We call it again. 
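+ // (third call overall), this time with the partially processed remainder from the first block: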
+ Check::::process_block_data_called_n_times(3), + Check::::process_block_data_called_last_with(vec![( + INIT_LAST_BLOCK_RECEIVED + 1, + first_block_data_after_processing, + )]), + ], + ); +} + +#[test] +fn elections_resolved_out_of_order_has_no_impact() { + const INIT_LAST_BLOCK_RECEIVED: ChainBlockNumber = 0; + const FIRST_ELECTION_BLOCK_CREATED: ChainBlockNumber = INIT_LAST_BLOCK_RECEIVED + 1; + const SECOND_ELECTION_BLOCK_CREATED: ChainBlockNumber = FIRST_ELECTION_BLOCK_CREATED + 1; + const NUMBER_OF_ELECTIONS: ElectionCount = 2; + TestSetup::::default() + .with_unsynchronised_state(BlockWitnesserState { + last_block_received: INIT_LAST_BLOCK_RECEIVED, + ..BlockWitnesserState::default() + }) + .with_unsynchronised_settings(BlockWitnesserSettings { + max_concurrent_elections: MAX_CONCURRENT_ELECTIONS, + }) + .build() + .test_on_finalize( + // Process multiple elections, but still elss than the maximum concurrent + &(INIT_LAST_BLOCK_RECEIVED + 2), + |pre_state| { + assert_eq!(pre_state.unsynchronised_state.open_elections, 0); + }, + vec![ + Check::::generate_election_properties_called_n_times( + NUMBER_OF_ELECTIONS as u8, + ), + Check::::number_of_open_elections_is(NUMBER_OF_ELECTIONS), + ], + ) + .expect_consensus_multi(vec![ + ( + // no consensus + generate_votes( + (0..20).into_iter().collect(), + (0..20).into_iter().collect(), + Default::default(), + vec![], + ), + None, + ), + ( + // consensus + generate_votes( + (0..40).into_iter().collect(), + Default::default(), + Default::default(), + vec![1, 3, 4], + ), + Some(vec![1, 3, 4]), + ), + // no progress on external chain but on finalize called again + ]) + // TODO: Check the new elections have kicked off correct + .test_on_finalize( + &(INIT_LAST_BLOCK_RECEIVED + (NUMBER_OF_ELECTIONS as u64) + 1), + |pre_state| { + assert_eq!(pre_state.unsynchronised_state.open_elections, NUMBER_OF_ELECTIONS); + }, + vec![ + // one extra election created + Check::::generate_election_properties_called_n_times( + (NUMBER_OF_ELECTIONS + 1) as u8, + ), + // we should have resolved one election, and started one election + Check::::number_of_open_elections_is(2), + // 0 indexed second election should have come to consensus, and its block data + // should be stored. + Check::::unsynchronised_state_map_contains( + SECOND_ELECTION_BLOCK_CREATED, + ), + ], + ) + // gain consensus on the first emitted election now + .expect_consensus_multi(vec![( + generate_votes( + (0..40).into_iter().collect(), + Default::default(), + Default::default(), + vec![9, 1, 2], + ), + Some(vec![9, 1, 2]), + )]) + .test_on_finalize( + &(INIT_LAST_BLOCK_RECEIVED + (NUMBER_OF_ELECTIONS as u64) + 2), + |pre_state| { + assert_eq!( + pre_state.unsynchronised_state.open_elections, 2, + "number of open elections should be 2" + ); + }, + vec![ + // one extra election created + Check::::generate_election_properties_called_n_times( + (NUMBER_OF_ELECTIONS + 2) as u8, + ), + // we should have resolved one elections, and started one election + Check::::number_of_open_elections_is(2), + // Now the first election we emitted is resolved, and its block data should be + // stored, and we should still have the second election block data. 
+ Check::::unsynchronised_state_map_contains( + FIRST_ELECTION_BLOCK_CREATED, + ), + Check::::unsynchronised_state_map_contains( + SECOND_ELECTION_BLOCK_CREATED, + ), + ], + ) // gain consensus on the first emitted election now + .expect_consensus_multi(vec![ + ( + generate_votes( + (0..40).into_iter().collect(), + Default::default(), + Default::default(), + vec![81, 1, 93], + ), + Some(vec![81, 1, 93]), + ), + ( + generate_votes( + (0..40).into_iter().collect(), + Default::default(), + Default::default(), + vec![69, 69, 69], + ), + Some(vec![69, 69, 69]), + ), + ]) + // external chain doesn't move forward + .test_on_finalize( + &(INIT_LAST_BLOCK_RECEIVED + (NUMBER_OF_ELECTIONS as u64) + 2), + |pre_state| { + assert_eq!( + pre_state.unsynchronised_state.open_elections, 2, + "number of open elections should be 2" + ); + }, + vec![ + // one extra election created + Check::::generate_election_properties_called_n_times( + (NUMBER_OF_ELECTIONS + 2) as u8, + ), + // all elections have resolved now + Check::::number_of_open_elections_is(0), + // Now the last two elections are resolved in order + Check::::unsynchronised_state_map_contains( + FIRST_ELECTION_BLOCK_CREATED, + ), + Check::::unsynchronised_state_map_contains( + SECOND_ELECTION_BLOCK_CREATED, + ), + Check::::unsynchronised_state_map_contains( + SECOND_ELECTION_BLOCK_CREATED + 1, + ), + Check::::unsynchronised_state_map_contains( + SECOND_ELECTION_BLOCK_CREATED + 2, + ), + ], + ); +} diff --git a/state-chain/pallets/cf-elections/src/lib.rs b/state-chain/pallets/cf-elections/src/lib.rs index d8f8f9864a..538349413e 100644 --- a/state-chain/pallets/cf-elections/src/lib.rs +++ b/state-chain/pallets/cf-elections/src/lib.rs @@ -102,6 +102,7 @@ //! or reconstructed from the others is also configured via that trait. #![feature(try_find)] +#![feature(step_trait)] #![cfg_attr(test, feature(closure_track_caller))] #![cfg_attr(not(feature = "std"), no_std)] #![doc = include_str!("../README.md")] diff --git a/state-chain/pallets/cf-elections/src/vote_storage/composite.rs b/state-chain/pallets/cf-elections/src/vote_storage/composite.rs index 3de8dc9e97..0da6427523 100644 --- a/state-chain/pallets/cf-elections/src/vote_storage/composite.rs +++ b/state-chain/pallets/cf-elections/src/vote_storage/composite.rs @@ -86,6 +86,8 @@ macro_rules! generate_vote_storage_tuple_impls { }) }, )* + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()), } } @@ -109,6 +111,8 @@ macro_rules! generate_vote_storage_tuple_impls { match get_shared_data(shared_data_hash)? { Some(CompositeSharedData::$t(shared_data)) => Ok(Some(shared_data)), None => Ok(None), + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()) } }, @@ -135,6 +139,8 @@ macro_rules! generate_vote_storage_tuple_impls { match get_shared_data(shared_data_hash)? { Some(CompositeSharedData::$t(shared_data)) => Ok(Some(shared_data)), None => Ok(None), + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()), } }, @@ -161,6 +167,8 @@ macro_rules! generate_vote_storage_tuple_impls { match get_shared_data(shared_data_hash)? { Some(CompositeSharedData::$t(shared_data)) => Ok(Some(shared_data)), None => Ok(None), + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()), } }, @@ -179,6 +187,8 @@ macro_rules! 
generate_vote_storage_tuple_impls { individual_component: None, bitmap_component: None, } => Ok(None), + // For when we have a composite of 1 + #[allow(unreachable_patterns)] _ => Err(CorruptStorageError::new()), } } @@ -274,4 +284,5 @@ macro_rules! generate_vote_storage_tuple_impls { } } +generate_vote_storage_tuple_impls!(tuple_1_impls: (A)); generate_vote_storage_tuple_impls!(tuple_7_impls: (A, B, C, D, EE, FF, GG)); diff --git a/state-chain/pallets/cf-ingress-egress/src/lib.rs b/state-chain/pallets/cf-ingress-egress/src/lib.rs index 92cb0cc458..1795167760 100644 --- a/state-chain/pallets/cf-ingress-egress/src/lib.rs +++ b/state-chain/pallets/cf-ingress-egress/src/lib.rs @@ -275,7 +275,9 @@ pub mod pallet { pub type TransactionInIdFor = <<>::TargetChain as Chain>::ChainCrypto as ChainCrypto>::TransactionInId; - #[derive(Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, TypeInfo, MaxEncodedLen)] + #[derive( + Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, TypeInfo, MaxEncodedLen, Ord, PartialOrd, + )] pub struct DepositWitness { pub deposit_address: C::ChainAccount, pub asset: C::ChainAsset, @@ -1630,7 +1632,7 @@ impl, I: 'static> Pallet { } } - fn process_deposit_witnesses( + pub fn process_deposit_witnesses( deposit_witnesses: Vec>, block_height: TargetChainBlockNumber, ) -> DispatchResult { diff --git a/state-chain/runtime/src/chainflip.rs b/state-chain/runtime/src/chainflip.rs index 35e20db33c..69d434d7d4 100644 --- a/state-chain/runtime/src/chainflip.rs +++ b/state-chain/runtime/src/chainflip.rs @@ -10,6 +10,9 @@ mod missed_authorship_slots; mod offences; pub mod pending_rotation_broadcasts; mod signer_nomination; + +// Election pallet implementations +pub mod bitcoin_elections; pub mod solana_elections; use crate::{ diff --git a/state-chain/runtime/src/chainflip/bitcoin_elections.rs b/state-chain/runtime/src/chainflip/bitcoin_elections.rs new file mode 100644 index 0000000000..329289950e --- /dev/null +++ b/state-chain/runtime/src/chainflip/bitcoin_elections.rs @@ -0,0 +1,128 @@ +use crate::Runtime; +use cf_chains::{btc, Bitcoin}; +use cf_traits::Chainflip; +use pallet_cf_ingress_egress::DepositChannelDetails; + +use cf_chains::instances::BitcoinInstance; + +use codec::{Decode, Encode, MaxEncodedLen}; +use pallet_cf_elections::{ + electoral_system::ElectoralSystem, + electoral_systems::{ + block_witnesser::{BlockElectionPropertiesGenerator, BlockWitnesser, ProcessBlockData}, + composite::{tuple_1_impls::Hooks, CompositeRunner}, + }, + CorruptStorageError, ElectionIdentifier, RunnerStorageAccess, +}; + +use pallet_cf_ingress_egress::DepositWitness; +use scale_info::TypeInfo; + +use sp_std::{vec, vec::Vec}; + +pub type BitcoinElectoralSystemRunner = CompositeRunner< + (BitcoinDepositChannelWitnessing,), + ::ValidatorId, + RunnerStorageAccess, + BitcoinElectionHooks, +>; + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] +pub struct OpenChannelDetails { + pub open_block: ChainBlockNumber, + pub close_block: ChainBlockNumber, +} + +type BitcoinDepositChannelWitnessing = BlockWitnesser< + Bitcoin, + Vec>, + Vec>, + ::ValidatorId, + BitcoinDepositChannelWitessingProcessor, + BitcoinDepositChannelWitnessingGenerator, +>; + +pub struct BitcoinDepositChannelWitnessingGenerator; + +impl + BlockElectionPropertiesGenerator< + btc::BlockNumber, + Vec>, + > for BitcoinDepositChannelWitnessingGenerator +{ + fn generate_election_properties( + block_witness_root: btc::BlockNumber, + ) -> Vec> { + // Get addresses for this block, and any that have expired 
after this block. + + // then generate election with addresses for this block + // then trigger exipry for any addresses that have expired after this block + + // The fetching of valid addresses can be done inside the ingress-egress pallet where they + // are stored. maybe the expiry too. + // let deposit_channels_for_block = BTreeMap::new(); + log::info!("Generating election for block number: {}", block_witness_root); + + vec![] + } +} + +pub struct BitcoinDepositChannelWitessingProcessor; + +impl ProcessBlockData>> + for BitcoinDepositChannelWitessingProcessor +{ + fn process_block_data< + It: IntoIterator>)>, + >( + _current_block: btc::BlockNumber, + witnesses: It, + ) -> impl Iterator>>)> { + witnesses.into_iter().map(|(block_number, deposits)| { + log::info!( + "Processing block number: {}, got {} deposits", + block_number, + deposits.len() + ); + // Check if the block number is the current block number + // If it is, then we can process the deposits + // If it is not, then we can store the deposits for later processing + (block_number, Some(deposits)) + }) + + // when is it safe to expire a channel? when the block number is beyond their expiry? but + // what if we're at block 40 it expires at block 39 and then we reorg back to block 36. It + // will already be expired. + + // Channel expiry here should be viewed as, from what block should it be included in an + // election. The recycle height is the moment from which if we were to have reached it, a + // reorg back to before the expiry would cause a bug - let's assert on this assumption + // somewhere. + } +} + +pub struct BitcoinElectionHooks; + +impl Hooks for BitcoinElectionHooks { + fn on_finalize( + (deposit_channel_witnessing_identifiers,): ( + Vec< + ElectionIdentifier< + ::ElectionIdentifierExtra, + >, + >, + ), + ) -> Result<(), CorruptStorageError> { + log::info!( + "BitcoinElectionHooks::on_finalize: {:?}", + deposit_channel_witnessing_identifiers + ); + todo!() + } +} + +// Channel expiry: +// We need to process elections in order, even after a safe mode pause. This is to ensure channel +// expiry is done correctly. During safe mode pause, we could get into a situation where the current +// state suggests that a channel is expired, but at the time of a previous block which we have not +// yet processed, the channel was not expired. diff --git a/state-chain/runtime/src/lib.rs b/state-chain/runtime/src/lib.rs index 49da0c11b4..f3184c1f94 100644 --- a/state-chain/runtime/src/lib.rs +++ b/state-chain/runtime/src/lib.rs @@ -1023,6 +1023,12 @@ impl pallet_cf_elections::Config for Runtime { type WeightInfo = pallet_cf_elections::weights::PalletWeight; } +impl pallet_cf_elections::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ElectoralSystemRunner = chainflip::bitcoin_elections::BitcoinElectoralSystemRunner; + type WeightInfo = pallet_cf_elections::weights::PalletWeight; +} + #[frame_support::runtime] mod runtime { #[runtime::runtime] @@ -1138,6 +1144,9 @@ mod runtime { #[runtime::pallet_index(47)] pub type AssetBalances = pallet_cf_asset_balances; + + #[runtime::pallet_index(48)] + pub type BitcoinElections = pallet_cf_elections; } /// The address format for describing accounts.