From 178df8896c96407fa63394ff2aa8a311f7a6b04e Mon Sep 17 00:00:00 2001 From: Jamie Ford Date: Thu, 21 Sep 2023 16:41:55 +1000 Subject: [PATCH 01/15] CFE detect incorrect network (#4016) * feat: detect incorrect dot genesis hash * feat: btc network detection * refactor: use display for BitcoinNetwork name * refactor: Address PR comments --- engine/src/btc/retry_rpc.rs | 17 ++-- engine/src/btc/rpc.rs | 164 +++++++++++++++++++++++----------- engine/src/constants.rs | 4 + engine/src/dot/http_rpc.rs | 26 ++++-- engine/src/dot/retry_rpc.rs | 7 +- engine/src/dot/rpc.rs | 19 +++- engine/src/eth/rpc.rs | 35 ++++---- engine/src/main.rs | 55 ++++++++---- state-chain/chains/src/btc.rs | 40 +++++++++ 9 files changed, 263 insertions(+), 104 deletions(-) diff --git a/engine/src/btc/retry_rpc.rs b/engine/src/btc/retry_rpc.rs index 74925f6ed7..a756024c84 100644 --- a/engine/src/btc/retry_rpc.rs +++ b/engine/src/btc/retry_rpc.rs @@ -6,7 +6,7 @@ use crate::{ settings::{HttpBasicAuthEndpoint, NodeContainer}, witness::common::chain_source::{ChainClient, Header}, }; -use cf_chains::Bitcoin; +use cf_chains::{btc::BitcoinNetwork, Bitcoin}; use core::time::Duration; use anyhow::Result; @@ -24,19 +24,24 @@ const MAX_CONCURRENT_SUBMISSIONS: u32 = 100; const MAX_BROADCAST_RETRIES: Attempt = 5; impl BtcRetryRpcClient { - pub fn new( + pub async fn new( scope: &Scope<'_, anyhow::Error>, nodes: NodeContainer, + expected_btc_network: BitcoinNetwork, ) -> Result { - let primary = BtcRpcClient::new(nodes.primary)?; - let backup = nodes.backup.map(BtcRpcClient::new).transpose()?; + let rpc_client = BtcRpcClient::new(nodes.primary, expected_btc_network)?; + + let backup_rpc_client = nodes + .backup + .map(|backup_endpoint| BtcRpcClient::new(backup_endpoint, expected_btc_network)) + .transpose()?; Ok(Self { retry_client: RetrierClient::new( scope, "btc_rpc", - futures::future::ready(primary), - backup.map(futures::future::ready), + rpc_client, + backup_rpc_client, BITCOIN_RPC_TIMEOUT, 
MAX_CONCURRENT_SUBMISSIONS, ), diff --git a/engine/src/btc/rpc.rs b/engine/src/btc/rpc.rs index 199f89caae..38833d5458 100644 --- a/engine/src/btc/rpc.rs +++ b/engine/src/btc/rpc.rs @@ -1,3 +1,5 @@ +use cf_chains::btc::BitcoinNetwork; +use futures_core::Future; use thiserror::Error; use reqwest::Client; @@ -7,11 +9,12 @@ use serde; use serde_json::json; use bitcoin::{block::Version, Amount, Block, BlockHash, Txid}; -use utilities::redact_endpoint_secret::SecretUrl; +use tracing::error; +use utilities::make_periodic_tick; -use crate::settings::HttpBasicAuthEndpoint; +use crate::{constants::RPC_RETRY_CONNECTION_INTERVAL, settings::HttpBasicAuthEndpoint}; -use anyhow::{Context, Result}; +use anyhow::{anyhow, Context, Result}; // From jsonrpc crate #[derive(Clone, Debug, Deserialize, Serialize)] @@ -58,20 +61,40 @@ struct FeeRateResponse { #[derive(Clone)] pub struct BtcRpcClient { - // internally the Client is Arc'd + // Internally the Client is Arc'd client: Client, - url: SecretUrl, - user: String, - password: String, + endpoint: HttpBasicAuthEndpoint, } impl BtcRpcClient { - pub fn new(basic_auth_endpoint: HttpBasicAuthEndpoint) -> Result { - Ok(Self { - client: Client::builder().build()?, - url: basic_auth_endpoint.http_endpoint, - user: basic_auth_endpoint.basic_auth_user, - password: basic_auth_endpoint.basic_auth_password, + pub fn new( + endpoint: HttpBasicAuthEndpoint, + expected_btc_network: BitcoinNetwork, + ) -> Result> { + let client = Client::builder().build()?; + + Ok(async move { + let mut poll_interval = make_periodic_tick(RPC_RETRY_CONNECTION_INTERVAL, true); + loop { + poll_interval.tick().await; + match get_bitcoin_network(&client, &endpoint).await { + Ok(network) if network == expected_btc_network => break, + Ok(network) => { + error!( + "Connected to Bitcoin node but with incorrect network name `{network}`, expected `{expected_btc_network}` on endpoint {}. 
Please check your CFE + configuration file...", + endpoint.http_endpoint + ); + }, + Err(e) => error!( + "Failure connecting to Bitcoin node at {} with error: {e}. Please check your CFE + configuration file. Retrying in {:?}...", + endpoint.http_endpoint, + poll_interval.period() + ), + } + } + Self { client, endpoint } }) } @@ -79,35 +102,60 @@ impl BtcRpcClient { &self, method: &str, params: Vec, - ) -> Result { - let request_body = json!({ - "jsonrpc": "1.0", - "id":"1", - "method": method, - "params": params - }); - - let response = &self - .client - .post(self.url.as_ref()) - .basic_auth(&self.user, Some(&self.password)) - .json(&request_body) - .send() - .await - .map_err(Error::Transport)? - .json::() - .await - .map_err(Error::Transport)?; - - let error = &response["error"]; - if !error.is_null() { - Err(Error::Rpc(serde_json::from_value(error.clone()).map_err(Error::Json)?)) - } else { - Ok(T::deserialize(&response["result"]).map_err(Error::Json))? - } + ) -> Result { + T::deserialize(call_rpc_raw(&self.client, &self.endpoint, method, params).await?) + .map_err(anyhow::Error::msg) + } +} + +async fn call_rpc_raw( + client: &Client, + endpoint: &HttpBasicAuthEndpoint, + method: &str, + params: Vec, +) -> Result { + let request_body = json!({ + "jsonrpc": "1.0", + "id":"1", + "method": method, + "params": params + }); + + let response = client + .post(endpoint.http_endpoint.as_ref()) + .basic_auth(&endpoint.basic_auth_user, Some(&endpoint.basic_auth_password)) + .json(&request_body) + .send() + .await + .map_err(Error::Transport)? + .json::() + .await + .map_err(Error::Transport)?; + + let error = &response["error"]; + if !error.is_null() { + Err(Error::Rpc(serde_json::from_value(error.clone()).map_err(Error::Json)?)) + } else { + Ok(response["result"].to_owned()) } } +/// Get the BitcoinNetwork by calling the `getblockchaininfo` RPC. 
+async fn get_bitcoin_network( + client: &Client, + endpoint: &HttpBasicAuthEndpoint, +) -> anyhow::Result { + // Using `call_rpc_raw` so we don't have to deserialize the whole response. + let json_value = call_rpc_raw(client, endpoint, "getblockchaininfo", vec![]) + .await + .map_err(anyhow::Error::msg)?; + let network_name = json_value["chain"] + .as_str() + .ok_or(anyhow!("Missing or empty `chain` field in getblockchaininfo response"))?; + + BitcoinNetwork::try_from(network_name) +} + #[derive(Clone, PartialEq, Debug, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] pub struct BlockHeader { @@ -218,41 +266,49 @@ impl BtcRpcApi for BtcRpcClient { #[cfg(test)] mod tests { + use utilities::testing::logging::init_test_logger; + use super::*; #[tokio::test] #[ignore = "requires local node, useful for manual testing"] async fn test_btc_async() { - let client = BtcRpcClient::new(HttpBasicAuthEndpoint { - http_endpoint: "http://localhost:8332".into(), - basic_auth_user: "flip".to_string(), - basic_auth_password: "flip".to_string(), - }) - .unwrap(); + init_test_logger(); + + let client = BtcRpcClient::new( + HttpBasicAuthEndpoint { + http_endpoint: "http://localhost:8332".into(), + basic_auth_user: "flip".to_string(), + basic_auth_password: "flip".to_string(), + }, + BitcoinNetwork::Regtest, + ) + .unwrap() + .await; let block_hash_zero = client.block_hash(0).await.unwrap(); - println!("block_hash_zero: {:?}", block_hash_zero); + println!("block_hash_zero: {block_hash_zero:?}"); let block_zero = client.block(block_hash_zero).await.unwrap(); - println!("block_zero: {:?}", block_zero); + println!("block_zero: {block_zero:?}"); let next_block_fee_rate = client.next_block_fee_rate().await.unwrap(); - println!("next_block_fee_rate: {:?}", next_block_fee_rate); - - let average_block_fee_rate = client.average_block_fee_rate(block_hash_zero).await.unwrap(); - - println!("average_block_fee_rate: {}", average_block_fee_rate); + println!("next_block_fee_rate: 
{next_block_fee_rate:?}"); let best_block_hash = client.best_block_hash().await.unwrap(); - println!("best_block_hash: {:?}", best_block_hash); + println!("best_block_hash: {best_block_hash:?}"); let block_header = client.block_header(best_block_hash).await.unwrap(); - println!("block_header: {:?}", block_header); + println!("block_header: {block_header:?}"); + + let average_block_fee_rate = client.average_block_fee_rate(best_block_hash).await.unwrap(); + + println!("average_block_fee_rate: {average_block_fee_rate}"); // Generate new hex bytes using ./bouncer/commands/create_raw_btc_tx.ts; // let hex_str = diff --git a/engine/src/constants.rs b/engine/src/constants.rs index 04cd2652d9..1c8a1f3dbb 100644 --- a/engine/src/constants.rs +++ b/engine/src/constants.rs @@ -13,6 +13,10 @@ pub const SYNC_POLL_INTERVAL: Duration = Duration::from_secs(4); pub const DOT_AVERAGE_BLOCK_TIME: Duration = Duration::from_secs(6); +// ======= Rpc Clients ======= + +pub const RPC_RETRY_CONNECTION_INTERVAL: Duration = Duration::from_secs(10); + // ======= Settings environment variables ======= pub const ETH_HTTP_ENDPOINT: &str = "ETH__RPC__HTTP_ENDPOINT"; diff --git a/engine/src/dot/http_rpc.rs b/engine/src/dot/http_rpc.rs index 6dafa758dd..16a2e3ca00 100644 --- a/engine/src/dot/http_rpc.rs +++ b/engine/src/dot/http_rpc.rs @@ -23,7 +23,7 @@ use subxt::{ use anyhow::Result; use utilities::{make_periodic_tick, redact_endpoint_secret::SecretUrl}; -use crate::constants::DOT_AVERAGE_BLOCK_TIME; +use crate::constants::RPC_RETRY_CONNECTION_INTERVAL; use super::rpc::DotRpcApi; @@ -80,21 +80,33 @@ pub struct DotHttpRpcClient { } impl DotHttpRpcClient { - pub fn new(url: SecretUrl) -> Result> { + pub fn new( + url: SecretUrl, + expected_genesis_hash: PolkadotHash, + ) -> Result> { let polkadot_http_client = Arc::new(PolkadotHttpClient::new(&url)?); Ok(async move { // We don't want to return an error here. Returning an error means that we'll exit the // CFE. 
So on client creation we wait until we can be successfully connected to the // Polkadot node. So the other chains are unaffected - let mut poll_interval = make_periodic_tick(DOT_AVERAGE_BLOCK_TIME, true); + let mut poll_interval = make_periodic_tick(RPC_RETRY_CONNECTION_INTERVAL, true); let online_client = loop { poll_interval.tick().await; match OnlineClient::::from_rpc_client(polkadot_http_client.clone()) .await { - Ok(online_client) => break online_client, + Ok(online_client) => { + let genesis_hash = online_client.genesis_hash(); + if genesis_hash == expected_genesis_hash { + break online_client + } else { + tracing::error!( + "Connected to Polkadot node at {url} but the genesis hash {genesis_hash} does not match the expected genesis hash {expected_genesis_hash}. Please check your CFE configuration file." + ) + } + }, Err(e) => { tracing::error!( "Failed to connect to Polkadot node at {url} with error: {e}. Please check your CFE @@ -201,7 +213,11 @@ mod tests { #[ignore = "requires local node"] #[tokio::test] async fn test_http_rpc() { - let dot_http_rpc = DotHttpRpcClient::new("http://localhost:9945".into()).unwrap().await; + // This will no longer work because we need to know the genesis hash + let dot_http_rpc = + DotHttpRpcClient::new("http://localhost:9945".into(), PolkadotHash::default()) + .unwrap() + .await; let block_hash = dot_http_rpc.block_hash(1).await.unwrap(); println!("block_hash: {:?}", block_hash); } diff --git a/engine/src/dot/retry_rpc.rs b/engine/src/dot/retry_rpc.rs index 8becefa1e8..913c3acc89 100644 --- a/engine/src/dot/retry_rpc.rs +++ b/engine/src/dot/retry_rpc.rs @@ -44,11 +44,12 @@ impl DotRetryRpcClient { pub fn new( scope: &Scope<'_, anyhow::Error>, nodes: NodeContainer, + expected_genesis_hash: PolkadotHash, ) -> Result { let f_create_clients = |endpoints: WsHttpEndpoints| { Result::<_, anyhow::Error>::Ok(( - DotHttpRpcClient::new(endpoints.http_endpoint)?, - DotSubClient::new(endpoints.ws_endpoint), + 
DotHttpRpcClient::new(endpoints.http_endpoint, expected_genesis_hash)?, + DotSubClient::new(endpoints.ws_endpoint, expected_genesis_hash), )) }; @@ -306,6 +307,7 @@ mod tests { async fn my_test() { task_scope(|scope| { async move { + // This will no longer work because we need to know the genesis hash let dot_retry_rpc_client = DotRetryRpcClient::new( scope, NodeContainer { @@ -315,6 +317,7 @@ mod tests { }, backup: None, }, + PolkadotHash::default(), ) .unwrap(); diff --git a/engine/src/dot/rpc.rs b/engine/src/dot/rpc.rs index 1d55829889..81ce4f1541 100644 --- a/engine/src/dot/rpc.rs +++ b/engine/src/dot/rpc.rs @@ -14,7 +14,7 @@ use subxt::{ use tokio::sync::RwLock; use utilities::redact_endpoint_secret::SecretUrl; -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, bail, Result}; use super::http_rpc::DotHttpRpcClient; @@ -136,11 +136,12 @@ impl DotRpcApi for DotRpcClient { #[derive(Clone)] pub struct DotSubClient { pub ws_endpoint: SecretUrl, + expected_genesis_hash: PolkadotHash, } impl DotSubClient { - pub fn new(ws_endpoint: SecretUrl) -> Self { - Self { ws_endpoint } + pub fn new(ws_endpoint: SecretUrl, expected_genesis_hash: PolkadotHash) -> Self { + Self { ws_endpoint, expected_genesis_hash } } } @@ -150,6 +151,12 @@ impl DotSubscribeApi for DotSubClient { &self, ) -> Result> + Send>>> { let client = OnlineClient::::from_url(&self.ws_endpoint).await?; + + let genesis_hash = client.genesis_hash(); + if genesis_hash != self.expected_genesis_hash { + bail!("Expected genesis hash {} but got {genesis_hash}", self.expected_genesis_hash); + } + Ok(Box::pin( client .blocks() @@ -164,6 +171,12 @@ impl DotSubscribeApi for DotSubClient { &self, ) -> Result> + Send>>> { let client = OnlineClient::::from_url(&self.ws_endpoint).await?; + + let genesis_hash = client.genesis_hash(); + if genesis_hash != self.expected_genesis_hash { + bail!("Expected genesis hash {} but got {genesis_hash}", self.expected_genesis_hash); + } + Ok(Box::pin( client .blocks() diff --git 
a/engine/src/eth/rpc.rs b/engine/src/eth/rpc.rs index 909e77b926..3329aca7c1 100644 --- a/engine/src/eth/rpc.rs +++ b/engine/src/eth/rpc.rs @@ -1,10 +1,11 @@ pub mod address_checker; +use anyhow::bail; use ethers::{prelude::*, signers::Signer, types::transaction::eip2718::TypedTransaction}; use futures_core::Future; use utilities::redact_endpoint_secret::SecretUrl; -use crate::constants::{ETH_AVERAGE_BLOCK_TIME, SYNC_POLL_INTERVAL}; +use crate::constants::{RPC_RETRY_CONNECTION_INTERVAL, SYNC_POLL_INTERVAL}; use anyhow::{anyhow, Context, Result}; use std::{path::PathBuf, str::FromStr, sync::Arc, time::Instant}; use tokio::sync::Mutex; @@ -43,20 +44,21 @@ impl EthRpcClient { // We don't want to return an error here. Returning an error means that we'll exit the // CFE. So on client creation we wait until we can be successfully connected to the ETH // node. So the other chains are unaffected - let mut poll_interval = make_periodic_tick(ETH_AVERAGE_BLOCK_TIME, true); + let mut poll_interval = make_periodic_tick(RPC_RETRY_CONNECTION_INTERVAL, true); loop { poll_interval.tick().await; match client.chain_id().await { Ok(chain_id) if chain_id == expected_chain_id.into() => break client, Ok(chain_id) => { - tracing::warn!( - "Connected to Ethereum node but with chain_id {chain_id}, expected {expected_chain_id}. Please check your CFE + tracing::error!( + "Connected to Ethereum node but with incorrect chain_id {chain_id}, expected {expected_chain_id} from {http_endpoint}. Please check your CFE configuration file...", ); }, Err(e) => tracing::error!( "Cannot connect to an Ethereum node at {http_endpoint} with error: {e}. Please check your CFE - configuration file. Retrying...", + configuration file. 
Retrying in {:?}...", + poll_interval.period() ), } } @@ -227,20 +229,17 @@ impl ReconnectSubscribeApi for ReconnectSubscriptionClient { let client_chain_id = web3.eth().chain_id().await.context("Failed to fetch chain id.")?; if self.chain_id != client_chain_id { - Err(anyhow!( - "Expected chain id {}, eth ws client returned {client_chain_id}.", - self.chain_id - )) - } else { - Ok(ConscientiousEthWebsocketBlockHeaderStream { - stream: Some( - web3.eth_subscribe() - .subscribe_new_heads() - .await - .context("Failed to subscribe to new heads with WS Client")?, - ), - }) + bail!("Expected chain id {}, eth ws client returned {client_chain_id}.", self.chain_id) } + + Ok(ConscientiousEthWebsocketBlockHeaderStream { + stream: Some( + web3.eth_subscribe() + .subscribe_new_heads() + .await + .context("Failed to subscribe to new heads with WS Client")?, + ), + }) } } diff --git a/engine/src/main.rs b/engine/src/main.rs index 3c7661b1a0..0c3cef0c47 100644 --- a/engine/src/main.rs +++ b/engine/src/main.rs @@ -1,4 +1,5 @@ use anyhow::Context; +use cf_chains::dot::PolkadotHash; use cf_primitives::{AccountRole, SemVer}; use chainflip_engine::{ btc::retry_rpc::BtcRetryRpcClient, @@ -255,22 +256,44 @@ async fn start( scope.spawn(btc_multisig_client_backend_future); // Create all the clients - let expected_chain_id = web3::types::U256::from( - state_chain_client - .storage_value::>( - state_chain_client.latest_finalized_hash(), - ) - .await - .expect(STATE_CHAIN_CONNECTION), - ); - let eth_client = EthersRetryRpcClient::new( - scope, - settings.eth.private_key_file, - settings.eth.nodes, - expected_chain_id, - )?; - let btc_client = BtcRetryRpcClient::new(scope, settings.btc.nodes)?; - let dot_client = DotRetryRpcClient::new(scope, settings.dot.nodes)?; + let eth_client = { + let expected_eth_chain_id = web3::types::U256::from( + state_chain_client + .storage_value::>( + state_chain_client.latest_finalized_hash(), + ) + .await + .expect(STATE_CHAIN_CONNECTION), + ); + 
EthersRetryRpcClient::new( + scope, + settings.eth.private_key_file, + settings.eth.nodes, + expected_eth_chain_id, + )? + }; + let btc_client = { + let expected_btc_network = cf_chains::btc::BitcoinNetwork::from( + state_chain_client + .storage_value::>( + state_chain_client.latest_finalized_hash(), + ) + .await + .expect(STATE_CHAIN_CONNECTION), + ); + BtcRetryRpcClient::new(scope, settings.btc.nodes, expected_btc_network).await? + }; + let dot_client = { + let expected_dot_genesis_hash = PolkadotHash::from( + state_chain_client + .storage_value::>( + state_chain_client.latest_finalized_hash(), + ) + .await + .expect(STATE_CHAIN_CONNECTION), + ); + DotRetryRpcClient::new(scope, settings.dot.nodes, expected_dot_genesis_hash)? + }; witness::start::start( scope, diff --git a/state-chain/chains/src/btc.rs b/state-chain/chains/src/btc.rs index e5dd64e454..d03039b053 100644 --- a/state-chain/chains/src/btc.rs +++ b/state-chain/chains/src/btc.rs @@ -428,6 +428,30 @@ impl BitcoinNetwork { } } +impl core::fmt::Display for BitcoinNetwork { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + BitcoinNetwork::Mainnet => write!(f, "main"), + BitcoinNetwork::Testnet => write!(f, "test"), + BitcoinNetwork::Regtest => write!(f, "regtest"), + } + } +} + +#[cfg(feature = "std")] +impl TryFrom<&str> for BitcoinNetwork { + type Error = anyhow::Error; + + fn try_from(s: &str) -> Result { + match s { + "main" => Ok(BitcoinNetwork::Mainnet), + "test" => Ok(BitcoinNetwork::Testnet), + "regtest" => Ok(BitcoinNetwork::Regtest), + unknown => Err(anyhow::anyhow!("Unknown Bitcoin network: {unknown}")), + } + } +} + const SEGWIT_VERSION_ZERO: u8 = 0; const SEGWIT_VERSION_TAPROOT: u8 = 1; const SEGWIT_VERSION_MAX: u8 = 16; @@ -1264,4 +1288,20 @@ mod test { assert_eq!(to_varint(x.0), x.1); } } + + #[test] + fn test_btc_network_names() { + assert_eq!( + BitcoinNetwork::try_from(BitcoinNetwork::Mainnet.to_string().as_str()).unwrap(), + BitcoinNetwork::Mainnet 
+ ); + assert_eq!( + BitcoinNetwork::try_from(BitcoinNetwork::Testnet.to_string().as_str()).unwrap(), + BitcoinNetwork::Testnet + ); + assert_eq!( + BitcoinNetwork::try_from(BitcoinNetwork::Regtest.to_string().as_str()).unwrap(), + BitcoinNetwork::Regtest + ); + } } From c9bcd5be93755789a2fd3b79ac7ba5ab0d8b2859 Mon Sep 17 00:00:00 2001 From: Ramiz Siddiqui <97471826+ramizhasan111@users.noreply.github.com> Date: Thu, 21 Sep 2023 17:50:09 +0500 Subject: [PATCH 02/15] fix: use saturating sub while calculating change amount (#4026) Co-authored-by: Daniel Co-authored-by: dandanlen <3168260+dandanlen@users.noreply.github.com> --- state-chain/pallets/cf-environment/src/lib.rs | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/state-chain/pallets/cf-environment/src/lib.rs b/state-chain/pallets/cf-environment/src/lib.rs index 1474ea79a3..1e2f33c386 100644 --- a/state-chain/pallets/cf-environment/src/lib.rs +++ b/state-chain/pallets/cf-environment/src/lib.rs @@ -426,14 +426,19 @@ impl Pallet { match utxo_selection_type { UtxoSelectionType::SelectAllForRotation => { let available_utxos = BitcoinAvailableUtxos::::take(); - (!available_utxos.is_empty()).then_some(available_utxos).map(|available_utxos| { - ( - available_utxos.clone(), - available_utxos.iter().map(|Utxo { amount, .. }| *amount).sum::() - - (available_utxos.len() as u64) * fee_per_input_utxo - - fee_per_output_utxo - min_fee_required_per_tx, - ) - }) + (!available_utxos.is_empty()).then_some(available_utxos).and_then( + |available_utxos| { + available_utxos + .iter() + .map(|Utxo { amount, .. 
}| *amount) + .sum::() + .checked_sub( + ((available_utxos.len() as u64) * fee_per_input_utxo) + + fee_per_output_utxo + min_fee_required_per_tx, + ) + .map(|change_amount| (available_utxos, change_amount)) + }, + ) }, UtxoSelectionType::Some { output_amount, number_of_outputs } => BitcoinAvailableUtxos::::try_mutate(|available_utxos| { From 56083b85df98a7802c1276644a75263b50ea5408 Mon Sep 17 00:00:00 2001 From: Roy Yang Date: Fri, 22 Sep 2023 01:26:47 +1200 Subject: [PATCH 03/15] feat: Witnesser dispatch call filter (#4001) Co-authored-by: Daniel Co-authored-by: dandanlen <3168260+dandanlen@users.noreply.github.com> --- Cargo.lock | 1 + .../cf-integration-tests/src/authorities.rs | 2 +- state-chain/pallets/cf-witnesser/src/lib.rs | 106 ++++++++++------ state-chain/pallets/cf-witnesser/src/mock.rs | 24 +++- state-chain/pallets/cf-witnesser/src/tests.rs | 35 ++++++ state-chain/runtime/Cargo.toml | 2 + state-chain/runtime/src/chainflip.rs | 23 +--- state-chain/runtime/src/lib.rs | 40 +++--- state-chain/runtime/src/safe_mode.rs | 117 ++++++++++++++++++ state-chain/traits/src/lib.rs | 10 ++ 10 files changed, 278 insertions(+), 82 deletions(-) create mode 100644 state-chain/runtime/src/safe_mode.rs diff --git a/Cargo.lock b/Cargo.lock index c8ce548cf3..13d0a2ebe8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11426,6 +11426,7 @@ dependencies = [ "cf-amm", "cf-chains", "cf-primitives", + "cf-runtime-utilities", "cf-session-benchmarking", "cf-test-utilities", "cf-traits", diff --git a/state-chain/cf-integration-tests/src/authorities.rs b/state-chain/cf-integration-tests/src/authorities.rs index 940de5d7d9..4ef96a1c82 100644 --- a/state-chain/cf-integration-tests/src/authorities.rs +++ b/state-chain/cf-integration-tests/src/authorities.rs @@ -12,7 +12,7 @@ use cf_traits::{AsyncResult, EpochInfo, SafeMode, VaultRotator, VaultStatus}; use pallet_cf_environment::SafeModeUpdate; use pallet_cf_validator::{CurrentRotationPhase, RotationPhase}; use state_chain_runtime::{ - 
chainflip::RuntimeSafeMode, BitcoinVault, Environment, EthereumInstance, EthereumVault, Flip, + safe_mode::RuntimeSafeMode, BitcoinVault, Environment, EthereumInstance, EthereumVault, Flip, PolkadotInstance, PolkadotVault, Runtime, RuntimeOrigin, Validator, }; diff --git a/state-chain/pallets/cf-witnesser/src/lib.rs b/state-chain/pallets/cf-witnesser/src/lib.rs index c2b9c9d7d7..ae19005b03 100644 --- a/state-chain/pallets/cf-witnesser/src/lib.rs +++ b/state-chain/pallets/cf-witnesser/src/lib.rs @@ -1,4 +1,5 @@ #![cfg_attr(not(feature = "std"), no_std)] +#![feature(extract_if)] #![doc = include_str!("../README.md")] #![doc = include_str!("../../cf-doc-head.md")] @@ -14,19 +15,49 @@ mod tests; use bitvec::prelude::*; use cf_primitives::EpochIndex; -use cf_traits::{impl_pallet_safe_mode, AccountRoleRegistry, Chainflip, EpochInfo}; +use cf_traits::{AccountRoleRegistry, CallDispatchFilter, Chainflip, EpochInfo, SafeMode}; use cf_utilities::success_threshold_from_share_count; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{DispatchResultWithPostInfo, GetDispatchInfo, UnfilteredDispatchable}, ensure, pallet_prelude::Member, storage::with_storage_layer, traits::{EnsureOrigin, Get}, - Hashable, + Hashable, RuntimeDebug, }; -use sp_std::{collections::vec_deque::VecDeque, prelude::*}; +use scale_info::TypeInfo; +use sp_std::prelude::*; + +#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, Copy, Clone, PartialEq, Eq, RuntimeDebug)] +pub enum PalletSafeMode { + CodeGreen, + CodeRed, + CodeAmber(CallPermission), +} -impl_pallet_safe_mode!(PalletSafeMode; witness_calls_enabled); +impl> CallDispatchFilter + for PalletSafeMode +{ + fn should_dispatch(&self, call: &C) -> bool { + match self { + Self::CodeGreen => true, + Self::CodeRed => false, + Self::CodeAmber(permissions) => permissions.should_dispatch(call), + } + } +} + +impl Default for PalletSafeMode { + fn default() -> Self { + as SafeMode>::CODE_GREEN + } +} + +impl SafeMode for PalletSafeMode { 
+ const CODE_RED: Self = PalletSafeMode::CodeRed; + const CODE_GREEN: Self = PalletSafeMode::CodeGreen; +} pub trait WitnessDataExtraction { /// Extracts some data from a call and encodes it so it can be stored for later. @@ -63,7 +94,10 @@ pub mod pallet { + WitnessDataExtraction; /// Safe Mode access. - type SafeMode: Get; + type SafeMode: Get>; + + /// Filter for dispatching witnessed calls. + type CallDispatchPermission: Parameter + CallDispatchFilter<::RuntimeCall>; /// Benchmark stuff type WeightInfo: WeightInfo; @@ -112,44 +146,36 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { - /// Clear stale data from expired epochs fn on_idle(_block_number: BlockNumberFor, remaining_weight: Weight) -> Weight { let mut used_weight = Weight::zero(); - if T::SafeMode::get().witness_calls_enabled { - let _ = WitnessedCallsScheduledForDispatch::::try_mutate( - |witnessed_calls_storage| { - used_weight.saturating_accrue(T::DbWeight::get().reads(1)); - if !witnessed_calls_storage.is_empty() { - let mut witnessed_calls = - VecDeque::from(sp_std::mem::take(witnessed_calls_storage)); - while let Some((_, call, _)) = witnessed_calls.front() { - let next_weight = - used_weight.saturating_add(call.get_dispatch_info().weight); - if remaining_weight.all_gte(next_weight) { - used_weight = next_weight; - let (witnessed_at_epoch, call, call_hash) = - witnessed_calls.pop_front().unwrap(); - Self::dispatch_call( - witnessed_at_epoch, - T::EpochInfo::epoch_index(), - call, - call_hash, - ); - } else { - break - } + + let safe_mode = T::SafeMode::get(); + if safe_mode != SafeMode::CODE_RED { + WitnessedCallsScheduledForDispatch::::mutate(|witnessed_calls_storage| { + witnessed_calls_storage + .extract_if(|(_, call, _)| { + let next_weight = + used_weight.saturating_add(call.get_dispatch_info().weight); + if remaining_weight.all_gte(next_weight) && + safe_mode.should_dispatch(call) + { + used_weight = next_weight; + true + } else { + false } - let _empty = sp_std::mem::replace( 
- witnessed_calls_storage, - witnessed_calls.make_contiguous().to_vec(), + }) + .collect::>() + .into_iter() + .for_each(|(witnessed_at_epoch, call, call_hash)| { + Self::dispatch_call( + witnessed_at_epoch, + T::EpochInfo::epoch_index(), + call, + call_hash, ); - used_weight.saturating_accrue(T::DbWeight::get().writes(1)); - Ok(()) - } else { - Err("no action needed when the scheduled witness calls list is empty") - } - }, - ); + }); + }); } let mut epochs_to_cull = EpochsToCull::::get(); @@ -352,7 +378,7 @@ pub mod pallet { if let Some(mut extra_data) = ExtraCallData::::get(epoch_index, call_hash) { call.combine_and_inject(&mut extra_data) } - if T::SafeMode::get().witness_calls_enabled { + if T::SafeMode::get().should_dispatch(&call) { Self::dispatch_call(epoch_index, current_epoch, *call, call_hash); } else { WitnessedCallsScheduledForDispatch::::append(( diff --git a/state-chain/pallets/cf-witnesser/src/mock.rs b/state-chain/pallets/cf-witnesser/src/mock.rs index 7542e8c323..f2710e9d9f 100644 --- a/state-chain/pallets/cf-witnesser/src/mock.rs +++ b/state-chain/pallets/cf-witnesser/src/mock.rs @@ -1,9 +1,13 @@ #![cfg(test)] use crate::{self as pallet_cf_witness, WitnessDataExtraction}; -use cf_traits::{impl_mock_chainflip, impl_mock_runtime_safe_mode, AccountRoleRegistry}; -use frame_support::parameter_types; +use cf_traits::{ + impl_mock_chainflip, impl_mock_runtime_safe_mode, AccountRoleRegistry, CallDispatchFilter, +}; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::{parameter_types, RuntimeDebug}; use frame_system as system; +use scale_info::TypeInfo; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; use sp_std::collections::btree_set::BTreeSet; @@ -53,13 +57,27 @@ impl system::Config for Test { type MaxConsumers = frame_support::traits::ConstU32<5>; } -impl_mock_runtime_safe_mode! { witnesser: pallet_cf_witness::PalletSafeMode } +impl_mock_runtime_safe_mode! 
{ witnesser: pallet_cf_witness::PalletSafeMode } + +parameter_types! { + pub static AllowCall: bool = true; +} + +#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, Copy, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct MockCallFilter; + +impl CallDispatchFilter for MockCallFilter { + fn should_dispatch(&self, _call: &RuntimeCall) -> bool { + AllowCall::get() + } +} impl pallet_cf_witness::Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type SafeMode = MockRuntimeSafeMode; + type CallDispatchPermission = MockCallFilter; type WeightInfo = (); } diff --git a/state-chain/pallets/cf-witnesser/src/tests.rs b/state-chain/pallets/cf-witnesser/src/tests.rs index 49e4592dc0..7fbc6bb947 100644 --- a/state-chain/pallets/cf-witnesser/src/tests.rs +++ b/state-chain/pallets/cf-witnesser/src/tests.rs @@ -389,3 +389,38 @@ fn test_safe_mode() { assert_eq!(pallet_dummy::Something::::get(), Some(0u32)); }); } + +#[test] +fn safe_mode_code_amber_can_filter_calls() { + new_test_ext().execute_with(|| { + // Block calls via SafeMode::CodeAmber + MockRuntimeSafeMode::set_safe_mode(MockRuntimeSafeMode { + witnesser: PalletSafeMode::CodeAmber(MockCallFilter {}), + }); + AllowCall::set(false); + + // Sign the call so its ready to be dispatched + let call = Box::new(RuntimeCall::Dummy(pallet_dummy::Call::::increment_value {})); + let current_epoch = MockEpochInfo::epoch_index(); + for s in [ALISSA, BOBSON] { + assert_ok!(Witnesser::witness_at_epoch( + RuntimeOrigin::signed(s), + call.clone(), + current_epoch + )); + } + assert_eq!(WitnessedCallsScheduledForDispatch::::decode_len(), Some(1)); + + // Call is not dispatched because its blocked by the CallDispatchFilter + Witnesser::on_idle(1, Weight::zero().set_ref_time(1_000_000_000_000u64)); + assert!(!WitnessedCallsScheduledForDispatch::::get().is_empty()); + + // Allow the call to pass the filter + AllowCall::set(true); + + // Call should be dispatched now. 
+ Witnesser::on_idle(2, Weight::zero().set_ref_time(1_000_000_000_000u64)); + assert!(WitnessedCallsScheduledForDispatch::::get().is_empty()); + assert_eq!(pallet_dummy::Something::::get(), Some(0u32)); + }); +} diff --git a/state-chain/runtime/Cargo.toml b/state-chain/runtime/Cargo.toml index cc04345f82..38708ff8f3 100644 --- a/state-chain/runtime/Cargo.toml +++ b/state-chain/runtime/Cargo.toml @@ -29,6 +29,7 @@ cf-amm = { path = '../amm', default-features = false } cf-chains = { path = '../chains', default-features = false } cf-primitives = { path = '../primitives', default-features = false } cf-session-benchmarking = { path = '../cf-session-benchmarking', optional = true, default-features = false } +cf-runtime-utilities = { path = '../runtime-utilities', default-features = false } cf-traits = { path = '../traits', default-features = false } cf-utilities = { package = 'utilities', path = '../../utilities', default-features = false } @@ -134,6 +135,7 @@ std = [ 'cf-amm/std', 'cf-chains/std', 'cf-primitives/std', + 'cf-runtime-utilities/std', 'cf-traits/std', 'cf-utilities/std', 'codec/std', diff --git a/state-chain/runtime/src/chainflip.rs b/state-chain/runtime/src/chainflip.rs index 29ede49402..4bde2f0d4e 100644 --- a/state-chain/runtime/src/chainflip.rs +++ b/state-chain/runtime/src/chainflip.rs @@ -44,10 +44,10 @@ use cf_chains::{ }; use cf_primitives::{chains::assets, AccountRole, Asset, BasisPoints, ChannelId, EgressId}; use cf_traits::{ - impl_runtime_safe_mode, AccountRoleRegistry, BlockEmissions, BroadcastAnyChainGovKey, - Broadcaster, Chainflip, CommKeyBroadcaster, DepositApi, DepositHandler, EgressApi, EpochInfo, - Heartbeat, Issuance, KeyProvider, OnBroadcastReady, QualifyNode, RewardsDistribution, - RuntimeUpgrade, VaultTransitionHandler, + AccountRoleRegistry, BlockEmissions, BroadcastAnyChainGovKey, Broadcaster, Chainflip, + CommKeyBroadcaster, DepositApi, DepositHandler, EgressApi, EpochInfo, Heartbeat, Issuance, + KeyProvider, OnBroadcastReady, 
QualifyNode, RewardsDistribution, RuntimeUpgrade, + VaultTransitionHandler, }; use codec::{Decode, Encode}; use frame_support::{ @@ -76,21 +76,6 @@ impl Chainflip for Runtime { type AccountRoleRegistry = AccountRoles; type FundingInfo = Flip; } - -impl_runtime_safe_mode! { - RuntimeSafeMode, - pallet_cf_environment::RuntimeSafeMode, - emissions: pallet_cf_emissions::PalletSafeMode, - funding: pallet_cf_funding::PalletSafeMode, - swapping: pallet_cf_swapping::PalletSafeMode, - liquidity_provider: pallet_cf_lp::PalletSafeMode, - validator: pallet_cf_validator::PalletSafeMode, - pools: pallet_cf_pools::PalletSafeMode, - reputation: pallet_cf_reputation::PalletSafeMode, - vault: pallet_cf_vaults::PalletSafeMode, - witnesser: pallet_cf_witnesser::PalletSafeMode, - broadcast: pallet_cf_broadcast::PalletSafeMode, -} struct BackupNodeEmissions; impl RewardsDistribution for BackupNodeEmissions { diff --git a/state-chain/runtime/src/lib.rs b/state-chain/runtime/src/lib.rs index 2d8d3871fd..5c3be0658e 100644 --- a/state-chain/runtime/src/lib.rs +++ b/state-chain/runtime/src/lib.rs @@ -4,6 +4,7 @@ pub mod chainflip; pub mod constants; pub mod runtime_apis; +pub mod safe_mode; #[cfg(feature = "std")] pub mod test_runner; mod weights; @@ -84,12 +85,13 @@ pub use cf_traits::{EpochInfo, QualifyNode, SessionKeysRegistered, SwappingApi}; pub use chainflip::chain_instances::*; use chainflip::{ - epoch_transition::ChainflipEpochTransitions, BroadcastReadyProvider, BtcEnvironment, - BtcVaultTransitionHandler, ChainAddressConverter, ChainflipHeartbeat, EthEnvironment, + all_vaults_rotator::AllVaultRotator, epoch_transition::ChainflipEpochTransitions, + BroadcastReadyProvider, BtcEnvironment, BtcVaultTransitionHandler, ChainAddressConverter, + ChainflipHeartbeat, DotEnvironment, DotVaultTransitionHandler, EthEnvironment, EthVaultTransitionHandler, TokenholderGovernanceBroadcaster, }; +use safe_mode::{RuntimeSafeMode, WitnesserCallPermission}; -use 
chainflip::{all_vaults_rotator::AllVaultRotator, DotEnvironment, DotVaultTransitionHandler}; use constants::common::*; use pallet_cf_flip::{Bonder, FlipSlasher}; use pallet_cf_vaults::Vault; @@ -184,7 +186,7 @@ impl pallet_cf_validator::Config for Runtime { ); type OffenceReporter = Reputation; type Bonder = Bonder; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type ReputationResetter = Reputation; } @@ -215,7 +217,7 @@ impl pallet_cf_environment::Config for Runtime { type BitcoinVaultKeyWitnessedHandler = BitcoinVault; type BitcoinNetwork = BitcoinNetworkParam; type BitcoinFeeInfo = chainflip::BitcoinFeeGetter; - type RuntimeSafeMode = chainflip::RuntimeSafeMode; + type RuntimeSafeMode = RuntimeSafeMode; type CurrentCompatibilityVersion = CurrentCompatibilityVersion; type WeightInfo = pallet_cf_environment::weights::PalletWeight; } @@ -226,7 +228,7 @@ impl pallet_cf_swapping::Config for Runtime { type EgressHandler = chainflip::AnyChainIngressEgressHandler; type SwappingApi = LiquidityPools; type AddressConverter = ChainAddressConverter; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type WeightInfo = pallet_cf_swapping::weights::PalletWeight; } @@ -244,7 +246,7 @@ impl pallet_cf_vaults::Config for Runtime { type OffenceReporter = Reputation; type WeightInfo = pallet_cf_vaults::weights::PalletWeight; type ChainTracking = EthereumChainTracking; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type Slasher = FlipSlasher; } @@ -262,7 +264,7 @@ impl pallet_cf_vaults::Config for Runtime { type OffenceReporter = Reputation; type WeightInfo = pallet_cf_vaults::weights::PalletWeight; type ChainTracking = PolkadotChainTracking; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type Slasher = FlipSlasher; } @@ -280,7 +282,7 @@ impl pallet_cf_vaults::Config for Runtime { type OffenceReporter = Reputation; type WeightInfo = 
pallet_cf_vaults::weights::PalletWeight; type ChainTracking = BitcoinChainTracking; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type Slasher = FlipSlasher; } @@ -342,7 +344,7 @@ impl pallet_cf_pools::Config for Runtime { type RuntimeEvent = RuntimeEvent; type LpBalance = LiquidityProvider; type NetworkFee = NetworkFee; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type WeightInfo = (); } @@ -351,7 +353,7 @@ impl pallet_cf_lp::Config for Runtime { type DepositHandler = chainflip::AnyChainIngressEgressHandler; type EgressHandler = chainflip::AnyChainIngressEgressHandler; type AddressConverter = ChainAddressConverter; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type WeightInfo = pallet_cf_lp::weights::PalletWeight; } @@ -402,7 +404,6 @@ parameter_types! { } // Configure FRAME pallets to include in runtime. - impl frame_system::Config for Runtime { /// The basic call filter to use in dispatchable. 
type BaseCallFilter = frame_support::traits::Everything; @@ -529,7 +530,8 @@ impl pallet_cf_witnesser::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; + type CallDispatchPermission = WitnesserCallPermission; type WeightInfo = pallet_cf_witnesser::weights::PalletWeight; } @@ -543,7 +545,7 @@ impl pallet_cf_funding::Config for Runtime { pallet_cf_threshold_signature::EnsureThresholdSigned; type RegisterRedemption = EthereumApi; type TimeSource = Timestamp; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type WeightInfo = pallet_cf_funding::weights::PalletWeight; } @@ -583,7 +585,7 @@ impl pallet_cf_emissions::Config for Runtime { type EthEnvironment = EthEnvironment; type FlipToBurn = LiquidityPools; type EgressHandler = chainflip::AnyChainIngressEgressHandler; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type WeightInfo = pallet_cf_emissions::weights::PalletWeight; } @@ -615,7 +617,7 @@ impl pallet_cf_reputation::Config for Runtime { type Slasher = FlipSlasher; type WeightInfo = pallet_cf_reputation::weights::PalletWeight; type MaximumAccruableReputation = MaximumAccruableReputation; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; } impl pallet_cf_threshold_signature::Config for Runtime { @@ -677,7 +679,7 @@ impl pallet_cf_broadcast::Config for Runtime { type BroadcastReadyProvider = BroadcastReadyProvider; type BroadcastTimeout = ConstU32<{ 10 * MINUTES }>; type WeightInfo = pallet_cf_broadcast::weights::PalletWeight; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type SafeModeBlockMargin = ConstU32<10>; type KeyProvider = EthereumVault; } @@ -699,7 +701,7 @@ impl pallet_cf_broadcast::Config for Runtime { type BroadcastReadyProvider = BroadcastReadyProvider; type 
BroadcastTimeout = ConstU32<{ 10 * MINUTES }>; type WeightInfo = pallet_cf_broadcast::weights::PalletWeight; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type SafeModeBlockMargin = ConstU32<10>; type KeyProvider = PolkadotVault; } @@ -721,7 +723,7 @@ impl pallet_cf_broadcast::Config for Runtime { type BroadcastReadyProvider = BroadcastReadyProvider; type BroadcastTimeout = ConstU32<{ 90 * MINUTES }>; type WeightInfo = pallet_cf_broadcast::weights::PalletWeight; - type SafeMode = chainflip::RuntimeSafeMode; + type SafeMode = RuntimeSafeMode; type SafeModeBlockMargin = ConstU32<10>; type KeyProvider = BitcoinVault; } diff --git a/state-chain/runtime/src/safe_mode.rs b/state-chain/runtime/src/safe_mode.rs new file mode 100644 index 0000000000..f9a6a76cdd --- /dev/null +++ b/state-chain/runtime/src/safe_mode.rs @@ -0,0 +1,117 @@ +//! For filtering runtime calls and other related utilities. + +use crate::{Runtime, RuntimeCall}; +use cf_traits::{impl_runtime_safe_mode, CallDispatchFilter}; +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; + +impl_runtime_safe_mode! { + RuntimeSafeMode, + pallet_cf_environment::RuntimeSafeMode, + emissions: pallet_cf_emissions::PalletSafeMode, + funding: pallet_cf_funding::PalletSafeMode, + swapping: pallet_cf_swapping::PalletSafeMode, + liquidity_provider: pallet_cf_lp::PalletSafeMode, + validator: pallet_cf_validator::PalletSafeMode, + pools: pallet_cf_pools::PalletSafeMode, + reputation: pallet_cf_reputation::PalletSafeMode, + vault: pallet_cf_vaults::PalletSafeMode, + witnesser: pallet_cf_witnesser::PalletSafeMode, + broadcast: pallet_cf_broadcast::PalletSafeMode, +} + +/// Contains permissions for different Runtime calls. +/// This is done through the SafeMode::CodeAmber of the Witnesser pallet. +/// Only calls allowed here can be dispatched with Witnesser origin. 
+#[derive( + Encode, + Decode, + MaxEncodedLen, + TypeInfo, + Default, + Copy, + Clone, + PartialEq, + Eq, + frame_support::RuntimeDebug, +)] +pub struct WitnesserCallPermission { + // Non-instantiable pallets + pub governance: bool, + pub funding: bool, + pub swapping: bool, + + // Ethereum pallets + pub ethereum_broadcast: bool, + pub ethereum_chain_tracking: bool, + pub ethereum_ingress_egress: bool, + pub ethereum_vault: bool, + + // Polkadot pallets + pub polkadot_broadcast: bool, + pub polkadot_chain_tracking: bool, + pub polkadot_ingress_egress: bool, + pub polkadot_vault: bool, + + // Bitcoin pallets + pub bitcoin_broadcast: bool, + pub bitcoin_chain_tracking: bool, + pub bitcoin_ingress_egress: bool, + pub bitcoin_vault: bool, +} + +impl WitnesserCallPermission { + pub fn allow_all() -> Self { + WitnesserCallPermission { + governance: true, + funding: true, + swapping: true, + ethereum_broadcast: true, + ethereum_chain_tracking: true, + ethereum_ingress_egress: true, + ethereum_vault: true, + polkadot_broadcast: true, + polkadot_chain_tracking: true, + polkadot_ingress_egress: true, + polkadot_vault: true, + bitcoin_broadcast: true, + bitcoin_chain_tracking: true, + bitcoin_ingress_egress: true, + bitcoin_vault: true, + } + } +} + +impl CallDispatchFilter for WitnesserCallPermission { + fn should_dispatch(&self, call: &RuntimeCall) -> bool { + match call { + RuntimeCall::Governance(..) => self.governance, + RuntimeCall::Funding(..) => self.funding, + RuntimeCall::Swapping(..) => self.swapping, + + RuntimeCall::EthereumBroadcaster(..) => self.ethereum_broadcast, + RuntimeCall::EthereumChainTracking(..) => self.ethereum_chain_tracking, + RuntimeCall::EthereumIngressEgress(..) => self.ethereum_ingress_egress, + RuntimeCall::EthereumVault(..) => self.ethereum_vault, + + RuntimeCall::PolkadotBroadcaster(..) => self.polkadot_broadcast, + RuntimeCall::PolkadotChainTracking(..) => self.polkadot_chain_tracking, + RuntimeCall::PolkadotIngressEgress(..) 
=> self.polkadot_ingress_egress, + RuntimeCall::PolkadotVault(..) => self.polkadot_vault, + + RuntimeCall::BitcoinBroadcaster(..) => self.bitcoin_broadcast, + RuntimeCall::BitcoinChainTracking(..) => self.bitcoin_chain_tracking, + RuntimeCall::BitcoinIngressEgress(..) => self.bitcoin_ingress_egress, + RuntimeCall::BitcoinVault(..) => self.bitcoin_vault, + + _ => { + cf_runtime_utilities::log_or_panic!( + "All witnesser calls must be controllable through `WitnesserCallPermission`. Call: {:?}", + call + ); + #[allow(unreachable_code)] + false + }, + } + } +} diff --git a/state-chain/traits/src/lib.rs b/state-chain/traits/src/lib.rs index 52f36cb51e..b87c948922 100644 --- a/state-chain/traits/src/lib.rs +++ b/state-chain/traits/src/lib.rs @@ -815,3 +815,13 @@ pub trait AuthoritiesCfeVersions { /// Returns the percentage of current authorities with their CFEs at the given version. fn precent_authorities_at_version(version: SemVer) -> Percent; } + +pub trait CallDispatchFilter { + fn should_dispatch(&self, call: &RuntimeCall) -> bool; +} + +impl CallDispatchFilter for () { + fn should_dispatch(&self, _call: &RuntimeCall) -> bool { + true + } +} From e94f1f2921728e5af8357d153bc88cfb96d40f46 Mon Sep 17 00:00:00 2001 From: Marcello Date: Thu, 21 Sep 2023 15:44:04 +0200 Subject: [PATCH 04/15] add RUSTSEC-2023-0063 to config.toml (#4040) --- .cargo/config.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.cargo/config.toml b/.cargo/config.toml index 24d6858f7e..67dfe9896d 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -36,6 +36,7 @@ tree --no-default-features --depth 1 --edges=features,normal # - RUSTSEC-2023-0053: This advisory comes from rustls-webpki, a dependency of ethers-rs. CPU denial of service in certificate path building. # - RUSTSEC-2021-0060: This is a transitive dependency of libp2p and will be fixed in an upcoming release. # - RUSTSEC-2021-0059: This is a transitive dependency of libp2p and will be fixed in an upcoming release. 
+# - RUSTSEC-2023-0063: This is a transitive dependency of libp2p and it is not used. cf-audit = ''' audit --ignore RUSTSEC-2022-0061 --ignore RUSTSEC-2020-0071 @@ -47,4 +48,5 @@ audit --ignore RUSTSEC-2022-0061 --ignore RUSTSEC-2023-0053 --ignore RUSTSEC-2021-0060 --ignore RUSTSEC-2021-0059 + --ignore RUSTSEC-2023-0063 ''' From 342b182aa459d334c9b87af02c06734c648c547c Mon Sep 17 00:00:00 2001 From: Alastair Holmes <42404303+AlastairHolmes@users.noreply.github.com> Date: Fri, 22 Sep 2023 14:38:32 +0200 Subject: [PATCH 05/15] feat: subcribe_price and depth rpc (#3978) * price subscription * remove dead_code allow * depth_between rpc squash squash squash * subscribe_price -> subscribe_pool_price * inclusive and exclusive ranges * comment * depth rpc improvement --- Cargo.lock | 3 + state-chain/amm/src/common.rs | 5 +- state-chain/amm/src/lib.rs | 32 +++++++ state-chain/amm/src/limit_orders.rs | 30 +++++- state-chain/amm/src/range_orders.rs | 58 ++++++++++- state-chain/custom-rpc/Cargo.toml | 3 + state-chain/custom-rpc/src/lib.rs | 122 +++++++++++++++++++++++- state-chain/node/src/service.rs | 2 + state-chain/pallets/cf-pools/src/lib.rs | 52 +++++++++- state-chain/runtime/src/lib.rs | 6 +- state-chain/runtime/src/runtime_apis.rs | 7 +- 11 files changed, 305 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 13d0a2ebe8..68ea096442 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2319,13 +2319,16 @@ dependencies = [ "cf-amm", "cf-chains", "cf-primitives", + "futures", "hex", "jsonrpsee 0.16.2", "pallet-cf-governance", "pallet-cf-pools", "sc-client-api", + "sc-rpc-api", "serde", "sp-api", + "sp-core", "sp-rpc", "sp-runtime", "state-chain-runtime", diff --git a/state-chain/amm/src/common.rs b/state-chain/amm/src/common.rs index 7dc5c8f706..39bb990d37 100644 --- a/state-chain/amm/src/common.rs +++ b/state-chain/amm/src/common.rs @@ -114,9 +114,8 @@ impl core::ops::IndexMut for SideMap { } } } -#[cfg(test)] -impl, R> std::ops::Add> for SideMap { - type 
Output = SideMap<>::Output>; +impl, R> core::ops::Add> for SideMap { + type Output = SideMap<>::Output>; fn add(self, rhs: SideMap) -> Self::Output { SideMap { zero: self.zero + rhs.zero, one: self.one + rhs.one } } diff --git a/state-chain/amm/src/lib.rs b/state-chain/amm/src/lib.rs index 4d9e746f57..020c878985 100644 --- a/state-chain/amm/src/lib.rs +++ b/state-chain/amm/src/lib.rs @@ -308,4 +308,36 @@ impl PoolState { pub fn range_order_liquidity(&self) -> Vec<(Tick, Liquidity)> { self.range_orders.liquidity() } + + pub fn limit_order_depth( + &mut self, + range: core::ops::Range, + ) -> Result, Amount)>, limit_orders::DepthError> { + Ok(SideMap { + zero: ( + self.limit_orders.current_sqrt_price::(), + self.limit_orders.depth::(range.clone())?, + ), + one: ( + self.limit_orders.current_sqrt_price::(), + self.limit_orders.depth::(range)?, + ), + }) + } + + pub fn range_order_depth( + &self, + range: core::ops::Range, + ) -> Result, Amount)>, range_orders::DepthError> { + self.range_orders.depth(range.start, range.end).map(|assets| SideMap { + zero: ( + self.range_orders.current_sqrt_price::().map(sqrt_price_to_price), + assets[Side::Zero], + ), + one: ( + self.range_orders.current_sqrt_price::().map(sqrt_price_to_price), + assets[Side::One], + ), + }) + } } diff --git a/state-chain/amm/src/limit_orders.rs b/state-chain/amm/src/limit_orders.rs index 6a943d725a..f69edff751 100644 --- a/state-chain/amm/src/limit_orders.rs +++ b/state-chain/amm/src/limit_orders.rs @@ -205,6 +205,14 @@ pub enum SetFeesError { InvalidFeeAmount, } +#[derive(Debug)] +pub enum DepthError { + /// Invalid Price + InvalidTick, + /// Start tick must be less than or equal to the end tick + InvalidTickRange, +} + #[derive(Debug)] pub enum MintError { /// One of the start/end ticks of the range reached its maximum gross liquidity @@ -729,11 +737,31 @@ impl PoolState { /// Returns all the assets available for swaps in a given direction /// /// This function never panics. 
- #[allow(dead_code)] pub(super) fn liquidity(&self) -> Vec<(Tick, Amount)> { self.fixed_pools[!SD::INPUT_SIDE] .iter() .map(|(sqrt_price, fixed_pool)| (tick_at_sqrt_price(*sqrt_price), fixed_pool.available)) .collect() } + + /// Returns all the assets available for swaps between two prices (inclusive..exclusive) + /// + /// This function never panics. + pub(super) fn depth( + &self, + range: core::ops::Range, + ) -> Result { + let start = + Self::validate_tick::(range.start).map_err(|_| DepthError::InvalidTick)?; + let end = + Self::validate_tick::(range.end).map_err(|_| DepthError::InvalidTick)?; + if start <= end { + Ok(self.fixed_pools[!SD::INPUT_SIDE] + .range(start..end) + .map(|(_, fixed_pool)| fixed_pool.available) + .fold(Default::default(), |acc, x| acc + x)) + } else { + Err(DepthError::InvalidTickRange) + } + } } diff --git a/state-chain/amm/src/range_orders.rs b/state-chain/amm/src/range_orders.rs index fd3e730699..4febb3bb57 100644 --- a/state-chain/amm/src/range_orders.rs +++ b/state-chain/amm/src/range_orders.rs @@ -28,9 +28,9 @@ use scale_info::TypeInfo; use sp_core::{U256, U512}; use crate::common::{ - is_sqrt_price_valid, mul_div_ceil, mul_div_floor, sqrt_price_at_tick, tick_at_sqrt_price, - Amount, OneToZero, Side, SideMap, SqrtPriceQ64F96, Tick, ZeroToOne, MAX_TICK, MIN_TICK, - ONE_IN_HUNDREDTH_PIPS, SQRT_PRICE_FRACTIONAL_BITS, + is_sqrt_price_valid, is_tick_valid, mul_div_ceil, mul_div_floor, sqrt_price_at_tick, + tick_at_sqrt_price, Amount, OneToZero, Side, SideMap, SqrtPriceQ64F96, Tick, ZeroToOne, + MAX_TICK, MIN_TICK, ONE_IN_HUNDREDTH_PIPS, SQRT_PRICE_FRACTIONAL_BITS, }; pub type Liquidity = u128; @@ -345,6 +345,14 @@ pub enum RequiredAssetRatioError { InvalidTickRange, } +#[derive(Debug)] +pub enum DepthError { + /// Invalid Price + InvalidTick, + /// Start tick must be less than or equal to the end tick + InvalidTickRange, +} + #[derive(Debug)] pub enum LiquidityToAmountsError { /// Invalid Tick range @@ -1013,6 +1021,50 @@ impl 
PoolState { }) .collect() } + + pub(super) fn depth( + &self, + lower_tick: Tick, + upper_tick: Tick, + ) -> Result, DepthError> { + if !is_tick_valid(lower_tick) || !is_tick_valid(upper_tick) { + return Err(DepthError::InvalidTick) + } + + if lower_tick <= upper_tick { + let liquidity_at_lower_tick: Liquidity = + self.liquidity_map.range(..lower_tick).fold(0, |liquidity, (_, tick_delta)| { + liquidity.checked_add_signed(tick_delta.liquidity_delta).unwrap() + }); + + let (_liquidity, _tick, assets) = self + .liquidity_map + .range(lower_tick..upper_tick) + .map(|(tick, tick_delta)| (tick, tick_delta.liquidity_delta)) + .chain(core::iter::once((&upper_tick, 0 /* value doesn't matter */))) + .fold( + (liquidity_at_lower_tick, lower_tick, SideMap::::default()), + |(liquidity, previous_tick, assets), (current_tick, liquidity_delta)| { + ( + // Addition is guaranteed to never overflow, see test `max_liquidity` + liquidity.checked_add_signed(liquidity_delta).unwrap(), + *current_tick, + assets + + self.inner_liquidity_to_amounts::( + liquidity, + previous_tick, + *current_tick, + ) + .0, + ) + }, + ); + + Ok(assets) + } else { + Err(DepthError::InvalidTickRange) + } + } } fn zero_amount_delta_floor( diff --git a/state-chain/custom-rpc/Cargo.toml b/state-chain/custom-rpc/Cargo.toml index 9d0711f160..07d82b42eb 100644 --- a/state-chain/custom-rpc/Cargo.toml +++ b/state-chain/custom-rpc/Cargo.toml @@ -10,6 +10,7 @@ edition = '2021' state-chain-runtime = { path = '../runtime' } anyhow = "1.0" +futures = "0.3.14" jsonrpsee = { version = "0.16.2", features = ["full"] } serde = { version = '1.0', features = ['derive'] } cf-chains = { path = '../chains' } @@ -21,6 +22,8 @@ pallet-cf-pools = { path = "../pallets/cf-pools" } hex = '0.4.3' sp-api = { git = "https://github.com/chainflip-io/substrate.git", tag = "chainflip-monthly-2023-08+2" } +sp-core = { git = "https://github.com/chainflip-io/substrate.git", tag = "chainflip-monthly-2023-08+2" } sp-rpc = { git = 
"https://github.com/chainflip-io/substrate.git", tag = "chainflip-monthly-2023-08+2" } +sc-rpc-api = { git = "https://github.com/chainflip-io/substrate.git", tag = "chainflip-monthly-2023-08+2" } sp-runtime = { git = "https://github.com/chainflip-io/substrate.git", tag = "chainflip-monthly-2023-08+2" } sc-client-api = { git = "https://github.com/chainflip-io/substrate.git", tag = "chainflip-monthly-2023-08+2" } diff --git a/state-chain/custom-rpc/src/lib.rs b/state-chain/custom-rpc/src/lib.rs index a60b59e97a..17b5ffc6c4 100644 --- a/state-chain/custom-rpc/src/lib.rs +++ b/state-chain/custom-rpc/src/lib.rs @@ -5,10 +5,15 @@ use cf_amm::{ use cf_chains::{btc::BitcoinNetwork, dot::PolkadotHash, eth::Address as EthereumAddress}; use cf_primitives::{Asset, AssetAmount, SemVer, SwapOutput}; use core::ops::Range; -use jsonrpsee::{core::RpcResult, proc_macros::rpc, types::error::CallError}; +use jsonrpsee::{ + core::RpcResult, + proc_macros::rpc, + types::error::{CallError, SubscriptionEmptyError}, + SubscriptionSink, +}; use pallet_cf_governance::GovCallHash; -use pallet_cf_pools::{AssetsMap, PoolInfo, PoolLiquidity, PoolOrders}; -use sc_client_api::HeaderBackend; +use pallet_cf_pools::{AssetsMap, Depth, PoolInfo, PoolLiquidity, PoolOrders}; +use sc_client_api::{BlockchainEvents, HeaderBackend}; use serde::{Deserialize, Serialize}; use sp_api::BlockT; use sp_rpc::number::NumberOrHex; @@ -215,6 +220,14 @@ pub trait CustomApi { pair_asset: Asset, at: Option, ) -> RpcResult>; + #[method(name = "pool_depth")] + fn cf_pool_depth( + &self, + base_asset: Asset, + pair_asset: Asset, + tick_range: Range, + at: Option, + ) -> RpcResult, DispatchError>>>; #[method(name = "pool_liquidity")] fn cf_pool_liquidity( &self, @@ -245,18 +258,26 @@ pub trait CustomApi { fn cf_current_compatibility_version(&self) -> RpcResult; #[method(name = "min_swap_amount")] fn cf_min_swap_amount(&self, asset: Asset) -> RpcResult; + #[subscription(name = "subscribe_pool_price", item = Price)] + fn 
cf_subscribe_pool_price(&self, from: Asset, to: Asset); } /// An RPC extension for the state chain node. pub struct CustomRpc { pub client: Arc, pub _phantom: PhantomData, + pub executor: Arc, } impl CustomRpc where B: BlockT, - C: sp_api::ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + C: sp_api::ProvideRuntimeApi + + Send + + Sync + + 'static + + HeaderBackend + + BlockchainEvents, C::Api: CustomRuntimeApi, { fn unwrap_or_best(&self, from_rpc: Option<::Hash>) -> B::Hash { @@ -271,7 +292,12 @@ fn to_rpc_error(e: E) -> jsonrpsee impl CustomApiServer for CustomRpc where B: BlockT, - C: sp_api::ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + C: sp_api::ProvideRuntimeApi + + Send + + Sync + + 'static + + HeaderBackend + + BlockchainEvents, C::Api: CustomRuntimeApi, { fn cf_is_auction_phase(&self, at: Option<::Hash>) -> RpcResult { @@ -554,6 +580,19 @@ where .map_err(to_rpc_error) } + fn cf_pool_depth( + &self, + base_asset: Asset, + pair_asset: Asset, + tick_range: Range, + at: Option, + ) -> RpcResult, DispatchError>>> { + self.client + .runtime_api() + .cf_pool_depth(self.unwrap_or_best(at), base_asset, pair_asset, tick_range) + .map_err(to_rpc_error) + } + fn cf_pool_liquidity( &self, base_asset: Asset, @@ -638,4 +677,77 @@ where .cf_min_swap_amount(self.unwrap_or_best(None), asset) .map_err(to_rpc_error) } + + fn cf_subscribe_pool_price( + &self, + sink: SubscriptionSink, + from: Asset, + to: Asset, + ) -> Result<(), SubscriptionEmptyError> { + self.new_subscription(sink, move |api, hash| api.cf_pool_price(hash, from, to)) + } +} + +impl CustomRpc +where + B: BlockT, + C: sp_api::ProvideRuntimeApi + + Send + + Sync + + 'static + + HeaderBackend + + BlockchainEvents, + C::Api: CustomRuntimeApi, +{ + fn new_subscription< + T: Serialize + Send + Clone + Eq + 'static, + E: std::error::Error + Send + Sync + 'static, + F: Fn(&C::Api, state_chain_runtime::Hash) -> Result + Send + Clone + 'static, + >( + &self, + mut sink: SubscriptionSink, + f: 
F, + ) -> Result<(), SubscriptionEmptyError> { + use futures::{future::FutureExt, stream::StreamExt}; + + let client = self.client.clone(); + + let initial = match f(&self.client.runtime_api(), self.client.info().best_hash) { + Ok(initial) => initial, + Err(e) => { + let _ = sink.reject(jsonrpsee::core::Error::from( + sc_rpc_api::state::error::Error::Client(Box::new(e)), + )); + return Ok(()) + }, + }; + + let mut previous = initial.clone(); + + let stream = self + .client + .import_notification_stream() + .filter(|n| futures::future::ready(n.is_new_best)) + .filter_map(move |n| { + let new = f(&client.runtime_api(), n.hash); + + match new { + Ok(new) if new != previous => { + previous = new.clone(); + futures::future::ready(Some(new)) + }, + _ => futures::future::ready(None), + } + }); + + let stream = futures::stream::once(futures::future::ready(initial)).chain(stream); + + let fut = async move { + sink.pipe_from_stream(stream).await; + }; + + self.executor.spawn("cf-rpc-subscription", Some("rpc"), fut.boxed()); + + Ok(()) + } } diff --git a/state-chain/node/src/service.rs b/state-chain/node/src/service.rs index 195953fc28..14c2b501f7 100644 --- a/state-chain/node/src/service.rs +++ b/state-chain/node/src/service.rs @@ -242,6 +242,7 @@ pub fn new_full(config: Configuration) -> Result { let rpc_builder = { let client = client.clone(); let pool = transaction_pool.clone(); + let executor = Arc::new(task_manager.spawn_handle()); Box::new(move |deny_unsafe, subscription_executor| { let build = || { @@ -276,6 +277,7 @@ pub fn new_full(config: Configuration) -> Result { module.merge(CustomApiServer::into_rpc(CustomRpc { client: client.clone(), _phantom: PhantomData, + executor: executor.clone(), }))?; Ok(module) diff --git a/state-chain/pallets/cf-pools/src/lib.rs b/state-chain/pallets/cf-pools/src/lib.rs index 20cdc7a48a..554f2ecc57 100644 --- a/state-chain/pallets/cf-pools/src/lib.rs +++ b/state-chain/pallets/cf-pools/src/lib.rs @@ -5,7 +5,7 @@ use cf_amm::{ 
common::{Amount, Order, Price, Side, SideMap, Tick}, limit_orders, range_orders, range_orders::Liquidity, - NewError, PoolState, + PoolState, }; use cf_primitives::{chains::assets::any, Asset, AssetAmount, SwapOutput, STABLE_ASSET}; use cf_traits::{impl_pallet_safe_mode, Chainflip, LpBalanceApi, SwappingApi}; @@ -193,6 +193,7 @@ pub mod pallet { common::Tick, limit_orders, range_orders::{self, Liquidity}, + NewError, }; use cf_traits::{AccountRoleRegistry, LpBalanceApi}; use frame_system::pallet_prelude::BlockNumberFor; @@ -956,6 +957,18 @@ pub struct PoolLiquidity { pub range_orders: Vec<(Tick, Liquidity)>, } +#[derive(Clone, Debug, Encode, Decode, TypeInfo, PartialEq, Eq, Deserialize, Serialize)] +pub struct SingleDepth { + pub price: Option, + pub depth: Amount, +} + +#[derive(Clone, Debug, Encode, Decode, TypeInfo, PartialEq, Eq, Deserialize, Serialize)] +pub struct Depth { + pub limit_orders: SingleDepth, + pub range_orders: SingleDepth, +} + impl Pallet { #[allow(clippy::too_many_arguments)] fn inner_update_limit_order( @@ -1243,6 +1256,43 @@ impl Pallet { ) } + pub fn pool_depth( + base_asset: any::Asset, + pair_asset: any::Asset, + tick_range: Range, + ) -> Option, DispatchError>> { + let asset_pair = AssetPair::::new(base_asset, pair_asset).ok()?; + let mut pool = Pools::::get(asset_pair.canonical_asset_pair)?; + + let limit_orders = pool.pool_state.limit_order_depth(tick_range.clone()).map_err(|error| { + match error { + limit_orders::DepthError::InvalidTickRange => Error::::InvalidTickRange, + limit_orders::DepthError::InvalidTick => Error::::InvalidTick, + } + .into() + }); + + let range_orders = pool.pool_state.range_order_depth(tick_range).map_err(|error| { + match error { + range_orders::DepthError::InvalidTickRange => Error::::InvalidTickRange, + range_orders::DepthError::InvalidTick => Error::::InvalidTick, + } + .into() + }); + + Some(limit_orders.and_then(|limit_orders| { + range_orders.map(|range_orders| { + 
asset_pair.side_map_to_assets_map(SideMap::<()>::default().map(|side, ()| { + let to_single_depth = |(price, depth)| SingleDepth { price, depth }; + Depth { + limit_orders: to_single_depth(limit_orders[side]), + range_orders: to_single_depth(range_orders[side]), + } + })) + }) + })) + } + pub fn pool_liquidity_providers( base_asset: any::Asset, pair_asset: any::Asset, diff --git a/state-chain/runtime/src/lib.rs b/state-chain/runtime/src/lib.rs index 5c3be0658e..83ab32f5a6 100644 --- a/state-chain/runtime/src/lib.rs +++ b/state-chain/runtime/src/lib.rs @@ -29,7 +29,7 @@ use cf_chains::{ use core::ops::Range; pub use frame_system::Call as SystemCall; use pallet_cf_governance::GovCallHash; -use pallet_cf_pools::{AssetsMap, PoolLiquidity}; +use pallet_cf_pools::{AssetsMap, Depth, PoolLiquidity}; use pallet_cf_reputation::ExclusionList; use pallet_transaction_payment::{ConstFeeMultiplier, Multiplier}; use sp_runtime::DispatchError; @@ -1036,6 +1036,10 @@ impl_runtime_apis! { LiquidityPools::pool_info(base_asset, pair_asset) } + fn cf_pool_depth(base_asset: Asset, pair_asset: Asset, tick_range: Range) -> Option, DispatchError>> { + LiquidityPools::pool_depth(base_asset, pair_asset, tick_range) + } + fn cf_pool_liquidity(base_asset: Asset, pair_asset: Asset) -> Option { LiquidityPools::pool_liquidity(base_asset, pair_asset) } diff --git a/state-chain/runtime/src/runtime_apis.rs b/state-chain/runtime/src/runtime_apis.rs index 98a2f32d66..da1779f94a 100644 --- a/state-chain/runtime/src/runtime_apis.rs +++ b/state-chain/runtime/src/runtime_apis.rs @@ -9,7 +9,7 @@ use codec::{Decode, Encode}; use core::ops::Range; use frame_support::sp_runtime::AccountId32; use pallet_cf_governance::GovCallHash; -use pallet_cf_pools::{AssetsMap, PoolInfo, PoolLiquidity, PoolOrders}; +use pallet_cf_pools::{AssetsMap, Depth, PoolInfo, PoolLiquidity, PoolOrders}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_api::decl_runtime_apis; @@ -114,6 +114,11 @@ decl_runtime_apis!( 
fn cf_pool_simulate_swap(from: Asset, to: Asset, amount: AssetAmount) -> Option; fn cf_pool_info(base_asset: Asset, pair_asset: Asset) -> Option; + fn cf_pool_depth( + base_asset: Asset, + pair_asset: Asset, + tick_range: Range, + ) -> Option, DispatchError>>; fn cf_pool_liquidity(base_asset: Asset, pair_asset: Asset) -> Option; fn cf_required_asset_ratio_for_range_order( base_asset: Asset, From 3703cf20579fb43e56d157fb06161b3ef71e67dd Mon Sep 17 00:00:00 2001 From: kylezs Date: Sat, 23 Sep 2023 00:32:10 +1000 Subject: [PATCH 06/15] refactor: deposit address expiry preparation (#4033) --- state-chain/chains/src/deposit_channel.rs | 18 ----- .../pallets/cf-ingress-egress/src/lib.rs | 65 +++++++++---------- 2 files changed, 31 insertions(+), 52 deletions(-) diff --git a/state-chain/chains/src/deposit_channel.rs b/state-chain/chains/src/deposit_channel.rs index f97c5713b7..fa25d742dc 100644 --- a/state-chain/chains/src/deposit_channel.rs +++ b/state-chain/chains/src/deposit_channel.rs @@ -40,24 +40,6 @@ pub trait ChannelLifecycleHooks: Sized { impl ChannelLifecycleHooks for () {} -impl ChannelLifecycleHooks for DepositChannel { - fn can_fetch(&self) -> bool { - self.state.can_fetch() - } - - fn on_fetch_scheduled(&mut self) -> bool { - self.state.on_fetch_scheduled() - } - - fn on_fetch_completed(&mut self) -> bool { - self.state.on_fetch_completed() - } - - fn maybe_recycle(self) -> Option { - self.state.maybe_recycle().map(|state| Self { state, ..self }) - } -} - impl DepositChannel { pub fn generate_new>( channel_id: ChannelId, diff --git a/state-chain/pallets/cf-ingress-egress/src/lib.rs b/state-chain/pallets/cf-ingress-egress/src/lib.rs index d269b7ebaa..59afc93e29 100644 --- a/state-chain/pallets/cf-ingress-egress/src/lib.rs +++ b/state-chain/pallets/cf-ingress-egress/src/lib.rs @@ -404,18 +404,11 @@ pub mod pallet { ) -> DispatchResult { T::EnsureWitnessedAtCurrentEpoch::ensure_origin(origin)?; for deposit_address in addresses { - if let Some(mut 
deposit_details) = - DepositChannelLookup::::get(&deposit_address) - { - if deposit_details.deposit_channel.on_fetch_completed() { - DepositChannelLookup::::insert(&deposit_address, deposit_details); - } - } else { - log::error!( - "Deposit address {:?} not found in DepositChannelLookup", - deposit_address - ); - } + DepositChannelLookup::::mutate(deposit_address, |deposit_channel_details| { + deposit_channel_details + .as_mut() + .map(|details| details.deposit_channel.state.on_fetch_completed()); + }); } Ok(()) } @@ -551,27 +544,28 @@ impl, I: 'static> Pallet { deposit_address, deposit_fetch_id, .. - } => - if let Some(mut details) = - DepositChannelLookup::::get(&*deposit_address) - { - if details.deposit_channel.can_fetch() { - deposit_fetch_id - .replace(details.deposit_channel.fetch_id()); - if details.deposit_channel.on_fetch_scheduled() { - DepositChannelLookup::::insert( - deposit_address, - details, - ); - } - true - } else { - false - } - } else { - log::error!("Deposit address {:?} not found in DepositChannelLookup", deposit_address); - false + } => DepositChannelLookup::::mutate( + deposit_address, + |details| { + details + .as_mut() + .map(|details| { + let can_fetch = + details.deposit_channel.state.can_fetch(); + if can_fetch { + deposit_fetch_id.replace( + details.deposit_channel.fetch_id(), + ); + details + .deposit_channel + .state + .on_fetch_scheduled(); + } + can_fetch + }) + .unwrap_or(false) }, + ), FetchOrTransfer::Transfer { .. 
} => true, } }) @@ -935,8 +929,11 @@ impl, I: 'static> DepositApi for Pallet { fn expire_channel(address: TargetChainAccount) { ChannelActions::::remove(&address); if let Some(deposit_channel_details) = DepositChannelLookup::::get(&address) { - if let Some(channel) = deposit_channel_details.deposit_channel.maybe_recycle() { - DepositChannelPool::::insert(channel.channel_id, channel); + if let Some(state) = deposit_channel_details.deposit_channel.state.maybe_recycle() { + DepositChannelPool::::insert( + deposit_channel_details.deposit_channel.channel_id, + DepositChannel { state, ..deposit_channel_details.deposit_channel }, + ); } } else { log_or_panic!("Tried to close an unknown channel."); From 0231a5a99362b311951af9977d562d1937064a36 Mon Sep 17 00:00:00 2001 From: Jamie Ford Date: Mon, 25 Sep 2023 14:54:30 +1000 Subject: [PATCH 07/15] CFE RPC Client optional genesis hash check for tests (#4041) * refactor: optional genesis hash for tests * chore: renamed new_ext -> new_inner --- engine/src/dot/http_rpc.rs | 27 +++++++++++++------------ engine/src/dot/retry_rpc.rs | 16 +++++++++++---- engine/src/dot/rpc.rs | 39 ++++++++++++++++++++++++------------- 3 files changed, 52 insertions(+), 30 deletions(-) diff --git a/engine/src/dot/http_rpc.rs b/engine/src/dot/http_rpc.rs index 16a2e3ca00..497549c1ea 100644 --- a/engine/src/dot/http_rpc.rs +++ b/engine/src/dot/http_rpc.rs @@ -21,6 +21,7 @@ use subxt::{ }; use anyhow::Result; +use tracing::{error, warn}; use utilities::{make_periodic_tick, redact_endpoint_secret::SecretUrl}; use crate::constants::RPC_RETRY_CONNECTION_INTERVAL; @@ -82,7 +83,7 @@ pub struct DotHttpRpcClient { impl DotHttpRpcClient { pub fn new( url: SecretUrl, - expected_genesis_hash: PolkadotHash, + expected_genesis_hash: Option, ) -> Result> { let polkadot_http_client = Arc::new(PolkadotHttpClient::new(&url)?); @@ -98,17 +99,22 @@ impl DotHttpRpcClient { .await { Ok(online_client) => { - let genesis_hash = online_client.genesis_hash(); - if genesis_hash 
== expected_genesis_hash { - break online_client + if let Some(expected_genesis_hash) = expected_genesis_hash { + let genesis_hash = online_client.genesis_hash(); + if genesis_hash == expected_genesis_hash { + break online_client + } else { + error!( + "Connected to Polkadot node at {url} but the genesis hash {genesis_hash} does not match the expected genesis hash {expected_genesis_hash}. Please check your CFE configuration file." + ) + } } else { - tracing::error!( - "Connected to Polkadot node at {url} but the genesis hash {genesis_hash} does not match the expected genesis hash {expected_genesis_hash}. Please check your CFE configuration file." - ) + warn!("Skipping Polkadot genesis hash check"); + break online_client } }, Err(e) => { - tracing::error!( + error!( "Failed to connect to Polkadot node at {url} with error: {e}. Please check your CFE configuration file. Retrying in {:?}...", poll_interval.period() @@ -213,11 +219,8 @@ mod tests { #[ignore = "requires local node"] #[tokio::test] async fn test_http_rpc() { - // This will no longer work because we need to know the genesis hash let dot_http_rpc = - DotHttpRpcClient::new("http://localhost:9945".into(), PolkadotHash::default()) - .unwrap() - .await; + DotHttpRpcClient::new("http://localhost:9945".into(), None).unwrap().await; let block_hash = dot_http_rpc.block_hash(1).await.unwrap(); println!("block_hash: {:?}", block_hash); } diff --git a/engine/src/dot/retry_rpc.rs b/engine/src/dot/retry_rpc.rs index 913c3acc89..da447a3cf0 100644 --- a/engine/src/dot/retry_rpc.rs +++ b/engine/src/dot/retry_rpc.rs @@ -45,6 +45,15 @@ impl DotRetryRpcClient { scope: &Scope<'_, anyhow::Error>, nodes: NodeContainer, expected_genesis_hash: PolkadotHash, + ) -> Result { + Self::new_inner(scope, nodes, Some(expected_genesis_hash)) + } + + fn new_inner( + scope: &Scope<'_, anyhow::Error>, + nodes: NodeContainer, + // The genesis hash is optional to facilitate testing + expected_genesis_hash: Option, ) -> Result { let 
f_create_clients = |endpoints: WsHttpEndpoints| { Result::<_, anyhow::Error>::Ok(( @@ -58,7 +67,7 @@ impl DotRetryRpcClient { let (backup_rpc_client, backup_sub_client) = option_inner(nodes.backup.map(f_create_clients).transpose()?); - Ok(Self { + Ok(DotRetryRpcClient { rpc_retry_client: RetrierClient::new( scope, "dot_rpc", @@ -307,8 +316,7 @@ mod tests { async fn my_test() { task_scope(|scope| { async move { - // This will no longer work because we need to know the genesis hash - let dot_retry_rpc_client = DotRetryRpcClient::new( + let dot_retry_rpc_client = DotRetryRpcClient::new_inner( scope, NodeContainer { primary: WsHttpEndpoints { @@ -317,7 +325,7 @@ mod tests { }, backup: None, }, - PolkadotHash::default(), + None, ) .unwrap(); diff --git a/engine/src/dot/rpc.rs b/engine/src/dot/rpc.rs index 81ce4f1541..e36884b5f3 100644 --- a/engine/src/dot/rpc.rs +++ b/engine/src/dot/rpc.rs @@ -12,6 +12,7 @@ use subxt::{ Config, OnlineClient, PolkadotConfig, }; use tokio::sync::RwLock; +use tracing::warn; use utilities::redact_endpoint_secret::SecretUrl; use anyhow::{anyhow, bail, Result}; @@ -136,11 +137,11 @@ impl DotRpcApi for DotRpcClient { #[derive(Clone)] pub struct DotSubClient { pub ws_endpoint: SecretUrl, - expected_genesis_hash: PolkadotHash, + expected_genesis_hash: Option, } impl DotSubClient { - pub fn new(ws_endpoint: SecretUrl, expected_genesis_hash: PolkadotHash) -> Self { + pub fn new(ws_endpoint: SecretUrl, expected_genesis_hash: Option) -> Self { Self { ws_endpoint, expected_genesis_hash } } } @@ -150,12 +151,7 @@ impl DotSubscribeApi for DotSubClient { async fn subscribe_best_heads( &self, ) -> Result> + Send>>> { - let client = OnlineClient::::from_url(&self.ws_endpoint).await?; - - let genesis_hash = client.genesis_hash(); - if genesis_hash != self.expected_genesis_hash { - bail!("Expected genesis hash {} but got {genesis_hash}", self.expected_genesis_hash); - } + let client = create_online_client(&self.ws_endpoint, 
self.expected_genesis_hash).await?; Ok(Box::pin( client @@ -170,12 +166,7 @@ impl DotSubscribeApi for DotSubClient { async fn subscribe_finalized_heads( &self, ) -> Result> + Send>>> { - let client = OnlineClient::::from_url(&self.ws_endpoint).await?; - - let genesis_hash = client.genesis_hash(); - if genesis_hash != self.expected_genesis_hash { - bail!("Expected genesis hash {} but got {genesis_hash}", self.expected_genesis_hash); - } + let client = create_online_client(&self.ws_endpoint, self.expected_genesis_hash).await?; Ok(Box::pin( client @@ -188,6 +179,26 @@ impl DotSubscribeApi for DotSubClient { } } +/// Creates an OnlineClient from the given websocket endpoint and checks the genesis hash if +/// provided. +async fn create_online_client( + ws_endpoint: &SecretUrl, + expected_genesis_hash: Option, +) -> Result> { + let client = OnlineClient::::from_url(ws_endpoint).await?; + + if let Some(expected_genesis_hash) = expected_genesis_hash { + let genesis_hash = client.genesis_hash(); + if genesis_hash != expected_genesis_hash { + bail!("Expected Polkadot genesis hash {expected_genesis_hash} but got {genesis_hash}"); + } + } else { + warn!("Skipping Polkadot genesis hash check"); + } + + Ok(client) +} + #[async_trait] impl DotSubscribeApi for DotRpcClient { async fn subscribe_best_heads( From 61dbd66dacf80a31236fb0b09e13275de9ae0920 Mon Sep 17 00:00:00 2001 From: Marcello Date: Mon, 25 Sep 2023 11:49:17 +0200 Subject: [PATCH 08/15] Prometheus metric ceremony (#4034) * added macro to create gauges that get deleted * added ceremony_duration metric * fixed gauge to handle convertion to i64 * ceremony missing messages on timeout metric added * added chain label to CEREMONY_PROCESSED_MSG, CEREMONY_DURATION, CEREMONY_TIMEOUT_MISSING_MSG * modified macro to support drop (deletion of labels) on all the types of wrapper, passed as a parameter when calling the macro * added STAGE_DURATION metric * use collect_array * added STAGE_COMPLETING/STAGE_FAILING metrics * avoid 
saving labels already seen (add to the hashset) if we don't drop the metric * fixed missing imports caused by rebasing * fix double imports * avoid using format! and to_string every time -> use clone() * fixed typo * addressed PR comments * use Option for stage/ceremony _start do conversion inside the constructor add test to check deletion of metrics inside CeremonyMetrics strucs * fixed test * cargo fmt * added manual deletion inside tests and make sure it returns an error -> deletion done before as expected * address review comments --- engine/multisig/src/client/ceremony_runner.rs | 63 ++- .../multisig/src/client/common/broadcast.rs | 30 +- .../src/client/common/ceremony_stage.rs | 15 +- utilities/src/with_std/metrics.rs | 439 ++++++++++++++++-- 4 files changed, 460 insertions(+), 87 deletions(-) diff --git a/engine/multisig/src/client/ceremony_runner.rs b/engine/multisig/src/client/ceremony_runner.rs index 2ff2d0d3af..d2ac89291a 100644 --- a/engine/multisig/src/client/ceremony_runner.rs +++ b/engine/multisig/src/client/ceremony_runner.rs @@ -4,7 +4,7 @@ mod tests; use std::{ collections::{btree_map, BTreeMap, BTreeSet}, pin::Pin, - time::Duration, + time::{Duration, Instant}, }; use anyhow::Result; @@ -15,13 +15,7 @@ use tokio::sync::{ oneshot, }; use tracing::{debug, warn, Instrument}; -use utilities::{ - format_iterator, - metrics::{ - CeremonyBadMsgNotDrop, CeremonyMetrics, CeremonyProcessedMsgDrop, CEREMONY_BAD_MSG, - CEREMONY_PROCESSED_MSG, - }, -}; +use utilities::{format_iterator, metrics::CeremonyMetrics}; use crate::{ client::{ @@ -86,7 +80,7 @@ where // We always create unauthorised first, it can get promoted to // an authorised one with a ceremony request let mut runner = Self::new_unauthorised(outcome_sender, ceremony_id); - + let mut ceremony_start: Option = None; // Fuse the oneshot future so it will not get called twice let mut request_receiver = request_receiver.fuse(); @@ -102,7 +96,7 @@ where request = &mut request_receiver => { let 
PreparedRequest { initial_stage } = request.expect("Ceremony request channel was dropped unexpectedly"); - + ceremony_start = Some(Instant::now()); if let Some(result) = runner.on_ceremony_request(initial_stage).instrument(span.clone()).await { break result; } @@ -116,7 +110,16 @@ where } } }; - + if let Some(start_instant) = ceremony_start { + let duration = start_instant.elapsed().as_millis(); + runner.metrics.ceremony_duration.set(duration); + tracing::info!( + "Ceremony {} ({}) took {}ms to complete", + Ceremony::CEREMONY_TYPE, + ceremony_id, + duration + ); + } let _result = runner.outcome_sender.send((ceremony_id, outcome)); Ok(()) } @@ -135,13 +138,7 @@ where timeout_handle: Box::pin(tokio::time::sleep(tokio::time::Duration::ZERO)), outcome_sender, _phantom: Default::default(), - metrics: CeremonyMetrics { - processed_messages: CeremonyProcessedMsgDrop::new( - &CEREMONY_PROCESSED_MSG, - [format!("{}", ceremony_id)], - ), - bad_message: CeremonyBadMsgNotDrop::new(&CEREMONY_BAD_MSG, [Chain::NAME]), - }, + metrics: CeremonyMetrics::new(ceremony_id, Chain::NAME, Ceremony::CEREMONY_TYPE), } } @@ -151,7 +148,7 @@ where &mut self, mut initial_stage: DynStage, ) -> OptionalCeremonyReturn { - let single_party_result = initial_stage.init(&self.metrics); + let single_party_result = initial_stage.init(&mut self.metrics); // This function is only ever called from a oneshot channel, // so it should never get called twice. 
@@ -180,14 +177,15 @@ where .stage .take() .expect("Ceremony must be authorised to finalize any of its stages"); - + let stage_name = stage.get_stage_name().to_string(); let validator_mapping = stage.ceremony_common().validator_mapping.clone(); - match stage.finalize().await { + match stage.finalize(&mut self.metrics).await { StageResult::NextStage(mut next_stage) => { debug!("Ceremony transitions to {}", next_stage.get_stage_name()); + self.metrics.stage_completing.inc(&[&stage_name]); - let single_party_result = next_stage.init(&self.metrics); + let single_party_result = next_stage.init(&mut self.metrics); self.stage = Some(next_stage); @@ -208,10 +206,13 @@ where self.process_delayed().await } }, - StageResult::Error(bad_validators, reason) => - Some(Err((validator_mapping.get_ids(bad_validators), reason))), + StageResult::Error(bad_validators, reason) => { + self.metrics.stage_failing.inc(&[&stage_name, &format!("{:?}", reason)]); + Some(Err((validator_mapping.get_ids(bad_validators), reason))) + }, StageResult::Done(result) => { debug!("Ceremony reached the final stage!"); + self.metrics.stage_completing.inc(&[&stage_name]); Some(Ok(result)) }, @@ -281,7 +282,7 @@ where } if let ProcessMessageResult::Ready = - stage.process_message(sender_idx, data, &self.metrics) + stage.process_message(sender_idx, data, &mut self.metrics) { return self.finalize_current_stage().await } @@ -353,14 +354,10 @@ where stage.get_stage_name(), missing_messages_from_accounts.len() ); - - warn!( - missing_ids = format_iterator(missing_messages_from_accounts).to_string(), - "Ceremony stage {} timed out before all messages collected ({} missing), trying to finalize current stage anyway.", - stage.get_stage_name(), - missing_messages_from_accounts.len() - ); - + let stage_name = stage.get_stage_name().to_string(); + self.metrics + .missing_messages + .set(&[&stage_name], missing_messages_from_accounts.len()); self.finalize_current_stage().await } else { panic!("Unauthorised ceremonies 
cannot timeout"); diff --git a/engine/multisig/src/client/common/broadcast.rs b/engine/multisig/src/client/common/broadcast.rs index 86faafb527..96efb51702 100644 --- a/engine/multisig/src/client/common/broadcast.rs +++ b/engine/multisig/src/client/common/broadcast.rs @@ -1,21 +1,21 @@ use std::{ collections::{btree_map, BTreeMap}, fmt::Display, + time::Instant, }; use async_trait::async_trait; use cf_primitives::{AuthorityCount, CeremonyId}; use tracing::warn; +use super::ceremony_stage::{CeremonyCommon, CeremonyStage, ProcessMessageResult, StageResult}; use crate::{ client::{ceremony_manager::CeremonyTrait, MultisigMessage}, p2p::{OutgoingMultisigStageMessages, ProtocolVersion, CURRENT_PROTOCOL_VERSION}, }; - -use super::ceremony_stage::{CeremonyCommon, CeremonyStage, ProcessMessageResult, StageResult}; +use utilities::metrics::CeremonyMetrics; pub use super::broadcast_verification::verify_broadcasts_non_blocking; -use utilities::metrics::CeremonyMetrics; /// Used by individual stages to distinguish between /// a public message that should be broadcast to everyone @@ -61,6 +61,7 @@ where /// Determines the actual computations before/after /// the data is collected processor: Stage, + stage_started: Option, } impl BroadcastStage @@ -68,7 +69,7 @@ where Stage: BroadcastStageProcessor, { pub fn new(processor: Stage, common: CeremonyCommon) -> Self { - BroadcastStage { common, messages: BTreeMap::new(), processor } + BroadcastStage { common, messages: BTreeMap::new(), processor, stage_started: None } } } @@ -99,9 +100,9 @@ impl CeremonyStage for BroadcastStage where Stage: BroadcastStageProcessor + Send, { - fn init(&mut self, metrics: &CeremonyMetrics) -> ProcessMessageResult { + fn init(&mut self, metrics: &mut CeremonyMetrics) -> ProcessMessageResult { let common = &self.common; - + self.stage_started = Some(Instant::now()); let idx_to_id = |idx: &AuthorityCount| common.validator_mapping.get_id(*idx).clone(); let (own_message, outgoing_messages) = match 
self.processor.init() { @@ -158,7 +159,7 @@ where &mut self, signer_idx: AuthorityCount, m: C::Data, - metrics: &CeremonyMetrics, + metrics: &mut CeremonyMetrics, ) -> ProcessMessageResult { metrics.processed_messages.inc(); let m: Stage::Message = match m.try_into() { @@ -209,11 +210,18 @@ where } } - async fn finalize(mut self: Box) -> StageResult { + async fn finalize(mut self: Box, metrics: &mut CeremonyMetrics) -> StageResult { // Because we might want to finalize the stage before // all data has been received (e.g. due to a timeout), // we insert None for any missing data + let stage_name = self.get_stage_name().to_string(); + if let Some(start_instant) = self.stage_started { + metrics + .stage_duration + .set(&[&stage_name, "receiving"], start_instant.elapsed().as_millis()); + } + let process_msg_instant = Instant::now(); let mut received_messages = std::mem::take(&mut self.messages); // Turns values T into Option, inserting `None` where @@ -225,7 +233,11 @@ where .map(|idx| (*idx, received_messages.remove(idx))) .collect(); - self.processor.process(messages).await + let result = self.processor.process(messages).await; + metrics + .stage_duration + .set(&[&stage_name, "processing"], process_msg_instant.elapsed().as_millis()); + result } fn awaited_parties(&self) -> std::collections::BTreeSet { diff --git a/engine/multisig/src/client/common/ceremony_stage.rs b/engine/multisig/src/client/common/ceremony_stage.rs index 5b1e98eebf..646e3d0c1c 100644 --- a/engine/multisig/src/client/common/ceremony_stage.rs +++ b/engine/multisig/src/client/common/ceremony_stage.rs @@ -1,16 +1,15 @@ use std::{collections::BTreeSet, sync::Arc}; -use async_trait::async_trait; -use cf_primitives::{AuthorityCount, CeremonyId}; -use tokio::sync::mpsc::UnboundedSender; -use utilities::metrics::CeremonyMetrics; - use crate::{ client::{ceremony_manager::CeremonyTrait, utils::PartyIdxMapping}, crypto::Rng, p2p::OutgoingMultisigStageMessages, ChainSigning, }; +use async_trait::async_trait; 
+use cf_primitives::{AuthorityCount, CeremonyId}; +use tokio::sync::mpsc::UnboundedSender; +use utilities::metrics::CeremonyMetrics; /// Outcome of a given ceremony stage pub enum StageResult { @@ -36,7 +35,7 @@ pub enum ProcessMessageResult { #[async_trait] pub trait CeremonyStage { /// Perform initial computation for this stage (and initiate communication with other parties) - fn init(&mut self, metrics: &CeremonyMetrics) -> ProcessMessageResult; + fn init(&mut self, metrics: &mut CeremonyMetrics) -> ProcessMessageResult; /// Process message from signer at index `signer_idx`. Precondition: the signer is a valid /// holder of the key and selected to participate in this ceremony (TODO: also check that @@ -45,12 +44,12 @@ pub trait CeremonyStage { &mut self, signer_idx: AuthorityCount, m: C::Data, - metrics: &CeremonyMetrics, + metrics: &mut CeremonyMetrics, ) -> ProcessMessageResult; /// Verify data for this stage after it is received from all other parties, /// either abort or proceed to the next stage based on the result - async fn finalize(self: Box) -> StageResult; + async fn finalize(self: Box, metrics: &mut CeremonyMetrics) -> StageResult; /// Parties we haven't heard from for the current stage fn awaited_parties(&self) -> BTreeSet; diff --git a/utilities/src/with_std/metrics.rs b/utilities/src/with_std/metrics.rs index be7e1d8d13..ca461a8f8c 100644 --- a/utilities/src/with_std/metrics.rs +++ b/utilities/src/with_std/metrics.rs @@ -3,6 +3,7 @@ //! Returns the metrics encoded in a prometheus format //! 
Method returns a Sender, allowing graceful termination of the infinite loop use super::{super::Port, task_scope}; +use crate::ArrayCollect; use async_channel::{unbounded, Receiver, Sender}; use lazy_static; use prometheus::{ @@ -11,7 +12,7 @@ use prometheus::{ IntCounterVec, IntGauge, IntGaugeVec, Opts, Registry, }; use serde::Deserialize; -use std::net::IpAddr; +use std::{collections::HashSet, net::IpAddr}; use tracing::info; use warp::Filter; @@ -162,32 +163,166 @@ macro_rules! build_counter_vec { } } } -/// The idea behind this macro is to help to create the wrapper for the metrics at compile time, -/// without having to specify number of labels etc, but still enforcing the correct use of the -/// metric there are 2 possibilities here: -/// - a metric with some const labels value -> these metrics get created specifying the const label -/// values and when used we need to specify the value for the other labels these metrics are kept -/// around even after being dropped, these type of metrics are used because -/// it simplify referring some values at runtime which wouldn't be available otherwise -/// - a metric with no const values -> these metrics are created with all the necessary labels -/// supplied, when interacting with them we don't have to specify any labels anymore when these -/// metrics go out of scope and get dropped the label combination is also deleted (we -/// won't refer to that specific combination ever again) + +macro_rules! 
build_gauge_vec_struct { + ($metric_ident:ident, $struct_ident:ident, $name:literal, $help:literal, $drop:expr, $labels:tt) => { + build_gauge_vec!($metric_ident, $name, $help, $labels); + + #[derive(Clone)] + pub struct $struct_ident { + metric: &'static $metric_ident, + labels: [String; { $labels.len() }], + drop: bool, + } + impl $struct_ident { + pub fn new( + metric: &'static $metric_ident, + labels: [String; { $labels.len() }], + ) -> $struct_ident { + $struct_ident { metric, labels, drop: $drop } + } + + pub fn inc(&self) { + let labels = self.labels.each_ref().map(|s| s.as_str()); + self.metric.inc(&labels); + } + + pub fn dec(&self) { + let labels = self.labels.each_ref().map(|s| s.as_str()); + self.metric.dec(&labels); + } + + pub fn set>(&self, val: T) + where + >::Error: std::fmt::Debug, + { + let labels = self.labels.each_ref().map(|s| s.as_str()); + self.metric.set(&labels, val); + } + } + impl Drop for $struct_ident { + fn drop(&mut self) { + if self.drop { + let metric = self.metric.prom_metric.clone(); + let labels: Vec = self.labels.to_vec(); + + DELETE_METRIC_CHANNEL + .0 + .try_send(DeleteMetricCommand::GaugePair(metric, labels)) + .expect("DELETE_METRIC_CHANNEL should never be closed!"); + } + } + } + }; + ($metric_ident:ident, $struct_ident:ident, $name:literal, $help:literal, $drop:expr, $labels:tt, $const_labels:tt) => { + build_gauge_vec!($metric_ident, $name, $help, $labels); + + #[derive(Clone)] + pub struct $struct_ident { + metric: &'static $metric_ident, + const_labels: [String; { $const_labels.len() }], + non_const_labels_used: HashSet<[String; { $labels.len() - $const_labels.len() }]>, + drop: bool, + } + impl $struct_ident { + pub fn new( + metric: &'static $metric_ident, + const_labels: [String; { $const_labels.len() }], + ) -> $struct_ident { + $struct_ident { + metric, + const_labels, + non_const_labels_used: HashSet::new(), + drop: $drop, + } + } + + pub fn inc( + &mut self, + non_const_labels: &[&str; { $labels.len() - 
$const_labels.len() }], + ) { + if self.drop { + self.non_const_labels_used.insert(non_const_labels.map(|s| s.to_string())); + } + let labels: [&str; { $labels.len() }] = self + .const_labels + .iter() + .map(|s| s.as_str()) + .chain(*non_const_labels) + .collect_array(); + self.metric.inc(&labels); + } + + pub fn dec( + &mut self, + non_const_labels: &[&str; { $labels.len() - $const_labels.len() }], + ) { + if self.drop { + self.non_const_labels_used.insert(non_const_labels.map(|s| s.to_string())); + } + let labels: [&str; { $labels.len() }] = self + .const_labels + .iter() + .map(|s| s.as_str()) + .chain(*non_const_labels) + .collect_array(); + self.metric.dec(&labels); + } + + pub fn set>( + &mut self, + non_const_labels: &[&str; { $labels.len() - $const_labels.len() }], + val: T, + ) where + >::Error: std::fmt::Debug, + { + if self.drop { + self.non_const_labels_used.insert(non_const_labels.map(|s| s.to_string())); + } + let labels: [&str; { $labels.len() }] = self + .const_labels + .iter() + .map(|s| s.as_str()) + .chain(*non_const_labels) + .collect_array(); + self.metric.set(&labels, val); + } + } + impl Drop for $struct_ident { + fn drop(&mut self) { + if self.drop { + let metric = self.metric.prom_metric.clone(); + let labels: Vec = self.const_labels.to_vec(); + for non_const_labels in self.non_const_labels_used.drain() { + let mut final_labels = labels.clone(); + final_labels.append(&mut non_const_labels.to_vec()); + DELETE_METRIC_CHANNEL + .0 + .try_send(DeleteMetricCommand::GaugePair(metric.clone(), final_labels)) + .expect("DELETE_METRIC_CHANNEL should never be closed!"); + } + } + } + } + }; +} + macro_rules! 
build_counter_vec_struct { - ($metric_ident:ident, $struct_ident:ident, $name:literal, $help:literal, $labels:tt) => { + ($metric_ident:ident, $struct_ident:ident, $name:literal, $help:literal, $drop:expr, $labels:tt) => { build_counter_vec!($metric_ident, $name, $help, $labels); #[derive(Clone)] pub struct $struct_ident { metric: &'static $metric_ident, labels: [String; { $labels.len() }], + drop: bool, } impl $struct_ident { pub fn new( metric: &'static $metric_ident, labels: [String; { $labels.len() }], ) -> $struct_ident { - $struct_ident { metric, labels } + $struct_ident { metric, labels, drop: $drop } } pub fn inc(&self) { @@ -197,43 +332,76 @@ macro_rules! build_counter_vec_struct { } impl Drop for $struct_ident { fn drop(&mut self) { - let metric = self.metric.prom_metric.clone(); - let labels: Vec = self.labels.iter().map(|s| s.to_string()).collect(); - - DELETE_METRIC_CHANNEL - .0 - .try_send(DeleteMetricCommand::CounterPair(metric, labels)) - .expect("DELETE_METRIC_CHANNEL should never be closed!"); + if self.drop { + let metric = self.metric.prom_metric.clone(); + let labels: Vec = self.labels.to_vec(); + + DELETE_METRIC_CHANNEL + .0 + .try_send(DeleteMetricCommand::CounterPair(metric, labels)) + .expect("DELETE_METRIC_CHANNEL should never be closed!"); + } } } }; - ($metric_ident:ident, $structNotDrop:ident, $name:literal, $help:literal, $labels:tt, $const_labels:tt) => { + ($metric_ident:ident, $struct_ident:ident, $name:literal, $help:literal, $drop:expr, $labels:tt, $const_labels:tt) => { build_counter_vec!($metric_ident, $name, $help, $labels); #[derive(Clone)] - pub struct $structNotDrop { + pub struct $struct_ident { metric: &'static $metric_ident, - const_labels: [&'static str; { $const_labels.len() }], + const_labels: [String; { $const_labels.len() }], + non_const_labels_used: HashSet<[String; { $labels.len() - $const_labels.len() }]>, + drop: bool, } - impl $structNotDrop { + impl $struct_ident { pub fn new( metric: &'static $metric_ident, - 
const_labels: [&'static str; { $const_labels.len() }], - ) -> $structNotDrop { - $structNotDrop { metric, const_labels } + const_labels: [String; { $const_labels.len() }], + ) -> $struct_ident { + $struct_ident { + metric, + const_labels, + drop: $drop, + non_const_labels_used: HashSet::new(), + } } - pub fn inc(&self, non_const_labels: &[&str; { $labels.len() - $const_labels.len() }]) { - let labels: [&str; { $labels.len() }] = { - let mut whole: [&str; { $labels.len() }] = [""; { $labels.len() }]; - let (one, two) = whole.split_at_mut(self.const_labels.len()); - one.copy_from_slice(&self.const_labels); - two.copy_from_slice(non_const_labels); - whole - }; + pub fn inc( + &mut self, + non_const_labels: &[&str; { $labels.len() - $const_labels.len() }], + ) { + if self.drop { + self.non_const_labels_used.insert(non_const_labels.map(|s| s.to_string())); + } + let labels: [&str; { $labels.len() }] = self + .const_labels + .iter() + .map(|s| s.as_str()) + .chain(*non_const_labels) + .collect_array(); self.metric.inc(&labels); } } + impl Drop for $struct_ident { + fn drop(&mut self) { + if self.drop { + let metric = self.metric.prom_metric.clone(); + let labels: Vec = self.const_labels.to_vec(); + for non_const_labels in self.non_const_labels_used.drain() { + let mut final_labels = labels.clone(); + final_labels.append(&mut non_const_labels.to_vec()); + DELETE_METRIC_CHANNEL + .0 + .try_send(DeleteMetricCommand::CounterPair( + metric.clone(), + final_labels, + )) + .expect("DELETE_METRIC_CHANNEL should never be closed!"); + } + } + } + } }; } @@ -284,22 +452,101 @@ build_counter_vec_struct!( CeremonyProcessedMsgDrop, "ceremony_msg", "Count all the processed messages for a given ceremony", - ["ceremony_id"] + true, + ["chain", "ceremony_id", "ceremony_type"] ); build_counter_vec_struct!( CEREMONY_BAD_MSG, CeremonyBadMsgNotDrop, "ceremony_bad_msg", "Count all the bad msgs processed during a ceremony", + false, ["chain", "reason"], ["chain"] //const labels ); 
+build_gauge_vec_struct!( + CEREMONY_DURATION, + CeremonyDurationDrop, + "ceremony_duration", + "Measure the duration of a ceremony in ms", + true, + ["chain", "ceremony_id", "ceremony_type"] +); +build_gauge_vec_struct!( + CEREMONY_TIMEOUT_MISSING_MSG, + CeremonyTimeoutMissingMsgDrop, + "ceremony_timeout_missing_msg", + "Measure the number of missing messages when reaching timeout", + true, + ["chain", "ceremony_id", "ceremony_type", "stage"], + ["chain", "ceremony_id", "ceremony_type"] +); +build_gauge_vec_struct!( + STAGE_DURATION, + StageDurationDrop, + "stage_duration", + "Measure the duration of a stage in ms", + true, + ["chain", "ceremony_id", "stage", "phase"], //phase can be either receiving or processing + ["chain", "ceremony_id"] +); +build_counter_vec_struct!( + STAGE_FAILING, + StageFailingNotDrop, + "stage_failing", + "Count the number of stages which are failing with the cause of the failure attached", + false, + ["chain", "stage", "reason"], + ["chain"] +); +build_counter_vec_struct!( + STAGE_COMPLETING, + StageCompletingNotDrop, + "stage_completing", + "Count the number of stages which are completing successfully", + false, + ["chain", "stage"], + ["chain"] +); /// structure containing the metrics used during a ceremony #[derive(Clone)] pub struct CeremonyMetrics { pub processed_messages: CeremonyProcessedMsgDrop, pub bad_message: CeremonyBadMsgNotDrop, + pub ceremony_duration: CeremonyDurationDrop, + pub missing_messages: CeremonyTimeoutMissingMsgDrop, + pub stage_duration: StageDurationDrop, + pub stage_failing: StageFailingNotDrop, + pub stage_completing: StageCompletingNotDrop, +} +impl CeremonyMetrics { + pub fn new(ceremony_id: u64, chain_name: &str, ceremony_type: &str) -> Self { + let ceremony_id = ceremony_id.to_string(); + let chain_name = chain_name.to_string(); + let ceremony_type = ceremony_type.to_string(); + CeremonyMetrics { + processed_messages: CeremonyProcessedMsgDrop::new( + &CEREMONY_PROCESSED_MSG, + [chain_name.clone(), 
ceremony_id.clone(), ceremony_type.clone()],
+			),
+			bad_message: CeremonyBadMsgNotDrop::new(&CEREMONY_BAD_MSG, [chain_name.clone()]),
+			ceremony_duration: CeremonyDurationDrop::new(
+				&CEREMONY_DURATION,
+				[chain_name.clone(), ceremony_id.clone(), ceremony_type.clone()],
+			),
+			missing_messages: CeremonyTimeoutMissingMsgDrop::new(
+				&CEREMONY_TIMEOUT_MISSING_MSG,
+				[chain_name.clone(), ceremony_id.clone(), ceremony_type],
+			),
+			stage_duration: StageDurationDrop::new(
+				&STAGE_DURATION,
+				[chain_name.clone(), ceremony_id],
+			),
+			stage_failing: StageFailingNotDrop::new(&STAGE_FAILING, [chain_name.clone()]),
+			stage_completing: StageCompletingNotDrop::new(&STAGE_COMPLETING, [chain_name]),
+		}
+	}
 }
 
 #[tracing::instrument(name = "prometheus-metric", skip_all)]
@@ -405,6 +652,101 @@ mod test {
 		request_test("metrics", reqwest::StatusCode::OK, "# HELP test test help\n# TYPE test counter\ntest{label=\"B\"} 10\ntest{label=\"C\"} 100\n").await;
 		request_test("metrics", reqwest::StatusCode::OK, "# HELP test test help\n# TYPE test counter\ntest{label=\"B\"} 10\n").await;
 
+		REGISTRY.unregister(Box::new(metric)).unwrap();
+		request_test("metrics", reqwest::StatusCode::OK, "").await;
+
+
+		//test CeremonyMetrics correct deletion
+
+		//we create the ceremony struct and put some metrics in it
+		{
+			let mut metrics = CeremonyMetrics::new(7, "Chain1", "Keygen");
+			metrics.bad_message.inc(&["AA"]);
+			metrics.ceremony_duration.set(999);
+			metrics.missing_messages.set(&["stage1",], 5);
+			metrics.processed_messages.inc();
+			metrics.processed_messages.inc();
+			metrics.stage_completing.inc(&["stage1"]);
+			metrics.stage_completing.inc(&["stage1"]);
+			metrics.stage_completing.inc(&["stage2"]);
+			metrics.stage_duration.set(&["stage1", "receiving"], 780);
+			metrics.stage_duration.set(&["stage1", "processing"], 78);
+			metrics.stage_failing.inc(&["stage3", "NotEnoughMessages"]);
+
+			//This request does nothing, the ceremony is still ongoing so there is no deletion
+			request_test("metrics",
reqwest::StatusCode::OK,
+r#"# HELP ceremony_bad_msg Count all the bad msgs processed during a ceremony
+# TYPE ceremony_bad_msg counter
+ceremony_bad_msg{chain="Chain1",reason="AA"} 1
+# HELP ceremony_duration Measure the duration of a ceremony in ms
+# TYPE ceremony_duration gauge
+ceremony_duration{ceremony_id="7",ceremony_type="Keygen",chain="Chain1"} 999
+# HELP ceremony_msg Count all the processed messages for a given ceremony
+# TYPE ceremony_msg counter
+ceremony_msg{ceremony_id="7",ceremony_type="Keygen",chain="Chain1"} 2
+# HELP ceremony_timeout_missing_msg Measure the number of missing messages when reaching timeout
+# TYPE ceremony_timeout_missing_msg gauge
+ceremony_timeout_missing_msg{ceremony_id="7",ceremony_type="Keygen",chain="Chain1",stage="stage1"} 5
+# HELP stage_completing Count the number of stages which are completing successfully
+# TYPE stage_completing counter
+stage_completing{chain="Chain1",stage="stage1"} 2
+stage_completing{chain="Chain1",stage="stage2"} 1
+# HELP stage_duration Measure the duration of a stage in ms
+# TYPE stage_duration gauge
+stage_duration{ceremony_id="7",chain="Chain1",phase="processing",stage="stage1"} 78
+stage_duration{ceremony_id="7",chain="Chain1",phase="receiving",stage="stage1"} 780
+# HELP stage_failing Count the number of stages which are failing with the cause of the failure attached
+# TYPE stage_failing counter
+stage_failing{chain="Chain1",reason="NotEnoughMessages",stage="stage3"} 1
+"#).await;
+
+			//End of ceremony
+			//struct gets dropped
+		}
+
+		//First request after the ceremony ended we get all the metrics (same as the request above), and after we delete the ones that have no more reason to exist
+		request_test("metrics", reqwest::StatusCode::OK,
+r#"# HELP ceremony_bad_msg Count all the bad msgs processed during a ceremony
+# TYPE ceremony_bad_msg counter
+ceremony_bad_msg{chain="Chain1",reason="AA"} 1
+# HELP ceremony_duration Measure the duration of a ceremony in ms
+# TYPE ceremony_duration
gauge +ceremony_duration{ceremony_id="7",ceremony_type="Keygen",chain="Chain1"} 999 +# HELP ceremony_msg Count all the processed messages for a given ceremony +# TYPE ceremony_msg counter +ceremony_msg{ceremony_id="7",ceremony_type="Keygen",chain="Chain1"} 2 +# HELP ceremony_timeout_missing_msg Measure the number of missing messages when reaching timeout +# TYPE ceremony_timeout_missing_msg gauge +ceremony_timeout_missing_msg{ceremony_id="7",ceremony_type="Keygen",chain="Chain1",stage="stage1"} 5 +# HELP stage_completing Count the number of stages which are completing successfully +# TYPE stage_completing counter +stage_completing{chain="Chain1",stage="stage1"} 2 +stage_completing{chain="Chain1",stage="stage2"} 1 +# HELP stage_duration Measure the duration of a stage in ms +# TYPE stage_duration gauge +stage_duration{ceremony_id="7",chain="Chain1",phase="processing",stage="stage1"} 78 +stage_duration{ceremony_id="7",chain="Chain1",phase="receiving",stage="stage1"} 780 +# HELP stage_failing Count the number of stages which are failing with the cause of the failure attached +# TYPE stage_failing counter +stage_failing{chain="Chain1",reason="NotEnoughMessages",stage="stage3"} 1 +"#).await; + + //Second request we get only the metrics which don't depend on a specific label like ceremony_id + request_test("metrics", reqwest::StatusCode::OK, +r#"# HELP ceremony_bad_msg Count all the bad msgs processed during a ceremony +# TYPE ceremony_bad_msg counter +ceremony_bad_msg{chain="Chain1",reason="AA"} 1 +# HELP stage_completing Count the number of stages which are completing successfully +# TYPE stage_completing counter +stage_completing{chain="Chain1",stage="stage1"} 2 +stage_completing{chain="Chain1",stage="stage2"} 1 +# HELP stage_failing Count the number of stages which are failing with the cause of the failure attached +# TYPE stage_failing counter +stage_failing{chain="Chain1",reason="NotEnoughMessages",stage="stage3"} 1 +"#).await; + + check_deleted_metrics(); + Ok(()) 
} .boxed() @@ -430,4 +772,27 @@ mod test { metric } + + fn check_deleted_metrics() { + assert!(STAGE_DURATION + .prom_metric + .remove_label_values(&["Chain1", "7", "stage1", "receiving"]) + .is_err()); + assert!(STAGE_DURATION + .prom_metric + .remove_label_values(&["Chain1", "7", "stage1", "processing"]) + .is_err()); + assert!(CEREMONY_TIMEOUT_MISSING_MSG + .prom_metric + .remove_label_values(&["Chain1", "7", "Keygen", "stage1"]) + .is_err()); + assert!(CEREMONY_DURATION + .prom_metric + .remove_label_values(&["Chain1", "7", "Keygen"]) + .is_err()); + assert!(CEREMONY_PROCESSED_MSG + .prom_metric + .remove_label_values(&["Chain1", "7", "Keygen"]) + .is_err()); + } } From f872142a83c4fc789ff3dd8ee85e52387b70354c Mon Sep 17 00:00:00 2001 From: Alastair Holmes <42404303+AlastairHolmes@users.noreply.github.com> Date: Mon, 25 Sep 2023 16:52:13 +0200 Subject: [PATCH 09/15] feat: speedy scc (PRO-777 PRO-593) (#3986) * disable gap filling * resubmit_window * next_account_nonce rpc squash * use next_account_nonce * New InBlock interface changes only * submission ids * submit with watch stream * check if submission in block * Cache blocks inside SubmissionWatcher * comments * use until_in_block inside api code * fix * fix test * fix for real * logs --- Cargo.lock | 1 + api/lib/src/lib.rs | 21 +- api/lib/src/lp.rs | 16 +- engine/Cargo.toml | 1 + .../client/base_rpc_api.rs | 39 +- .../client/error_decoder.rs | 2 +- .../client/extrinsic_api/signed.rs | 168 ++++--- .../signed/submission_watcher.rs | 411 ++++++++++++------ .../signed/submission_watcher/tests.rs | 196 +++------ engine/src/state_chain_observer/client/mod.rs | 16 +- .../state_chain_observer/sc_observer/tests.rs | 21 +- 11 files changed, 543 insertions(+), 349 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 68ea096442..49d92053d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1504,6 +1504,7 @@ dependencies = [ "rlp", "rocksdb", "sc-rpc-api", + "sc-transaction-pool-api", "scale-info", "secp256k1 0.27.0", 
"serde", diff --git a/api/lib/src/lib.rs b/api/lib/src/lib.rs index fcbfd933ee..7e90b35632 100644 --- a/api/lib/src/lib.rs +++ b/api/lib/src/lib.rs @@ -43,7 +43,8 @@ pub use chainflip_engine::settings; pub use chainflip_node::chain_spec::use_chainflip_account_id_encoding; use chainflip_engine::state_chain_observer::client::{ - base_rpc_api::BaseRpcClient, DefaultRpcClient, StateChainClient, + base_rpc_api::BaseRpcClient, extrinsic_api::signed::UntilInBlock, DefaultRpcClient, + StateChainClient, }; use utilities::{clean_hex_address, task_scope::Scope}; @@ -178,7 +179,7 @@ pub trait OperatorApi: SignedExtrinsicApi + RotateSessionKeysApi + AuctionPhaseA let (tx_hash, ..) = self .submit_signed_extrinsic(pallet_cf_funding::Call::redeem { amount, address, executor }) .await - .until_finalized() + .until_in_block() .await?; Ok(tx_hash) @@ -188,7 +189,7 @@ pub trait OperatorApi: SignedExtrinsicApi + RotateSessionKeysApi + AuctionPhaseA let (tx_hash, ..) = self .submit_signed_extrinsic(pallet_cf_funding::Call::bind_redeem_address { address }) .await - .until_finalized() + .until_in_block() .await?; Ok(tx_hash) @@ -210,7 +211,7 @@ pub trait OperatorApi: SignedExtrinsicApi + RotateSessionKeysApi + AuctionPhaseA let (tx_hash, ..) = self .submit_signed_extrinsic(call) .await - .until_finalized() + .until_in_block() .await .context("Could not register account role for account")?; Ok(tx_hash) @@ -233,7 +234,7 @@ pub trait OperatorApi: SignedExtrinsicApi + RotateSessionKeysApi + AuctionPhaseA proof: [0; 1].to_vec(), }) .await - .until_finalized() + .until_in_block() .await?; Ok(tx_hash) @@ -243,7 +244,7 @@ pub trait OperatorApi: SignedExtrinsicApi + RotateSessionKeysApi + AuctionPhaseA let (tx_hash, ..) 
= self .submit_signed_extrinsic(pallet_cf_funding::Call::stop_bidding {}) .await - .until_finalized() + .until_in_block() .await .context("Could not stop bidding")?; println!("Account stopped bidding, in tx {tx_hash:#x}."); @@ -254,7 +255,7 @@ pub trait OperatorApi: SignedExtrinsicApi + RotateSessionKeysApi + AuctionPhaseA let (tx_hash, ..) = self .submit_signed_extrinsic(pallet_cf_funding::Call::start_bidding {}) .await - .until_finalized() + .until_in_block() .await .context("Could not start bidding")?; println!("Account started bidding at tx {tx_hash:#x}."); @@ -272,7 +273,7 @@ pub trait OperatorApi: SignedExtrinsicApi + RotateSessionKeysApi + AuctionPhaseA name: name.as_bytes().to_vec(), }) .await - .until_finalized() + .until_in_block() .await .context("Could not set vanity name for your account")?; println!("Vanity name set at tx {tx_hash:#x}."); @@ -295,7 +296,7 @@ pub trait GovernanceApi: SignedExtrinsicApi { execution: ExecutionMode::Automatic, }) .await - .until_finalized() + .until_in_block() .await .context("Failed to submit rotation governance proposal")?; @@ -331,7 +332,7 @@ pub trait BrokerApi: SignedExtrinsicApi { channel_metadata, }) .await - .until_finalized() + .until_in_block() .await?; if let Some(state_chain_runtime::RuntimeEvent::Swapping( diff --git a/api/lib/src/lp.rs b/api/lib/src/lp.rs index a2116f5cd0..5abd815812 100644 --- a/api/lib/src/lp.rs +++ b/api/lib/src/lp.rs @@ -9,7 +9,7 @@ pub use cf_amm::{ use cf_chains::address::EncodedAddress; use cf_primitives::{Asset, AssetAmount, EgressId}; use chainflip_engine::state_chain_observer::client::{ - extrinsic_api::signed::{SignedExtrinsicApi, UntilFinalized}, + extrinsic_api::signed::{SignedExtrinsicApi, UntilInBlock}, StateChainClient, }; use pallet_cf_pools::{AssetAmounts, IncreaseOrDecrease, OrderId, RangeOrderSize}; @@ -105,7 +105,7 @@ pub trait LpApi: SignedExtrinsicApi { pallet_cf_lp::Call::register_liquidity_refund_address { address }, )) .await - .until_finalized() + .until_in_block() 
.await .context("Registration for Liquidity Refund Address failed.")?; Ok(tx_hash) @@ -117,7 +117,7 @@ pub trait LpApi: SignedExtrinsicApi { asset, }) .await - .until_finalized() + .until_in_block() .await?; Ok(events @@ -144,7 +144,7 @@ pub trait LpApi: SignedExtrinsicApi { destination_address, }) .await - .until_finalized() + .until_in_block() .await?; Ok(events @@ -178,7 +178,7 @@ pub trait LpApi: SignedExtrinsicApi { size, }) .await - .until_finalized() + .until_in_block() .await?; Ok(collect_range_order_returns(events)) @@ -202,7 +202,7 @@ pub trait LpApi: SignedExtrinsicApi { size, }) .await - .until_finalized() + .until_in_block() .await?; Ok(collect_range_order_returns(events)) @@ -228,7 +228,7 @@ pub trait LpApi: SignedExtrinsicApi { amount, }) .await - .until_finalized() + .until_in_block() .await?; Ok(collect_limit_order_returns(events)) @@ -252,7 +252,7 @@ pub trait LpApi: SignedExtrinsicApi { sell_amount, }) .await - .until_finalized() + .until_in_block() .await?; Ok(collect_limit_order_returns(events)) diff --git a/engine/Cargo.toml b/engine/Cargo.toml index 117e68239d..1e7e57d468 100644 --- a/engine/Cargo.toml +++ b/engine/Cargo.toml @@ -118,6 +118,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", features = [ frame-support = { git = "https://github.com/chainflip-io/substrate.git", tag = "chainflip-monthly-2023-08+2" } frame-system = { git = "https://github.com/chainflip-io/substrate.git", tag = "chainflip-monthly-2023-08+2" } sc-rpc-api = { git = "https://github.com/chainflip-io/substrate.git", tag = "chainflip-monthly-2023-08+2" } +sc-transaction-pool-api = { git = "https://github.com/chainflip-io/substrate.git", tag = 'chainflip-monthly-2023-08+2' } scale-info = { version = "2.5.0", features = ["derive"] } sp-core = { git = "https://github.com/chainflip-io/substrate.git", tag = "chainflip-monthly-2023-08+2" } sp-rpc = { git = "https://github.com/chainflip-io/substrate.git", tag = "chainflip-monthly-2023-08+2" } diff --git 
a/engine/src/state_chain_observer/client/base_rpc_api.rs b/engine/src/state_chain_observer/client/base_rpc_api.rs index 0c13b7aa86..8589f88df5 100644 --- a/engine/src/state_chain_observer/client/base_rpc_api.rs +++ b/engine/src/state_chain_observer/client/base_rpc_api.rs @@ -3,9 +3,10 @@ use async_trait::async_trait; use cf_amm::{common::Tick, range_orders::Liquidity}; use cf_primitives::Asset; use jsonrpsee::core::{ - client::{ClientT, SubscriptionClientT}, + client::{ClientT, Subscription, SubscriptionClientT}, RpcResult, }; +use sc_transaction_pool_api::TransactionStatus; use sp_core::{ storage::{StorageData, StorageKey}, Bytes, @@ -62,6 +63,10 @@ impl< state_chain_runtime::Hash, state_chain_runtime::Header, state_chain_runtime::SignedBlock, + > + substrate_frame_rpc_system::SystemApiClient< + state_chain_runtime::Block, + state_chain_runtime::AccountId, + state_chain_runtime::Nonce, >, > RawRpcApi for T { @@ -78,11 +83,21 @@ impl< pub trait BaseRpcApi { async fn health(&self) -> RpcResult; + async fn next_account_nonce( + &self, + account_id: state_chain_runtime::AccountId, + ) -> RpcResult; + async fn submit_extrinsic( &self, extrinsic: state_chain_runtime::UncheckedExtrinsic, ) -> RpcResult; + async fn submit_and_watch_extrinsic( + &self, + extrinsic: state_chain_runtime::UncheckedExtrinsic, + ) -> RpcResult>>; + async fn storage( &self, block_hash: state_chain_runtime::Hash, @@ -111,9 +126,7 @@ pub trait BaseRpcApi { async fn subscribe_finalized_block_headers( &self, - ) -> RpcResult< - jsonrpsee::core::client::Subscription>, - >; + ) -> RpcResult>>; async fn runtime_version(&self) -> RpcResult; @@ -154,6 +167,13 @@ impl BaseRpcApi for BaseRpcClient RpcResult { + self.raw_rpc_client.nonce(account_id).await + } + async fn submit_extrinsic( &self, extrinsic: state_chain_runtime::UncheckedExtrinsic, @@ -161,6 +181,13 @@ impl BaseRpcApi for BaseRpcClient RpcResult>> { + self.raw_rpc_client.watch_extrinsic(Bytes::from(extrinsic.encode())).await + } + async fn 
storage( &self, block_hash: state_chain_runtime::Hash, @@ -205,9 +232,7 @@ impl BaseRpcApi for BaseRpcClient RpcResult< - jsonrpsee::core::client::Subscription>, - > { + ) -> RpcResult>> { self.raw_rpc_client.subscribe_finalized_heads().await } diff --git a/engine/src/state_chain_observer/client/error_decoder.rs b/engine/src/state_chain_observer/client/error_decoder.rs index 8a68791bff..0606d4563f 100644 --- a/engine/src/state_chain_observer/client/error_decoder.rs +++ b/engine/src/state_chain_observer/client/error_decoder.rs @@ -94,7 +94,7 @@ impl ErrorDecoder { } } -#[derive(Error, Debug)] +#[derive(Error, Debug, Clone)] pub enum DispatchError { #[error("{0:?}")] DispatchError(sp_runtime::DispatchError), diff --git a/engine/src/state_chain_observer/client/extrinsic_api/signed.rs b/engine/src/state_chain_observer/client/extrinsic_api/signed.rs index e487e68833..2afe7ef2ff 100644 --- a/engine/src/state_chain_observer/client/extrinsic_api/signed.rs +++ b/engine/src/state_chain_observer/client/extrinsic_api/signed.rs @@ -4,11 +4,12 @@ use anyhow::{bail, Result}; use async_trait::async_trait; use cf_primitives::AccountRole; use futures::StreamExt; +use futures_util::FutureExt; use sp_core::H256; use state_chain_runtime::AccountId; use tokio::sync::{mpsc, oneshot}; -use tracing::{debug, trace, warn}; -use utilities::task_scope::{Scope, ScopedJoinHandle, OR_CANCEL}; +use tracing::{trace, warn}; +use utilities::task_scope::{task_scope, Scope, ScopedJoinHandle, OR_CANCEL}; use crate::constants::SIGNED_EXTRINSIC_LIFETIME; @@ -24,18 +25,51 @@ mod submission_watcher; #[cfg_attr(test, mockall::automock)] #[async_trait] pub trait UntilFinalized { - async fn until_finalized(self) -> submission_watcher::ExtrinsicResult; + async fn until_finalized(self) -> submission_watcher::FinalizationResult; } #[async_trait] impl UntilFinalized for (state_chain_runtime::Hash, W) { - async fn until_finalized(self) -> submission_watcher::ExtrinsicResult { + async fn until_finalized(self) -> 
submission_watcher::FinalizationResult { self.1.until_finalized().await } } -pub struct UntilFinalizedFuture(oneshot::Receiver); +#[async_trait] +impl UntilFinalized for (T, W) { + async fn until_finalized(self) -> submission_watcher::FinalizationResult { + self.1.until_finalized().await + } +} + +pub struct UntilFinalizedFuture(oneshot::Receiver); #[async_trait] impl UntilFinalized for UntilFinalizedFuture { - async fn until_finalized(self) -> submission_watcher::ExtrinsicResult { + async fn until_finalized(self) -> submission_watcher::FinalizationResult { + self.0.await.expect(OR_CANCEL) + } +} + +// Wrapper type to avoid await.await on submits/finalize calls being possible +#[cfg_attr(test, mockall::automock)] +#[async_trait] +pub trait UntilInBlock { + async fn until_in_block(self) -> submission_watcher::InBlockResult; +} +#[async_trait] +impl UntilInBlock for (state_chain_runtime::Hash, W) { + async fn until_in_block(self) -> submission_watcher::InBlockResult { + self.1.until_in_block().await + } +} +#[async_trait] +impl UntilInBlock for (W, T) { + async fn until_in_block(self) -> submission_watcher::InBlockResult { + self.0.until_in_block().await + } +} +pub struct UntilInBlockFuture(oneshot::Receiver); +#[async_trait] +impl UntilInBlock for UntilInBlockFuture { + async fn until_in_block(self) -> submission_watcher::InBlockResult { self.0.await.expect(OR_CANCEL) } } @@ -44,10 +78,14 @@ impl UntilFinalized for UntilFinalizedFuture { #[async_trait] pub trait SignedExtrinsicApi { type UntilFinalizedFuture: UntilFinalized + Send; + type UntilInBlockFuture: UntilInBlock + Send; fn account_id(&self) -> AccountId; - async fn submit_signed_extrinsic(&self, call: Call) -> (H256, Self::UntilFinalizedFuture) + async fn submit_signed_extrinsic( + &self, + call: Call, + ) -> (H256, (Self::UntilInBlockFuture, Self::UntilFinalizedFuture)) where Call: Into + Clone @@ -56,7 +94,10 @@ pub trait SignedExtrinsicApi { + Sync + 'static; - async fn finalize_signed_extrinsic(&self, 
call: Call) -> Self::UntilFinalizedFuture + async fn finalize_signed_extrinsic( + &self, + call: Call, + ) -> (Self::UntilInBlockFuture, Self::UntilFinalizedFuture) where Call: Into + Clone @@ -70,7 +111,8 @@ pub struct SignedExtrinsicClient { account_id: AccountId, request_sender: mpsc::Sender<( state_chain_runtime::RuntimeCall, - oneshot::Sender, + oneshot::Sender, + oneshot::Sender, submission_watcher::RequestStrategy, )>, _task_handle: ScopedJoinHandle<()>, @@ -137,62 +179,36 @@ impl SignedExtrinsicClient { _task_handle: scope.spawn_with_handle({ let mut state_chain_stream = state_chain_stream.clone(); - async move { - let (mut submission_watcher, mut requests) = submission_watcher::SubmissionWatcher::new( - signer, - account_nonce, - state_chain_stream.cache().block_hash, - state_chain_stream.cache().block_number, - base_rpc_client.runtime_version().await?, - genesis_hash, - SIGNED_EXTRINSIC_LIFETIME, - base_rpc_client.clone() - ); + task_scope(move |scope| async move { + let (mut submission_watcher, mut requests) = + submission_watcher::SubmissionWatcher::new( + scope, + signer, + account_nonce, + state_chain_stream.cache().block_hash, + state_chain_stream.cache().block_number, + base_rpc_client.runtime_version().await?, + genesis_hash, + SIGNED_EXTRINSIC_LIFETIME, + base_rpc_client.clone(), + ); utilities::loop_select! 
{ - if let Some((call, result_sender, strategy)) = request_receiver.recv() => { - submission_watcher.new_request(&mut requests, call, result_sender, strategy).await?; + if let Some((call, until_in_block_sender, until_finalized_sender, strategy)) = request_receiver.recv() => { + submission_watcher.new_request(&mut requests, call, until_in_block_sender, until_finalized_sender, strategy).await?; } else break Ok(()), + let submission_details = submission_watcher.watch_for_submission_in_block() => { + submission_watcher.on_submission_in_block(&mut requests, submission_details).await?; + }, if let Some((block_hash, block_header)) = state_chain_stream.next() => { trace!("Received state chain block: {number} ({block_hash:x?})", number = block_header.number); submission_watcher.on_block_finalized( &mut requests, block_hash, ).await?; - - // TODO: Handle possibility of stuck nonce caused submissions being dropped from the mempool or broken submissions either submitted here or externally when only using submit_signed_extrinsics - // TODO: Improve handling only submit_signed_extrinsic requests (using pending_extrinsics rpc call) - // TODO: Use system_accountNextIndex - { - let mut shuffled_requests = { - use rand::prelude::SliceRandom; - let mut requests = requests.iter_mut().filter(|(_, request)| request.allow_resubmits).collect::>(); - requests.shuffle(&mut rand::thread_rng()); - requests.into_iter() - }; - - if let Some((_, request)) = shuffled_requests.next() { - // TODO: Detect stuck state via getting all pending extrinsics, and checking for missing extrinsics above finalized nonce - match submission_watcher.submit_extrinsic_at_nonce(request, submission_watcher.finalized_nonce()).await? { - Ok(_) => { - debug!("Detected a gap in the account's submitted nonce values, pending extrinsics after this gap will not be including in blocks, unless the gap is filled. 
Attempting to resolve."); - submission_watcher.anticipated_nonce = submission_watcher.finalized_nonce() + 1; - for (_, request) in shuffled_requests { - match submission_watcher.submit_extrinsic_at_nonce(request, submission_watcher.anticipated_nonce).await? { - Ok(_) => { - submission_watcher.anticipated_nonce += 1; - }, - Err(submission_watcher::SubmissionLogicError::NonceTooLow) => break - } - } - }, - Err(submission_watcher::SubmissionLogicError::NonceTooLow) => {} // Expected case, so we ignore - } - } - } } else break Ok(()), } - } + }.boxed()) }), }) } @@ -201,12 +217,16 @@ impl SignedExtrinsicClient { #[async_trait] impl SignedExtrinsicApi for SignedExtrinsicClient { type UntilFinalizedFuture = UntilFinalizedFuture; + type UntilInBlockFuture = UntilInBlockFuture; fn account_id(&self) -> AccountId { self.account_id.clone() } - async fn submit_signed_extrinsic(&self, call: Call) -> (H256, Self::UntilFinalizedFuture) + async fn submit_signed_extrinsic( + &self, + call: Call, + ) -> (H256, (Self::UntilInBlockFuture, Self::UntilFinalizedFuture)) where Call: Into + Clone @@ -215,23 +235,31 @@ impl SignedExtrinsicApi for SignedExtrinsicClient { + Sync + 'static, { - let (result_sender, result_receiver) = oneshot::channel(); + let (until_in_block_sender, until_in_block_receiver) = oneshot::channel(); + let (until_finalized_sender, until_finalized_receiver) = oneshot::channel(); ( send_request(&self.request_sender, |hash_sender| { ( call.into(), - result_sender, - submission_watcher::RequestStrategy::Submit(hash_sender), + until_in_block_sender, + until_finalized_sender, + submission_watcher::RequestStrategy::StrictlyOneSubmission(hash_sender), ) }) .await .await .expect(OR_CANCEL), - UntilFinalizedFuture(result_receiver), + ( + UntilInBlockFuture(until_in_block_receiver), + UntilFinalizedFuture(until_finalized_receiver), + ), ) } - async fn finalize_signed_extrinsic(&self, call: Call) -> Self::UntilFinalizedFuture + async fn finalize_signed_extrinsic( + &self, + 
call: Call, + ) -> (Self::UntilInBlockFuture, Self::UntilFinalizedFuture) where Call: Into + Clone @@ -240,11 +268,21 @@ impl SignedExtrinsicApi for SignedExtrinsicClient { + Sync + 'static, { - UntilFinalizedFuture( - send_request(&self.request_sender, |result_sender| { - (call.into(), result_sender, submission_watcher::RequestStrategy::Finalize) - }) - .await, + let (until_finalized_sender, until_finalized_receiver) = oneshot::channel(); + + ( + UntilInBlockFuture( + send_request(&self.request_sender, |until_in_block_sender| { + ( + call.into(), + until_in_block_sender, + until_finalized_sender, + submission_watcher::RequestStrategy::AllowMultipleSubmissions, + ) + }) + .await, + ), + UntilFinalizedFuture(until_finalized_receiver), ) } } diff --git a/engine/src/state_chain_observer/client/extrinsic_api/signed/submission_watcher.rs b/engine/src/state_chain_observer/client/extrinsic_api/signed/submission_watcher.rs index b48c75b7d0..a1f387920d 100644 --- a/engine/src/state_chain_observer/client/extrinsic_api/signed/submission_watcher.rs +++ b/engine/src/state_chain_observer/client/extrinsic_api/signed/submission_watcher.rs @@ -1,13 +1,23 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::{ + collections::{BTreeMap, VecDeque}, + sync::Arc, +}; use anyhow::{anyhow, Result}; use frame_support::{dispatch::DispatchInfo, pallet_prelude::InvalidTransaction}; use itertools::Itertools; +use sc_transaction_pool_api::TransactionStatus; use sp_core::H256; use sp_runtime::{traits::Hash, MultiAddress}; +use state_chain_runtime::{BlockNumber, Nonce, UncheckedExtrinsic}; use thiserror::Error; use tokio::sync::oneshot; -use tracing::{debug, warn}; +use tracing::{debug, info, warn}; +use utilities::{ + future_map::FutureMap, + task_scope::{self, Scope}, + UnendingStream, +}; use crate::state_chain_observer::client::{ base_rpc_api, @@ -23,45 +33,54 @@ mod tests; const REQUEST_LIFETIME: u32 = 128; +#[derive(Error, Debug)] +pub enum ExtrinsicError { + #[error(transparent)] + 
Other(OtherError), + #[error(transparent)] + Dispatch(DispatchError), +} + +pub type ExtrinsicResult = Result< + (H256, Vec, state_chain_runtime::Header, DispatchInfo), + ExtrinsicError, +>; + #[derive(Error, Debug)] pub enum FinalizationError { #[error("The requested transaction was not and will not be included in a finalized block")] NotFinalized, - #[error( - "The requested transaction was not (but maybe in the future) included in a finalized block" - )] - Unknown, } +pub type FinalizationResult = ExtrinsicResult; + #[derive(Error, Debug)] -pub enum ExtrinsicError { - #[error(transparent)] - Finalization(FinalizationError), - #[error(transparent)] - Dispatch(DispatchError), +pub enum InBlockError { + #[error("The requested transaction was not and will not be included in a block")] + NotInBlock, } -pub type ExtrinsicResult = Result< - (H256, Vec, state_chain_runtime::Header, DispatchInfo), - ExtrinsicError, ->; +pub type InBlockResult = ExtrinsicResult; pub type RequestID = u64; +pub type SubmissionID = u64; #[derive(Debug)] pub struct Request { id: RequestID, - pending_submissions: usize, - pub allow_resubmits: bool, - lifetime: std::ops::RangeToInclusive, + next_submission_id: SubmissionID, + pending_submissions: BTreeMap, + strictly_one_submission: bool, + resubmit_window: std::ops::RangeToInclusive, call: state_chain_runtime::RuntimeCall, - result_sender: oneshot::Sender, + until_in_block_sender: Option>, + until_finalized_sender: oneshot::Sender, } #[derive(Debug)] pub enum RequestStrategy { - Submit(oneshot::Sender), - Finalize, + StrictlyOneSubmission(oneshot::Sender), + AllowMultipleSubmissions, } pub struct Submission { @@ -70,16 +89,30 @@ pub struct Submission { request_id: RequestID, } -pub struct SubmissionWatcher { - submissions_by_nonce: BTreeMap>, - pub anticipated_nonce: state_chain_runtime::Nonce, +pub struct SubmissionWatcher< + 'a, + 'env, + BaseRpcClient: base_rpc_api::BaseRpcApi + Send + Sync + 'static, +> { + scope: &'a Scope<'env, 
anyhow::Error>, + submissions_by_nonce: BTreeMap>, + #[allow(clippy::type_complexity)] + submission_status_futures: + FutureMap<(RequestID, SubmissionID), task_scope::ScopedJoinHandle>>, signer: signer::PairSigner, - finalized_nonce: state_chain_runtime::Nonce, + finalized_nonce: Nonce, finalized_block_hash: state_chain_runtime::Hash, - finalized_block_number: state_chain_runtime::BlockNumber, + finalized_block_number: BlockNumber, runtime_version: sp_version::RuntimeVersion, genesis_hash: state_chain_runtime::Hash, - extrinsic_lifetime: state_chain_runtime::BlockNumber, + extrinsic_lifetime: BlockNumber, + #[allow(clippy::type_complexity)] + block_cache: VecDeque<( + state_chain_runtime::Hash, + state_chain_runtime::Header, + Vec, + Vec>>, + )>, base_rpc_client: Arc, error_decoder: ErrorDecoder, } @@ -88,23 +121,25 @@ pub enum SubmissionLogicError { NonceTooLow, } -impl - SubmissionWatcher +impl<'a, 'env, BaseRpcClient: base_rpc_api::BaseRpcApi + Send + Sync + 'static> + SubmissionWatcher<'a, 'env, BaseRpcClient> { pub fn new( + scope: &'a Scope<'env, anyhow::Error>, signer: signer::PairSigner, - finalized_nonce: state_chain_runtime::Nonce, + finalized_nonce: Nonce, finalized_block_hash: state_chain_runtime::Hash, - finalized_block_number: state_chain_runtime::BlockNumber, + finalized_block_number: BlockNumber, runtime_version: sp_version::RuntimeVersion, genesis_hash: state_chain_runtime::Hash, - extrinsic_lifetime: state_chain_runtime::BlockNumber, + extrinsic_lifetime: BlockNumber, base_rpc_client: Arc, ) -> (Self, BTreeMap) { ( Self { + scope, submissions_by_nonce: Default::default(), - anticipated_nonce: finalized_nonce, + submission_status_futures: Default::default(), signer, finalized_nonce, finalized_block_hash, @@ -112,21 +147,22 @@ impl runtime_version, genesis_hash, extrinsic_lifetime, + block_cache: Default::default(), base_rpc_client, error_decoder: Default::default(), }, + // Return an empty requests map. 
This is done so that initial state of the requests + // matches the submission watchers state. The requests must be stored outside of + // the watcher so it can be manipulated by it's parent while holding a mut reference + // to the watcher. Default::default(), ) } - pub fn finalized_nonce(&self) -> state_chain_runtime::Nonce { - self.finalized_nonce - } - - pub async fn submit_extrinsic_at_nonce( + async fn submit_extrinsic_at_nonce( &mut self, request: &mut Request, - nonce: state_chain_runtime::Nonce, + nonce: Nonce, ) -> Result, anyhow::Error> { loop { let (signed_extrinsic, lifetime) = self.signer.new_signed_extrinsic( @@ -140,14 +176,37 @@ impl ); assert!(lifetime.contains(&(self.finalized_block_number + 1))); - match self.base_rpc_client.submit_extrinsic(signed_extrinsic).await { - Ok(tx_hash) => { - request.pending_submissions += 1; - self.submissions_by_nonce.entry(nonce).or_default().push(Submission { - lifetime, - tx_hash, - request_id: request.id, - }); + let tx_hash: H256 = { + use sp_core::{blake2_256, Encode}; + let encoded = signed_extrinsic.encode(); + blake2_256(&encoded).into() + }; + + match self.base_rpc_client.submit_and_watch_extrinsic(signed_extrinsic).await { + Ok(mut transaction_status_stream) => { + request.pending_submissions.insert(request.next_submission_id, nonce); + self.submissions_by_nonce.entry(nonce).or_default().insert( + request.next_submission_id, + Submission { lifetime, tx_hash, request_id: request.id }, + ); + self.submission_status_futures.insert( + (request.id, request.next_submission_id), + self.scope.spawn_with_handle(async move { + while let Some(status) = transaction_status_stream.next().await { + // NOTE: Currently the _extrinsic_index returned by substrate + // through the subscription is wrong and is always 0 + if let TransactionStatus::InBlock((block_hash, _extrinsic_index)) = + status? 
+ { + return Ok(Some((block_hash, tx_hash))) + } + } + + Ok(None) + }), + ); + info!(target: "state_chain_client", request_id = request.id, submission_id = request.next_submission_id, "Submission succeeded"); + request.next_submission_id += 1; break Ok(Ok(tx_hash)) }, Err(rpc_err) => { @@ -168,7 +227,7 @@ impl jsonrpsee::core::Error::Call( jsonrpsee::types::error::CallError::Custom(ref obj), ) if obj.code() == 1014 => { - debug!("Failed as transaction with same nonce found in transaction pool: {obj:?}"); + debug!(target: "state_chain_client", request_id = request.id, "Submission failed as transaction with same nonce found in transaction pool: {obj:?}"); break Ok(Err(SubmissionLogicError::NonceTooLow)) }, // This occurs when the nonce has already been *consumed* i.e a @@ -176,13 +235,13 @@ impl jsonrpsee::core::Error::Call( jsonrpsee::types::error::CallError::Custom(ref obj), ) if obj == &invalid_err_obj(InvalidTransaction::Stale) => { - debug!("Failed as the transaction is stale: {obj:?}"); + debug!(target: "state_chain_client", request_id = request.id, "Submission failed as the transaction is stale: {obj:?}"); break Ok(Err(SubmissionLogicError::NonceTooLow)) }, jsonrpsee::core::Error::Call( jsonrpsee::types::error::CallError::Custom(ref obj), ) if obj == &invalid_err_obj(InvalidTransaction::BadProof) => { - warn!("Failed due to a bad proof: {obj:?}. Refetching the runtime version."); + warn!(target: "state_chain_client", request_id = request.id, "Submission failed due to a bad proof: {obj:?}. Refetching the runtime version."); // TODO: Check if hash and block number should also be updated // here @@ -204,16 +263,13 @@ impl } } - pub async fn submit_extrinsic(&mut self, request: &mut Request) -> Result { + async fn submit_extrinsic(&mut self, request: &mut Request) -> Result { Ok(loop { - match self.submit_extrinsic_at_nonce(request, self.anticipated_nonce).await? 
{ - Ok(tx_hash) => { - self.anticipated_nonce += 1; - break tx_hash - }, - Err(SubmissionLogicError::NonceTooLow) => { - self.anticipated_nonce += 1; - }, + let nonce = + self.base_rpc_client.next_account_nonce(self.signer.account_id.clone()).await?; + match self.submit_extrinsic_at_nonce(request, nonce).await? { + Ok(tx_hash) => break tx_hash, + Err(SubmissionLogicError::NonceTooLow) => {}, } }) } @@ -222,7 +278,8 @@ impl &mut self, requests: &mut BTreeMap, call: state_chain_runtime::RuntimeCall, - result_sender: oneshot::Sender, + until_in_block_sender: oneshot::Sender, + until_finalized_sender: oneshot::Sender, strategy: RequestStrategy, ) -> Result<(), anyhow::Error> { let id = requests.keys().next_back().map(|id| id + 1).unwrap_or(0); @@ -231,24 +288,133 @@ impl id, Request { id, - pending_submissions: 0, - allow_resubmits: match &strategy { - RequestStrategy::Submit(_) => false, - RequestStrategy::Finalize => true, - }, - lifetime: ..=(self.finalized_block_number + 1 + REQUEST_LIFETIME), + next_submission_id: 0, + pending_submissions: Default::default(), + strictly_one_submission: matches!( + strategy, + RequestStrategy::StrictlyOneSubmission(_) + ), + resubmit_window: ..=(self.finalized_block_number + 1 + REQUEST_LIFETIME), call, - result_sender, + until_in_block_sender: Some(until_in_block_sender), + until_finalized_sender, }, ) .unwrap(); let tx_hash: H256 = self.submit_extrinsic(request).await?; - if let RequestStrategy::Submit(hash_sender) = strategy { + info!(target: "state_chain_client", request_id = request.id, "New request: {:?}", request.call); + if let RequestStrategy::StrictlyOneSubmission(hash_sender) = strategy { let _result = hash_sender.send(tx_hash); }; Ok(()) } + fn decide_extrinsic_success( + &self, + tx_hash: H256, + extrinsic_events: Vec, + header: state_chain_runtime::Header, + ) -> ExtrinsicResult { + // We expect to find a Success or Failed event, grab the dispatch info and send + // it with the events + extrinsic_events + .iter() + 
.find_map(|event| match event { + state_chain_runtime::RuntimeEvent::System( + frame_system::Event::ExtrinsicSuccess { dispatch_info }, + ) => Some(Ok(*dispatch_info)), + state_chain_runtime::RuntimeEvent::System( + frame_system::Event::ExtrinsicFailed { dispatch_error, dispatch_info: _ }, + ) => Some(Err(ExtrinsicError::Dispatch( + self.error_decoder.decode_dispatch_error(*dispatch_error), + ))), + _ => None, + }) + .expect(SUBSTRATE_BEHAVIOUR) + .map(|dispatch_info| (tx_hash, extrinsic_events, header, dispatch_info)) + } + + pub async fn watch_for_submission_in_block(&mut self) -> (RequestID, SubmissionID, H256, H256) { + loop { + if let ((request_id, submission_id), Some((block_hash, tx_hash))) = + self.submission_status_futures.next_or_pending().await + { + return (request_id, submission_id, block_hash, tx_hash) + } + } + } + + pub async fn on_submission_in_block( + &mut self, + requests: &mut BTreeMap, + (request_id, submission_id, block_hash, tx_hash): (RequestID, SubmissionID, H256, H256), + ) -> Result<(), anyhow::Error> { + if let Some((header, extrinsics, events)) = { + if let Some((_, header, extrinsics, events)) = self + .block_cache + .iter() + .find(|(cached_block_hash, ..)| block_hash == *cached_block_hash) + { + Some((header, extrinsics, events)) + } else if let (Some(block), events) = ( + self.base_rpc_client.block(block_hash).await?, + self.base_rpc_client + .storage_value::>(block_hash) + .await?, + ) { + if self.block_cache.len() >= 4 { + self.block_cache.pop_front(); + } + self.block_cache.push_back(( + block_hash, + block.block.header, + block.block.extrinsics, + events, + )); + let (_, header, extrinsics, events) = self.block_cache.back().unwrap(); + Some((header, extrinsics, events)) + } else { + warn!(target: "state_chain_client", request_id = request_id, submission_id = submission_id, "Block not found with hash {block_hash:?}"); + None + } + } { + let (extrinsic_index, _extrinsic) = extrinsics + .iter() + .enumerate() + 
.find(|(_extrinsic_index, extrinsic)| { + tx_hash == + ::Hashing::hash_of( + extrinsic, + ) + }) + .expect(SUBSTRATE_BEHAVIOUR); + + let extrinsic_events = events + .iter() + .filter_map(|event_record| match event_record.as_ref() { + frame_system::EventRecord { + phase: frame_system::Phase::ApplyExtrinsic(index), + event, + .. + } if *index as usize == extrinsic_index => Some(event.clone()), + _ => None, + }) + .collect::>(); + + if let Some(request) = requests.get_mut(&request_id) { + warn!(target: "state_chain_client", request_id = request_id, submission_id = submission_id, "Request found in block with hash {block_hash:?}, tx_hash {tx_hash:?}, and extrinsic index {extrinsic_index}."); + let until_in_block_sender = request.until_in_block_sender.take().unwrap(); + let _result = until_in_block_sender.send(self.decide_extrinsic_success( + tx_hash, + extrinsic_events, + header.clone(), + )); + } + } + + Ok(()) + } + pub async fn on_block_finalized( &mut self, requests: &mut BTreeMap, @@ -263,6 +429,7 @@ impl assert_eq!(block.header.number, self.finalized_block_number + 1, "{SUBSTRATE_BEHAVIOUR}"); + // Get our account nonce and compare it to the finalized nonce let nonce = self .base_rpc_client .storage_map_entry::>( @@ -275,12 +442,10 @@ impl if nonce < self.finalized_nonce { Err(anyhow!("Extrinsic signer's account was reaped")) } else { - // TODO: Get hash and number from the RPC and use std::cmp::max() here + // Update the finalized data self.finalized_block_number = block.header.number; self.finalized_block_hash = block_hash; - self.finalized_nonce = nonce; - self.anticipated_nonce = state_chain_runtime::Nonce::max(self.anticipated_nonce, nonce); for (extrinsic_index, extrinsic_events) in events .into_iter() @@ -299,9 +464,11 @@ impl (extrinsic_index, extrinsic_events.map(|(_extrinsics_index, event)| event)) }) { let extrinsic = &block.extrinsics[extrinsic_index as usize]; - // TODO: Assumption needs checking + + // Find any submissions that are for the nonce of 
the extrinsic if let Some(submissions) = extrinsic.signature.as_ref().and_then( |(address, _, (.., frame_system::CheckNonce(nonce), _, _))| { + // We only care about the extrinsic if it is from our account (*address == MultiAddress::Id(self.signer.account_id.clone())) .then_some(()) .and_then(|_| self.submissions_by_nonce.remove(nonce)) @@ -312,11 +479,12 @@ impl extrinsic, ); - let mut not_found_matching_submission = Some(extrinsic_events); + let mut optional_extrinsic_events = Some(extrinsic_events); - for submission in submissions { + for (submission_id, submission) in submissions { if let Some(request) = requests.get_mut(&submission.request_id) { - request.pending_submissions -= 1; + request.pending_submissions.remove(&submission_id).unwrap(); + self.submission_status_futures.remove((request.id, submission_id)); } // Note: It is technically possible for a hash collision to @@ -325,57 +493,54 @@ impl // notice the extrinsic was not actually the requested one, // but otherwise would continue to work. 
if let Some((extrinsic_events, matching_request)) = - (not_found_matching_submission.is_some() && - submission.tx_hash == tx_hash) + (optional_extrinsic_events.is_some() && submission.tx_hash == tx_hash) .then_some(()) .and_then(|_| requests.remove(&submission.request_id)) .map(|request| { - (not_found_matching_submission.take().unwrap(), request) + // If its the right hash, take the events and the request + (optional_extrinsic_events.take().unwrap(), request) }) { let extrinsic_events = extrinsic_events.collect::>(); - let _result = matching_request.result_sender.send({ - extrinsic_events - .iter() - .find_map(|event| match event { - state_chain_runtime::RuntimeEvent::System( - frame_system::Event::ExtrinsicSuccess { dispatch_info }, - ) => Some(Ok(*dispatch_info)), - state_chain_runtime::RuntimeEvent::System( - frame_system::Event::ExtrinsicFailed { - dispatch_error, - dispatch_info: _, - }, - ) => Some(Err(ExtrinsicError::Dispatch( - self.error_decoder - .decode_dispatch_error(*dispatch_error), - ))), - _ => None, - }) - .expect(SUBSTRATE_BEHAVIOUR) - .map(|dispatch_info| { - ( - tx_hash, - extrinsic_events, - block.header.clone(), - dispatch_info, - ) - }) - }); + let result = self.decide_extrinsic_success( + tx_hash, + extrinsic_events, + block.header.clone(), + ); + info!(target: "state_chain_client", request_id = matching_request.id, submission_id = submission_id, "Request found in finalized block with hash {block_hash:?}, tx_hash {tx_hash:?}, and extrinsic index {extrinsic_index}."); + if let Some(until_in_block_sender) = + matching_request.until_in_block_sender + { + let _result = until_in_block_sender.send( + result.as_ref().map(Clone::clone).map_err( + |error| match error { + ExtrinsicError::Dispatch(dispatch_error) => + ExtrinsicError::Dispatch(dispatch_error.clone()), + ExtrinsicError::Other( + FinalizationError::NotFinalized, + ) => ExtrinsicError::Other(InBlockError::NotInBlock), + }, + ), + ); + } + let _result = 
matching_request.until_finalized_sender.send(result); } } } } + // Remove any submissions that have expired self.submissions_by_nonce.retain(|nonce, submissions| { assert!(self.finalized_nonce <= *nonce, "{SUBSTRATE_BEHAVIOUR}"); - submissions.retain(|submission| { + submissions.retain(|submission_id, submission| { let alive = submission.lifetime.contains(&(block.header.number + 1)); if !alive { + info!(target: "state_chain_client", request_id = submission.request_id, submission_id = submission_id, "Submission has timed out."); if let Some(request) = requests.get_mut(&submission.request_id) { - request.pending_submissions -= 1; + request.pending_submissions.remove(submission_id).unwrap(); } + self.submission_status_futures.remove((submission.request_id, *submission_id)); } alive @@ -384,24 +549,28 @@ impl !submissions.is_empty() }); + // Remove any requests that have all their submission have expired and whose + // resubmission window has past. for (_request_id, request) in requests.extract_if(|_request_id, request| { - !request.lifetime.contains(&(block.header.number + 1)) || - !request.allow_resubmits && request.pending_submissions == 0 + request.pending_submissions.is_empty() && + (!request.resubmit_window.contains(&(block.header.number + 1)) || + request.strictly_one_submission) }) { - let _result = request.result_sender.send(Err(ExtrinsicError::Finalization( - if request.pending_submissions == 0 { - FinalizationError::NotFinalized - } else { - FinalizationError::Unknown - }, - ))); + info!(target: "state_chain_client", request_id = request.id, "Request has timed out."); + if let Some(until_in_block_sender) = request.until_in_block_sender { + let _result = until_in_block_sender + .send(Err(ExtrinsicError::Other(InBlockError::NotInBlock))); + } + let _result = request + .until_finalized_sender + .send(Err(ExtrinsicError::Other(FinalizationError::NotFinalized))); } - - // Has to be a separate loop from the above due to not being able to await inside + // Resubmit 
any expired requests that have no unexpired submission. + // This has to be a separate loop from the above due to not being able to await inside // extract_if for (_request_id, request) in requests.iter_mut() { - if request.pending_submissions == 0 { - debug!("Resubmitting extrinsic as all existing submissions have expired."); + if request.pending_submissions.is_empty() { + info!("Resubmitting extrinsic as all existing submissions have expired."); self.submit_extrinsic(request).await?; } } diff --git a/engine/src/state_chain_observer/client/extrinsic_api/signed/submission_watcher/tests.rs b/engine/src/state_chain_observer/client/extrinsic_api/signed/submission_watcher/tests.rs index 8d02ef17e1..e1c5dd709e 100644 --- a/engine/src/state_chain_observer/client/extrinsic_api/signed/submission_watcher/tests.rs +++ b/engine/src/state_chain_observer/client/extrinsic_api/signed/submission_watcher/tests.rs @@ -1,5 +1,7 @@ use cf_chains::{dot, ChainState}; -use jsonrpsee::types::ErrorObject; +use futures_util::FutureExt; +use jsonrpsee::core::client::{Subscription, SubscriptionKind}; +use utilities::task_scope::task_scope; use crate::{ constants::SIGNED_EXTRINSIC_LIFETIME, @@ -10,138 +12,71 @@ use super::*; const INITIAL_NONCE: state_chain_runtime::Nonce = 10; -#[tokio::test] -async fn should_increment_nonce_on_success() { - let mut mock_rpc_api = MockBaseRpcApi::new(); - - // Return a success, cause the nonce to increment - mock_rpc_api - .expect_submit_extrinsic() - .times(1) - .returning(move |_| Ok(H256::default())); - - let watcher = new_watcher_and_submit_test_extrinsic(mock_rpc_api).await; - - assert_eq!(watcher.anticipated_nonce, INITIAL_NONCE + 1); -} - -/// If the tx fails due to the same nonce existing in the pool already, it should increment the -/// nonce and try again. 
-#[tokio::test] -async fn should_increment_and_retry_if_nonce_in_pool() { - let mut mock_rpc_api = MockBaseRpcApi::new(); - - mock_rpc_api.expect_submit_extrinsic().times(1).returning(move |_| { - Err(jsonrpsee::core::Error::Call(jsonrpsee::types::error::CallError::Custom( - ErrorObject::from(jsonrpsee::types::error::ErrorCode::ServerError(1014)), - ))) - }); - - // On the retry, return a success. - mock_rpc_api - .expect_submit_extrinsic() - .times(1) - .returning(move |_| Ok(H256::default())); - - let watcher = new_watcher_and_submit_test_extrinsic(mock_rpc_api).await; - - // Nonce should be +2, once for the initial submission, and once for the retry - assert_eq!(watcher.anticipated_nonce, INITIAL_NONCE + 2); -} - -#[tokio::test] -async fn should_increment_and_retry_if_nonce_consumed_in_prev_blocks() { - let mut mock_rpc_api = MockBaseRpcApi::new(); - - mock_rpc_api.expect_submit_extrinsic().times(1).returning(move |_| { - Err(jsonrpsee::core::Error::Call(jsonrpsee::types::error::CallError::Custom( - jsonrpsee::types::ErrorObject::owned( - 1010, - "Invalid Transaction", - Some("Transaction is outdated"), - ), - ))) - }); - - // On the retry, return a success. - mock_rpc_api - .expect_submit_extrinsic() - .times(1) - .returning(move |_| Ok(H256::default())); - - let watcher = new_watcher_and_submit_test_extrinsic(mock_rpc_api).await; - - // Nonce should be +2, once for the initial submission, and once for the retry - assert_eq!(watcher.anticipated_nonce, INITIAL_NONCE + 2); -} - /// If the tx fails due to a bad proof, it should fetch the runtime version and retry. 
#[tokio::test] async fn should_update_version_on_bad_proof() { - let mut mock_rpc_api = MockBaseRpcApi::new(); - - mock_rpc_api.expect_submit_extrinsic().times(1).returning(move |_| { - Err(jsonrpsee::core::Error::Call(jsonrpsee::types::error::CallError::Custom( - jsonrpsee::types::ErrorObject::owned( - 1010, - "Invalid Transaction", - Some("Transaction has a bad signature"), - ), - ))) - }); - - mock_rpc_api.expect_runtime_version().times(1).returning(move || { - let new_runtime_version = sp_version::RuntimeVersion { - spec_name: "test".into(), - impl_name: "test".into(), - authoring_version: 0, - spec_version: 0, - impl_version: 0, - apis: vec![].into(), - transaction_version: 0, - state_version: 0, - }; - assert_ne!( - new_runtime_version, - Default::default(), - "The new runtime version must be different from the version that the watcher started with" - ); - - Ok(new_runtime_version) - }); - - // On the retry, return a success. - mock_rpc_api - .expect_submit_extrinsic() - .times(1) - .returning(move |_| Ok(H256::default())); - - let watcher = new_watcher_and_submit_test_extrinsic(mock_rpc_api).await; - - // The bad proof should not have incremented the nonce, so it should only be +1 from the retry. - assert_eq!(watcher.anticipated_nonce, INITIAL_NONCE + 1); -} - -/// If the tx fails due to an error that is unrelated to the nonce, it should not increment the -/// nonce and not retry. 
-#[tokio::test] -async fn should_not_increment_nonce_on_unrelated_failure() { - let mut mock_rpc_api = MockBaseRpcApi::new(); - - mock_rpc_api.expect_submit_extrinsic().times(1).returning(move |_| { - Err(jsonrpsee::core::Error::Custom("some unrelated error".to_string())) - }); - - let watcher = new_watcher_and_submit_test_extrinsic(mock_rpc_api).await; - - assert_eq!(watcher.anticipated_nonce, INITIAL_NONCE); + task_scope(|scope| { + async { + let mut mock_rpc_api = MockBaseRpcApi::new(); + + mock_rpc_api.expect_next_account_nonce().return_once(move |_| Ok(1)); + mock_rpc_api.expect_submit_and_watch_extrinsic().times(1).returning(move |_| { + Err(jsonrpsee::core::Error::Call(jsonrpsee::types::error::CallError::Custom( + jsonrpsee::types::ErrorObject::owned( + 1010, + "Invalid Transaction", + Some("Transaction has a bad signature"), + ), + ))) + }); + + mock_rpc_api.expect_runtime_version().times(1).returning(move || { + let new_runtime_version = sp_version::RuntimeVersion { + spec_name: "test".into(), + impl_name: "test".into(), + authoring_version: 0, + spec_version: 0, + impl_version: 0, + apis: vec![].into(), + transaction_version: 0, + state_version: 0, + }; + assert_ne!( + new_runtime_version, + Default::default(), + "The new runtime version must be different from the version that the watcher started with" + ); + + Ok(new_runtime_version) + }); + + // On the retry, return a success. + mock_rpc_api.expect_next_account_nonce().return_once(move |_| Ok(1)); + mock_rpc_api.expect_submit_and_watch_extrinsic().return_once(move |_| { + Ok(Subscription::new( + futures::channel::mpsc::channel(1).0, + futures::channel::mpsc::channel(1).1, + SubscriptionKind::Subscription(jsonrpsee::types::SubscriptionId::Num(0)), + )) + }); + + let _watcher = new_watcher_and_submit_test_extrinsic(scope, mock_rpc_api).await; + + Ok(()) + } + .boxed() + }) + .await + .unwrap(); } /// Create a new watcher and submit a dummy extrinsic. 
-async fn new_watcher_and_submit_test_extrinsic( +async fn new_watcher_and_submit_test_extrinsic<'a, 'env>( + scope: &'a Scope<'env, anyhow::Error>, mock_rpc_api: MockBaseRpcApi, -) -> SubmissionWatcher { +) -> SubmissionWatcher<'a, 'env, MockBaseRpcApi> { let (mut watcher, _requests) = SubmissionWatcher::new( + scope, signer::PairSigner::new(sp_core::Pair::generate().0), INITIAL_NONCE, H256::default(), @@ -170,14 +105,15 @@ async fn new_watcher_and_submit_test_extrinsic( }); let mut request = Request { id: 0, - pending_submissions: 0, - allow_resubmits: false, - lifetime: ..=1, + next_submission_id: 0, + pending_submissions: Default::default(), + strictly_one_submission: false, + resubmit_window: ..=1, call, - result_sender: oneshot::channel().0, + until_in_block_sender: Some(oneshot::channel().0), + until_finalized_sender: oneshot::channel().0, }; - assert_eq!(watcher.anticipated_nonce, INITIAL_NONCE, "Nonce should start at INITIAL_NONCE"); let _result = watcher.submit_extrinsic(&mut request).await; watcher diff --git a/engine/src/state_chain_observer/client/mod.rs b/engine/src/state_chain_observer/client/mod.rs index 4d3aa8a21a..13a8702e3d 100644 --- a/engine/src/state_chain_observer/client/mod.rs +++ b/engine/src/state_chain_observer/client/mod.rs @@ -436,13 +436,17 @@ impl< for StateChainClient { type UntilFinalizedFuture = SignedExtrinsicClient::UntilFinalizedFuture; + type UntilInBlockFuture = SignedExtrinsicClient::UntilInBlockFuture; fn account_id(&self) -> AccountId { self.signed_extrinsic_client.account_id() } /// Submit an signed extrinsic, returning the hash of the submission - async fn submit_signed_extrinsic(&self, call: Call) -> (H256, Self::UntilFinalizedFuture) + async fn submit_signed_extrinsic( + &self, + call: Call, + ) -> (H256, (Self::UntilInBlockFuture, Self::UntilFinalizedFuture)) where Call: Into + Clone @@ -455,7 +459,10 @@ impl< } /// Sign, submit, and watch an extrinsic retrying if submissions fail be to finalized - async fn 
finalize_signed_extrinsic(&self, call: Call) -> Self::UntilFinalizedFuture + async fn finalize_signed_extrinsic( + &self, + call: Call, + ) -> (Self::UntilInBlockFuture, Self::UntilFinalizedFuture) where Call: Into + Clone @@ -521,10 +528,11 @@ pub mod mocks { #[async_trait] impl SignedExtrinsicApi for StateChainClient { type UntilFinalizedFuture = extrinsic_api::signed::MockUntilFinalized; + type UntilInBlockFuture = extrinsic_api::signed::MockUntilInBlock; fn account_id(&self) -> AccountId; - async fn submit_signed_extrinsic(&self, call: Call) -> (H256, ::UntilFinalizedFuture) + async fn submit_signed_extrinsic(&self, call: Call) -> (H256, (::UntilInBlockFuture, ::UntilFinalizedFuture)) where Call: Into + Clone @@ -533,7 +541,7 @@ pub mod mocks { + Sync + 'static; - async fn finalize_signed_extrinsic(&self, call: Call) -> ::UntilFinalizedFuture + async fn finalize_signed_extrinsic(&self, call: Call) -> (::UntilInBlockFuture, ::UntilFinalizedFuture) where Call: Into + Clone diff --git a/engine/src/state_chain_observer/sc_observer/tests.rs b/engine/src/state_chain_observer/sc_observer/tests.rs index 4856c3a82e..003fd49cd4 100644 --- a/engine/src/state_chain_observer/sc_observer/tests.rs +++ b/engine/src/state_chain_observer/sc_observer/tests.rs @@ -215,7 +215,12 @@ ChainCrypto>::ThresholdSignature: std::convert::From<::Signat offenders: BTreeSet::default(), })) .once() - .return_once(|_| extrinsic_api::signed::MockUntilFinalized::new()); + .return_once(|_| { + ( + extrinsic_api::signed::MockUntilInBlock::new(), + extrinsic_api::signed::MockUntilFinalized::new(), + ) + }); // ceremony_id_3 is a success and should submit an unsigned extrinsic let ceremony_id_3 = ceremony_id_2 + 1; @@ -328,7 +333,12 @@ where state_chain_client .expect_finalize_signed_extrinsic::>() .once() - .return_once(|_| extrinsic_api::signed::MockUntilFinalized::new()); + .return_once(|_| { + ( + extrinsic_api::signed::MockUntilInBlock::new(), + extrinsic_api::signed::MockUntilFinalized::new(), + 
) + }); let state_chain_client = Arc::new(state_chain_client); let mut multisig_client = MockMultisigClientApi::::new(); @@ -450,7 +460,12 @@ where state_chain_client .expect_finalize_signed_extrinsic::>() .once() - .return_once(|_| extrinsic_api::signed::MockUntilFinalized::new()); + .return_once(|_| { + ( + extrinsic_api::signed::MockUntilInBlock::new(), + extrinsic_api::signed::MockUntilFinalized::new(), + ) + }); let state_chain_client = Arc::new(state_chain_client); task_scope(|scope| { From 0aaaaa1b9980316538510b02288607f068ba854d Mon Sep 17 00:00:00 2001 From: Alastair Holmes <42404303+AlastairHolmes@users.noreply.github.com> Date: Mon, 25 Sep 2023 17:38:35 +0200 Subject: [PATCH 10/15] doc: amm and pools pallet (#4005) * doc: AMM and LP comments * Update state-chain/amm/src/common.rs * Update state-chain/amm/src/common.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/limit_orders.rs * Update state-chain/amm/src/range_orders.rs * Update state-chain/amm/src/range_orders.rs * Update state-chain/amm/src/range_orders.rs * Update state-chain/amm/src/range_orders.rs * Update state-chain/pallets/cf-pools/src/lib.rs * Update state-chain/pallets/cf-pools/src/lib.rs * Update state-chain/pallets/cf-pools/src/lib.rs * Update state-chain/pallets/cf-pools/src/lib.rs * Update state-chain/pallets/cf-pools/src/lib.rs * Update state-chain/pallets/cf-pools/src/lib.rs * Update 
state-chain/pallets/cf-pools/src/lib.rs * cargo fmt --all * corrections --- state-chain/amm/src/common.rs | 30 ++++++- state-chain/amm/src/lib.rs | 3 + state-chain/amm/src/limit_orders.rs | 112 +++++++++++++++++++++--- state-chain/amm/src/range_orders.rs | 55 +++++++++++- state-chain/pallets/cf-pools/src/lib.rs | 53 +++++++++-- 5 files changed, 235 insertions(+), 18 deletions(-) diff --git a/state-chain/amm/src/common.rs b/state-chain/amm/src/common.rs index 39bb990d37..12e34b492f 100644 --- a/state-chain/amm/src/common.rs +++ b/state-chain/amm/src/common.rs @@ -5,9 +5,17 @@ use sp_core::{U256, U512}; pub const ONE_IN_HUNDREDTH_PIPS: u32 = 1000000; +/// Represents an amount of an asset, in its smallest unit i.e. Ethereum has 10^-18 precision, and +/// therefore an `Amount` with the literal value of `1` would represent 10^-18 Ethereum. pub type Amount = U256; +/// The `log1.0001(price)` rounded to the nearest integer. Note [Price] is always +/// in units of asset One. pub type Tick = i32; +/// The square root of the price, represented as a fixed point integer with 96 fractional bits and +/// 64 integer bits (The higher bits past 96+64 th aren't used). [SqrtPriceQ64F96] is always in sqrt +/// units of asset one. pub type SqrtPriceQ64F96 = U256; +/// The number of fractional bits used by `SqrtPriceQ64F96`. pub const SQRT_PRICE_FRACTIONAL_BITS: u32 = 96; #[derive( @@ -149,13 +157,16 @@ pub(super) fn mul_div>(a: U256, b: U256, c: C) -> (U256, U256) { ) } +/// A marker type to represent a swap that buys asset One, and sells asset Zero pub(super) struct ZeroToOne {} +/// A marker type to represent a swap that buys asset Zero, and sells asset One pub(super) struct OneToZero {} pub(super) trait SwapDirection { + /// The asset this type of swap sells, i.e. the asset the swapper provides const INPUT_SIDE: Side; - /// Determines if a given sqrt_price is more than another + /// Determines if a given sqrt_price is more than another for this direction of swap. 
fn sqrt_price_op_more_than( sqrt_price: SqrtPriceQ64F96, sqrt_price_other: SqrtPriceQ64F96, @@ -215,9 +226,16 @@ impl SwapDirection for OneToZero { // TODO: Consider increasing Price to U512 or switch to a f64 (f64 would only be for the external // price representation), as at low ticks the precision in the price is VERY LOW, but this does not // cause any problems for the AMM code in terms of correctness +/// This is the ratio of equivalently valued amounts of asset One and asset Zero. The price is +/// always measured in amount of asset One per unit of asset Zero. Therefore as asset zero becomes +/// more valuable relative to asset one the price's literal value goes up, and vice versa. This +/// ratio is represented as a fixed point number with `PRICE_FRACTIONAL_BITS` fractional bits. pub type Price = U256; pub const PRICE_FRACTIONAL_BITS: u32 = 128; +/// Converts from a [SqrtPriceQ64F96] to a [Price]. +/// +/// Will panic for `sqrt_price`'s outside `MIN_SQRT_PRICE..=MAX_SQRT_PRICE` pub(super) fn sqrt_price_to_price(sqrt_price: SqrtPriceQ64F96) -> Price { assert!((MIN_SQRT_PRICE..=MAX_SQRT_PRICE).contains(&sqrt_price)); @@ -231,6 +249,9 @@ pub(super) fn sqrt_price_to_price(sqrt_price: SqrtPriceQ64F96) -> Price { ) } +/// Converts from a `price` to a `sqrt_price` +/// +/// This function never panics. pub(super) fn price_to_sqrt_price(price: Price) -> SqrtPriceQ64F96 { ((U512::from(price) << PRICE_FRACTIONAL_BITS).integer_sqrt() >> (PRICE_FRACTIONAL_BITS - SQRT_PRICE_FRACTIONAL_BITS)) @@ -238,6 +259,9 @@ pub(super) fn price_to_sqrt_price(price: Price) -> SqrtPriceQ64F96 { .unwrap_or(SqrtPriceQ64F96::MAX) } +/// Converts a `tick` to a `price`. Will return `None` for ticks outside MIN_TICK..=MAX_TICK +/// +/// This function never panics. pub fn price_at_tick(tick: Tick) -> Option { if is_tick_valid(tick) { Some(sqrt_price_to_price(sqrt_price_at_tick(tick))) @@ -246,6 +270,10 @@ pub fn price_at_tick(tick: Tick) -> Option { } } +/// Converts a `price` to a `tick`. 
Will return `None` if the price is too high or low to be
+/// represented by a valid tick i.e. one inside MIN_TICK..=MAX_TICK.
+///
+/// This function never panics.
 pub fn tick_at_price(price: Price) -> Option {
 	let sqrt_price = price_to_sqrt_price(price);
 	if is_sqrt_price_valid(sqrt_price) {
diff --git a/state-chain/amm/src/lib.rs b/state-chain/amm/src/lib.rs
index 020c878985..0880273742 100644
--- a/state-chain/amm/src/lib.rs
+++ b/state-chain/amm/src/lib.rs
@@ -78,6 +78,9 @@ impl PoolState {
 			.map(sqrt_price_to_price)
 	}
 
+	/// Performs a swap to sell or buy an amount of either side/asset.
+	///
+	/// This function never panics.
 	pub fn swap(&mut self, side: Side, order: Order, amount: Amount) -> (Amount, Amount) {
 		match (side, order) {
 			(Side::Zero, Order::Sell) => self.inner_swap::(amount, None),
diff --git a/state-chain/amm/src/limit_orders.rs b/state-chain/amm/src/limit_orders.rs
index f69edff751..0a2b8306bb 100644
--- a/state-chain/amm/src/limit_orders.rs
+++ b/state-chain/amm/src/limit_orders.rs
@@ -1,3 +1,22 @@
+//! This code implements a single liquidity pool pair, that allows LPs to specify particular prices
+//! at which they want to sell one of the two assets in the pair. The price an LP wants to sell at
+//! is specified using `Tick`s.
+//!
+//! This type of pool doesn't do automated market making, as in the price of the pool is purely
+//! determined by the best priced position currently in the pool.
+//!
+//! Swaps in this pool will execute on the best priced positions first. Note if two positions
+//! have the same price, both positions will be partially executed, and neither will receive
+//! "priority" regardless of when they were created, i.e. an equal percentage of all positions at
+//! the same price will be executed. So larger positions will earn more fees (and the absolute
+//! amount of the position that is executed will be greater, but the same percentage-wise) as they
+//! contribute more to the swap.
+//!
+//! 
To track fees earned and remaining liquidity in each position, the pool records the big product
+//! of the "percent_remaining" of each swap. Using two of these values you can calculate the
+//! percentage of liquidity swapped in a position between the two points in time at which those
+//! percent_remaining values were recorded.
+
 #[cfg(test)]
 mod tests;
 
@@ -16,17 +35,35 @@ use crate::common::{
 	ONE_IN_HUNDREDTH_PIPS, PRICE_FRACTIONAL_BITS,
 };
 
-const MAX_FIXED_POOL_LIQUIDITY: Amount = U256([u64::MAX, u64::MAX, 0, 0]);
+// This is the maximum liquidity/amount of an asset that can be sold at a single tick/price. If an
+// LP attempts to add more liquidity that would increase the total at the tick past this value, the
+// minting operation will error. Note this maximum is for all lps combined, and not a single lp,
+// therefore it is possible for an LP to "consume" a tick by filling it up to the maximum, and
+// thereby not allowing other LPs to mint at that price (But the maximum is high enough that this is
+// not feasible).
+const MAX_FIXED_POOL_LIQUIDITY: Amount = U256([u64::MAX, u64::MAX, 0, 0] /* little endian */);
 
 /// Represents a number exclusively between 0 and 1.
 #[derive(Clone, Debug, PartialEq, Eq, TypeInfo, Encode, Decode, MaxEncodedLen)]
 #[cfg_attr(feature = "std", derive(Default))]
 struct FloatBetweenZeroAndOne {
+	/// A fixed point number where the msb has a value of `0.5`,
+	/// therefore it cannot represent 1.0, only numbers inside
+	/// `0.0..1.0`, although note the mantissa will never be zero, and
+	/// this is enforced by the public functions of the type. We also
+	/// enforce that the top bit of the mantissa is always set, i.e.
+	/// the floating point number is `normalised`. Therefore the mantissa
+	/// always has a value between `0.5..1.0`.
 	normalised_mantissa: U256,
+	/// As we are only interested in representing real numbers below 1,
+	/// the exponent is either 0 or negative.
 	
negative_exponent: U256, } impl Ord for FloatBetweenZeroAndOne { fn cmp(&self, other: &Self) -> core::cmp::Ordering { + // Because the float is normalised we can get away with comparing only the exponents (unless + // they are the same). Also note the exponent comparison is reversed, as the exponent is + // implicitly negative. other .negative_exponent .cmp(&self.negative_exponent) @@ -39,13 +76,14 @@ impl PartialOrd for FloatBetweenZeroAndOne { } } impl FloatBetweenZeroAndOne { - /// Returns the largest possible value. + /// Returns the largest possible value i.e. `1.0 - (2^-256)`. fn max() -> Self { Self { normalised_mantissa: U256::max_value(), negative_exponent: U256::zero() } } /// Rights shifts x by shift_bits bits, returning the result and the bits that were shifted - /// out/remainder. + /// out/the remainder. You can think of this as a div_mod, but we are always dividing by powers + /// of 2. fn right_shift_mod(x: U512, shift_bits: U256) -> (U512, U512) { if shift_bits >= U256::from(512) { (U512::zero(), x) @@ -56,6 +94,8 @@ impl FloatBetweenZeroAndOne { } /// Returns the result of `self * numerator / denominator` with the result rounded up. + /// + /// This function will panic if the numerator is zero, or if numerator > denominator fn mul_div_ceil(&self, numerator: U256, denominator: U256) -> Self { // We cannot use the `mul_div_ceil` function here (and then right-shift the result) to // calculate the normalised_mantissa as the low zero bits (where we shifted) could be wrong. @@ -64,6 +104,8 @@ impl FloatBetweenZeroAndOne { assert!(numerator <= denominator); self.assert_valid(); + // We do the mul first to avoid losing precision as in the division bits will possibly get + // shifted off the "bottom" of the mantissa. 
let (mul_normalised_mantissa, mul_normalise_shift) = { let unnormalised_mantissa = U256::full_mul(self.normalised_mantissa, numerator); let normalize_shift = unnormalised_mantissa.leading_zeros(); @@ -77,9 +119,11 @@ impl FloatBetweenZeroAndOne { // As the denominator <= U256::MAX, this div will not right-shift the mantissa more than // 256 bits, so we maintain at least 256 accurate bits in the result. let (d, div_remainder) = - U512::div_mod(mul_normalised_mantissa, U512::from(denominator)); + U512::div_mod(mul_normalised_mantissa, U512::from(denominator)); // Note that d can never be zero as mul_normalised_mantissa always has atleast one bit + // set above the 256th bit. let d = if div_remainder.is_zero() { d } else { d + U512::one() }; let normalise_shift = d.leading_zeros(); + // We right shift and use the lower 256 bits for the mantissa let shift_bits = 256 - normalise_shift; let (d, shift_remainder) = Self::right_shift_mod(d, shift_bits.into()); let d = U256::try_from(d).unwrap(); @@ -105,7 +149,9 @@ impl FloatBetweenZeroAndOne { } } - /// Returns both floor and ceil of `x * numerator / denominator` + /// Returns both floor and ceil of `y = x * numerator / denominator`. + /// + /// This will panic if the numerator is more than the denominator. 
fn integer_mul_div(x: U256, numerator: &Self, denominator: &Self) -> (U256, U256) {
 		// Note this does not imply numerator.normalised_mantissa <= denominator.normalised_mantissa
 		assert!(numerator <= denominator);
@@ -117,19 +163,21 @@ impl FloatBetweenZeroAndOne {
 			denominator.normalised_mantissa.into(),
 		);
 
+		// Unwrap safe as numerator is smaller than denominator, so its negative_exponent must be
+		// greater than or equal to the denominator's
 		let negative_exponent =
 			numerator.negative_exponent.checked_sub(denominator.negative_exponent).unwrap();
 
 		let (y_floor, shift_remainder) = Self::right_shift_mod(y_shifted_floor, negative_exponent);
 
-		let y_floor = y_floor.try_into().unwrap();
+		let y_floor = y_floor.try_into().unwrap(); // Unwrap safe as numerator <= denominator and therefore y cannot be greater than x
 
 		(
 			y_floor,
 			if div_remainder.is_zero() && shift_remainder.is_zero() {
 				y_floor
 			} else {
-				y_floor + 1
+				y_floor + 1 // Safe as for there to be a remainder y_floor must be at least 1 less than x
 			},
 		)
 	}
@@ -247,12 +295,16 @@ pub enum CollectError {}
 
 #[derive(Default, Debug, PartialEq, Eq, TypeInfo, Encode, Decode, MaxEncodedLen)]
 pub struct Collected {
+	/// The amount of fees earned by this position since the last collect.
 	pub fees: Amount,
+	/// The amount of assets purchased by the LP using the liquidity in this position since the
+	/// last collect.
 	pub bought_amount: Amount,
 }
 
 #[derive(Default, Debug, PartialEq, Eq, TypeInfo, Encode, Decode, MaxEncodedLen)]
 pub struct PositionInfo {
+	/// The amount of liquidity in the position after the operation.
 	pub amount: Amount,
 }
 impl PositionInfo {
@@ -266,30 +318,68 @@ impl<'a> From<&'a Position> for PositionInfo {
 	}
 }
 
+/// Represents a single LP position
 #[derive(Clone, Debug, TypeInfo, Encode, Decode, MaxEncodedLen)]
 struct Position {
+	/// Used to identify when the position was created and thereby determine if all the liquidity
+	/// in the position has been used or not. 
As once all the liquidity at a tick has been used, + /// the internal record of that tick/fixed pool is deleted, and if liquidity is added back + /// later the record will have a different pool_instance. Therefore a position can tell if all + /// its liquidity has been used, by seeing if there is not a fixed pool at the same tick, or if + /// that fixed pool has a different pool_instance. pool_instance: u128, + /// The total amount of liquidity provided by this position as of the last operation on the + /// position. I.e. This value is not updated when swaps occur, only when the LP updates their + /// position in some way. amount: Amount, + /// This value is used in combination with the FixedPool's `percent_remaining` to determine how + /// much liquidity/amount is remaining in a position when an LP does a collect/update of the + /// position. It is the percent_remaining of the FixedPool when the position was last + /// updated/collected from. last_percent_remaining: FloatBetweenZeroAndOne, } +/// Represents a pool that is selling an amount of an asset at a specific/fixed price. A +/// single fixed pool will contain the liquidity/assets for all limit orders at that specific price. #[derive(Clone, Debug, TypeInfo, Encode, Decode, MaxEncodedLen)] pub(super) struct FixedPool { + /// Whenever a FixedPool is destroyed and recreated i.e. all the liquidity in the FixedPool is + /// used, a new value for pool_instance is used, and the previously used value will never be + /// used again. This is used to determine whether a position was created during the current + /// FixedPool's lifetime and therefore that FixedPool's `percent_remaining` is meaningful for + /// the position, or if the position was created before the current FixedPool's lifetime. pool_instance: u128, + /// This is the total liquidity/amount available for swaps at this price. This value is greater + /// than or equal to the amount provided currently by all positions at the same tick. 
It is not
+	/// always equal due to rounding, and therefore it is possible for a FixedPool to have no
+	/// associated position but have some liquidity available, but this would likely be a very
+	/// small amount.
 	available: Amount,
+	/// This is the big product of all `1.0 - percent_used_by_swap` for all swaps that have occurred
+	/// since this FixedPool instance was created and used liquidity from it.
 	percent_remaining: FloatBetweenZeroAndOne,
 }
 
 #[derive(Clone, Debug, TypeInfo, Encode, Decode)]
 pub(super) struct PoolState {
+	/// The percentage fee taken from swap inputs and earned by LPs. It is in units of 0.0001%.
+	/// I.e. 5000 means 0.5%.
 	pub(super) fee_hundredth_pips: u32,
+	/// The ID the next FixedPool that is created will use.
 	next_pool_instance: u128,
+	/// All the FixedPools that have some liquidity. They are grouped into all those that are
+	/// selling asset `Zero` and all those that are selling asset `one` using the SideMap.
 	fixed_pools: SideMap>,
+	/// All the Positions that either are providing liquidity currently, or were providing
+	/// liquidity directly after the last time they were updated. They are grouped into all those
+	/// that are selling asset `Zero` and all those that are selling asset `one` using the SideMap.
+	/// Therefore there can be positions stored here that don't provide any liquidity.
 	positions: SideMap>,
 }
 
 impl PoolState {
-	/// Creates a new pool state with the given fee. The pool is created with no liquidity.
+	/// Creates a new pool state with the given fee. The pool is created with no liquidity. The pool
+	/// may not be created with a fee higher than 50%.
 	///
 	/// This function never panics.
 	pub(super) fn new(fee_hundredth_pips: u32) -> Result {
@@ -305,8 +395,8 @@ impl PoolState {
 		})
 	}
 
-	/// Sets the fee for the pool. This will apply to future swaps. This function will fail if the
-	/// fee is greater than 50%. Also runs collect for all positions in the pool.
+	/// Sets the fee for the pool. 
This will apply to future swaps. The fee may not be set + /// higher than 50%. Also runs collect for all positions in the pool. /// /// This function never panics. #[allow(clippy::type_complexity)] @@ -356,7 +446,7 @@ impl PoolState { Ok(SideMap::from_array(collected_amounts)) } - /// Returns the current price of the pool, if some liquidity exists. + /// Returns the current price of the pool for a given swap direction, if some liquidity exists. /// /// This function never panics. pub(super) fn current_sqrt_price(&mut self) -> Option { diff --git a/state-chain/amm/src/range_orders.rs b/state-chain/amm/src/range_orders.rs index 4febb3bb57..5c1e7a11e5 100644 --- a/state-chain/amm/src/range_orders.rs +++ b/state-chain/amm/src/range_orders.rs @@ -33,14 +33,25 @@ use crate::common::{ MAX_TICK, MIN_TICK, ONE_IN_HUNDREDTH_PIPS, SQRT_PRICE_FRACTIONAL_BITS, }; +/// This is the invariant wrt xy = k. It represents / is proportional to the depth of the +/// pool/position. pub type Liquidity = u128; type FeeGrowthQ128F128 = U256; +/// This is the maximum Liquidity that can be associated with a given tick. Note this doesn't mean +/// the maximum amount of Liquidity a tick can have, but is the maximum allowed value of the sum of +/// the liquidity associated with all range orders that start or end at this tick. +/// This does indirectly limit the maximum liquidity at any price/tick, due to the fact there is +/// also a finite number of ticks i.e. all those in MIN_TICK..MAX_TICK. This limit exists to ensure +/// the output amount of a swap will never overflow a U256, even if the swap used all the liquidity +/// in the pool. pub const MAX_TICK_GROSS_LIQUIDITY: Liquidity = Liquidity::MAX / ((1 + MAX_TICK - MIN_TICK) as u128); #[derive(Clone, Debug, TypeInfo, Encode, Decode, MaxEncodedLen)] pub struct Position { + /// The `depth` of this range order, this value is proportional to the value of the order i.e. + /// the amount of assets that make up the order. 
liquidity: Liquidity, last_fee_growth_inside: SideMap, } @@ -99,6 +110,9 @@ impl Position { upper_tick: Tick, upper_delta: &TickDelta, ) -> (Collected, PositionInfo) { + // Before you can change the liquidity of a Position you must collect_fees, as the + // `last_fee_growth_inside` member (which is used to calculate earned fees) is only + // meaningful while liquidity is constant. let collected_fees = self.collect_fees(pool_state, lower_tick, lower_delta, upper_tick, upper_delta); self.liquidity = new_liquidity; @@ -108,19 +122,42 @@ impl Position { #[derive(Clone, Debug, TypeInfo, Encode, Decode, MaxEncodedLen)] pub struct TickDelta { + /// This is the change in the total amount of liquidity in the pool at this price, i.e. if the + /// price moves from a lower price to a higher one, above this tick (higher/lower in literal + /// integer value), the liquidity will increase by `liquidity_delta` and therefore swaps (In + /// both directions) will experience less slippage (Assuming liquidity_delta is positive). liquidity_delta: i128, + /// This is the sum of the liquidity of all the orders that start or end at this tick. Note + /// this is the value that MAX_TICK_GROSS_LIQUIDITY applies to. liquidity_gross: u128, + /// This is the fees per unit liquidity earned over all time while the current/swapping price + /// was on the opposite side of this tick than it is at the moment. This can be used to + /// calculate the fees earned by an order. It is stored this way as this value will only change + /// when the price moves across this tick, thereby limiting the computation/state changes + /// needed during a swap. fee_growth_outside: SideMap, } #[derive(Clone, Debug, TypeInfo, Encode, Decode)] pub struct PoolState { + /// The percentage fee taken from swap inputs and earned by LPs. It is in units of 0.0001%. + /// I.e. 5000 means 0.5%. 
pub(super) fee_hundredth_pips: u32,
-	// Note the current_sqrt_price can reach MAX_SQRT_PRICE, but only if the tick is MAX_TICK
+	/// Note the current_sqrt_price can reach MAX_SQRT_PRICE, but only if the tick is MAX_TICK
 	current_sqrt_price: SqrtPriceQ64F96,
+	/// This is the highest tick that represents a strictly lower price than the
+	/// current_sqrt_price. `current_tick` is the tick that when you swap ZeroToOne the
+	/// `current_sqrt_price` is moving towards (going down in literal value), and will cross when
+	/// `current_sqrt_price` reaches it. `current_tick + 1` is the tick the price is moving towards
+	/// (going up in literal value) when you swap OneToZero and will cross when
+	/// `current_sqrt_price` reaches it.
 	current_tick: Tick,
+	/// The total liquidity/depth at the `current_sqrt_price`
 	current_liquidity: Liquidity,
+	/// The total fees earned over all time per unit liquidity
 	global_fee_growth: SideMap,
+	/// All the ticks that have at least one range order that starts or ends at it, i.e. those
+	/// ticks where liquidity_gross is non-zero.
 	liquidity_map: BTreeMap,
 	positions: BTreeMap<(LiquidityProvider, Tick, Tick), Position>,
 }
@@ -849,6 +886,11 @@ impl PoolState {
 			.0)
 	}
 
+	/// Returns the value of a range order with a given amount of liquidity, i.e. the assets that
+	/// you would need to create such a position, or that you would get if such a position was
+	/// burned.
+	///
+	/// This function never panics
 	pub(super) fn liquidity_to_amounts(
 		&self,
 		liquidity: Liquidity,
@@ -984,6 +1026,10 @@ impl PoolState {
 			.unwrap_or(MAX_TICK_GROSS_LIQUIDITY)
 	}
 
+	/// Returns the current value of a position i.e. the assets you would receive by burning the
+	/// position, and the fees earned by the position since the last time it was updated/collected.
+	///
+	/// This function never panics
 	pub(super) fn position(
 		&self,
 		lp: &LiquidityProvider,
@@ -1008,6 +1054,13 @@ impl PoolState {
 		))
 	}
 
+	/// Returns a histogram of all the liquidity in the pool. 
Each entry in the returned vec is the + /// "start" tick, and the amount of liquidity in the pool from that tick, until the next tick, + /// i.e. the next tick in the pool. The first element will always be the MIN_TICK with some + /// amount of liquidity, and the last element will always be the MAX_TICK with a zero amount of + /// liquidity. + /// + /// This function never panics pub(super) fn liquidity(&self) -> Vec<(Tick, Liquidity)> { let mut liquidity = 0u128; self.liquidity_map diff --git a/state-chain/pallets/cf-pools/src/lib.rs b/state-chain/pallets/cf-pools/src/lib.rs index 554f2ecc57..7a2ecbd525 100644 --- a/state-chain/pallets/cf-pools/src/lib.rs +++ b/state-chain/pallets/cf-pools/src/lib.rs @@ -33,11 +33,16 @@ mod tests; impl_pallet_safe_mode!(PalletSafeMode; range_order_update_enabled, limit_order_update_enabled); +/// For referring to either the stable or unstable asset of a particular pool enum Stability { Stable, Unstable, } +// TODO Add custom serialize/deserialize and encode/decode implementations that preserve canonical +// nature. +/// Represents a pair of assets in a canonical ordering, so given two different assets they are +/// always the same way around. In this case the unstable asset is `zero` and the stable is `one`. #[derive( Clone, DebugNoBound, Encode, Decode, TypeInfo, MaxEncodedLen, PartialEqNoBound, EqNoBound, )] @@ -104,6 +109,8 @@ impl AssetPair { }) } + /// Remaps the amounts into a SideMap, assuming the base and pair are the same way around as the + /// assets when this AssetPair was created. pub fn asset_amounts_to_side_map( &self, asset_amounts: AssetAmounts, @@ -114,6 +121,8 @@ impl AssetPair { }) } + /// Remaps the amounts into an AssetsMap, assuming the base and pair should be the same way + /// around as the assets when this AssetPair was created. 
pub fn side_map_to_asset_amounts( &self, side_map: cf_amm::common::SideMap, @@ -121,6 +130,8 @@ impl AssetPair { Ok(self.side_map_to_assets_map(side_map.try_map(|_, amount| amount.try_into())?)) } + /// Remaps a SideMap into an AssetsMap, assuming the base and pair should be the same way around + /// as the assets when this AssetPair was created. pub fn side_map_to_assets_map(&self, side_map: cf_amm::common::SideMap) -> AssetsMap { match self.base_side { Side::Zero => AssetsMap { base: side_map.zero, pair: side_map.one }, @@ -140,6 +151,8 @@ impl AssetPair { }) } + /// Debits the specified amounts, and returns the amounts debited. If the requested amounts + /// couldn't be debited this is a noop and returns an error. fn try_debit_assets( &self, lp: &T::AccountId, @@ -148,6 +161,8 @@ impl AssetPair { self.try_xxx_assets(lp, side_map, T::LpBalance::try_debit_account) } + /// Credits the specified amounts, and returns the amounts credited. If the requested amounts + /// couldn't be credited this is a noop and returns an error. fn try_credit_assets( &self, lp: &T::AccountId, @@ -168,6 +183,8 @@ impl AssetPair { Ok(asset_amount) } + /// Debits the specified amount, and returns the amount debited. If the requested amount + /// couldn't be debited this is a noop and returns an error. fn try_debit_asset( &self, lp: &T::AccountId, @@ -177,6 +194,8 @@ impl AssetPair { self.try_xxx_asset(lp, side, amount, T::LpBalance::try_debit_account) } + /// Credits the specified amount, and returns the amount credited. If the requested amount + /// couldn't be credited this is a noop and returns an error. fn try_credit_asset( &self, lp: &T::AccountId, @@ -273,6 +292,9 @@ pub mod pallet { pub type AssetAmounts = AssetsMap; + /// Represents an amount of liquidity, either as an exact amount, or through maximum and minimum + /// amounts of both assets. 
Internally those max/min are converted into exact liquidity amounts,
+	/// that is if the appropriate asset ratio can be achieved while maintaining the max/min bounds.
 	#[derive(
 		Copy,
 		Clone,
@@ -291,6 +313,7 @@ pub mod pallet {
 		Liquidity { liquidity: Liquidity },
 	}
 
+	/// Indicates if an LP wishes to increase or decrease the size of an order.
 	#[derive(
 		Copy,
 		Clone,
@@ -332,7 +355,7 @@ pub mod pallet {
 	#[pallet::without_storage_info]
 	pub struct Pallet(PhantomData);
 
-	/// Pools are indexed by single asset since USDC is implicit.
+	/// All the available pools.
 	#[pallet::storage]
 	pub type Pools = StorageMap<_, Twox64Concat, CanonicalAssetPair, Pool, OptionQuery>;
 
@@ -458,6 +481,9 @@ pub mod pallet {
 			fee_hundredth_pips: u32,
 			initial_price: Price,
 		},
+		/// Indicates the details of a change made to a range order. A single update extrinsic may
+		/// produce multiple of these events, particularly for example if the update changes the
+		/// price/range of the order.
 		RangeOrderUpdated {
 			lp: T::AccountId,
 			base_asset: Asset,
@@ -470,6 +496,9 @@
 			assets_delta: AssetAmounts,
 			collected_fees: AssetAmounts,
 		},
+		/// Indicates the details of a change made to a limit order. A single update extrinsic may
+		/// produce multiple of these events, particularly for example if the update changes the
+		/// price of the order.
 		LimitOrderUpdated {
 			lp: T::AccountId,
 			sell_asset: Asset,
@@ -612,7 +641,8 @@ pub mod pallet {
 		/// optionally moving the order it may not be possible to allocate all the assets previously
 		/// associated with the order to the new range; If so the unused assets will be returned to
 		/// your balance. The appropriate assets will be debited or credited from your balance as
-		/// needed.
+		/// needed. If the order_id isn't being used at the moment you must specify a tick_range,
+		/// otherwise it will not know what range you want the order to be over. 
#[pallet::call_index(3)]
 		#[pallet::weight(T::WeightInfo::update_range_order())]
 		pub fn update_range_order(
@@ -694,7 +724,9 @@ pub mod pallet {
 		}
 
 		/// Optionally move the order to a different range and then set its amount of liquidity. The
-		/// appropriate assets will be debited or credited from your balance as needed.
+		/// appropriate assets will be debited or credited from your balance as needed. If the
+		/// order_id isn't being used at the moment you must specify a tick_range, otherwise it will
+		/// not know what range you want the order to be over.
 		#[pallet::call_index(4)]
 		#[pallet::weight(T::WeightInfo::set_range_order())]
 		pub fn set_range_order(
@@ -760,7 +792,12 @@ pub mod pallet {
 
 		/// Optionally move the order to a different tick and then increase or decrease its amount
 		/// of liquidity. The appropiate assets will be debited or credited from your balance as
-		/// needed.
+		/// needed. If the order_id isn't being used at the moment you must specify a tick,
+		/// otherwise it will not know what tick you want the order to be over. Note limit order
+		/// order_id's are independent of range order order_id's. In addition to that, order_id's
+		/// for buy and sell limit orders i.e. those in different directions are independent.
+		/// Therefore you may have two limit orders with the same order_id in the same pool, one to
+		/// buy Eth and one to sell Eth for example.
 		#[pallet::call_index(5)]
 		#[pallet::weight(T::WeightInfo::update_limit_order())]
 		pub fn update_limit_order(
@@ -830,7 +867,13 @@
 		}
 
 		/// Optionally move the order to a different tick and then set its amount of liquidity. The
-		/// appropiate assets will be debited or credited from your balance as needed.
+		/// appropriate assets will be debited or credited from your balance as needed. If the
+		/// order_id isn't being used at the moment you must specify a tick, otherwise it will not
+		/// know what tick you want the order to be over. 
Note limit order order_id's are + /// independent of range order order_id's. In addition to that, order_id's for buy and sell + /// limit orders i.e. those in different directions are independent. Therefore you may have + /// two limit orders with the same order_id in the same pool, one to buy Eth and one to sell + /// Eth for example. #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::set_limit_order())] pub fn set_limit_order( From 3e47468db44c5d11c49232eedf4057c127cff46b Mon Sep 17 00:00:00 2001 From: Jamie Ford Date: Tue, 26 Sep 2023 09:41:45 +1000 Subject: [PATCH 11/15] CLI binding executor address (#4042) * feat: cli binding executor address * refactor: avoid unwrap * docs: update readme and cmd description * chore: add example --------- Co-authored-by: Daniel --- api/bin/chainflip-cli/src/main.rs | 90 ++++++++++++++++++++--- api/bin/chainflip-cli/src/settings.rs | 17 ++++- api/lib/src/lib.rs | 12 +++ api/lib/src/queries.rs | 18 +++++ bouncer/shared/fund_redeem.ts | 2 +- state-chain/pallets/cf-funding/README.md | 12 ++- state-chain/pallets/cf-funding/src/lib.rs | 8 +- 7 files changed, 138 insertions(+), 21 deletions(-) diff --git a/api/bin/chainflip-cli/src/main.rs b/api/bin/chainflip-cli/src/main.rs index 76d949cb6d..2f423a1a1c 100644 --- a/api/bin/chainflip-cli/src/main.rs +++ b/api/bin/chainflip-cli/src/main.rs @@ -87,15 +87,21 @@ async fn run_cli() -> Result<()> { api.lp_api().register_liquidity_refund_address(lra_address).await?; println!("Liquidity Refund address registered. 
Tx hash: {tx_hash}"); }, - Redeem { amount, eth_address, executor } => { - request_redemption(api, amount, eth_address, executor).await?; + Redeem { amount, eth_address, executor_address } => { + request_redemption(api, amount, eth_address, executor_address).await?; }, BindRedeemAddress { eth_address } => { bind_redeem_address(api.operator_api(), ð_address).await?; }, + BindExecutorAddress { eth_address } => { + bind_executor_address(api.operator_api(), ð_address).await?; + }, GetBoundRedeemAddress {} => { get_bound_redeem_address(api.query_api()).await?; }, + GetBoundExecutorAddress {} => { + get_bound_executor_address(api.query_api()).await?; + }, RegisterAccountRole { role } => { println!( "Submitting `register-account-role` with role: {role:?}. This cannot be reversed for your account.", @@ -134,22 +140,23 @@ async fn run_cli() -> Result<()> { async fn request_redemption( api: StateChainApi, amount: Option, - supplied_address: Option, - executor: Option, + supplied_redeem_address: Option, + supplied_executor_address: Option, ) -> Result<()> { - let supplied_address = if let Some(address) = supplied_address { + let account_id = api.state_chain_client.account_id(); + + // Check the bound redeem address for this account + let supplied_redeem_address = if let Some(address) = supplied_redeem_address { Some(EthereumAddress::from( clean_hex_address::<[u8; 20]>(&address).context("Invalid ETH address supplied")?, )) } else { None }; - - let account_id = api.state_chain_client.account_id(); - let bound_address = + let bound_redeem_address = api.query_api().get_bound_redeem_address(None, Some(account_id.clone())).await?; - let redeem_address = match (supplied_address, bound_address) { + let redeem_address = match (supplied_redeem_address, bound_redeem_address) { (Some(supplied_address), Some(bound_address)) => if supplied_address != bound_address { bail!("Supplied ETH address `{supplied_address:?}` does not match bound address for this account `{bound_address:?}`."); @@ 
-165,6 +172,35 @@ async fn request_redemption( bail!("No redeem address supplied and no bound redeem address found for your account {account_id}."), }; + // Check the bound executor address for this account + let supplied_executor_address = if let Some(address) = supplied_executor_address { + Some(EthereumAddress::from( + clean_hex_address::<[u8; 20]>(&address).context("Invalid ETH address supplied")?, + )) + } else { + None + }; + let bound_executor_address = api + .query_api() + .get_bound_executor_address(None, Some(account_id.clone())) + .await?; + + let executor_address = match (bound_executor_address, supplied_executor_address) { + (Some(bound_address), Some(supplied_address)) => + if bound_address != supplied_address { + bail!("Supplied executor address `{supplied_address:?}` does not match bound address for this account `{bound_address:?}`."); + } else { + Some(supplied_address) + }, + (Some(bound_address), None) => { + println!("Using bound executor address {bound_address}."); + Some(bound_address) + }, + (None, Some(executor)) => Some(executor), + (None, None) => None, + }; + + // Calculate the redemption amount let amount = match amount { Some(amount_float) => { let atomic_amount = (amount_float * 10_f64.powi(18)) as u128; @@ -186,7 +222,10 @@ async fn request_redemption( return Ok(()) } - let tx_hash = api.operator_api().request_redemption(amount, redeem_address, executor).await?; + let tx_hash = api + .operator_api() + .request_redemption(amount, redeem_address, executor_address) + .await?; println!( "Your redemption request has transaction hash: `{tx_hash:#x}`. View your redemption's progress on the funding app." 
@@ -209,7 +248,26 @@ async fn bind_redeem_address(api: Arc, eth_address: &st let tx_hash = api.bind_redeem_address(eth_address).await?; - println!("Account bound to address {eth_address}, transaction hash: `{tx_hash:#x}`."); + println!("Account bound to redeem address {eth_address}, transaction hash: `{tx_hash:#x}`."); + + Ok(()) +} + +async fn bind_executor_address(api: Arc, eth_address: &str) -> Result<()> { + let eth_address = EthereumAddress::from( + clean_hex_address::<[u8; 20]>(eth_address).context("Invalid ETH address supplied")?, + ); + + println!( + "Binding your account to an executor address is irreversible. You will only ever be able to execute registered redemptions with this address: {eth_address:?}.", + ); + if !confirm_submit() { + return Ok(()) + } + + let tx_hash = api.bind_executor_address(eth_address).await?; + + println!("Account bound to executor address {eth_address}, transaction hash: `{tx_hash:#x}`."); Ok(()) } @@ -224,6 +282,16 @@ async fn get_bound_redeem_address(api: QueryApi) -> Result<()> { Ok(()) } +async fn get_bound_executor_address(api: QueryApi) -> Result<()> { + if let Some(bound_address) = api.get_bound_executor_address(None, None).await? { + println!("Your account is bound to executor address: {bound_address:?}"); + } else { + println!("Your account is not bound to any executor address."); + } + + Ok(()) +} + fn confirm_submit() -> bool { use std::{io, io::*}; diff --git a/api/bin/chainflip-cli/src/settings.rs b/api/bin/chainflip-cli/src/settings.rs index 8f920a7b9a..7e8e628d47 100644 --- a/api/bin/chainflip-cli/src/settings.rs +++ b/api/bin/chainflip-cli/src/settings.rs @@ -109,17 +109,28 @@ pub enum CliCommand { )] eth_address: Option, #[clap( - help = "Optional executor. If specified, only this address will be able to execute the redemption." + help = "Optional executor address. If specified, only this address will be able to execute the redemption." 
)] - executor: Option, + executor_address: Option, }, - #[clap(about = "Restricts your account to only be able to redeem to the specified address")] + #[clap( + about = "Irreversible action that restricts your account to only be able to redeem to the specified address" + )] BindRedeemAddress { #[clap(help = "The Ethereum address you wish to bind your account to")] eth_address: String, }, + #[clap( + about = "Irreversible action that restricts your account to only be able to execute registered redemptions with the specified address" + )] + BindExecutorAddress { + #[clap(help = "The Ethereum address you wish to bind your account to")] + eth_address: String, + }, #[clap(about = "Shows the redeem address your account is bound to")] GetBoundRedeemAddress, + #[clap(about = "Shows the executor address your account is bound to")] + GetBoundExecutorAddress, #[clap( about = "Submit an extrinsic to request generation of a redemption certificate (redeeming all available FLIP)" )] diff --git a/api/lib/src/lib.rs b/api/lib/src/lib.rs index 7e90b35632..860c88414d 100644 --- a/api/lib/src/lib.rs +++ b/api/lib/src/lib.rs @@ -195,6 +195,18 @@ pub trait OperatorApi: SignedExtrinsicApi + RotateSessionKeysApi + AuctionPhaseA Ok(tx_hash) } + async fn bind_executor_address(&self, executor_address: EthereumAddress) -> Result { + let (tx_hash, ..) = self + .submit_signed_extrinsic(pallet_cf_funding::Call::bind_executor_address { + executor_address, + }) + .await + .until_finalized() + .await?; + + Ok(tx_hash) + } + async fn register_account_role(&self, role: AccountRole) -> Result { let call = match role { AccountRole::Validator => diff --git a/api/lib/src/queries.rs b/api/lib/src/queries.rs index e5e999c347..eacd1e5a6f 100644 --- a/api/lib/src/queries.rs +++ b/api/lib/src/queries.rs @@ -139,4 +139,22 @@ impl QueryApi { ) .await?) 
} + + pub async fn get_bound_executor_address( + &self, + block_hash: Option, + account_id: Option, + ) -> Result, anyhow::Error> { + let block_hash = + block_hash.unwrap_or_else(|| self.state_chain_client.latest_finalized_hash()); + let account_id = account_id.unwrap_or_else(|| self.state_chain_client.account_id()); + + Ok(self + .state_chain_client + .storage_map_entry::>( + block_hash, + &account_id, + ) + .await?) + } } diff --git a/bouncer/shared/fund_redeem.ts b/bouncer/shared/fund_redeem.ts index d764a964e8..58d23e3999 100644 --- a/bouncer/shared/fund_redeem.ts +++ b/bouncer/shared/fund_redeem.ts @@ -19,7 +19,7 @@ export async function testFundRedeem(seed: string) { // We fund to a specific SC address. await fundFlip(redeemSCAddress, amount.toString()); - // The ERC20 FLIP is sent back to an ETH address, and the registered claim can only be executed by that address. + // The ERC20 FLIP is sent back to an ETH address, and the registered redemption can only be executed by that address. await redeemFlip(seed, redeemEthAddress as HexString, (amount / 2).toString()); console.log('Observed RedemptionSettled event'); const newBalance = await observeBalanceIncrease('FLIP', redeemEthAddress, initBalance); diff --git a/state-chain/pallets/cf-funding/README.md b/state-chain/pallets/cf-funding/README.md index 871972b494..c03b865bc0 100644 --- a/state-chain/pallets/cf-funding/README.md +++ b/state-chain/pallets/cf-funding/README.md @@ -52,7 +52,7 @@ Validators who are actively bidding in an auction cannot redeem funds. This is t > > *The bond is 1,000 FLIP as before, and the account balance is 1,200 FLIP. When a new auction starts, all available funds are implicitly used for bidding, and so all 1,200 FLIP are restricted and cannot be redeemed until the conclusion of the auction.* -#### Address Binding +#### Redeem Address Binding Any account may be explicitly *bound* to a single redemption address. 
Henceforth, any redemption request from that account can *only redeem to this exact address*. @@ -66,6 +66,16 @@ Note, address binding is a one-off *irreversible* operation. > > *A liquid staking provider wants to allow users to pool their FLIP through a smart contract on Ethereum, to then fund a validator account. They bind their validator account to the smart contract address. This binding is permanent and irrevocable, so users can now rest assured that there is no way the pooled funds can be redeemed to any other address.* +#### Executor Address Binding + +Any account may be explicitly *bound* to a single executor address. Henceforth, any registered redemption from that account can *only be executed by this exact address*. + +Note, address binding is a one-off *irreversible* operation. + +> *Example:* +> +> *A service provider wants to integrate with Chainflip's funding/redeeming mechanism. To this end they want to ensure that they are in full control of when the redeemed funds are actually swept into the redemption account. In order to achieve this, they bind the executor address to a known Ethereum account. Without this it's technically possible for anyone to submit the `executeRedemption` call on their behalf.* + #### Restricted Balances Certain funding *sources* are considered restricted, such that funds originating from that source can only be redeemed back to whence they came. In order to enforce this, we track the amount of funds added from restricted addresses and ensure that the account always has enough funds to cover its obligations to these addresses. diff --git a/state-chain/pallets/cf-funding/src/lib.rs b/state-chain/pallets/cf-funding/src/lib.rs index daa20cfae9..a2898051dd 100644 --- a/state-chain/pallets/cf-funding/src/lib.rs +++ b/state-chain/pallets/cf-funding/src/lib.rs @@ -363,16 +363,14 @@ pub mod pallet { origin: OriginFor, amount: RedemptionAmount>, address: EthereumAddress, - // Only this address can execute the claim. 
+ // Only this address can execute the redemption. executor: Option, ) -> DispatchResultWithPostInfo { let account_id = ensure_signed(origin)?; if let Some(executor_addr) = BoundExecutorAddress::::get(&account_id) { - ensure!( - executor_addr == executor.unwrap_or_default(), - Error::::ExecutorBindingRestrictionViolated - ); + let executor = executor.ok_or(Error::::ExecutorBindingRestrictionViolated)?; + ensure!(executor_addr == executor, Error::::ExecutorBindingRestrictionViolated); } ensure!(T::SafeMode::get().redeem_enabled, Error::::RedeemDisabled); From d69ebb6a868ac41a5426990d807bfd8c42456d94 Mon Sep 17 00:00:00 2001 From: Maxim Shishmarev Date: Tue, 26 Sep 2023 16:09:42 +1000 Subject: [PATCH 12/15] Fix: ensure existing p2p connection is removed before reconnecting (#4045) * refactor: simplify initial connection to peers `peer_infos` has the most up-to-date info on current nodes, and can't contain duplicates, so we use that instead of maintaining a separate list of peers to connect to. * fix: allow overwriting existing p2p connection * chore: more accurate comment * refactor: send_and_receive helper in p2p tests * chore: use different ports in p2p tests --- engine/src/p2p/core.rs | 36 +++++++++++---- engine/src/p2p/core/tests.rs | 86 +++++++++++++++++++++++++----------- 2 files changed, 88 insertions(+), 34 deletions(-) diff --git a/engine/src/p2p/core.rs b/engine/src/p2p/core.rs index 720f3ee7e8..3f6eb774fa 100644 --- a/engine/src/p2p/core.rs +++ b/engine/src/p2p/core.rs @@ -93,8 +93,7 @@ impl std::fmt::Display for PeerInfo { enum RegistrationStatus { /// The node is not yet known to the network (its peer info /// may not be known to the network yet) - /// (Stores future peers to connect to when then node is registered) - Pending(Vec), + Pending, /// The node is registered, i.e. 
its peer info has been /// recorded/updated Registered, @@ -209,6 +208,11 @@ struct P2PContext { /// A handle to the authenticator thread that can be used to make changes to the /// list of allowed peers authenticator: Arc, + /// Contains all existing ZMQ sockets for "client" connections. Note that ZMQ socket + /// exists even when there is no internal TCP connection (e.g. before the connection + /// is established for the first time, or when ZMQ it is reconnecting). Also, when + /// our own (independent from ZMQ) reconnection mechanism kicks in, the entry is removed + /// (because we don't want ZMQ's socket behaviour). /// NOTE: The mapping is from AccountId because we want to optimise for message /// sending, which uses AccountId active_connections: ActiveConnectionWrapper, @@ -277,7 +281,7 @@ pub(super) fn start( incoming_message_sender, own_peer_info_sender, our_account_id, - status: RegistrationStatus::Pending(vec![]), + status: RegistrationStatus::Pending, }; debug!("Registering peer info for {} peers", current_peers.len()); @@ -437,6 +441,14 @@ impl P2PContext { fn reconnect_to_peer(&mut self, account_id: &AccountId) { if let Some(peer_info) = self.peer_infos.get(account_id) { info!("Reconnecting to peer: {}", peer_info.account_id); + + // It is possible that while we were waiting to reconnect, + // we received a peer info update and created a new "connection". + // This connection might be "healthy", but it is safer/easier to + // remove it and proceed with reconnecting. + if self.active_connections.remove(account_id).is_some() { + debug!("Reconnecting to a peer that's already connected: {}. Existing connection was removed.", account_id); + } self.connect_to_peer(peer_info.clone()); } else { error!("Failed to reconnect to peer {account_id}. 
(Peer info not found.)"); @@ -452,7 +464,13 @@ impl P2PContext { let connected_socket = socket.connect(peer); - assert!(self.active_connections.insert(account_id, connected_socket).is_none()); + if let Some(old_socket) = self.active_connections.insert(account_id, connected_socket) { + // This should not happen because we always remove existing connection/socket + // prior to connecting, but even if it does, it should be OK to replace the + // connection (this doesn't break any invariants and the new peer info is + // likely to be more up-to-date). + warn!("Replacing existing ZMQ socket: {:?}", old_socket.peer()); + } } fn handle_own_registration(&mut self, own_info: PeerInfo) { @@ -460,8 +478,8 @@ impl P2PContext { self.own_peer_info_sender.send(own_info).unwrap(); - if let RegistrationStatus::Pending(peers) = &mut self.status { - let peers = std::mem::take(peers); + if let RegistrationStatus::Pending = &mut self.status { + let peers: Vec<_> = self.peer_infos.values().cloned().collect(); // Connect to all outstanding peers for peer in peers { self.connect_to_peer(peer) @@ -494,10 +512,10 @@ impl P2PContext { self.x25519_to_account_id.insert(peer.pubkey, peer.account_id.clone()); match &mut self.status { - RegistrationStatus::Pending(peers) => { - // Not ready to start connecting to peers yet + RegistrationStatus::Pending => { + // We will connect to all peers in `self.peer_infos` once we receive our own + // registration info!("Delaying connecting to {}", peer.account_id); - peers.push(peer); }, RegistrationStatus::Registered => { self.connect_to_peer(peer); diff --git a/engine/src/p2p/core/tests.rs b/engine/src/p2p/core/tests.rs index 89bf7d4b10..756ec560f2 100644 --- a/engine/src/p2p/core/tests.rs +++ b/engine/src/p2p/core/tests.rs @@ -5,7 +5,7 @@ use state_chain_runtime::AccountId; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; use tracing::{info_span, Instrument}; use utilities::{ - testing::{expect_recv_with_custom_timeout, 
expect_recv_with_timeout}, + testing::{expect_recv_with_timeout, recv_with_custom_timeout}, Port, }; @@ -16,7 +16,15 @@ fn create_node_info(id: AccountId, node_key: &ed25519_dalek::Keypair, port: Port PeerInfo::new(id, pubkey, ip, port) } +use std::time::Duration; + +/// This has to be large enough to account for the possibility of +/// the initial handshake failing and the node having to reconnect +/// after `RECONNECT_INTERVAL` +const MAX_CONNECTION_DELAY: Duration = Duration::from_millis(500); + struct Node { + account_id: AccountId, msg_sender: UnboundedSender, peer_update_sender: UnboundedSender, _own_peer_info_receiver: UnboundedReceiver, @@ -36,11 +44,12 @@ fn spawn_node( let key = P2PKey::new(secret); let (msg_sender, peer_update_sender, msg_receiver, own_peer_info_receiver, fut) = - super::start(&key, our_peer_info.port, peer_infos.to_vec(), account_id); + super::start(&key, our_peer_info.port, peer_infos.to_vec(), account_id.clone()); tokio::spawn(fut.instrument(info_span!("node", idx = idx))); Node { + account_id, msg_sender, peer_update_sender, _own_peer_info_receiver: own_peer_info_receiver, @@ -120,12 +129,25 @@ async fn connect_two_nodes() { let _ = expect_recv_with_timeout(&mut node2.msg_receiver).await; } -#[tokio::test] -async fn can_connect_after_pubkey_change() { - use std::time::Duration; +async fn send_and_receive_message(from: &Node, to: &mut Node) -> Option<(AccountId, Vec)> { + println!( + "[{:?}] Sending from {:?} to {:?}", + std::time::Instant::now(), + from.account_id, + to.account_id, + ); + from.msg_sender + .send(OutgoingMultisigStageMessages::Private(vec![( + to.account_id.clone(), + b"test".to_vec(), + )])) + .unwrap(); - const MAX_CONNECTION_DELAY: Duration = Duration::from_millis(200); + recv_with_custom_timeout(&mut to.msg_receiver, MAX_CONNECTION_DELAY).await +} +#[tokio::test] +async fn can_connect_after_pubkey_change() { let node_key1 = create_keypair(); let node_key2 = create_keypair(); @@ -135,18 +157,11 @@ async fn 
can_connect_after_pubkey_change() { let pi2 = create_node_info(AccountId::new([2; 32]), &node_key2, 8090); let mut node1 = spawn_node(&node_key1, 0, pi1.clone(), &[pi1.clone(), pi2.clone()]); - let node2 = spawn_node(&node_key2, 1, pi2.clone(), &[pi1.clone(), pi2.clone()]); + let mut node2 = spawn_node(&node_key2, 1, pi2.clone(), &[pi1.clone(), pi2.clone()]); // Check that node 2 can communicate with node 1: - node2 - .msg_sender - .send(OutgoingMultisigStageMessages::Private(vec![( - pi1.account_id.clone(), - b"test".to_vec(), - )])) - .unwrap(); - - let _ = expect_recv_with_custom_timeout(&mut node1.msg_receiver, MAX_CONNECTION_DELAY).await; + send_and_receive_message(&node2, &mut node1).await.unwrap(); + send_and_receive_message(&node1, &mut node2).await.unwrap(); // Node 2 disconnects: drop(node2); @@ -154,19 +169,40 @@ async fn can_connect_after_pubkey_change() { // Node 2 connects with a different key: let node_key2b = create_keypair(); let pi2 = create_node_info(AccountId::new([2; 32]), &node_key2b, 8091); - let node2b = spawn_node(&node_key2b, 1, pi2.clone(), &[pi1.clone(), pi2.clone()]); + let mut node2b = spawn_node(&node_key2b, 1, pi2.clone(), &[pi1.clone(), pi2.clone()]); // Node 1 learn about Node 2's new key: node1.peer_update_sender.send(PeerUpdate::Registered(pi2.clone())).unwrap(); // Node 2 should be able to send messages again: - node2b - .msg_sender - .send(OutgoingMultisigStageMessages::Private(vec![( - pi1.account_id.clone(), - b"test".to_vec(), - )])) - .unwrap(); + send_and_receive_message(&node2b, &mut node1).await.unwrap(); + send_and_receive_message(&node1, &mut node2b).await.unwrap(); +} + +/// Test the behaviour around receiving own registration: at first, if our node +/// is not registered, we delay connecting to other nodes; once we receive our +/// own registration, we connect to other registered nodes. 
+#[tokio::test] +async fn connects_after_registration() { + let node_key1 = create_keypair(); + let node_key2 = create_keypair(); + + let pi1 = create_node_info(AccountId::new([1; 32]), &node_key1, 8092); + let pi2 = create_node_info(AccountId::new([2; 32]), &node_key2, 8093); + + // Node 1 doesn't get its own peer info at first and will wait for registration + let node1 = spawn_node(&node_key1, 0, pi1.clone(), &[pi2.clone()]); + let mut node2 = spawn_node(&node_key2, 1, pi2.clone(), &[pi1.clone(), pi2.clone()]); + + // For sanity, check that node 1 can't yet communicate with node 2: + assert!(send_and_receive_message(&node1, &mut node2).await.is_none()); + + // Update node 1 with its own peer info + node1.peer_update_sender.send(PeerUpdate::Registered(pi1.clone())).unwrap(); + + // Allow some time for the above command to propagate through the channel + tokio::time::sleep(std::time::Duration::from_millis(100)).await; - let _ = expect_recv_with_custom_timeout(&mut node1.msg_receiver, MAX_CONNECTION_DELAY).await; + // It should now be able to communicate with node 2: + assert!(send_and_receive_message(&node1, &mut node2).await.is_some()); } From 26ccf9c7f78669f268aaa46926cebdb37511b54c Mon Sep 17 00:00:00 2001 From: kylezs Date: Wed, 27 Sep 2023 01:16:30 +1000 Subject: [PATCH 13/15] refactor: use async closures for SC specific witnessing functionality (#4049) * refactor: pull out extrinsic submission from erc20 witnessing * refactor: pull out eth deposit submission * refactor: factor out finalize extrinsic code * refactor: remove unnecessary macro * factor out extrinsic code for BTC * refactor: factor out extrinsic submission for DOT * refactor: use it for the rest of the witnessers for consistency * chore: remove unnecessary clippy allow --- engine/src/witness/btc.rs | 73 ++++----- engine/src/witness/dot.rs | 89 +++++------ engine/src/witness/eth.rs | 28 ++-- engine/src/witness/eth/erc20_deposits.rs | 47 +++--- engine/src/witness/eth/ethereum_deposits.rs | 73 
+++++---- engine/src/witness/eth/key_manager.rs | 147 +++++++----------- engine/src/witness/eth/state_chain_gateway.rs | 105 +++++-------- engine/src/witness/eth/vault.rs | 32 ++-- engine/src/witness/start.rs | 18 +++ 9 files changed, 278 insertions(+), 334 deletions(-) diff --git a/engine/src/witness/btc.rs b/engine/src/witness/btc.rs index fa0cfbcbe4..d78511eca5 100644 --- a/engine/src/witness/btc.rs +++ b/engine/src/witness/btc.rs @@ -8,7 +8,8 @@ use cf_chains::{ btc::{deposit_address::DepositAddress, ScriptPubkey, UtxoId, CHANGE_ADDRESS_SALT}, Bitcoin, }; -use cf_primitives::chains::assets::btc; +use cf_primitives::{chains::assets::btc, EpochIndex}; +use futures_core::Future; use pallet_cf_ingress_egress::{DepositChannelDetails, DepositWitness}; use secp256k1::hashes::Hash; use state_chain_runtime::BitcoinInstance; @@ -29,9 +30,10 @@ use anyhow::Result; const SAFETY_MARGIN: usize = 6; -pub async fn start( +pub async fn start( scope: &Scope<'_, anyhow::Error>, btc_client: BtcRetryRpcClient, + process_call: ProcessCall, state_chain_client: Arc, state_chain_stream: StateChainStream, epoch_source: EpochSourceBuilder<'_, '_, StateChainClient, (), ()>, @@ -40,6 +42,12 @@ pub async fn start( where StateChainClient: StorageApi + SignedExtrinsicApi + 'static + Send + Sync, StateChainStream: StateChainStreamApi + Clone + 'static + Send + Sync, + ProcessCall: Fn(state_chain_runtime::RuntimeCall, EpochIndex) -> ProcessingFut + + Send + + Sync + + Clone + + 'static, + ProcessingFut: Future + Send + 'static, { let btc_source = BtcSource::new(btc_client.clone()).shared(scope); @@ -68,9 +76,9 @@ where .deposit_addresses(scope, state_chain_stream.clone(), state_chain_client.clone()) .await .then({ - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); move |epoch, header| { - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); async move { // TODO: Make addresses a Map of some kind? 
let (((), txs), addresses) = header.data; @@ -81,18 +89,12 @@ where // Submit all deposit witnesses for the block. if !deposit_witnesses.is_empty() { - state_chain_client - .finalize_signed_extrinsic(pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new( - pallet_cf_ingress_egress::Call::<_, BitcoinInstance>::process_deposits { - deposit_witnesses, - block_height: header.index, - } - .into(), - ), - epoch_index: epoch.index, - }) - .await; + process_call( + pallet_cf_ingress_egress::Call::<_, BitcoinInstance>::process_deposits { + deposit_witnesses, + block_height: header.index, + }.into(), + epoch.index).await; } txs } @@ -101,30 +103,29 @@ where .egress_items(scope, state_chain_stream, state_chain_client.clone()) .await .then(move |epoch, header| { - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); async move { let (txs, monitored_tx_hashes) = header.data; for tx_hash in success_witnesses(&monitored_tx_hashes, &txs) { - state_chain_client - .finalize_signed_extrinsic(pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new(state_chain_runtime::RuntimeCall::BitcoinBroadcaster( - pallet_cf_broadcast::Call::transaction_succeeded { - tx_out_id: tx_hash, - signer_id: DepositAddress::new( - epoch.info.0.public_key.current, - CHANGE_ADDRESS_SALT, - ) - .script_pubkey(), - // TODO: Ideally we can submit an empty type here. For - // Bitcoin and some other chains fee tracking is not - // necessary. PRO-370. - tx_fee: Default::default(), - }, - )), - epoch_index: epoch.index, - }) - .await; + process_call( + state_chain_runtime::RuntimeCall::BitcoinBroadcaster( + pallet_cf_broadcast::Call::transaction_succeeded { + tx_out_id: tx_hash, + signer_id: DepositAddress::new( + epoch.info.0.public_key.current, + CHANGE_ADDRESS_SALT, + ) + .script_pubkey(), + // TODO: Ideally we can submit an empty type here. For + // Bitcoin and some other chains fee tracking is not + // necessary. PRO-370. 
+ tx_fee: Default::default(), + }, + ), + epoch.index, + ) + .await; } } }) diff --git a/engine/src/witness/dot.rs b/engine/src/witness/dot.rs index 2369431b21..16d06ac275 100644 --- a/engine/src/witness/dot.rs +++ b/engine/src/witness/dot.rs @@ -5,7 +5,8 @@ use cf_chains::{ dot::{PolkadotAccountId, PolkadotBalance, PolkadotExtrinsicIndex, PolkadotUncheckedExtrinsic}, Polkadot, }; -use cf_primitives::{chains::assets, PolkadotBlockNumber, TxId}; +use cf_primitives::{chains::assets, EpochIndex, PolkadotBlockNumber, TxId}; +use futures_core::Future; use pallet_cf_ingress_egress::{DepositChannelDetails, DepositWitness}; use state_chain_runtime::PolkadotInstance; use subxt::{ @@ -81,9 +82,10 @@ fn filter_map_events( } } -pub async fn start( +pub async fn start( scope: &Scope<'_, anyhow::Error>, dot_client: DotRetryRpcClient, + process_call: ProcessCall, state_chain_client: Arc, state_chain_stream: StateChainStream, epoch_source: EpochSourceBuilder<'_, '_, StateChainClient, (), ()>, @@ -92,6 +94,12 @@ pub async fn start( where StateChainClient: StorageApi + SignedExtrinsicApi + 'static + Send + Sync, StateChainStream: StateChainStreamApi + Clone, + ProcessCall: Fn(state_chain_runtime::RuntimeCall, EpochIndex) -> ProcessingFut + + Send + + Sync + + Clone + + 'static, + ProcessingFut: Future + Send + 'static, { DotUnfinalisedSource::new(dot_client.clone()) .then(|header| async move { header.data.iter().filter_map(filter_map_events).collect() }) @@ -127,9 +135,9 @@ where .await // Deposit witnessing .then({ - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); move |epoch, header| { - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); async move { let (events, addresses_and_details) = header.data; @@ -139,18 +147,14 @@ where deposit_witnesses(header.index, addresses, &events, &epoch.info.1); if !deposit_witnesses.is_empty() { - state_chain_client - 
.finalize_signed_extrinsic(pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new( - pallet_cf_ingress_egress::Call::<_, PolkadotInstance>::process_deposits { - deposit_witnesses, - block_height: header.index, - } - .into(), - ), - epoch_index: epoch.index, - }) - .await; + process_call( + pallet_cf_ingress_egress::Call::<_, PolkadotInstance>::process_deposits { + deposit_witnesses, + block_height: header.index, + } + .into(), + epoch.index + ).await } (events, broadcast_indices) @@ -159,9 +163,9 @@ where }) // Proxy added witnessing .then({ - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); move |epoch, header| { - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); async move { let (events, mut broadcast_indices) = header.data; @@ -169,12 +173,7 @@ where broadcast_indices.append(&mut proxy_added_broadcasts); for call in vault_key_rotated_calls { - state_chain_client - .finalize_signed_extrinsic(pallet_cf_witnesser::Call::witness_at_epoch { - call, - epoch_index: epoch.index, - }) - .await; + process_call(call, epoch.index).await; } (events, broadcast_indices) @@ -185,10 +184,10 @@ where .egress_items(scope, state_chain_stream.clone(), state_chain_client.clone()) .await .then({ - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); let dot_client = dot_client.clone(); move |epoch, header| { - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); let dot_client = dot_client.clone(); async move { let ((events, broadcast_indices), monitored_egress_ids) = header.data; @@ -206,25 +205,18 @@ where if let Some(signature) = unchecked.signature() { if monitored_egress_ids.contains(&signature) { tracing::info!("Witnessing transaction_succeeded. 
signature: {signature:?}"); - state_chain_client - .finalize_signed_extrinsic( - pallet_cf_witnesser::Call::witness_at_epoch { - call: - Box::new( - pallet_cf_broadcast::Call::< - _, - PolkadotInstance, - >::transaction_succeeded { - tx_out_id: signature, - signer_id: epoch.info.1, - tx_fee, - } - .into(), - ), - epoch_index: epoch.index, - }, - ) - .await; + process_call( + pallet_cf_broadcast::Call::< + _, + PolkadotInstance, + >::transaction_succeeded { + tx_out_id: signature, + signer_id: epoch.info.1, + tx_fee, + } + .into(), + epoch.index, + ).await; } } } else { @@ -312,12 +304,11 @@ fn transaction_fee_paids( indices_with_fees } -#[allow(clippy::vec_box)] fn proxy_addeds( block_number: PolkadotBlockNumber, events: &Vec<(Phase, EventWrapper)>, our_vault: &PolkadotAccountId, -) -> (Vec>, BTreeSet) { +) -> (Vec, BTreeSet) { let mut vault_key_rotated_calls = vec![]; let mut extrinsic_indices = BTreeSet::new(); for (phase, wrapped_event) in events { @@ -329,13 +320,13 @@ fn proxy_addeds( tracing::info!("Witnessing ProxyAdded. 
new delegatee: {delegatee:?} at block number {block_number} and extrinsic_index; {extrinsic_index}"); - vault_key_rotated_calls.push(Box::new( + vault_key_rotated_calls.push( pallet_cf_vaults::Call::<_, PolkadotInstance>::vault_key_rotated { block_number, tx_id: TxId { block_number, extrinsic_index }, } .into(), - )); + ); extrinsic_indices.insert(extrinsic_index); } diff --git a/engine/src/witness/eth.rs b/engine/src/witness/eth.rs index 930cf8646a..5934cc69a4 100644 --- a/engine/src/witness/eth.rs +++ b/engine/src/witness/eth.rs @@ -9,7 +9,8 @@ pub mod vault; use std::{collections::HashMap, sync::Arc}; -use cf_primitives::chains::assets::eth; +use cf_primitives::{chains::assets::eth, EpochIndex}; +use futures_core::Future; use sp_core::H160; use utilities::task_scope::Scope; @@ -33,9 +34,10 @@ use anyhow::{Context, Result}; const SAFETY_MARGIN: usize = 7; -pub async fn start( +pub async fn start( scope: &Scope<'_, anyhow::Error>, eth_client: EthersRetryRpcClient, + process_call: ProcessCall, state_chain_client: Arc, state_chain_stream: StateChainStream, epoch_source: EpochSourceBuilder<'_, '_, StateChainClient, (), ()>, @@ -44,6 +46,12 @@ pub async fn start( where StateChainClient: StorageApi + ChainApi + SignedExtrinsicApi + 'static + Send + Sync, StateChainStream: StateChainStreamApi + Clone, + ProcessCall: Fn(state_chain_runtime::RuntimeCall, EpochIndex) -> ProcessingFut + + Send + + Sync + + Clone + + 'static, + ProcessingFut: Future + Send + 'static, { let state_chain_gateway_address = state_chain_client .storage_value::>( @@ -111,7 +119,7 @@ where eth_safe_vault_source .clone() - .key_manager_witnessing(state_chain_client.clone(), eth_client.clone(), key_manager_address) + .key_manager_witnessing(process_call.clone(), eth_client.clone(), key_manager_address) .continuous("KeyManager".to_string(), db.clone()) .logging("KeyManager") .spawn(scope); @@ -119,7 +127,7 @@ where eth_safe_vault_source .clone() .state_chain_gateway_witnessing( - 
state_chain_client.clone(), + process_call.clone(), eth_client.clone(), state_chain_gateway_address, ) @@ -131,8 +139,8 @@ where .clone() .deposit_addresses(scope, state_chain_stream.clone(), state_chain_client.clone()) .await - .erc20_deposits::<_, _, UsdcEvents>( - state_chain_client.clone(), + .erc20_deposits::<_, _, _, UsdcEvents>( + process_call.clone(), eth_client.clone(), cf_primitives::chains::assets::eth::Asset::Usdc, usdc_contract_address, @@ -146,8 +154,8 @@ where .clone() .deposit_addresses(scope, state_chain_stream.clone(), state_chain_client.clone()) .await - .erc20_deposits::<_, _, FlipEvents>( - state_chain_client.clone(), + .erc20_deposits::<_, _, _, FlipEvents>( + process_call.clone(), eth_client.clone(), cf_primitives::chains::assets::eth::Asset::Flip, flip_contract_address, @@ -162,7 +170,7 @@ where .deposit_addresses(scope, state_chain_stream.clone(), state_chain_client.clone()) .await .ethereum_deposits( - state_chain_client.clone(), + process_call.clone(), eth_client.clone(), eth::Asset::Eth, address_checker_address, @@ -175,7 +183,7 @@ where eth_safe_vault_source .vault_witnessing( - state_chain_client.clone(), + process_call, eth_client.clone(), vault_address, cf_primitives::Asset::Eth, diff --git a/engine/src/witness/eth/erc20_deposits.rs b/engine/src/witness/eth/erc20_deposits.rs index 77ff9a4afa..3d488815dd 100644 --- a/engine/src/witness/eth/erc20_deposits.rs +++ b/engine/src/witness/eth/erc20_deposits.rs @@ -1,15 +1,14 @@ -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; +use cf_primitives::EpochIndex; use ethers::types::{Bloom, H160}; +use futures_core::Future; use pallet_cf_ingress_egress::DepositWitness; use sp_core::{H256, U256}; use state_chain_runtime::PalletInstanceAlias; use crate::{ eth::retry_rpc::EthersRetryRpcApi, - state_chain_observer::client::{ - chain_api::ChainApi, extrinsic_api::signed::SignedExtrinsicApi, storage_api::StorageApi, - }, witness::common::{ 
chunked_chain_source::chunked_by_vault::deposit_addresses::Addresses, RuntimeCallHasChain, RuntimeHasChain, @@ -62,9 +61,9 @@ define_erc20!( define_erc20!(usdc, Usdc, UsdcEvents, "$CF_ETH_CONTRACT_ABI_ROOT/IUSDC.json"); impl ChunkedByVaultBuilder { - pub async fn erc20_deposits( + pub async fn erc20_deposits( self, - state_chain_client: Arc, + process_call: ProcessCall, eth_rpc: EthRetryRpcClient, asset: ::ChainAsset, asset_contract_address: H160, @@ -73,7 +72,12 @@ impl ChunkedByVaultBuilder { Inner::Chain: cf_chains::Chain, Inner: ChunkedByVault)>, - StateChainClient: SignedExtrinsicApi + StorageApi + ChainApi + Send + Sync + 'static, + ProcessCall: Fn(state_chain_runtime::RuntimeCall, EpochIndex) -> ProcessingFut + + Send + + Sync + + Clone + + 'static, + ProcessingFut: Future + Send + 'static, EthRetryRpcClient: EthersRetryRpcApi + Send + Sync + Clone, Events: std::fmt::Debug + ethers::contract::EthLogDecode @@ -86,7 +90,7 @@ impl ChunkedByVaultBuilder { RuntimeCallHasChain, { Ok(self.then(move |epoch, header| { - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); let eth_rpc = eth_rpc.clone(); async move { let addresses = header @@ -126,21 +130,18 @@ impl ChunkedByVaultBuilder { .collect::>(); if !deposit_witnesses.is_empty() { - state_chain_client - .finalize_signed_extrinsic(pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new( - pallet_cf_ingress_egress::Call::< - _, - ::Instance, - >::process_deposits { - deposit_witnesses, - block_height: header.index, - } - .into(), - ), - epoch_index: epoch.index, - }) - .await; + process_call( + pallet_cf_ingress_egress::Call::< + _, + ::Instance, + >::process_deposits { + deposit_witnesses, + block_height: header.index, + } + .into(), + epoch.index, + ) + .await; } Ok::<(), anyhow::Error>(()) diff --git a/engine/src/witness/eth/ethereum_deposits.rs b/engine/src/witness/eth/ethereum_deposits.rs index 741fed9607..36407e0abf 100644 --- 
a/engine/src/witness/eth/ethereum_deposits.rs +++ b/engine/src/witness/eth/ethereum_deposits.rs @@ -1,15 +1,13 @@ use crate::{ eth::retry_rpc::address_checker::*, - state_chain_observer::client::{ - chain_api::ChainApi, extrinsic_api::signed::SignedExtrinsicApi, storage_api::StorageApi, - }, witness::common::{RuntimeCallHasChain, RuntimeHasChain}, }; use anyhow::ensure; +use cf_primitives::EpochIndex; use ethers::types::Bloom; +use futures_core::Future; use sp_core::H256; use state_chain_runtime::PalletInstanceAlias; -use std::sync::Arc; use crate::witness::{ common::chunked_chain_source::chunked_by_vault::deposit_addresses::Addresses, @@ -40,9 +38,9 @@ impl ChunkedByVaultBuilder { /// standard transfers since the `to` field would not be set /// We do *not* officially support ETH deposited using Ethereum/Solidity's self-destruct. /// See [below](`eth_ingresses_at_block`) for more details. - pub async fn ethereum_deposits( + pub async fn ethereum_deposits( self, - state_chain_client: Arc, + process_call: ProcessCall, eth_rpc: EthRetryRpcClient, native_asset: ::ChainAsset, address_checker_address: H160, @@ -52,7 +50,12 @@ impl ChunkedByVaultBuilder { Inner::Chain: cf_chains::Chain, Inner: ChunkedByVault)>, - StateChainClient: SignedExtrinsicApi + StorageApi + ChainApi + Send + Sync + 'static, + ProcessCall: Fn(state_chain_runtime::RuntimeCall, EpochIndex) -> ProcessingFut + + Send + + Sync + + Clone + + 'static, + ProcessingFut: Future + Send + 'static, EthRetryRpcClient: EthersRetryRpcApi + AddressCheckerRetryRpcApi + Send + Sync + Clone, state_chain_runtime::Runtime: RuntimeHasChain, state_chain_runtime::RuntimeCall: @@ -60,7 +63,7 @@ impl ChunkedByVaultBuilder { { self.then(move |epoch, header| { let eth_rpc = eth_rpc.clone(); - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); async move { let (bloom, deposit_channels) = header.data; @@ -104,40 +107,34 @@ impl ChunkedByVaultBuilder { )?; if !ingresses.is_empty() { - 
state_chain_client - .finalize_signed_extrinsic( - pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new( - pallet_cf_ingress_egress::Call::< - _, - ::Instance, - >::process_deposits { - deposit_witnesses: ingresses - .into_iter() - .map(|(to_addr, value)| { - pallet_cf_ingress_egress::DepositWitness { - deposit_address: to_addr, - asset: native_asset, - amount: - value - .try_into() - .expect("Ingress witness transfer value should fit u128"), - deposit_details: (), - } - }) - .collect(), - block_height: header.index, + process_call( + pallet_cf_ingress_egress::Call::< + _, + ::Instance, + >::process_deposits { + deposit_witnesses: ingresses + .into_iter() + .map(|(to_addr, value)| { + pallet_cf_ingress_egress::DepositWitness { + deposit_address: to_addr, + asset: native_asset, + amount: + value + .try_into() + .expect("Ingress witness transfer value should fit u128"), + deposit_details: (), } - .into(), - ), - epoch_index: epoch.index, - }, - ) - .await; + }) + .collect(), + block_height: header.index, + } + .into(), + epoch.index, + ) + .await; } } } - Ok::<_, anyhow::Error>(()) } }) diff --git a/engine/src/witness/eth/key_manager.rs b/engine/src/witness/eth/key_manager.rs index d5356cfea4..6e06576bad 100644 --- a/engine/src/witness/eth/key_manager.rs +++ b/engine/src/witness/eth/key_manager.rs @@ -1,10 +1,10 @@ -use std::sync::Arc; - use cf_chains::evm::{EvmCrypto, SchnorrVerificationComponents, TransactionFee}; +use cf_primitives::EpochIndex; use ethers::{ prelude::abigen, types::{Bloom, TransactionReceipt}, }; +use futures_core::Future; use sp_core::{H160, H256}; use state_chain_runtime::PalletInstanceAlias; use tracing::{info, trace}; @@ -18,7 +18,6 @@ use super::{ }; use crate::{ eth::retry_rpc::EthersRetryRpcApi, - state_chain_observer::client::extrinsic_api::signed::SignedExtrinsicApi, witness::common::{RuntimeCallHasChain, RuntimeHasChain}, }; use num_traits::Zero; @@ -45,11 +44,12 @@ use anyhow::Result; impl ChunkedByVaultBuilder { pub fn 
key_manager_witnessing< - StateChainClient, + ProcessCall, + ProcessingFut, EthRpcClient: EthersRetryRpcApi + ChainClient + Clone, >( self, - state_chain_client: Arc, + process_call: ProcessCall, eth_rpc: EthRpcClient, contract_address: H160, ) -> ChunkedByVaultBuilder @@ -61,13 +61,18 @@ impl ChunkedByVaultBuilder { ChainAccount = H160, TransactionFee = TransactionFee, >, - StateChainClient: SignedExtrinsicApi + Send + Sync + 'static, + ProcessCall: Fn(state_chain_runtime::RuntimeCall, EpochIndex) -> ProcessingFut + + Send + + Sync + + Clone + + 'static, + ProcessingFut: Future + Send + 'static, state_chain_runtime::Runtime: RuntimeHasChain, state_chain_runtime::RuntimeCall: RuntimeCallHasChain, { self.then::, _, _>(move |epoch, header| { - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); let eth_rpc = eth_rpc.clone(); async move { for event in @@ -75,52 +80,29 @@ impl ChunkedByVaultBuilder { .await? { info!("Handling event: {event}"); - match event.event_parameters { - KeyManagerEvents::AggKeySetByAggKeyFilter(_) => { - state_chain_client - .finalize_signed_extrinsic( - pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new( - pallet_cf_vaults::Call::< - _, - ::Instance, - >::vault_key_rotated { - block_number: header.index, - tx_id: event.tx_hash, - } - .into(), - ), - epoch_index: epoch.index, - }, - ) - .await; - }, + let call: state_chain_runtime::RuntimeCall = match event.event_parameters { + KeyManagerEvents::AggKeySetByAggKeyFilter(_) => pallet_cf_vaults::Call::< + _, + ::Instance, + >::vault_key_rotated { + block_number: header.index, + tx_id: event.tx_hash, + } + .into(), KeyManagerEvents::AggKeySetByGovKeyFilter(AggKeySetByGovKeyFilter { new_agg_key, .. 
- }) => { - state_chain_client - .finalize_signed_extrinsic( - pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new( - pallet_cf_vaults::Call::< - _, - ::Instance, - >::vault_key_rotated_externally { - new_public_key: - cf_chains::evm::AggKey::from_pubkey_compressed( - new_agg_key.serialize(), - ), - block_number: header.index, - tx_id: event.tx_hash, - } - .into(), - ), - epoch_index: epoch.index, - }, - ) - .await; - }, + }) => pallet_cf_vaults::Call::< + _, + ::Instance, + >::vault_key_rotated_externally { + new_public_key: cf_chains::evm::AggKey::from_pubkey_compressed( + new_agg_key.serialize(), + ), + block_number: header.index, + tx_id: event.tx_hash, + } + .into(), KeyManagerEvents::SignatureAcceptedFilter(SignatureAcceptedFilter { sig_data, .. @@ -145,54 +127,31 @@ impl ChunkedByVaultBuilder { })? .try_into() .map_err(anyhow::Error::msg)?; - state_chain_client - .finalize_signed_extrinsic( - pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new( - pallet_cf_broadcast::Call::< - _, - ::Instance, - >::transaction_succeeded { - tx_out_id: SchnorrVerificationComponents { - s: sig_data.sig.into(), - k_times_g_address: sig_data - .k_times_g_address - .into(), - }, - signer_id: from, - tx_fee: TransactionFee { - effective_gas_price, - gas_used, - }, - } - .into(), - ), - epoch_index: epoch.index, - }, - ) - .await; + pallet_cf_broadcast::Call::< + _, + ::Instance, + >::transaction_succeeded { + tx_out_id: SchnorrVerificationComponents { + s: sig_data.sig.into(), + k_times_g_address: sig_data.k_times_g_address.into(), + }, + signer_id: from, + tx_fee: TransactionFee { effective_gas_price, gas_used }, + } + .into() }, KeyManagerEvents::GovernanceActionFilter(GovernanceActionFilter { message, - }) => { - state_chain_client - .finalize_signed_extrinsic( - pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new( - pallet_cf_governance::Call::set_whitelisted_call_hash { - call_hash: message, - } - .into(), - ), - epoch_index: epoch.index, - }, 
- ) - .await; - }, + }) => pallet_cf_governance::Call::set_whitelisted_call_hash { + call_hash: message, + } + .into(), _ => { trace!("Ignoring unused event: {event}"); + continue }, - } + }; + process_call(call, epoch.index).await; } Result::Ok(header.data) @@ -264,7 +223,9 @@ mod tests { EthSource::new(retry_client.clone()) .chunk_by_vault(vault_source) .key_manager_witnessing( - state_chain_client, + |call, _| async move { + println!("Witnessed call: {:?}", call); + }, retry_client, H160::from_str("a16e02e87b7454126e5e10d957a927a7f5b5d2be").unwrap(), ) diff --git a/engine/src/witness/eth/state_chain_gateway.rs b/engine/src/witness/eth/state_chain_gateway.rs index 5fff0e5eee..c31ac8fac3 100644 --- a/engine/src/witness/eth/state_chain_gateway.rs +++ b/engine/src/witness/eth/state_chain_gateway.rs @@ -1,15 +1,8 @@ -use std::sync::Arc; - use cf_chains::Ethereum; use ethers::{prelude::abigen, types::Bloom}; use sp_core::{H160, H256}; use tracing::{info, trace}; -use crate::{ - eth::retry_rpc::EthersRetryRpcApi, - state_chain_observer::client::extrinsic_api::signed::SignedExtrinsicApi, -}; - use super::{ super::common::{ chain_source::ChainClient, @@ -17,6 +10,9 @@ use super::{ }, contract_common::events_at_block, }; +use crate::eth::retry_rpc::EthersRetryRpcApi; +use cf_primitives::EpochIndex; +use futures_core::Future; abigen!( StateChainGateway, @@ -27,20 +23,26 @@ use anyhow::Result; impl ChunkedByVaultBuilder { pub fn state_chain_gateway_witnessing< - StateChainClient, EthRpcClient: EthersRetryRpcApi + ChainClient + Clone, + ProcessCall, + ProcessingFut, >( self, - state_chain_client: Arc, + process_call: ProcessCall, eth_rpc: EthRpcClient, contract_address: H160, ) -> ChunkedByVaultBuilder where Inner: ChunkedByVault, - StateChainClient: SignedExtrinsicApi + Send + Sync + 'static, + ProcessCall: Fn(state_chain_runtime::RuntimeCall, EpochIndex) -> ProcessingFut + + Send + + Sync + + Clone + + 'static, + ProcessingFut: Future + Send + 'static, { self.then::, _, 
_>(move |epoch, header| { - let state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); let eth_rpc = eth_rpc.clone(); async move { for event in events_at_block::( @@ -51,74 +53,41 @@ impl ChunkedByVaultBuilder { .await? { info!("Handling event: {event}"); - match event.event_parameters { + let call: state_chain_runtime::RuntimeCall = match event.event_parameters { StateChainGatewayEvents::FundedFilter(FundedFilter { node_id: account_id, amount, funder, - }) => { - state_chain_client - .finalize_signed_extrinsic( - pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new( - pallet_cf_funding::Call::funded { - account_id: account_id.into(), - amount: amount - .try_into() - .expect("Funded amount should fit in u128"), - funder, - tx_hash: event.tx_hash.into(), - } - .into(), - ), - epoch_index: epoch.index, - }, - ) - .await; - }, + }) => pallet_cf_funding::Call::funded { + account_id: account_id.into(), + amount: amount.try_into().expect("Funded amount should fit in u128"), + funder, + tx_hash: event.tx_hash.into(), + } + .into(), StateChainGatewayEvents::RedemptionExecutedFilter( RedemptionExecutedFilter { node_id: account_id, amount }, - ) => { - state_chain_client - .finalize_signed_extrinsic( - pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new( - pallet_cf_funding::Call::redeemed { - account_id: account_id.into(), - redeemed_amount: amount - .try_into() - .expect("Redemption amount should fit in u128"), - tx_hash: event.tx_hash.to_fixed_bytes(), - } - .into(), - ), - epoch_index: epoch.index, - }, - ) - .await; - }, + ) => pallet_cf_funding::Call::redeemed { + account_id: account_id.into(), + redeemed_amount: amount + .try_into() + .expect("Redemption amount should fit in u128"), + tx_hash: event.tx_hash.to_fixed_bytes(), + } + .into(), StateChainGatewayEvents::RedemptionExpiredFilter( RedemptionExpiredFilter { node_id: account_id, amount: _ }, - ) => { - state_chain_client - .finalize_signed_extrinsic( - 
pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new( - pallet_cf_funding::Call::redemption_expired { - account_id: account_id.into(), - block_number: header.index, - } - .into(), - ), - epoch_index: epoch.index, - }, - ) - .await; - }, + ) => pallet_cf_funding::Call::redemption_expired { + account_id: account_id.into(), + block_number: header.index, + } + .into(), _ => { trace!("Ignoring unused event: {event}"); + continue }, - } + }; + process_call(call, epoch.index).await; } Result::Ok(header.data) diff --git a/engine/src/witness/eth/vault.rs b/engine/src/witness/eth/vault.rs index ba1b330a6b..c28729c4a7 100644 --- a/engine/src/witness/eth/vault.rs +++ b/engine/src/witness/eth/vault.rs @@ -1,12 +1,9 @@ -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; use ethers::types::Bloom; use sp_core::{H160, H256}; -use crate::{ - eth::retry_rpc::EthersRetryRpcApi, - state_chain_observer::client::extrinsic_api::signed::SignedExtrinsicApi, -}; +use crate::eth::retry_rpc::EthersRetryRpcApi; use super::{ super::common::{ @@ -15,6 +12,8 @@ use super::{ }, contract_common::{events_at_block, Event}, }; +use cf_primitives::EpochIndex; +use futures_core::Future; use anyhow::{anyhow, Result}; use cf_chains::{ @@ -178,11 +177,12 @@ pub fn call_from_event( impl ChunkedByVaultBuilder { pub fn vault_witnessing< - StateChainClient, EthRpcClient: EthersRetryRpcApi + ChainClient + Clone, + ProcessCall, + ProcessingFut, >( self, - state_chain_client: Arc, + process_call: ProcessCall, eth_rpc: EthRpcClient, contract_address: EthereumAddress, native_asset: Asset, @@ -193,10 +193,15 @@ impl ChunkedByVaultBuilder { Inner::Chain: cf_chains::Chain, Inner: ChunkedByVault, - StateChainClient: SignedExtrinsicApi + Send + Sync + 'static, + ProcessCall: Fn(state_chain_runtime::RuntimeCall, EpochIndex) -> ProcessingFut + + Send + + Sync + + Clone + + 'static, + ProcessingFut: Future + Send + 'static, { self.then::, _, _>(move |epoch, header| { - let 
state_chain_client = state_chain_client.clone(); + let process_call = process_call.clone(); let eth_rpc = eth_rpc.clone(); let supported_assets = supported_assets.clone(); async move { @@ -206,14 +211,7 @@ impl ChunkedByVaultBuilder { match call_from_event(event, native_asset, source_chain, &supported_assets) { Ok(option_call) => if let Some(call) = option_call { - state_chain_client - .finalize_signed_extrinsic( - pallet_cf_witnesser::Call::witness_at_epoch { - call: Box::new(call), - epoch_index: epoch.index, - }, - ) - .await; + process_call(call, epoch.index).await; }, Err(message) => { tracing::error!("Ignoring vault contract event: {message}"); diff --git a/engine/src/witness/start.rs b/engine/src/witness/start.rs index 33f50eb497..deec8a59c3 100644 --- a/engine/src/witness/start.rs +++ b/engine/src/witness/start.rs @@ -42,9 +42,25 @@ where .participating(state_chain_client.account_id()) .await; + let finalize_extrinsic_closure = { + let state_chain_client = state_chain_client.clone(); + move |call, epoch_index| { + let state_chain_client = state_chain_client.clone(); + async move { + let _ = state_chain_client + .finalize_signed_extrinsic(pallet_cf_witnesser::Call::witness_at_epoch { + call: Box::new(call), + epoch_index, + }) + .await; + } + } + }; + let start_eth = super::eth::start( scope, eth_client, + finalize_extrinsic_closure.clone(), state_chain_client.clone(), state_chain_stream.clone(), epoch_source.clone(), @@ -54,6 +70,7 @@ where let start_btc = super::btc::start( scope, btc_client, + finalize_extrinsic_closure.clone(), state_chain_client.clone(), state_chain_stream.clone(), epoch_source.clone(), @@ -63,6 +80,7 @@ where let start_dot = super::dot::start( scope, dot_client, + finalize_extrinsic_closure, state_chain_client, state_chain_stream, epoch_source, From 3c2afa81a0a318ae242b4c185b05865158be2560 Mon Sep 17 00:00:00 2001 From: kylezs Date: Wed, 27 Sep 2023 18:42:20 +1000 Subject: [PATCH 14/15] feat: add initiated_at block number for egresses 
(#4046) * feat: add initiated_at block number for egresses * chore: add migration for chain height addition * chore: add migration hooks --- .../pallets/cf-broadcast/src/benchmarking.rs | 21 ++++--- state-chain/pallets/cf-broadcast/src/lib.rs | 55 ++++++++++++++++--- .../pallets/cf-broadcast/src/migrations.rs | 6 ++ .../src/migrations/add_initiated_at.rs | 53 ++++++++++++++++++ state-chain/pallets/cf-broadcast/src/mock.rs | 6 +- state-chain/pallets/cf-broadcast/src/tests.rs | 1 + state-chain/runtime/src/lib.rs | 3 + 7 files changed, 130 insertions(+), 15 deletions(-) create mode 100644 state-chain/pallets/cf-broadcast/src/migrations.rs create mode 100644 state-chain/pallets/cf-broadcast/src/migrations/add_initiated_at.rs diff --git a/state-chain/pallets/cf-broadcast/src/benchmarking.rs b/state-chain/pallets/cf-broadcast/src/benchmarking.rs index 73f0e42c07..e9564119a3 100644 --- a/state-chain/pallets/cf-broadcast/src/benchmarking.rs +++ b/state-chain/pallets/cf-broadcast/src/benchmarking.rs @@ -34,6 +34,10 @@ fn insert_transaction_broadcast_attempt, I: 'static>( ); } +const INITIATED_AT: u32 = 100; + +pub type AggKeyFor = <<>::TargetChain as cf_chains::Chain>::ChainCrypto as ChainCrypto>::AggKey; + // Generates a new signature ready call. fn generate_on_signature_ready_call, I>() -> pallet::Call { let threshold_request_id = 1; @@ -46,6 +50,7 @@ fn generate_on_signature_ready_call, I>() -> pallet::Call::benchmark_value(), api_call: Box::new(ApiCallFor::::benchmark_value()), broadcast_id: 1, + initiated_at: INITIATED_AT.into(), } } @@ -61,7 +66,7 @@ benchmarks_instance_pallet! 
{ Timeouts::::append(timeout_block, broadcast_attempt_id); ThresholdSignatureData::::insert(i, (ApiCallFor::::benchmark_value(), ThresholdSignatureFor::::benchmark_value())) } - let valid_key = <<>::TargetChain as Chain>::ChainCrypto as ChainCrypto>::AggKey::benchmark_value(); + let valid_key = AggKeyFor::::benchmark_value(); T::KeyProvider::set_key(valid_key); } : { Pallet::::on_initialize(timeout_block); @@ -79,7 +84,7 @@ benchmarks_instance_pallet! { insert_transaction_broadcast_attempt::(caller.clone().into(), broadcast_attempt_id); generate_on_signature_ready_call::().dispatch_bypass_filter(T::EnsureThresholdSigned::try_successful_origin().unwrap())?; let expiry_block = frame_system::Pallet::::block_number() + T::BroadcastTimeout::get(); - let valid_key = <<>::TargetChain as Chain>::ChainCrypto as ChainCrypto>::AggKey::benchmark_value(); + let valid_key = AggKeyFor::::benchmark_value(); T::KeyProvider::set_key(valid_key); }: _(RawOrigin::Signed(caller), broadcast_attempt_id) verify { @@ -94,7 +99,7 @@ benchmarks_instance_pallet! { }; insert_transaction_broadcast_attempt::(whitelisted_caller(), broadcast_attempt_id); let call = generate_on_signature_ready_call::(); - let valid_key = <<>::TargetChain as Chain>::ChainCrypto as ChainCrypto>::AggKey::benchmark_value(); + let valid_key = AggKeyFor::::benchmark_value(); T::KeyProvider::set_key(valid_key); } : { call.dispatch_bypass_filter(T::EnsureThresholdSigned::try_successful_origin().unwrap())? } verify { @@ -110,10 +115,11 @@ benchmarks_instance_pallet! { BenchmarkValue::benchmark_value(), signed_api_call, BenchmarkValue::benchmark_value(), - 1 + 1, + INITIATED_AT.into(), ); - T::KeyProvider::set_key(<<>::TargetChain as Chain>::ChainCrypto as ChainCrypto>::AggKey::benchmark_value()); + T::KeyProvider::set_key(AggKeyFor::::benchmark_value()); let transaction_payload = TransactionFor::::benchmark_value(); } : { @@ -130,7 +136,8 @@ benchmarks_instance_pallet! 
{ transaction_succeeded { let caller: T::AccountId = whitelisted_caller(); let signer_id = SignerIdFor::::benchmark_value(); - TransactionOutIdToBroadcastId::::insert(TransactionOutIdFor::::benchmark_value(), 1); + let initiated_at: ChainBlockNumberFor = INITIATED_AT.into(); + TransactionOutIdToBroadcastId::::insert(TransactionOutIdFor::::benchmark_value(), (1, initiated_at)); let broadcast_attempt_id = BroadcastAttemptId { broadcast_id: 1, @@ -142,7 +149,7 @@ benchmarks_instance_pallet! { signer_id, tx_fee: TransactionFeeFor::::benchmark_value(), }; - let valid_key = <<>::TargetChain as Chain>::ChainCrypto as ChainCrypto>::AggKey::benchmark_value(); + let valid_key = AggKeyFor::::benchmark_value(); T::KeyProvider::set_key(valid_key); } : { call.dispatch_bypass_filter(T::EnsureWitnessedAtCurrentEpoch::try_successful_origin().unwrap())? } verify { diff --git a/state-chain/pallets/cf-broadcast/src/lib.rs b/state-chain/pallets/cf-broadcast/src/lib.rs index fd3c0c0923..781f1ca971 100644 --- a/state-chain/pallets/cf-broadcast/src/lib.rs +++ b/state-chain/pallets/cf-broadcast/src/lib.rs @@ -6,9 +6,10 @@ mod benchmarking; mod mock; mod tests; +pub mod migrations; pub mod weights; use cf_primitives::{BroadcastId, ThresholdSignatureRequestId}; -use cf_traits::impl_pallet_safe_mode; +use cf_traits::{impl_pallet_safe_mode, GetBlockHeight}; pub use weights::WeightInfo; impl_pallet_safe_mode!(PalletSafeMode; retry_enabled); @@ -23,7 +24,7 @@ use frame_support::{ dispatch::DispatchResultWithPostInfo, pallet_prelude::DispatchResult, sp_runtime::traits::Saturating, - traits::{Get, UnfilteredDispatchable}, + traits::{Get, OnRuntimeUpgrade, StorageVersion, UnfilteredDispatchable}, Twox64Concat, }; @@ -66,6 +67,8 @@ pub enum PalletOffence { FailedToBroadcastTransaction, } +pub const PALLET_VERSION: StorageVersion = StorageVersion::new(1); + #[frame_support::pallet] pub mod pallet { use super::*; @@ -179,6 +182,9 @@ pub mod pallet { type BroadcastReadyProvider: OnBroadcastReady; + /// 
Get the latest block height of the target chain via Chain Tracking. + type ChainTracking: GetBlockHeight; + /// The timeout duration for the broadcast, measured in number of blocks. #[pallet::constant] type BroadcastTimeout: Get>; @@ -202,6 +208,7 @@ pub mod pallet { pub struct Origin, I: 'static = ()>(pub(super) PhantomData<(T, I)>); #[pallet::pallet] + #[pallet::storage_version(PALLET_VERSION)] #[pallet::without_storage_info] pub struct Pallet(PhantomData<(T, I)>); @@ -237,8 +244,13 @@ pub mod pallet { /// Lookup table between TransactionOutId -> Broadcast. #[pallet::storage] - pub type TransactionOutIdToBroadcastId, I: 'static = ()> = - StorageMap<_, Twox64Concat, TransactionOutIdFor, BroadcastId, OptionQuery>; + pub type TransactionOutIdToBroadcastId, I: 'static = ()> = StorageMap< + _, + Twox64Concat, + TransactionOutIdFor, + (BroadcastId, ChainBlockNumberFor), + OptionQuery, + >; /// The list of failed broadcasts pending retry. #[pallet::storage] @@ -368,6 +380,20 @@ pub mod pallet { Weight::zero() } } + + fn on_runtime_upgrade() -> Weight { + migrations::PalletMigration::::on_runtime_upgrade() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, DispatchError> { + migrations::PalletMigration::::pre_upgrade() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: sp_std::vec::Vec) -> Result<(), DispatchError> { + migrations::PalletMigration::::post_upgrade(state) + } } #[pallet::call] @@ -452,6 +478,7 @@ pub mod pallet { threshold_signature_payload: PayloadFor, api_call: Box<>::ApiCall>, broadcast_id: BroadcastId, + initiated_at: ChainBlockNumberFor, ) -> DispatchResultWithPostInfo { let _ = T::EnsureThresholdSigned::ensure_origin(origin)?; @@ -474,6 +501,7 @@ pub mod pallet { signed_api_call, threshold_signature_payload, broadcast_id, + initiated_at, ); Ok(().into()) } @@ -502,8 +530,9 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { T::EnsureWitnessed::ensure_origin(origin.clone())?; - let broadcast_id = 
TransactionOutIdToBroadcastId::::take(&tx_out_id) - .ok_or(Error::::InvalidPayload)?; + let (broadcast_id, _initiated_at) = + TransactionOutIdToBroadcastId::::take(&tx_out_id) + .ok_or(Error::::InvalidPayload)?; let to_refund = AwaitingBroadcast::::get(BroadcastAttemptId { broadcast_id, @@ -616,6 +645,13 @@ impl, I: 'static> Pallet { if let Some(callback) = maybe_callback { RequestCallbacks::::insert(broadcast_id, callback); } + + // We must set this here because after the threshold signature is requested, it's + // possible that an authority submits the transaction themselves, not going through the + // standard path. This protects against that, to ensure we always set the earliest possible + // block number we could have broadcast at, so that we can ensure we witness it. + let initiated_at = T::ChainTracking::get_block_height(); + let threshold_signature_payload = api_call.threshold_signature_payload(); let signature_request_id = T::ThresholdSigner::request_signature_with_callback( threshold_signature_payload.clone(), @@ -625,6 +661,7 @@ impl, I: 'static> Pallet { threshold_signature_payload, api_call: Box::new(api_call), broadcast_id, + initiated_at, } .into() }, @@ -643,12 +680,16 @@ impl, I: 'static> Pallet { api_call: >::ApiCall, threshold_signature_payload: <::ChainCrypto as ChainCrypto>::Payload, broadcast_id: BroadcastId, + initiated_at: ChainBlockNumberFor, ) -> BroadcastAttemptId { let transaction_out_id = api_call.transaction_out_id(); T::BroadcastReadyProvider::on_broadcast_ready(&api_call); - TransactionOutIdToBroadcastId::::insert(&transaction_out_id, broadcast_id); + TransactionOutIdToBroadcastId::::insert( + &transaction_out_id, + (broadcast_id, initiated_at), + ); ThresholdSignatureData::::insert(broadcast_id, (api_call, signature)); diff --git a/state-chain/pallets/cf-broadcast/src/migrations.rs b/state-chain/pallets/cf-broadcast/src/migrations.rs new file mode 100644 index 0000000000..d7889e06e2 --- /dev/null +++ 
b/state-chain/pallets/cf-broadcast/src/migrations.rs @@ -0,0 +1,6 @@ +pub mod add_initiated_at; + +use cf_runtime_upgrade_utilities::VersionedMigration; + +pub type PalletMigration = + (VersionedMigration, add_initiated_at::Migration, 0, 1>,); diff --git a/state-chain/pallets/cf-broadcast/src/migrations/add_initiated_at.rs b/state-chain/pallets/cf-broadcast/src/migrations/add_initiated_at.rs new file mode 100644 index 0000000000..af2811d532 --- /dev/null +++ b/state-chain/pallets/cf-broadcast/src/migrations/add_initiated_at.rs @@ -0,0 +1,53 @@ +use crate::*; +#[cfg(feature = "try-runtime")] +use frame_support::dispatch::DispatchError; +use frame_support::{traits::OnRuntimeUpgrade, weights::Weight}; +use sp_std::marker::PhantomData; + +mod old { + use frame_support::pallet_prelude::OptionQuery; + + use super::*; + + #[frame_support::storage_alias] + pub type TransactionOutIdToBroadcastId, I: 'static> = + StorageMap, Twox64Concat, TransactionOutIdFor, BroadcastId, OptionQuery>; +} + +pub struct Migration, I: 'static>(PhantomData<(T, I)>); + +impl, I: 'static> OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let chain_height = T::ChainTracking::get_block_height(); + + TransactionOutIdToBroadcastId::::translate::(|_id, old| { + Some((old, chain_height)) + }); + + Weight::zero() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, DispatchError> { + use frame_support::ensure; + + let chain_height = T::ChainTracking::get_block_height(); + // If it's at 0 something went wrong with the initialisation. Also since initiated_at is the + // last thing being decoded, this acts as a check that the rest of the decoding worked. 
+ ensure!(chain_height > 0u32.into(), "chain_height is 0"); + Ok(chain_height.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), DispatchError> { + use frame_support::ensure; + + let pre_upgrade_height = ChainBlockNumberFor::::decode(&mut &state[..]) + .map_err(|_| "Failed to decode pre-upgrade state.")?; + + for (_out_id, (_b_id, initiated_at)) in TransactionOutIdToBroadcastId::::iter() { + ensure!(initiated_at >= pre_upgrade_height, "initiated_at is 0"); + } + Ok(()) + } +} diff --git a/state-chain/pallets/cf-broadcast/src/mock.rs b/state-chain/pallets/cf-broadcast/src/mock.rs index a3a52538c3..a56c03096c 100644 --- a/state-chain/pallets/cf-broadcast/src/mock.rs +++ b/state-chain/pallets/cf-broadcast/src/mock.rs @@ -13,7 +13,10 @@ use cf_chains::{ }; use cf_traits::{ impl_mock_chainflip, impl_mock_runtime_safe_mode, - mocks::{signer_nomination::MockNominator, threshold_signer::MockThresholdSigner}, + mocks::{ + block_height_provider::BlockHeightProvider, signer_nomination::MockNominator, + threshold_signer::MockThresholdSigner, + }, AccountRoleRegistry, EpochKey, KeyState, OnBroadcastReady, }; use codec::{Decode, Encode}; @@ -152,6 +155,7 @@ impl pallet_cf_broadcast::Config for Test { type SafeMode = MockRuntimeSafeMode; type BroadcastReadyProvider = MockBroadcastReadyProvider; type SafeModeBlockMargin = ConstU64<10>; + type ChainTracking = BlockHeightProvider; } impl_mock_chainflip!(Test); diff --git a/state-chain/pallets/cf-broadcast/src/tests.rs b/state-chain/pallets/cf-broadcast/src/tests.rs index 87ea7dd75b..5a1f8772da 100644 --- a/state-chain/pallets/cf-broadcast/src/tests.rs +++ b/state-chain/pallets/cf-broadcast/src/tests.rs @@ -128,6 +128,7 @@ fn start_mock_broadcast_tx_out_id( MockApiCall { tx_out_id, payload: Default::default(), sig: Default::default() }, Default::default(), 1, + 100u64, ) } diff --git a/state-chain/runtime/src/lib.rs b/state-chain/runtime/src/lib.rs index 83ab32f5a6..3d11882869 100644 --- 
a/state-chain/runtime/src/lib.rs +++ b/state-chain/runtime/src/lib.rs @@ -682,6 +682,7 @@ impl pallet_cf_broadcast::Config for Runtime { type SafeMode = RuntimeSafeMode; type SafeModeBlockMargin = ConstU32<10>; type KeyProvider = EthereumVault; + type ChainTracking = EthereumChainTracking; } impl pallet_cf_broadcast::Config for Runtime { @@ -704,6 +705,7 @@ impl pallet_cf_broadcast::Config for Runtime { type SafeMode = RuntimeSafeMode; type SafeModeBlockMargin = ConstU32<10>; type KeyProvider = PolkadotVault; + type ChainTracking = PolkadotChainTracking; } impl pallet_cf_broadcast::Config for Runtime { @@ -726,6 +728,7 @@ impl pallet_cf_broadcast::Config for Runtime { type SafeMode = RuntimeSafeMode; type SafeModeBlockMargin = ConstU32<10>; type KeyProvider = BitcoinVault; + type ChainTracking = BitcoinChainTracking; } impl pallet_cf_chain_tracking::Config for Runtime { From 25d2e4d152a170f6f98d5cd7da733b6ebfccb3f2 Mon Sep 17 00:00:00 2001 From: kylezs Date: Wed, 27 Sep 2023 21:39:49 +1000 Subject: [PATCH 15/15] fix: deposit channel expiry (#3998) * fix: SC recycles based on external chain block * chore: use genesis * feat: CFE filers based on expires_at * chore: add weight calculations for on_idle * chore: clean up some comments * chore: add genesis config * chore: use constants for expiry blocks * chore: use log_or_panic * fix: use correct expiry * test: more recycle tests * chore: clippy * chore: move constants into chainspec * fix: use mutate, rename * chore: ingress-egress pallet migrations * chore: use TargetChainBlockNumber * chore: remove the expiry storage from the swapping and lp pallets expiry is now done in the ingress-egress pallet * chore: update SDK version * chore: remove now-invalid bouncer test * chore: do the migrations --------- Co-authored-by: Daniel --- Cargo.lock | 3 + api/bin/chainflip-broker-api/src/main.rs | 2 - api/bin/chainflip-cli/src/main.rs | 3 +- api/lib/src/lib.rs | 7 +- api/lib/src/queries.rs | 58 ++-- bouncer/package.json | 2 +- 
bouncer/pnpm-lock.yaml | 8 +- bouncer/shared/lp_deposit_expiry.ts | 47 ---- bouncer/tests/all_concurrent_tests.ts | 2 - bouncer/tests/lp_deposit_expiry.ts | 13 - engine/src/witness/btc.rs | 5 + .../chunked_by_vault/deposit_addresses.rs | 6 +- .../cf-integration-tests/src/mock_runtime.rs | 4 +- state-chain/node/src/chain_spec.rs | 68 +++-- state-chain/node/src/chain_spec/common.rs | 1 - state-chain/node/src/chain_spec/devnet.rs | 7 + state-chain/node/src/chain_spec/partnernet.rs | 5 +- .../node/src/chain_spec/perseverance.rs | 5 +- state-chain/node/src/chain_spec/sisyphos.rs | 5 +- state-chain/node/src/chain_spec/testnet.rs | 5 + state-chain/pallets/cf-broadcast/src/lib.rs | 2 + .../pallets/cf-ingress-egress/Cargo.toml | 7 +- .../cf-ingress-egress/src/benchmarking.rs | 20 +- .../pallets/cf-ingress-egress/src/lib.rs | 198 +++++++++---- .../cf-ingress-egress/src/migrations.rs | 6 + .../src/migrations/ingress_expiry.rs | 119 ++++++++ .../pallets/cf-ingress-egress/src/mock.rs | 29 +- .../pallets/cf-ingress-egress/src/tests.rs | 260 ++++++++++++------ state-chain/pallets/cf-lp/Cargo.toml | 7 +- state-chain/pallets/cf-lp/src/benchmarking.rs | 34 +-- state-chain/pallets/cf-lp/src/lib.rs | 112 ++------ state-chain/pallets/cf-lp/src/migrations.rs | 6 + .../cf-lp/src/migrations/remove_expiries.rs | 35 +++ state-chain/pallets/cf-lp/src/tests.rs | 86 +----- state-chain/pallets/cf-swapping/Cargo.toml | 7 +- .../pallets/cf-swapping/src/benchmarking.rs | 35 --- state-chain/pallets/cf-swapping/src/lib.rs | 98 ++----- .../pallets/cf-swapping/src/migrations.rs | 6 + .../src/migrations/remove_expiries.rs | 34 +++ state-chain/pallets/cf-swapping/src/mock.rs | 8 +- state-chain/pallets/cf-swapping/src/tests.rs | 85 +----- .../pallets/cf-swapping/src/weights.rs | 17 -- state-chain/runtime/src/chainflip.rs | 16 -- state-chain/runtime/src/constants.rs | 2 - state-chain/traits/src/lib.rs | 5 - .../traits/src/mocks/deposit_handler.rs | 25 -- 46 files changed, 747 insertions(+), 768 
deletions(-) delete mode 100644 bouncer/shared/lp_deposit_expiry.ts delete mode 100755 bouncer/tests/lp_deposit_expiry.ts create mode 100644 state-chain/node/src/chain_spec/devnet.rs create mode 100644 state-chain/pallets/cf-ingress-egress/src/migrations.rs create mode 100644 state-chain/pallets/cf-ingress-egress/src/migrations/ingress_expiry.rs create mode 100644 state-chain/pallets/cf-lp/src/migrations.rs create mode 100644 state-chain/pallets/cf-lp/src/migrations/remove_expiries.rs create mode 100644 state-chain/pallets/cf-swapping/src/migrations.rs create mode 100644 state-chain/pallets/cf-swapping/src/migrations/remove_expiries.rs diff --git a/Cargo.lock b/Cargo.lock index 49d92053d8..f09ad58c8d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7185,6 +7185,7 @@ version = "0.1.0" dependencies = [ "cf-chains", "cf-primitives", + "cf-runtime-upgrade-utilities", "cf-runtime-utilities", "cf-test-utilities", "cf-traits", @@ -7208,6 +7209,7 @@ version = "0.1.0" dependencies = [ "cf-chains", "cf-primitives", + "cf-runtime-upgrade-utilities", "cf-test-utilities", "cf-traits", "frame-benchmarking", @@ -7274,6 +7276,7 @@ version = "0.1.0" dependencies = [ "cf-chains", "cf-primitives", + "cf-runtime-upgrade-utilities", "cf-test-utilities", "cf-traits", "frame-benchmarking", diff --git a/api/bin/chainflip-broker-api/src/main.rs b/api/bin/chainflip-broker-api/src/main.rs index b8dfd3eca9..291d039fd2 100644 --- a/api/bin/chainflip-broker-api/src/main.rs +++ b/api/bin/chainflip-broker-api/src/main.rs @@ -24,7 +24,6 @@ use tracing::log; #[derive(Serialize, Deserialize, Clone)] pub struct BrokerSwapDepositAddress { pub address: String, - pub expiry_block: BlockNumber, pub issued_block: BlockNumber, pub channel_id: ChannelId, } @@ -33,7 +32,6 @@ impl From for BrokerSwapDepositAddress { fn from(value: chainflip_api::SwapDepositAddress) -> Self { Self { address: value.address, - expiry_block: value.expiry_block, issued_block: value.issued_block, channel_id: value.channel_id, } diff 
--git a/api/bin/chainflip-cli/src/main.rs b/api/bin/chainflip-cli/src/main.rs index 2f423a1a1c..47de1818c7 100644 --- a/api/bin/chainflip-cli/src/main.rs +++ b/api/bin/chainflip-cli/src/main.rs @@ -57,7 +57,7 @@ async fn run_cli() -> Result<()> { let api = StateChainApi::connect(scope, cli_settings.state_chain).await?; match command_line_opts.cmd { Broker(BrokerSubcommands::RequestSwapDepositAddress(params)) => { - let SwapDepositAddress { address, expiry_block, .. } = api + let SwapDepositAddress { address, .. } = api .broker_api() .request_swap_deposit_address( params.source_asset, @@ -71,7 +71,6 @@ async fn run_cli() -> Result<()> { ) .await?; println!("Deposit Address: {address}"); - println!("Address expires at block {expiry_block}"); }, LiquidityProvider( LiquidityProviderSubcommands::RequestLiquidityDepositAddress { asset }, diff --git a/api/lib/src/lib.rs b/api/lib/src/lib.rs index 860c88414d..519f2d3d98 100644 --- a/api/lib/src/lib.rs +++ b/api/lib/src/lib.rs @@ -320,7 +320,6 @@ pub trait GovernanceApi: SignedExtrinsicApi { pub struct SwapDepositAddress { pub address: String, - pub expiry_block: state_chain_runtime::BlockNumber, pub issued_block: state_chain_runtime::BlockNumber, pub channel_id: ChannelId, } @@ -349,10 +348,7 @@ pub trait BrokerApi: SignedExtrinsicApi { if let Some(state_chain_runtime::RuntimeEvent::Swapping( pallet_cf_swapping::Event::SwapDepositAddressReady { - deposit_address, - expiry_block, - channel_id, - .. + deposit_address, channel_id, .. 
}, )) = events.iter().find(|event| { matches!( @@ -364,7 +360,6 @@ pub trait BrokerApi: SignedExtrinsicApi { }) { Ok(SwapDepositAddress { address: deposit_address.to_string(), - expiry_block: *expiry_block, issued_block: header.number, channel_id: *channel_id, }) diff --git a/api/lib/src/queries.rs b/api/lib/src/queries.rs index eacd1e5a6f..2da798e592 100644 --- a/api/lib/src/queries.rs +++ b/api/lib/src/queries.rs @@ -4,6 +4,7 @@ use cf_primitives::{chains::assets::any, AssetAmount}; use chainflip_engine::state_chain_observer::client::{ chain_api::ChainApi, storage_api::StorageApi, }; +use pallet_cf_ingress_egress::DepositChannelDetails; use serde::Deserialize; use state_chain_runtime::PalletInstanceAlias; use std::{collections::BTreeMap, sync::Arc}; @@ -15,7 +16,6 @@ pub struct SwapChannelInfo { deposit_address: ::Humanreadable, source_asset: any::Asset, destination_asset: any::Asset, - expiry_block: state_chain_runtime::BlockNumber, } pub struct QueryApi { @@ -52,47 +52,33 @@ impl QueryApi { let block_hash = block_hash.unwrap_or_else(|| self.state_chain_client.latest_finalized_hash()); - let (channel_details, channel_actions, network_environment) = tokio::try_join!( - self.state_chain_client - .storage_map::, Vec<_>>(block_hash) - .map(|result| { - result.map(|channels| channels.into_iter().collect::>()) - }), - self.state_chain_client.storage_map::, Vec<_>>(block_hash,), - self.state_chain_client - .storage_value::>(block_hash), - )?; + >, Vec<_>>(block_hash) + .map(|result| { + result.map(|channels| channels.into_iter().collect::>()) + }), + self.state_chain_client + .storage_value::>( + block_hash + ), + )?; - Ok(channel_actions + Ok(channel_details .iter() - .filter_map(|(address, action)| { - match action { - pallet_cf_ingress_egress::ChannelAction::Swap { destination_asset, .. } | - pallet_cf_ingress_egress::ChannelAction::CcmTransfer { - destination_asset, - .. 
- } => Some(destination_asset), - _ => None, - } - .and_then(|destination_asset| { - channel_details.get(address).map(|details| { - (destination_asset, details.deposit_channel.clone(), details.expires_at) - }) - }) - .map(|(&destination_asset, deposit_channel, expiry)| SwapChannelInfo { + .filter_map(|(_, DepositChannelDetails { action, deposit_channel, .. })| match action { + pallet_cf_ingress_egress::ChannelAction::Swap { destination_asset, .. } | + pallet_cf_ingress_egress::ChannelAction::CcmTransfer { + destination_asset, .. + } => Some(SwapChannelInfo { deposit_address: deposit_channel.address.to_humanreadable(network_environment), source_asset: deposit_channel.asset.into(), - destination_asset, - expiry_block: expiry, - }) + destination_asset: *destination_asset, + }), + _ => None, }) .collect::>()) } diff --git a/bouncer/package.json b/bouncer/package.json index 717c7097d7..2f7b9db764 100644 --- a/bouncer/package.json +++ b/bouncer/package.json @@ -6,7 +6,7 @@ "prettier:write": "prettier --write ." 
}, "dependencies": { - "@chainflip-io/cli": "^0.1.3", + "@chainflip-io/cli": "^0.1.4", "@polkadot/api": "10.7.2", "@polkadot/keyring": "12.2.1", "@polkadot/util": "12.2.1", diff --git a/bouncer/pnpm-lock.yaml b/bouncer/pnpm-lock.yaml index f6040fb378..adba5506a4 100644 --- a/bouncer/pnpm-lock.yaml +++ b/bouncer/pnpm-lock.yaml @@ -6,8 +6,8 @@ settings: dependencies: '@chainflip-io/cli': - specifier: ^0.1.3 - version: 0.1.3 + specifier: ^0.1.4 + version: 0.1.4 '@polkadot/api': specifier: 10.7.2 version: 10.7.2 @@ -95,8 +95,8 @@ packages: resolution: {integrity: sha512-0h+FrQDqe2Wn+IIGFkTCd4aAwTJ+7834Ek1COohCyV26AXhwQ7WQaz+4F/nLOeVl/3BtWHOHLPsq46V8YB46Eg==} dev: false - /@chainflip-io/cli@0.1.3: - resolution: {integrity: sha512-bCd9xMUy1VghAMjPB3hMNzNk59OOF9nLvS/gO6O9CBhohwlzOO2hf08mui0u36SerQBOYfCsRuM4OjuwzlefnQ==, tarball: https://npm.pkg.github.com/download/@chainflip-io/cli/0.1.3/60e786fbf686f278bd78b531b7675de9ca671558} + /@chainflip-io/cli@0.1.4: + resolution: {integrity: sha512-c91GIZvyBWYYzARuJpJZzKnw00n+i++kNl3GLSyorUuMsFLEz5qkyMf0i1bFTIp/No0nvWI0MAOgnofP0zTOrQ==, tarball: https://npm.pkg.github.com/download/@chainflip-io/cli/0.1.4/52273fa90fdea2826c552689fe54e909b57e39a5} hasBin: true dependencies: ethers: 6.7.1 diff --git a/bouncer/shared/lp_deposit_expiry.ts b/bouncer/shared/lp_deposit_expiry.ts deleted file mode 100644 index 065a17767e..0000000000 --- a/bouncer/shared/lp_deposit_expiry.ts +++ /dev/null @@ -1,47 +0,0 @@ -import { Keyring } from '@polkadot/keyring'; -import { cryptoWaitReady } from '@polkadot/util-crypto'; -import { observeEvent, getChainflipApi, lpMutex } from '../shared/utils'; -import { sendBtc } from '../shared/send_btc'; -import { submitGovernanceExtrinsic } from '../shared/cf_governance'; - -export async function testLpDepositExpiry() { - await cryptoWaitReady(); - const keyring = new Keyring({ type: 'sr25519' }); - const lpUri = process.env.LP_URI ?? 
'//LP_1'; - const lp = keyring.createFromUri(lpUri); - - const chainflip = await getChainflipApi(); - - console.log('=== Testing expiry of funded LP deposit address ==='); - const originalExpiryTime = Number(await chainflip.query.liquidityProvider.lpTTL()); - console.log('Setting expiry time for LP addresses to 10 blocks'); - - await submitGovernanceExtrinsic(chainflip.tx.liquidityProvider.setLpTtl(10)); - await observeEvent('liquidityProvider:LpTtlSet', chainflip); - - console.log('Requesting new BTC LP deposit address'); - lpMutex.runExclusive(async () => { - await chainflip.tx.liquidityProvider - .requestLiquidityDepositAddress('Btc') - .signAndSend(lp, { nonce: -1 }); - }); - - const depositEventResult = await observeEvent( - 'liquidityProvider:LiquidityDepositAddressReady', - chainflip, - (event) => event.data.depositAddress.Btc, - ); - const ingressAddress = depositEventResult.data.depositAddress.Btc; - - console.log('Funding BTC LP deposit address of ' + ingressAddress + ' with 1 BTC'); - - await sendBtc(ingressAddress, 1); - await observeEvent('liquidityProvider:LiquidityDepositAddressExpired', chainflip); - - console.log('Restoring expiry time for LP addresses to ' + originalExpiryTime + ' blocks'); - await submitGovernanceExtrinsic(chainflip.tx.liquidityProvider.setLpTtl(originalExpiryTime)); - - await observeEvent('liquidityProvider:LpTtlSet', chainflip); - - console.log('=== LP deposit expiry test complete ==='); -} diff --git a/bouncer/tests/all_concurrent_tests.ts b/bouncer/tests/all_concurrent_tests.ts index 3dc31084bf..692dda3569 100755 --- a/bouncer/tests/all_concurrent_tests.ts +++ b/bouncer/tests/all_concurrent_tests.ts @@ -1,5 +1,4 @@ #!/usr/bin/env -S pnpm tsx -import { testLpDepositExpiry } from '../shared/lp_deposit_expiry'; import { testAllSwaps } from '../shared/swapping'; import { testEthereumDeposits } from '../shared/ethereum_deposits'; import { runWithTimeout, observeBadEvents } from '../shared/utils'; @@ -13,7 +12,6 @@ async function 
runAllConcurrentTests() { await Promise.all([ testAllSwaps(), - testLpDepositExpiry(), testEthereumDeposits(), testFundRedeem('redeem'), testMultipleMembersGovernance(), diff --git a/bouncer/tests/lp_deposit_expiry.ts b/bouncer/tests/lp_deposit_expiry.ts deleted file mode 100755 index 7820d2a145..0000000000 --- a/bouncer/tests/lp_deposit_expiry.ts +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env -S pnpm tsx -import { testLpDepositExpiry } from '../shared/lp_deposit_expiry'; -import { runWithTimeout } from '../shared/utils'; - -async function main(): Promise { - await testLpDepositExpiry(); - process.exit(0); -} - -runWithTimeout(main(), 120000).catch((error) => { - console.error(error); - process.exit(-1); -}); diff --git a/engine/src/witness/btc.rs b/engine/src/witness/btc.rs index d78511eca5..97c2827a07 100644 --- a/engine/src/witness/btc.rs +++ b/engine/src/witness/btc.rs @@ -196,6 +196,8 @@ mod tests { btc::{deposit_address::DepositAddress, ScriptPubkey}, DepositChannel, }; + use pallet_cf_ingress_egress::ChannelAction; + use sp_runtime::AccountId32; fn fake_transaction(tx_outs: Vec) -> Transaction { Transaction { @@ -218,6 +220,9 @@ mod tests { asset: btc::Asset::Btc, state: DepositAddress::new([0; 32], 1), }, + action: ChannelAction::::LiquidityProvision { + lp_account: AccountId32::new([0xab; 32]), + }, } } diff --git a/engine/src/witness/common/chunked_chain_source/chunked_by_vault/deposit_addresses.rs b/engine/src/witness/common/chunked_chain_source/chunked_by_vault/deposit_addresses.rs index f943ed77ee..a182c9353f 100644 --- a/engine/src/witness/common/chunked_chain_source/chunked_by_vault/deposit_addresses.rs +++ b/engine/src/witness/common/chunked_chain_source/chunked_by_vault/deposit_addresses.rs @@ -58,7 +58,11 @@ where // FOr a given header we only witness addresses opened at or before the header, the set of // addresses each engine attempts to witness at a given block is consistent fn addresses_for_header(index: Inner::Index, addresses: &Addresses) -> 
Addresses { - addresses.iter().filter(|details| details.opened_at <= index).cloned().collect() + addresses + .iter() + .filter(|details| details.opened_at <= index && index <= details.expires_at) + .cloned() + .collect() } async fn get_chain_state_and_addresses( diff --git a/state-chain/cf-integration-tests/src/mock_runtime.rs b/state-chain/cf-integration-tests/src/mock_runtime.rs index 7ae8a3f3bc..bee02a5d54 100644 --- a/state-chain/cf-integration-tests/src/mock_runtime.rs +++ b/state-chain/cf-integration-tests/src/mock_runtime.rs @@ -212,9 +212,11 @@ impl ExtBuilder { environment: Default::default(), liquidity_pools: Default::default(), swapping: Default::default(), - liquidity_provider: Default::default(), system: Default::default(), transaction_payment: Default::default(), + bitcoin_ingress_egress: Default::default(), + polkadot_ingress_egress: Default::default(), + ethereum_ingress_egress: Default::default(), }) } } diff --git a/state-chain/node/src/chain_spec.rs b/state-chain/node/src/chain_spec.rs index c900e443ef..0f2fb42365 100644 --- a/state-chain/node/src/chain_spec.rs +++ b/state-chain/node/src/chain_spec.rs @@ -22,12 +22,13 @@ use sp_core::{ }; use state_chain_runtime::{ chainflip::Offence, opaque::SessionKeys, AccountId, AccountRolesConfig, AuraConfig, - BitcoinChainTrackingConfig, BitcoinThresholdSignerConfig, BitcoinVaultConfig, BlockNumber, - EmissionsConfig, EnvironmentConfig, EthereumChainTrackingConfig, EthereumThresholdSignerConfig, + BitcoinChainTrackingConfig, BitcoinIngressEgressConfig, BitcoinThresholdSignerConfig, + BitcoinVaultConfig, BlockNumber, EmissionsConfig, EnvironmentConfig, + EthereumChainTrackingConfig, EthereumIngressEgressConfig, EthereumThresholdSignerConfig, EthereumVaultConfig, FlipBalance, FlipConfig, FundingConfig, GovernanceConfig, GrandpaConfig, - PolkadotChainTrackingConfig, PolkadotThresholdSignerConfig, PolkadotVaultConfig, - ReputationConfig, RuntimeGenesisConfig, SessionConfig, Signature, SwappingConfig, 
SystemConfig, - ValidatorConfig, WASM_BINARY, + PolkadotChainTrackingConfig, PolkadotIngressEgressConfig, PolkadotThresholdSignerConfig, + PolkadotVaultConfig, ReputationConfig, RuntimeGenesisConfig, SessionConfig, Signature, + SwappingConfig, SystemConfig, ValidatorConfig, WASM_BINARY, }; use std::{ @@ -45,6 +46,7 @@ use sp_runtime::{ }; pub mod common; +pub mod devnet; pub mod partnernet; pub mod perseverance; pub mod sisyphos; @@ -236,7 +238,7 @@ pub fn cf_development_config() -> Result { // Governance account - Snow White snow_white.into(), 1, - common::MAX_AUTHORITIES, + devnet::MAX_AUTHORITIES, EnvironmentConfig { flip_token_address: flip_token_address.into(), eth_usdc_address: eth_usdc_address.into(), @@ -252,24 +254,27 @@ pub fn cf_development_config() -> Result { }, eth_init_agg_key, ethereum_deployment_block, - common::TOTAL_ISSUANCE, + devnet::TOTAL_ISSUANCE, genesis_funding_amount, min_funding, - common::REDEMPTION_TAX, - 8 * common::HOURS, - common::REDEMPTION_DELAY_SECS, - common::CURRENT_AUTHORITY_EMISSION_INFLATION_PERBILL, - common::BACKUP_NODE_EMISSION_INFLATION_PERBILL, - common::EXPIRY_SPAN_IN_SECONDS, - common::ACCRUAL_RATIO, - Percent::from_percent(common::REDEMPTION_PERIOD_AS_PERCENTAGE), - common::SUPPLY_UPDATE_INTERVAL, - common::PENALTIES.to_vec(), - common::KEYGEN_CEREMONY_TIMEOUT_BLOCKS, - common::THRESHOLD_SIGNATURE_CEREMONY_TIMEOUT_BLOCKS, - common::SWAP_TTL, - common::MINIMUM_SWAP_AMOUNTS.to_vec(), + devnet::REDEMPTION_TAX, + 8 * devnet::HOURS, + devnet::REDEMPTION_DELAY_SECS, + devnet::CURRENT_AUTHORITY_EMISSION_INFLATION_PERBILL, + devnet::BACKUP_NODE_EMISSION_INFLATION_PERBILL, + devnet::EXPIRY_SPAN_IN_SECONDS, + devnet::ACCRUAL_RATIO, + Percent::from_percent(devnet::REDEMPTION_PERIOD_AS_PERCENTAGE), + devnet::SUPPLY_UPDATE_INTERVAL, + devnet::PENALTIES.to_vec(), + devnet::KEYGEN_CEREMONY_TIMEOUT_BLOCKS, + devnet::THRESHOLD_SIGNATURE_CEREMONY_TIMEOUT_BLOCKS, + devnet::MINIMUM_SWAP_AMOUNTS.to_vec(), dot_runtime_version, + // Bitcoin 
block times on localnets are much faster, so we account for that here. + devnet::BITCOIN_EXPIRY_BLOCKS, + devnet::ETHEREUM_EXPIRY_BLOCKS, + devnet::POLKADOT_EXPIRY_BLOCKS, ) }, // Bootnodes @@ -390,9 +395,11 @@ macro_rules! network_spec { PENALTIES.to_vec(), KEYGEN_CEREMONY_TIMEOUT_BLOCKS, THRESHOLD_SIGNATURE_CEREMONY_TIMEOUT_BLOCKS, - SWAP_TTL, MINIMUM_SWAP_AMOUNTS.to_vec(), dot_runtime_version, + BITCOIN_EXPIRY_BLOCKS, + ETHEREUM_EXPIRY_BLOCKS, + POLKADOT_EXPIRY_BLOCKS, ) }, // Bootnodes @@ -445,9 +452,11 @@ fn testnet_genesis( penalties: Vec<(Offence, (i32, BlockNumber))>, keygen_ceremony_timeout_blocks: BlockNumber, threshold_signature_ceremony_timeout_blocks: BlockNumber, - swap_ttl: BlockNumber, minimum_swap_amounts: Vec<(assets::any::Asset, AssetAmount)>, dot_runtime_version: RuntimeVersion, + bitcoin_deposit_channel_lifetime: u32, + ethereum_deposit_channel_lifetime: u32, + polkadot_deposit_channel_lifetime: u32, ) -> RuntimeGenesisConfig { // Sanity Checks for (account_id, aura_id, grandpa_id) in initial_authorities.iter() { @@ -650,8 +659,17 @@ fn testnet_genesis( }, transaction_payment: Default::default(), liquidity_pools: Default::default(), - swapping: SwappingConfig { swap_ttl, minimum_swap_amounts }, - liquidity_provider: Default::default(), + swapping: SwappingConfig { minimum_swap_amounts, _phantom: PhantomData }, + // These are set to ~2 hours at average block times. 
+ bitcoin_ingress_egress: BitcoinIngressEgressConfig { + deposit_channel_lifetime: bitcoin_deposit_channel_lifetime.into(), + }, + ethereum_ingress_egress: EthereumIngressEgressConfig { + deposit_channel_lifetime: ethereum_deposit_channel_lifetime.into(), + }, + polkadot_ingress_egress: PolkadotIngressEgressConfig { + deposit_channel_lifetime: polkadot_deposit_channel_lifetime, + }, } } diff --git a/state-chain/node/src/chain_spec/common.rs b/state-chain/node/src/chain_spec/common.rs index 0ec7b19c1a..d5d925bb02 100644 --- a/state-chain/node/src/chain_spec/common.rs +++ b/state-chain/node/src/chain_spec/common.rs @@ -40,7 +40,6 @@ pub const PENALTIES: &[(Offence, (i32, BlockNumber))] = &[ (Offence::GrandpaEquivocation, (50, HEARTBEAT_BLOCK_INTERVAL * 5)), ]; -pub const SWAP_TTL: BlockNumber = 2 * HOURS; pub const MINIMUM_SWAP_AMOUNTS: &[(Asset, AssetAmount)] = &[ (Asset::Eth, 580_000_000_000_000u128), // 1usd worth of Eth = 0.00058 * 18 d.p (Asset::Flip, FLIPPERINOS_PER_FLIP), // 1 Flip diff --git a/state-chain/node/src/chain_spec/devnet.rs b/state-chain/node/src/chain_spec/devnet.rs new file mode 100644 index 0000000000..91257d5f82 --- /dev/null +++ b/state-chain/node/src/chain_spec/devnet.rs @@ -0,0 +1,7 @@ +pub use super::common::*; + +// These represent approximately 10 minutes in localnet block times +// Bitcoin blocks are 5 seconds on localnets. 
+pub const BITCOIN_EXPIRY_BLOCKS: u32 = 10 * 60 / 5; +pub const ETHEREUM_EXPIRY_BLOCKS: u32 = 10 * 60 / 14; +pub const POLKADOT_EXPIRY_BLOCKS: u32 = 10 * 60 / 6; diff --git a/state-chain/node/src/chain_spec/partnernet.rs b/state-chain/node/src/chain_spec/partnernet.rs index 49aadb28df..c7878c8497 100644 --- a/state-chain/node/src/chain_spec/partnernet.rs +++ b/state-chain/node/src/chain_spec/partnernet.rs @@ -1,5 +1,8 @@ -pub use super::common::*; use super::StateChainEnvironment; +pub use super::{ + common::*, + testnet::{BITCOIN_EXPIRY_BLOCKS, ETHEREUM_EXPIRY_BLOCKS, POLKADOT_EXPIRY_BLOCKS}, +}; use cf_chains::{dot::RuntimeVersion, eth::CHAIN_ID_GOERLI}; use cf_primitives::{AccountId, AccountRole, BlockNumber, FlipBalance, NetworkEnvironment}; use sc_service::ChainType; diff --git a/state-chain/node/src/chain_spec/perseverance.rs b/state-chain/node/src/chain_spec/perseverance.rs index 0c45e1052f..e8f99ac836 100644 --- a/state-chain/node/src/chain_spec/perseverance.rs +++ b/state-chain/node/src/chain_spec/perseverance.rs @@ -1,4 +1,7 @@ -pub use super::common::*; +pub use super::{ + common::*, + testnet::{BITCOIN_EXPIRY_BLOCKS, ETHEREUM_EXPIRY_BLOCKS, POLKADOT_EXPIRY_BLOCKS}, +}; use super::{parse_account, StateChainEnvironment}; use cf_chains::{dot::RuntimeVersion, eth::CHAIN_ID_GOERLI}; use cf_primitives::{AccountId, AccountRole, BlockNumber, FlipBalance, NetworkEnvironment}; diff --git a/state-chain/node/src/chain_spec/sisyphos.rs b/state-chain/node/src/chain_spec/sisyphos.rs index ef69392f3a..d910d0bb0f 100644 --- a/state-chain/node/src/chain_spec/sisyphos.rs +++ b/state-chain/node/src/chain_spec/sisyphos.rs @@ -1,5 +1,8 @@ -pub use super::common::*; use super::StateChainEnvironment; +pub use super::{ + common::*, + testnet::{BITCOIN_EXPIRY_BLOCKS, ETHEREUM_EXPIRY_BLOCKS, POLKADOT_EXPIRY_BLOCKS}, +}; use cf_chains::{dot::RuntimeVersion, eth::CHAIN_ID_GOERLI}; use cf_primitives::{AccountId, AccountRole, BlockNumber, FlipBalance, NetworkEnvironment}; use 
sc_service::ChainType; diff --git a/state-chain/node/src/chain_spec/testnet.rs b/state-chain/node/src/chain_spec/testnet.rs index 178e67622b..b95779814a 100644 --- a/state-chain/node/src/chain_spec/testnet.rs +++ b/state-chain/node/src/chain_spec/testnet.rs @@ -12,6 +12,11 @@ pub const CHAIN_TYPE: ChainType = ChainType::Development; pub const NETWORK_ENVIRONMENT: NetworkEnvironment = NetworkEnvironment::Development; pub const PROTOCOL_ID: &str = "flip-test"; +// These represent approximately 2 hours on testnet block times +pub const BITCOIN_EXPIRY_BLOCKS: u32 = 2 * 60 * 60 / (10 * 60); +pub const ETHEREUM_EXPIRY_BLOCKS: u32 = 2 * 60 * 60 / 14; +pub const POLKADOT_EXPIRY_BLOCKS: u32 = 2 * 60 * 60 / 6; + pub const ENV: StateChainEnvironment = StateChainEnvironment { flip_token_address: hex_literal::hex!("Cf7Ed3AccA5a467e9e704C703E8D87F634fB0Fc9"), eth_usdc_address: hex_literal::hex!("a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"), diff --git a/state-chain/pallets/cf-broadcast/src/lib.rs b/state-chain/pallets/cf-broadcast/src/lib.rs index 781f1ca971..d787a65b1d 100644 --- a/state-chain/pallets/cf-broadcast/src/lib.rs +++ b/state-chain/pallets/cf-broadcast/src/lib.rs @@ -243,6 +243,8 @@ pub mod pallet { >; /// Lookup table between TransactionOutId -> Broadcast. + /// This storage item is used by the CFE to track which broadcasts/egresses it needs to + /// witness. 
#[pallet::storage] pub type TransactionOutIdToBroadcastId, I: 'static = ()> = StorageMap< _, diff --git a/state-chain/pallets/cf-ingress-egress/Cargo.toml b/state-chain/pallets/cf-ingress-egress/Cargo.toml index 1ab44ee32d..933bbcde12 100644 --- a/state-chain/pallets/cf-ingress-egress/Cargo.toml +++ b/state-chain/pallets/cf-ingress-egress/Cargo.toml @@ -17,6 +17,7 @@ cf-traits = { path = '../../traits', default-features = false } cf-runtime-utilities = { path = '../../runtime-utilities', default-features = false, features = [ 'derive', ] } +cf-runtime-upgrade-utilities = { path = '../../runtime-upgrade-utilities', default-features = false } log = { version = '0.4.16', default-features = false } @@ -47,6 +48,7 @@ std = [ 'cf-chains/std', 'cf-primitives/std', 'cf-traits/std', + 'cf-runtime-upgrade-utilities/std', 'codec/std', 'frame-benchmarking?/std', 'frame-support/std', @@ -64,4 +66,7 @@ runtime-benchmarks = [ 'frame-system/runtime-benchmarks', 'pallet-cf-governance/runtime-benchmarks', ] -try-runtime = ['frame-support/try-runtime'] +try-runtime = [ + 'frame-support/try-runtime', + 'cf-runtime-upgrade-utilities/try-runtime', +] diff --git a/state-chain/pallets/cf-ingress-egress/src/benchmarking.rs b/state-chain/pallets/cf-ingress-egress/src/benchmarking.rs index 695a7a7254..da65ff8972 100644 --- a/state-chain/pallets/cf-ingress-egress/src/benchmarking.rs +++ b/state-chain/pallets/cf-ingress-egress/src/benchmarking.rs @@ -7,7 +7,6 @@ use cf_chains::{ DepositChannel, }; use frame_benchmarking::{account, benchmarks_instance_pallet}; -use frame_system::pallet_prelude::BlockNumberFor; pub(crate) type TargetChainBlockNumber = <>::TargetChain as Chain>::ChainBlockNumber; @@ -27,16 +26,17 @@ benchmarks_instance_pallet! 
{ let deposit_address: <>::TargetChain as Chain>::ChainAccount = BenchmarkValue::benchmark_value(); let source_asset: <>::TargetChain as Chain>::ChainAsset = BenchmarkValue::benchmark_value(); let deposit_amount: <>::TargetChain as Chain>::ChainAmount = BenchmarkValue::benchmark_value(); + let block_number: TargetChainBlockNumber = BenchmarkValue::benchmark_value(); DepositChannelLookup::::insert(&deposit_address, DepositChannelDetails { - opened_at: TargetChainBlockNumber::::benchmark_value(), + opened_at: block_number, + expires_at: block_number, deposit_channel: DepositChannel::generate_new::<>::AddressDerivation>( 1, source_asset, ).unwrap(), - expires_at: BlockNumberFor::::from(1_000u32), - }); - ChannelActions::::insert(&deposit_address, ChannelAction::::LiquidityProvision { - lp_account: account("doogle", 0, 0), + action: ChannelAction::::LiquidityProvision { + lp_account: account("doogle", 0, 0), + }, }); }: { Pallet::::process_single_deposit(deposit_address, source_asset, deposit_amount, BenchmarkValue::benchmark_value(), BenchmarkValue::benchmark_value()).unwrap() @@ -61,13 +61,17 @@ benchmarks_instance_pallet! 
{ let deposit_address = <>::TargetChain as Chain>::ChainAccount::benchmark_value_by_id(a as u8); let deposit_fetch_id = <>::TargetChain as Chain>::DepositFetchId::benchmark_value_by_id(a as u8); let source_asset: <>::TargetChain as Chain>::ChainAsset = BenchmarkValue::benchmark_value(); + let block_number = TargetChainBlockNumber::::benchmark_value(); let mut channel = DepositChannelDetails:: { - opened_at: TargetChainBlockNumber::::benchmark_value(), + opened_at: block_number, + expires_at: block_number, deposit_channel: DepositChannel::generate_new::<>::AddressDerivation>( 1, source_asset, ).unwrap(), - expires_at: BlockNumberFor::::from(1_000u32), + action: ChannelAction::::LiquidityProvision { + lp_account: account("doogle", 0, 0), + }, }; channel.deposit_channel.state.on_fetch_scheduled(); DepositChannelLookup::::insert(deposit_address.clone(), channel); diff --git a/state-chain/pallets/cf-ingress-egress/src/lib.rs b/state-chain/pallets/cf-ingress-egress/src/lib.rs index 59afc93e29..37b068eee7 100644 --- a/state-chain/pallets/cf-ingress-egress/src/lib.rs +++ b/state-chain/pallets/cf-ingress-egress/src/lib.rs @@ -5,11 +5,14 @@ mod benchmarking; +pub mod migrations; #[cfg(test)] mod mock; #[cfg(test)] mod tests; pub mod weights; +use cf_runtime_utilities::log_or_panic; +use frame_support::{sp_runtime::SaturatedConversion, traits::OnRuntimeUpgrade}; pub use weights::WeightInfo; use cf_chains::{ @@ -21,7 +24,6 @@ use cf_chains::{ use cf_primitives::{ Asset, AssetAmount, BasisPoints, ChannelId, EgressCounter, EgressId, ForeignChain, }; -use cf_runtime_utilities::log_or_panic; use cf_traits::{ liquidity::LpBalanceApi, Broadcaster, CcmHandler, Chainflip, DepositApi, DepositHandler, EgressApi, GetBlockHeight, SwapDepositHandler, @@ -89,6 +91,8 @@ pub struct VaultTransfer { destination_address: C::ChainAccount, } +pub const PALLET_VERSION: StorageVersion = StorageVersion::new(1); + #[frame_support::pallet] pub mod pallet { use super::*; @@ -102,10 +106,15 @@ pub mod 
pallet { }; use sp_std::vec::Vec; + pub(crate) type ChannelRecycleQueue = + Vec<(TargetChainBlockNumber, TargetChainAccount)>; + pub(crate) type TargetChainAsset = <>::TargetChain as Chain>::ChainAsset; pub(crate) type TargetChainAccount = <>::TargetChain as Chain>::ChainAccount; pub(crate) type TargetChainAmount = <>::TargetChain as Chain>::ChainAmount; + pub(crate) type TargetChainBlockNumber = + <>::TargetChain as Chain>::ChainBlockNumber; #[derive(Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct DepositWitness { @@ -123,12 +132,13 @@ pub mod pallet { pub deposit_channel: DepositChannel, /// The block number at which the deposit channel was opened, expressed as a block number /// on the external Chain. - pub opened_at: ::ChainBlockNumber, - /// The block number at which the deposit channel will be closed, expressed as a - /// Chainflip-native block number. - // TODO: We should consider changing this to also be an external block number and expire - // based on external block numbers. See PRO-689. - pub expires_at: BlockNumberFor, + pub opened_at: TargetChainBlockNumber, + /// The last block on the target chain that the witnessing will witness it in. If funds are + /// sent after this block, they will not be witnessed. + pub expires_at: TargetChainBlockNumber, + + /// The action to be taken when the DepositChannel is deposited to. + pub action: ChannelAction, } /// Determines the action to take when a deposit is made to a channel. 
@@ -196,7 +206,26 @@ pub mod pallet { } } + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + pub deposit_channel_lifetime: TargetChainBlockNumber, + } + + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + Self { deposit_channel_lifetime: Default::default() } + } + } + + #[pallet::genesis_build] + impl, I: 'static> BuildGenesisConfig for GenesisConfig { + fn build(&self) { + DepositChannelLifetime::::put(self.deposit_channel_lifetime); + } + } + #[pallet::pallet] + #[pallet::storage_version(PALLET_VERSION)] #[pallet::without_storage_info] pub struct Pallet(PhantomData<(T, I)>); @@ -259,16 +288,6 @@ pub mod pallet { OptionQuery, >; - /// Stores the channel action against the address - #[pallet::storage] - pub type ChannelActions, I: 'static = ()> = StorageMap< - _, - Twox64Concat, - TargetChainAccount, - ChannelAction, - OptionQuery, - >; - /// Stores the latest channel id used to generate an address. #[pallet::storage] pub type ChannelIdCounter, I: 'static = ()> = @@ -304,6 +323,10 @@ pub mod pallet { pub type MinimumDeposit, I: 'static = ()> = StorageMap<_, Twox64Concat, TargetChainAsset, TargetChainAmount, ValueQuery>; + #[pallet::storage] + pub type DepositChannelLifetime, I: 'static = ()> = + StorageValue<_, TargetChainBlockNumber, ValueQuery>; + /// Stores any failed transfers by the Vault contract. /// Without dealing with the underlying reason for the failure, retrying is unlike to succeed. 
/// Therefore these calls are stored here, until we can react to the reason for failure and @@ -316,6 +339,10 @@ pub mod pallet { pub type DepositBalances, I: 'static = ()> = StorageMap<_, Twox64Concat, TargetChainAsset, DepositTracker, ValueQuery>; + #[pallet::storage] + pub type DepositChannelRecycleBlocks, I: 'static = ()> = + StorageValue<_, ChannelRecycleQueue, ValueQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { @@ -381,6 +408,39 @@ pub mod pallet { #[pallet::hooks] impl, I: 'static> Hooks> for Pallet { + /// Recycle addresses if we can + fn on_idle(_n: BlockNumberFor, remaining_weight: Weight) -> Weight { + let read_write_weight = + frame_support::weights::constants::RocksDbWeight::get().reads_writes(1, 1); + + let maximum_recycle_number = remaining_weight + .ref_time() + .checked_div(read_write_weight.ref_time()) + .unwrap_or_default() + .saturated_into::(); + + let can_recycle = DepositChannelRecycleBlocks::::mutate(|recycle_queue| { + Self::can_and_cannot_recycle( + recycle_queue, + maximum_recycle_number, + T::ChainTracking::get_block_height(), + ) + }); + + for address in can_recycle.iter() { + if let Some(details) = DepositChannelLookup::::take(address) { + if let Some(state) = details.deposit_channel.state.maybe_recycle() { + DepositChannelPool::::insert( + details.deposit_channel.channel_id, + DepositChannel { state, ..details.deposit_channel }, + ); + } + } + } + + read_write_weight.saturating_mul(can_recycle.len() as u64) + } + /// Take all scheduled Egress and send them out fn on_finalize(_n: BlockNumberFor) { // Send all fetch/transfer requests as a batch. Revert storage if failed. 
@@ -391,6 +451,19 @@ pub mod pallet { // Egress all scheduled Cross chain messages Self::do_egress_scheduled_ccm(); } + + fn on_runtime_upgrade() -> Weight { + migrations::PalletMigration::::on_runtime_upgrade() + } + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, DispatchError> { + migrations::PalletMigration::::pre_upgrade() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: sp_std::vec::Vec) -> Result<(), DispatchError> { + migrations::PalletMigration::::post_upgrade(state) + } } #[pallet::call] @@ -403,6 +476,7 @@ pub mod pallet { addresses: Vec>, ) -> DispatchResult { T::EnsureWitnessedAtCurrentEpoch::ensure_origin(origin)?; + for deposit_address in addresses { DepositChannelLookup::::mutate(deposit_address, |deposit_channel_details| { deposit_channel_details @@ -457,7 +531,7 @@ pub mod pallet { pub fn process_deposits( origin: OriginFor, deposit_witnesses: Vec>, - block_height: ::ChainBlockNumber, + block_height: TargetChainBlockNumber, ) -> DispatchResult { T::EnsureWitnessed::ensure_origin(origin)?; @@ -529,6 +603,21 @@ pub mod pallet { } impl, I: 'static> Pallet { + fn can_and_cannot_recycle( + channel_recycle_blocks: &mut ChannelRecycleQueue, + maximum_recyclable_number: usize, + current_block_height: TargetChainBlockNumber, + ) -> Vec> { + let partition_point = sp_std::cmp::min( + channel_recycle_blocks.partition_point(|(block, _)| *block <= current_block_height), + maximum_recyclable_number, + ); + channel_recycle_blocks + .drain(..partition_point) + .map(|(_, address)| address) + .collect() + } + /// Take all scheduled egress requests and send them out in an `AllBatch` call. /// /// Note: Egress transactions with Blacklisted assets are not sent, and kept in storage. 
@@ -552,6 +641,7 @@ impl, I: 'static> Pallet { .map(|details| { let can_fetch = details.deposit_channel.state.can_fetch(); + if can_fetch { deposit_fetch_id.replace( details.deposit_channel.fetch_id(), @@ -572,7 +662,6 @@ impl, I: 'static> Pallet { .collect() }); - // Returns Ok(()) if there's nothing to send. if batch_to_send.is_empty() { return TransactionOutcome::Commit(Ok(())) } @@ -685,11 +774,22 @@ impl, I: 'static> Pallet { asset: TargetChainAsset, amount: TargetChainAmount, deposit_details: ::DepositDetails, - block_height: ::ChainBlockNumber, + block_height: TargetChainBlockNumber, ) -> DispatchResult { let deposit_channel_details = DepositChannelLookup::::get(&deposit_address) .ok_or(Error::::InvalidDepositAddress)?; + if DepositChannelPool::::get(deposit_channel_details.deposit_channel.channel_id) + .is_some() + { + log_or_panic!( + "Deposit channel {} should not be in the recycled address pool if it's active", + deposit_channel_details.deposit_channel.channel_id + ); + #[cfg(not(debug_assertions))] + return Err(Error::::InvalidDepositAddress.into()) + } + ensure!( deposit_channel_details.deposit_channel.asset == asset, Error::::AssetMismatch @@ -716,12 +816,7 @@ impl, I: 'static> Pallet { let channel_id = deposit_channel_details.deposit_channel.channel_id; Self::deposit_event(Event::::DepositFetchesScheduled { channel_id, asset }); - // NB: Don't take here. We should continue witnessing this address - // even after an deposit to it has occurred. - // https://github.com/chainflip-io/chainflip-eth-contracts/pull/226 - match ChannelActions::::get(&deposit_address) - .ok_or(Error::::InvalidDepositAddress)? - { + match deposit_channel_details.action { ChannelAction::LiquidityProvision { lp_account, .. 
} => T::LpBalance::try_credit_account(&lp_account, asset.into(), amount.into())?, ChannelAction::Swap { @@ -783,13 +878,23 @@ impl, I: 'static> Pallet { Ok(()) } + fn expiry_and_recycle_block_height( + ) -> (TargetChainBlockNumber, TargetChainBlockNumber, TargetChainBlockNumber) + { + let current_height = T::ChainTracking::get_block_height(); + let lifetime = DepositChannelLifetime::::get(); + let expiry_height = current_height + lifetime; + let recycle_height = expiry_height + lifetime; + + (current_height, expiry_height, recycle_height) + } + /// Opens a channel for the given asset and registers it with the given action. /// /// May re-use an existing deposit address, depending on chain configuration. fn open_channel( source_asset: TargetChainAsset, - channel_action: ChannelAction, - expires_at: BlockNumberFor, + action: ChannelAction, ) -> Result<(ChannelId, TargetChainAccount), DispatchError> { let (deposit_channel, channel_id) = if let Some((channel_id, mut deposit_channel)) = DepositChannelPool::::drain().next() @@ -813,13 +918,18 @@ impl, I: 'static> Pallet { let deposit_address = deposit_channel.address.clone(); - ChannelActions::::insert(&deposit_address, channel_action); + let (current_height, expiry_height, recycle_height) = + Self::expiry_and_recycle_block_height(); + + DepositChannelRecycleBlocks::::append((recycle_height, deposit_address.clone())); + DepositChannelLookup::::insert( &deposit_address, DepositChannelDetails { deposit_channel, - opened_at: T::ChainTracking::get_block_height(), - expires_at, + opened_at: current_height, + expires_at: expiry_height, + action, }, ); @@ -882,13 +992,9 @@ impl, I: 'static> DepositApi for Pallet { fn request_liquidity_deposit_address( lp_account: T::AccountId, source_asset: TargetChainAsset, - expiry_block: BlockNumberFor, ) -> Result<(ChannelId, ForeignChainAddress), DispatchError> { - let (channel_id, deposit_address) = Self::open_channel( - source_asset, - ChannelAction::LiquidityProvision { lp_account }, - 
expiry_block, - )?; + let (channel_id, deposit_address) = + Self::open_channel(source_asset, ChannelAction::LiquidityProvision { lp_account })?; Ok((channel_id, deposit_address.into())) } @@ -901,7 +1007,6 @@ impl, I: 'static> DepositApi for Pallet { broker_commission_bps: BasisPoints, broker_id: T::AccountId, channel_metadata: Option, - expiry_block: BlockNumberFor, ) -> Result<(ChannelId, ForeignChainAddress), DispatchError> { let (channel_id, deposit_address) = Self::open_channel( source_asset, @@ -918,25 +1023,8 @@ impl, I: 'static> DepositApi for Pallet { broker_id, }, }, - expiry_block, )?; Ok((channel_id, deposit_address.into())) } - - // Note: we expect that the mapping from any instantiable pallet to the instance of this pallet - // is matching to the right chain. Because of that we can ignore the chain parameter. - fn expire_channel(address: TargetChainAccount) { - ChannelActions::::remove(&address); - if let Some(deposit_channel_details) = DepositChannelLookup::::get(&address) { - if let Some(state) = deposit_channel_details.deposit_channel.state.maybe_recycle() { - DepositChannelPool::::insert( - deposit_channel_details.deposit_channel.channel_id, - DepositChannel { state, ..deposit_channel_details.deposit_channel }, - ); - } - } else { - log_or_panic!("Tried to close an unknown channel."); - } - } } diff --git a/state-chain/pallets/cf-ingress-egress/src/migrations.rs b/state-chain/pallets/cf-ingress-egress/src/migrations.rs new file mode 100644 index 0000000000..f632b58ce8 --- /dev/null +++ b/state-chain/pallets/cf-ingress-egress/src/migrations.rs @@ -0,0 +1,6 @@ +pub mod ingress_expiry; + +use cf_runtime_upgrade_utilities::VersionedMigration; + +pub type PalletMigration = + (VersionedMigration, ingress_expiry::Migration, 0, 1>,); diff --git a/state-chain/pallets/cf-ingress-egress/src/migrations/ingress_expiry.rs b/state-chain/pallets/cf-ingress-egress/src/migrations/ingress_expiry.rs new file mode 100644 index 0000000000..3c5c33943b --- /dev/null +++ 
b/state-chain/pallets/cf-ingress-egress/src/migrations/ingress_expiry.rs @@ -0,0 +1,119 @@ +use crate::*; +use frame_support::traits::OnRuntimeUpgrade; +use sp_std::marker::PhantomData; + +#[cfg(feature = "try-runtime")] +use codec::{Decode, Encode}; +#[cfg(feature = "try-runtime")] +use frame_support::dispatch::DispatchError; + +// Copied from state-chain/node/src/chain_spec/testnet.rs: +// These represent approximately 2 hours on testnet block times +pub const BITCOIN_EXPIRY_BLOCKS: u32 = 2 * 60 * 60 / (10 * 60); +pub const ETHEREUM_EXPIRY_BLOCKS: u32 = 2 * 60 * 60 / 14; +pub const POLKADOT_EXPIRY_BLOCKS: u32 = 2 * 60 * 60 / 6; + +pub struct Migration, I: 'static>(PhantomData<(T, I)>); + +// These were removed in 0.9.4 +mod old { + + use super::*; + + #[derive( + CloneNoBound, RuntimeDebug, PartialEq, Eq, Encode, Decode, TypeInfo, MaxEncodedLen, + )] + #[scale_info(skip_type_params(T, I))] + pub struct DepositChannelDetails, I: 'static> { + pub deposit_channel: DepositChannel, + /// The block number at which the deposit channel was opened, expressed as a block number + /// on the external Chain. 
+ pub opened_at: ::ChainBlockNumber, + // *State Chain block number* + pub expires_at: BlockNumberFor, + } + + #[frame_support::storage_alias] + pub type ChannelActions, I: 'static> = StorageMap< + Pallet, + Twox64Concat, + TargetChainAccount, + ChannelAction<::AccountId>, + OptionQuery, + >; + + #[frame_support::storage_alias] + pub type DepositChannelLookup, I: 'static> = StorageMap< + Pallet, + Twox64Concat, + TargetChainAccount, + DepositChannelDetails, + OptionQuery, + >; +} + +impl, I: 'static> OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let lifetime: TargetChainBlockNumber = match T::TargetChain::NAME { + "Bitcoin" => BITCOIN_EXPIRY_BLOCKS.into(), + "Ethereum" => ETHEREUM_EXPIRY_BLOCKS.into(), + "Polkadot" => POLKADOT_EXPIRY_BLOCKS.into(), + _ => unreachable!("Unsupported chain"), + }; + + DepositChannelLifetime::::put(lifetime); + + let channel_lifetime = DepositChannelLifetime::::get(); + let current_external_block_height = T::ChainTracking::get_block_height(); + let expiry_block = current_external_block_height.saturating_add(channel_lifetime); + let recycle_block = expiry_block.saturating_add(channel_lifetime); + + let old_channel_lookup = old::DepositChannelLookup::::drain().collect::>(); + + for (address, old_channel) in old_channel_lookup { + if let Some(action) = old::ChannelActions::::take(&address) { + DepositChannelLookup::::insert( + address.clone(), + DepositChannelDetails { + deposit_channel: old_channel.deposit_channel, + opened_at: old_channel.opened_at, + expires_at: expiry_block, + action, + }, + ); + } + + // We're just going to recycle them 2 hours from when we did the migration. + DepositChannelRecycleBlocks::::append((recycle_block, address)); + + // Remove any we missed above. 
+ let _ = old::ChannelActions::::drain().collect::>(); + } + + Weight::zero() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, DispatchError> { + let number_of_channels_in_lookup = + old::DepositChannelLookup::::iter_keys().count() as u32; + + Ok(number_of_channels_in_lookup.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), DispatchError> { + let number_of_channels_in_lookup_pre_migration = ::decode(&mut &state[..]).unwrap(); + ensure!( + DepositChannelLookup::::iter_keys().count() as u32 == + number_of_channels_in_lookup_pre_migration, + "DepositChannelLookup migration failed." + ); + ensure!( + DepositChannelRecycleBlocks::::decode_len().unwrap_or_default() as u32 == + number_of_channels_in_lookup_pre_migration, + "DepositChannelRecycleBlocks migration failed." + ); + Ok(()) + } +} diff --git a/state-chain/pallets/cf-ingress-egress/src/mock.rs b/state-chain/pallets/cf-ingress-egress/src/mock.rs index f5fb366b92..322311fb95 100644 --- a/state-chain/pallets/cf-ingress-egress/src/mock.rs +++ b/state-chain/pallets/cf-ingress-egress/src/mock.rs @@ -26,7 +26,6 @@ use cf_traits::{ }; use frame_support::traits::{OriginTrait, UnfilteredDispatchable}; use frame_system as system; -use frame_system::pallet_prelude::BlockNumberFor; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, IdentityLookup, Zero}; @@ -116,8 +115,13 @@ impl crate::Config for Test { pub const ALICE: ::AccountId = 123u64; pub const BROKER: ::AccountId = 456u64; -// Configure a mock runtime to test the pallet. -impl_test_helpers!(Test); +impl_test_helpers! 
{ + Test, + RuntimeGenesisConfig { + system: Default::default(), + ingress_egress: IngressEgressConfig { deposit_channel_lifetime: 100 }, + } +} type TestChainAccount = <::TargetChain as Chain>::ChainAccount; type TestChainAmount = <::TargetChain as Chain>::ChainAmount; @@ -131,6 +135,7 @@ pub trait RequestAddressAndDeposit { } impl RequestAddressAndDeposit for TestRunner { + /// Request deposit addresses and complete the deposit of funds into those addresses. #[track_caller] fn request_address_and_deposit( self, @@ -168,14 +173,12 @@ pub enum DepositRequest { Liquidity { lp_account: AccountId, asset: TestChainAsset, - expiry_block: BlockNumberFor, }, /// Do a non-ccm swap using a default broker and no fees. SimpleSwap { source_asset: TestChainAsset, destination_asset: TestChainAsset, destination_address: ForeignChainAddress, - expiry_block: BlockNumberFor, }, } @@ -206,19 +209,16 @@ impl RequestAddress for TestExternalities { .iter() .cloned() .map(|request| match request { - DepositRequest::Liquidity { lp_account, asset, expiry_block } => - IngressEgress::request_liquidity_deposit_address( - lp_account, - asset, - expiry_block, - ) - .map(|(id, addr)| (request, id, TestChainAccount::try_from(addr).unwrap())) - .unwrap(), + DepositRequest::Liquidity { lp_account, asset } => - IngressEgress::request_liquidity_deposit_address(lp_account, asset) + .map(|(id, addr)| { + (request, id, TestChainAccount::try_from(addr).unwrap()) + }) + .unwrap(), DepositRequest::SimpleSwap { source_asset, destination_asset, ref destination_address, - expiry_block, } => IngressEgress::request_swap_deposit_address( source_asset, destination_asset.into(), @@ -226,7 +226,6 @@ impl RequestAddress for TestExternalities { Default::default(), BROKER, None, - expiry_block, ) .map(|(channel_id, deposit_address)| { (request, channel_id, TestChainAccount::try_from(deposit_address).unwrap()) diff --git a/state-chain/pallets/cf-ingress-egress/src/tests.rs 
b/state-chain/pallets/cf-ingress-egress/src/tests.rs index fc086e4f3d..de0e03a7d2 100644 --- a/state-chain/pallets/cf-ingress-egress/src/tests.rs +++ b/state-chain/pallets/cf-ingress-egress/src/tests.rs @@ -22,6 +22,7 @@ use cf_traits::{ use frame_support::{ assert_noop, assert_ok, traits::{Hooks, OriginTrait}, + weights::Weight, }; use sp_core::H160; @@ -29,7 +30,6 @@ const ALICE_ETH_ADDRESS: EthereumAddress = H160([100u8; 20]); const BOB_ETH_ADDRESS: EthereumAddress = H160([101u8; 20]); const ETH_ETH: eth::Asset = eth::Asset::Eth; const ETH_FLIP: eth::Asset = eth::Asset::Flip; -const EXPIRY_BLOCK: u64 = 6; #[track_caller] fn expect_size_of_address_pool(size: usize) { @@ -200,10 +200,8 @@ fn can_schedule_swap_egress_to_batch() { fn request_address_and_deposit( who: ChannelId, asset: eth::Asset, - expiry: u64, ) -> (ChannelId, ::ChainAccount) { - let (id, address) = - IngressEgress::request_liquidity_deposit_address(who, asset, expiry).unwrap(); + let (id, address) = IngressEgress::request_liquidity_deposit_address(who, asset).unwrap(); let address: ::ChainAccount = address.try_into().unwrap(); assert_ok!(IngressEgress::process_single_deposit( address, @@ -220,9 +218,9 @@ fn can_schedule_deposit_fetch() { new_test_ext().execute_with(|| { assert!(ScheduledEgressFetchOrTransfer::::get().is_empty()); - request_address_and_deposit(1u64, eth::Asset::Eth, 1_000u64); - request_address_and_deposit(2u64, eth::Asset::Eth, 1_000u64); - request_address_and_deposit(3u64, eth::Asset::Flip, 1_000u64); + request_address_and_deposit(1u64, eth::Asset::Eth); + request_address_and_deposit(2u64, eth::Asset::Eth); + request_address_and_deposit(3u64, eth::Asset::Flip); assert!(matches!( &ScheduledEgressFetchOrTransfer::::get()[..], @@ -237,7 +235,7 @@ fn can_schedule_deposit_fetch() { crate::Event::DepositFetchesScheduled { channel_id: 1, asset: eth::Asset::Eth }, )); - request_address_and_deposit(4u64, eth::Asset::Eth, 1_000u64); + request_address_and_deposit(4u64, eth::Asset::Eth); 
assert!(matches!( &ScheduledEgressFetchOrTransfer::::get()[..], @@ -258,16 +256,16 @@ fn on_finalize_can_send_batch_all() { IngressEgress::schedule_egress(ETH_ETH, 2_000, ALICE_ETH_ADDRESS, None); IngressEgress::schedule_egress(ETH_ETH, 3_000, BOB_ETH_ADDRESS, None); IngressEgress::schedule_egress(ETH_ETH, 4_000, BOB_ETH_ADDRESS, None); - request_address_and_deposit(1u64, eth::Asset::Eth, 1_000u64); - request_address_and_deposit(2u64, eth::Asset::Eth, 1_000u64); - request_address_and_deposit(3u64, eth::Asset::Eth, 1_000u64); - request_address_and_deposit(4u64, eth::Asset::Eth, 1_000u64); + request_address_and_deposit(1u64, eth::Asset::Eth); + request_address_and_deposit(2u64, eth::Asset::Eth); + request_address_and_deposit(3u64, eth::Asset::Eth); + request_address_and_deposit(4u64, eth::Asset::Eth); IngressEgress::schedule_egress(ETH_FLIP, 5_000, ALICE_ETH_ADDRESS, None); IngressEgress::schedule_egress(ETH_FLIP, 6_000, ALICE_ETH_ADDRESS, None); IngressEgress::schedule_egress(ETH_FLIP, 7_000, BOB_ETH_ADDRESS, None); IngressEgress::schedule_egress(ETH_FLIP, 8_000, BOB_ETH_ADDRESS, None); - request_address_and_deposit(5u64, eth::Asset::Flip, 1_000u64); + request_address_and_deposit(5u64, eth::Asset::Flip); // Take all scheduled Egress and Broadcast as batch IngressEgress::on_finalize(1); @@ -299,19 +297,19 @@ fn all_batch_apicall_creation_failure_should_rollback_storage() { IngressEgress::schedule_egress(ETH_ETH, 2_000, ALICE_ETH_ADDRESS, None); IngressEgress::schedule_egress(ETH_ETH, 3_000, BOB_ETH_ADDRESS, None); IngressEgress::schedule_egress(ETH_ETH, 4_000, BOB_ETH_ADDRESS, None); - request_address_and_deposit(1u64, eth::Asset::Eth, 1_000u64); - request_address_and_deposit(2u64, eth::Asset::Eth, 1_000u64); - request_address_and_deposit(3u64, eth::Asset::Eth, 1_000u64); - request_address_and_deposit(4u64, eth::Asset::Eth, 1_000u64); + request_address_and_deposit(1u64, eth::Asset::Eth); + request_address_and_deposit(2u64, eth::Asset::Eth); + 
request_address_and_deposit(3u64, eth::Asset::Eth); + request_address_and_deposit(4u64, eth::Asset::Eth); IngressEgress::schedule_egress(ETH_FLIP, 5_000, ALICE_ETH_ADDRESS, None); IngressEgress::schedule_egress(ETH_FLIP, 6_000, ALICE_ETH_ADDRESS, None); IngressEgress::schedule_egress(ETH_FLIP, 7_000, BOB_ETH_ADDRESS, None); IngressEgress::schedule_egress(ETH_FLIP, 8_000, BOB_ETH_ADDRESS, None); - request_address_and_deposit(5u64, eth::Asset::Flip, 1_000u64); + request_address_and_deposit(5u64, eth::Asset::Flip); MockAllBatch::::set_success(false); - request_address_and_deposit(4u64, eth::Asset::Usdc, 1_000u64); + request_address_and_deposit(4u64, eth::Asset::Usdc); let scheduled_requests = ScheduledEgressFetchOrTransfer::::get(); @@ -329,21 +327,10 @@ fn addresses_are_getting_reused() { // Request 2 deposit addresses and deposit to one of them. .request_address_and_deposit(&[ ( - DepositRequest::Liquidity { - lp_account: ALICE, - asset: eth::Asset::Eth, - expiry_block: 1000_u64, - }, + DepositRequest::Liquidity { lp_account: ALICE, asset: eth::Asset::Eth }, 100u32.into(), ), - ( - DepositRequest::Liquidity { - lp_account: ALICE, - asset: eth::Asset::Eth, - expiry_block: 1000_u64, - }, - 0u32.into(), - ), + (DepositRequest::Liquidity { lp_account: ALICE, asset: eth::Asset::Eth }, 0u32.into()), ]) .inspect_storage(|deposit_details| { assert_eq!(ChannelIdCounter::::get(), deposit_details.len() as u64); @@ -365,9 +352,9 @@ fn addresses_are_getting_reused() { channels }) .then_execute_at_next_block(|channels| { - for (_request, _id, address) in &channels { - IngressEgress::expire_channel(*address); - } + let recycle_block = IngressEgress::expiry_and_recycle_block_height().2; + BlockHeightProvider::::set_block_height(recycle_block); + channels[0].clone() }) // Check that the used address is now deployed and in the pool of available addresses. 
@@ -379,7 +366,6 @@ fn addresses_are_getting_reused() { .request_deposit_addresses(&[(DepositRequest::Liquidity { lp_account: ALICE, asset: eth::Asset::Eth, - expiry_block: 1000_u64, })]) // The address should have been taken from the pool and the id counter unchanged. .inspect_storage(|_| { @@ -392,18 +378,22 @@ fn addresses_are_getting_reused() { fn proof_address_pool_integrity() { new_test_ext().execute_with(|| { let channel_details = (0..3) - .map(|id| request_address_and_deposit(id, eth::Asset::Eth, 1_000u64)) + .map(|id| request_address_and_deposit(id, eth::Asset::Eth)) .collect::>(); // All addresses in use expect_size_of_address_pool(0); IngressEgress::on_finalize(1); for (_id, address) in channel_details { assert_ok!(IngressEgress::finalise_ingress(RuntimeOrigin::root(), vec![address])); - IngressEgress::expire_channel(address); } + let recycle_block = IngressEgress::expiry_and_recycle_block_height().2; + BlockHeightProvider::::set_block_height(recycle_block); + + IngressEgress::on_idle(1, Weight::MAX); + // Expect all addresses to be available expect_size_of_address_pool(3); - request_address_and_deposit(4u64, eth::Asset::Eth, 1_000u64); + request_address_and_deposit(4u64, eth::Asset::Eth); // Expect one address to be in use expect_size_of_address_pool(2); }); @@ -413,19 +403,20 @@ fn proof_address_pool_integrity() { fn create_new_address_while_pool_is_empty() { new_test_ext().execute_with(|| { let channel_details = (0..2) - .map(|id| request_address_and_deposit(id, eth::Asset::Eth, 1_000u64)) + .map(|id| request_address_and_deposit(id, eth::Asset::Eth)) .collect::>(); IngressEgress::on_finalize(1); for (_id, address) in channel_details { assert_ok!(IngressEgress::finalise_ingress(RuntimeOrigin::root(), vec![address])); - IngressEgress::expire_channel(address); } - IngressEgress::on_initialize(EXPIRY_BLOCK); + let recycle_block = IngressEgress::expiry_and_recycle_block_height().2; + BlockHeightProvider::::set_block_height(recycle_block); + 
IngressEgress::on_idle(1, Weight::MAX); + assert_eq!(ChannelIdCounter::::get(), 2); - request_address_and_deposit(3u64, eth::Asset::Eth, 1_000u64); + request_address_and_deposit(3u64, eth::Asset::Eth); assert_eq!(ChannelIdCounter::::get(), 2); IngressEgress::on_finalize(1); - IngressEgress::on_initialize(EXPIRY_BLOCK); assert_eq!(ChannelIdCounter::::get(), 2); }); } @@ -442,7 +433,6 @@ fn reused_address_channel_id_matches() { let (reused_channel_id, reused_address) = IngressEgress::open_channel( eth::Asset::Eth, ChannelAction::LiquidityProvision { lp_account: 0 }, - 1_000u64, ) .unwrap(); // The reused details should be the same as before. @@ -478,7 +468,6 @@ fn can_process_ccm_deposit() { 0, 1, Some(channel_metadata), - 1_000u64, ) .unwrap(); @@ -590,7 +579,7 @@ fn multi_use_deposit_address_different_blocks() { const ETH: eth::Asset = eth::Asset::Eth; new_test_ext() - .then_execute_at_next_block(|_| request_address_and_deposit(ALICE, ETH, 1_000u64)) + .then_execute_at_next_block(|_| request_address_and_deposit(ALICE, ETH)) .then_execute_at_next_block(|channel @ (_, deposit_address)| { // Set the address to deployed. // Do another, should succeed. @@ -601,11 +590,13 @@ fn multi_use_deposit_address_different_blocks() { (), Default::default() )); + let recycle_block = IngressEgress::expiry_and_recycle_block_height().2; + BlockHeightProvider::::set_block_height(recycle_block); + channel }) .then_execute_at_next_block(|(_, deposit_address)| { // Closing the channel should invalidate the deposit address. 
- IngressEgress::expire_channel(deposit_address); assert_noop!( IngressEgress::process_deposits( RuntimeOrigin::root(), @@ -628,11 +619,7 @@ fn multi_use_deposit_same_block() { const FLIP: eth::Asset = eth::Asset::Flip; const DEPOSIT_AMOUNT: ::ChainAmount = 1_000; new_test_ext() - .request_deposit_addresses(&[DepositRequest::Liquidity { - lp_account: ALICE, - asset: FLIP, - expiry_block: 1_000u64, - }]) + .request_deposit_addresses(&[DepositRequest::Liquidity { lp_account: ALICE, asset: FLIP }]) .map_context(|mut ctx| { assert!(ctx.len() == 1); ctx.pop().unwrap() @@ -785,7 +772,7 @@ fn deposits_below_minimum_are_rejected() { )); // Observe that eth deposit gets rejected. - let (_, deposit_address) = request_address_and_deposit(0, eth, 1_000u64); + let (_, deposit_address) = request_address_and_deposit(0, eth); System::assert_last_event(RuntimeEvent::IngressEgress( crate::Event::::DepositIgnored { deposit_address, @@ -796,7 +783,7 @@ fn deposits_below_minimum_are_rejected() { )); // Flip deposit should succeed. - let (_, deposit_address) = request_address_and_deposit(0, flip, 1_000u64); + let (_, deposit_address) = request_address_and_deposit(0, flip); System::assert_last_event(RuntimeEvent::IngressEgress( crate::Event::::DepositReceived { deposit_address, @@ -813,7 +800,7 @@ fn handle_pending_deployment() { const ETH: eth::Asset = eth::Asset::Eth; new_test_ext().execute_with(|| { // Initial request. - let (_, deposit_address) = request_address_and_deposit(ALICE, eth::Asset::Eth, 1_000u64); + let (_, deposit_address) = request_address_and_deposit(ALICE, eth::Asset::Eth); assert_eq!(ScheduledEgressFetchOrTransfer::::decode_len().unwrap_or_default(), 1); // Process deposits. 
IngressEgress::on_finalize(1); @@ -822,8 +809,8 @@ Pallet::::process_single_deposit(deposit_address, ETH, 1, (), Default::default()) .unwrap(); // Non-pending requests can still be sent - request_address_and_deposit(1u64, eth::Asset::Eth, 1_000u64); - request_address_and_deposit(2u64, eth::Asset::Eth, 1_000u64); + request_address_and_deposit(1u64, eth::Asset::Eth); + request_address_and_deposit(2u64, eth::Asset::Eth); assert_eq!(ScheduledEgressFetchOrTransfer::::decode_len().unwrap_or_default(), 3); // Process deposit again. IngressEgress::on_finalize(1); @@ -841,7 +828,7 @@ fn handle_pending_deployment_same_block() { new_test_ext().execute_with(|| { // Initial request. - let (_, deposit_address) = request_address_and_deposit(ALICE, eth::Asset::Eth, 1_000u64); + let (_, deposit_address) = request_address_and_deposit(ALICE, eth::Asset::Eth); Pallet::::process_single_deposit( deposit_address, eth::Asset::Eth, @@ -877,7 +864,7 @@ fn channel_reuse_with_different_assets() { new_test_ext() // First, request a deposit address and use it, then close it so it gets recycled. 
.request_address_and_deposit(&[( - DepositRequest::Liquidity { lp_account: ALICE, asset: ASSET_1, expiry_block: 1_000u64 }, + DepositRequest::Liquidity { lp_account: ALICE, asset: ASSET_1 }, 100_000, )]) .map_context(|mut result| result.pop().unwrap()) @@ -894,8 +881,9 @@ fn channel_reuse_with_different_assets() { asset ); }) - .then_execute_at_next_block(|(_, channel_id, channel_address)| { - IngressEgress::expire_channel(channel_address); + .then_execute_at_next_block(|(_, channel_id, _)| { + let recycle_block = IngressEgress::expiry_and_recycle_block_height().2; + BlockHeightProvider::::set_block_height(recycle_block); channel_id }) .inspect_storage(|channel_id| { @@ -909,7 +897,6 @@ fn channel_reuse_with_different_assets() { .request_deposit_addresses(&[DepositRequest::Liquidity { lp_account: ALICE, asset: ASSET_2, - expiry_block: 1_000u64, }]) .map_context(|mut result| result.pop().unwrap()) // Ensure that the deposit channel's asset is updated. @@ -923,6 +910,31 @@ fn channel_reuse_with_different_assets() { }); } +/// This is the sequence we're testing. +/// 1. Request deposit address +/// 2. Deposit to address when it's almost expired +/// 3. The channel is expired +/// 4. We need to finalise the ingress, by fetching +/// 5. The fetch should succeed. +#[test] +fn ingress_finalisation_succeeds_after_channel_expired_but_not_recycled() { + new_test_ext().execute_with(|| { + assert!(ScheduledEgressFetchOrTransfer::::get().is_empty(), "Is empty after genesis"); + + request_address_and_deposit(ALICE, eth::Asset::Eth); + + // Because we're only *expiring* and not recycling, we should still be able to fetch. 
+ let expiry_block = IngressEgress::expiry_and_recycle_block_height().1; + BlockHeightProvider::::set_block_height(expiry_block); + + IngressEgress::on_idle(1, Weight::MAX); + + IngressEgress::on_finalize(1); + + assert!(ScheduledEgressFetchOrTransfer::::get().is_empty(),); + }); +} + #[test] fn can_store_failed_vault_transfers() { new_test_ext().execute_with(|| { @@ -953,8 +965,6 @@ fn basic_balance_tracking() { const ETH_DEPOSIT_AMOUNT: u128 = 1_000; const FLIP_DEPOSIT_AMOUNT: u128 = 2_000; const USDC_DEPOSIT_AMOUNT: u128 = 3_000; - // Expiry just needs to be sufficiently high so that it won't trigger. - const EXPIRY_BLOCK: u64 = 1_000; new_test_ext() .check_deposit_balances(&[ @@ -963,11 +973,7 @@ fn basic_balance_tracking() { (eth::Asset::Usdc, 0), ]) .request_address_and_deposit(&[( - DepositRequest::Liquidity { - lp_account: ALICE, - asset: eth::Asset::Eth, - expiry_block: EXPIRY_BLOCK, - }, + DepositRequest::Liquidity { lp_account: ALICE, asset: eth::Asset::Eth }, ETH_DEPOSIT_AMOUNT, )]) .check_deposit_balances(&[ @@ -976,11 +982,7 @@ fn basic_balance_tracking() { (eth::Asset::Usdc, 0), ]) .request_address_and_deposit(&[( - DepositRequest::Liquidity { - lp_account: ALICE, - asset: eth::Asset::Flip, - expiry_block: EXPIRY_BLOCK, - }, + DepositRequest::Liquidity { lp_account: ALICE, asset: eth::Asset::Flip }, FLIP_DEPOSIT_AMOUNT, )]) .check_deposit_balances(&[ @@ -989,11 +991,7 @@ fn basic_balance_tracking() { (eth::Asset::Usdc, 0), ]) .request_address_and_deposit(&[( - DepositRequest::Liquidity { - lp_account: ALICE, - asset: eth::Asset::Usdc, - expiry_block: EXPIRY_BLOCK, - }, + DepositRequest::Liquidity { lp_account: ALICE, asset: eth::Asset::Usdc }, USDC_DEPOSIT_AMOUNT, )]) .check_deposit_balances(&[ @@ -1002,11 +1000,7 @@ fn basic_balance_tracking() { (eth::Asset::Usdc, USDC_DEPOSIT_AMOUNT), ]) .request_address_and_deposit(&[( - DepositRequest::Liquidity { - lp_account: ALICE, - asset: eth::Asset::Eth, - expiry_block: EXPIRY_BLOCK, - }, + 
DepositRequest::Liquidity { lp_account: ALICE, asset: eth::Asset::Eth }, ETH_DEPOSIT_AMOUNT, )]) .check_deposit_balances(&[ @@ -1019,7 +1013,6 @@ fn basic_balance_tracking() { source_asset: eth::Asset::Eth, destination_asset: eth::Asset::Flip, destination_address: ForeignChainAddress::Eth(Default::default()), - expiry_block: EXPIRY_BLOCK, }, ETH_DEPOSIT_AMOUNT, )]) @@ -1029,3 +1022,98 @@ fn basic_balance_tracking() { (eth::Asset::Usdc, USDC_DEPOSIT_AMOUNT), ]); } + +#[test] +fn test_default_empty_amounts() { + let mut channel_recycle_blocks = Default::default(); + let can_recycle = IngressEgress::can_and_cannot_recycle(&mut channel_recycle_blocks, 0, 0); + + assert_eq!(can_recycle, vec![]); + assert_eq!(channel_recycle_blocks, vec![]); +} + +#[test] +fn test_cannot_recycle_if_block_number_less_than_current_height() { + let maximum_recyclable_number = 2; + let mut channel_recycle_blocks = + (1u64..5).map(|i| (i, H160::from([i as u8; 20]))).collect::>(); + let current_block_height = 3; + + let can_recycle = IngressEgress::can_and_cannot_recycle( + &mut channel_recycle_blocks, + maximum_recyclable_number, + current_block_height, + ); + + assert_eq!(can_recycle, vec![H160::from([1u8; 20]), H160::from([2; 20])]); + assert_eq!( + channel_recycle_blocks, + vec![(3, H160::from([3u8; 20])), (4, H160::from([4u8; 20]))] + ); +} + +// Same test as above, but lower maximum recyclable number +#[test] +fn test_can_only_recycle_up_to_max_amount() { + let maximum_recyclable_number = 1; + let mut channel_recycle_blocks = + (1u64..5).map(|i| (i, H160::from([i as u8; 20]))).collect::>(); + let current_block_height = 3; + + let can_recycle = IngressEgress::can_and_cannot_recycle( + &mut channel_recycle_blocks, + maximum_recyclable_number, + current_block_height, + ); + + assert_eq!(can_recycle, vec![H160::from([1u8; 20])]); + assert_eq!( + channel_recycle_blocks, + vec![(2, H160::from([2; 20])), (3, H160::from([3u8; 20])), (4, H160::from([4u8; 20]))] + ); +} + +#[test] +fn 
none_can_be_recycled_due_to_low_block_number() { + let maximum_recyclable_number = 4; + let mut channel_recycle_blocks = + (1u64..5).map(|i| (i, H160::from([i as u8; 20]))).collect::>(); + let current_block_height = 0; + + let can_recycle = IngressEgress::can_and_cannot_recycle( + &mut channel_recycle_blocks, + maximum_recyclable_number, + current_block_height, + ); + + assert!(can_recycle.is_empty()); + assert_eq!( + channel_recycle_blocks, + vec![ + (1, H160::from([1u8; 20])), + (2, H160::from([2; 20])), + (3, H160::from([3; 20])), + (4, H160::from([4; 20])) + ] + ); +} + +#[test] +fn all_can_be_recycled() { + let maximum_recyclable_number = 4; + let mut channel_recycle_blocks = + (1u64..5).map(|i| (i, H160::from([i as u8; 20]))).collect::>(); + let current_block_height = 4; + + let can_recycle = IngressEgress::can_and_cannot_recycle( + &mut channel_recycle_blocks, + maximum_recyclable_number, + current_block_height, + ); + + assert_eq!( + can_recycle, + vec![H160::from([1u8; 20]), H160::from([2; 20]), H160::from([3; 20]), H160::from([4; 20])] + ); + assert!(channel_recycle_blocks.is_empty()); +} diff --git a/state-chain/pallets/cf-lp/Cargo.toml b/state-chain/pallets/cf-lp/Cargo.toml index 163d8d508e..bc5c0cea57 100644 --- a/state-chain/pallets/cf-lp/Cargo.toml +++ b/state-chain/pallets/cf-lp/Cargo.toml @@ -17,6 +17,7 @@ targets = ['x86_64-unknown-linux-gnu'] cf-chains = { path = '../../chains', default-features = false } cf-primitives = { path = '../../primitives', default-features = false } cf-traits = { path = '../../traits', default-features = false } +cf-runtime-upgrade-utilities = { path = '../../runtime-upgrade-utilities', default-features = false } serde = { version = '1.0.126', default_features = false, features = [ 'alloc', @@ -49,6 +50,7 @@ std = [ 'cf-chains/std', 'cf-primitives/std', 'cf-traits/std', + 'cf-runtime-upgrade-utilities/std', 'codec/std', 'frame-benchmarking/std', 'frame-support/std', @@ -66,4 +68,7 @@ runtime-benchmarks = [ 
'frame-system/runtime-benchmarks', 'pallet-cf-account-roles/runtime-benchmarks', ] -try-runtime = ['frame-support/try-runtime'] +try-runtime = [ + 'cf-runtime-upgrade-utilities/try-runtime', + 'frame-support/try-runtime', +] diff --git a/state-chain/pallets/cf-lp/src/benchmarking.rs b/state-chain/pallets/cf-lp/src/benchmarking.rs index a341899a2c..9f6123a72f 100644 --- a/state-chain/pallets/cf-lp/src/benchmarking.rs +++ b/state-chain/pallets/cf-lp/src/benchmarking.rs @@ -5,7 +5,7 @@ use cf_chains::{address::EncodedAddress, benchmarking_value::BenchmarkValue}; use cf_primitives::Asset; use cf_traits::AccountRoleRegistry; use frame_benchmarking::{benchmarks, whitelisted_caller}; -use frame_support::{assert_ok, dispatch::UnfilteredDispatchable, traits::OnNewAccount}; +use frame_support::{assert_ok, traits::OnNewAccount}; use frame_system::RawOrigin; benchmarks! { @@ -40,38 +40,6 @@ benchmarks! { verify { assert_ok!(T::AccountRoleRegistry::ensure_liquidity_provider(RawOrigin::Signed(caller).into())); } - - on_initialize { - let a in 1..100; - let caller: T::AccountId = whitelisted_caller(); - ::OnNewAccount::on_new_account(&caller); - let _ = Pallet::::register_lp_account(RawOrigin::Signed(caller.clone()).into()); - let _ = Pallet::::register_liquidity_refund_address( - RawOrigin::Signed(caller.clone()).into(), - EncodedAddress::Eth(Default::default()), - ); - for i in 0..a { - assert_ok!(Pallet::::request_liquidity_deposit_address(RawOrigin::Signed(caller.clone()).into(), Asset::Eth)); - } - let expiry = LpTTL::::get() + frame_system::Pallet::::current_block_number(); - assert!(!LiquidityChannelExpiries::::get(expiry).is_empty()); - }: { - Pallet::::on_initialize(expiry); - } verify { - assert!(LiquidityChannelExpiries::::get(expiry).is_empty()); - } - - set_lp_ttl { - let ttl = BlockNumberFor::::from(1_000u32); - let call = Call::::set_lp_ttl { - ttl, - }; - }: { - let _ = call.dispatch_bypass_filter(T::EnsureGovernance::try_successful_origin().unwrap()); - } verify 
{ - assert_eq!(crate::LpTTL::::get(), ttl); - } - register_liquidity_refund_address { let caller: T::AccountId = whitelisted_caller(); ::OnNewAccount::on_new_account(&caller); diff --git a/state-chain/pallets/cf-lp/src/lib.rs b/state-chain/pallets/cf-lp/src/lib.rs index 5c112da763..c95ae66a4c 100644 --- a/state-chain/pallets/cf-lp/src/lib.rs +++ b/state-chain/pallets/cf-lp/src/lib.rs @@ -8,12 +8,10 @@ use cf_traits::{ EgressApi, }; use frame_support::{ - pallet_prelude::*, - sp_runtime::{traits::BlockNumberProvider, DispatchResult, Saturating}, + dispatch::Vec, pallet_prelude::*, sp_runtime::DispatchResult, traits::OnRuntimeUpgrade, }; use frame_system::pallet_prelude::*; pub use pallet::*; -use sp_std::vec::Vec; mod benchmarking; @@ -22,9 +20,12 @@ mod mock; #[cfg(test)] mod tests; +pub mod migrations; pub mod weights; pub use weights::WeightInfo; +pub const PALLET_VERSION: StorageVersion = StorageVersion::new(1); + impl_pallet_safe_mode!(PalletSafeMode; deposit_enabled, withdrawal_enabled); #[frame_support::pallet] @@ -83,21 +84,6 @@ pub mod pallet { WithdrawalsDisabled, } - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_initialize(n: BlockNumberFor) -> Weight { - let expired = LiquidityChannelExpiries::::take(n); - let expired_count = expired.len(); - for (_, address) in expired { - T::DepositHandler::expire_channel(address.clone()); - Self::deposit_event(Event::LiquidityDepositAddressExpired { - address: T::AddressConverter::to_encoded_address(address), - }); - } - T::WeightInfo::on_initialize(expired_count as u32) - } - } - #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -115,22 +101,15 @@ pub mod pallet { channel_id: ChannelId, asset: Asset, deposit_address: EncodedAddress, - expiry_block: BlockNumberFor, // account the funds will be credited to upon deposit account_id: T::AccountId, }, - LiquidityDepositAddressExpired { - address: EncodedAddress, - }, WithdrawalEgressScheduled { egress_id: EgressId, asset: 
Asset, amount: AssetAmount, destination_address: EncodedAddress, }, - LpTtlSet { - ttl: BlockNumberFor, - }, LiquidityRefundAddressRegistered { account_id: T::AccountId, chain: ForeignChain, @@ -138,25 +117,8 @@ pub mod pallet { }, } - #[pallet::genesis_config] - pub struct GenesisConfig { - pub lp_ttl: BlockNumberFor, - } - - #[pallet::genesis_build] - impl BuildGenesisConfig for GenesisConfig { - fn build(&self) { - LpTTL::::put(self.lp_ttl); - } - } - - impl Default for GenesisConfig { - fn default() -> Self { - Self { lp_ttl: BlockNumberFor::::from(1200u32) } - } - } - #[pallet::pallet] + #[pallet::storage_version(PALLET_VERSION)] #[pallet::without_storage_info] pub struct Pallet(PhantomData); @@ -165,18 +127,7 @@ pub mod pallet { pub type FreeBalances = StorageDoubleMap<_, Twox64Concat, T::AccountId, Identity, Asset, AssetAmount>; - /// For a given block number, stores the list of liquidity deposit channels that expire at that - /// block. - #[pallet::storage] - pub(super) type LiquidityChannelExpiries = StorageMap< - _, - Twox64Concat, - BlockNumberFor, - Vec<(ChannelId, cf_chains::ForeignChainAddress)>, - ValueQuery, - >; - - /// Stores the registered refund address for an Account + /// Stores the registered energency withdrawal address for an Account #[pallet::storage] pub type LiquidityRefundAddress = StorageDoubleMap< _, @@ -187,9 +138,22 @@ pub mod pallet { ForeignChainAddress, >; - /// The TTL for liquidity channels. 
- #[pallet::storage] - pub type LpTTL = StorageValue<_, BlockNumberFor, ValueQuery>; + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_runtime_upgrade() -> Weight { + migrations::PalletMigration::::on_runtime_upgrade() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, DispatchError> { + migrations::PalletMigration::::pre_upgrade() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), DispatchError> { + migrations::PalletMigration::::post_upgrade(state) + } + } #[pallet::call] impl Pallet { @@ -210,26 +174,13 @@ pub mod pallet { Error::::NoLiquidityRefundAddressRegistered ); - let expiry_block = - frame_system::Pallet::::current_block_number().saturating_add(LpTTL::::get()); - let (channel_id, deposit_address) = - T::DepositHandler::request_liquidity_deposit_address( - account_id.clone(), - asset, - expiry_block, - )?; - - LiquidityChannelExpiries::::append( - expiry_block, - (channel_id, deposit_address.clone()), - ); + T::DepositHandler::request_liquidity_deposit_address(account_id.clone(), asset)?; Self::deposit_event(Event::LiquidityDepositAddressReady { channel_id, asset, deposit_address: T::AddressConverter::to_encoded_address(deposit_address), - expiry_block, account_id, }); @@ -296,23 +247,6 @@ pub mod pallet { Ok(()) } - /// Sets the lifetime of liquidity deposit channels. - /// - /// Requires Governance - /// - /// ## Events - /// - /// - [On update](Event::LpTtlSet) - #[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::set_lp_ttl())] - pub fn set_lp_ttl(origin: OriginFor, ttl: BlockNumberFor) -> DispatchResult { - T::EnsureGovernance::ensure_origin(origin)?; - LpTTL::::set(ttl); - - Self::deposit_event(Event::::LpTtlSet { ttl }); - Ok(()) - } - /// Registers an Liquidity Refund Address(LRA) for an account. /// To request deposit address for a chain, an LRA must be registered for that chain. 
/// diff --git a/state-chain/pallets/cf-lp/src/migrations.rs b/state-chain/pallets/cf-lp/src/migrations.rs new file mode 100644 index 0000000000..78943f89a1 --- /dev/null +++ b/state-chain/pallets/cf-lp/src/migrations.rs @@ -0,0 +1,6 @@ +pub mod remove_expiries; + +use cf_runtime_upgrade_utilities::VersionedMigration; + +pub type PalletMigration = + (VersionedMigration, remove_expiries::Migration, 0, 1>,); diff --git a/state-chain/pallets/cf-lp/src/migrations/remove_expiries.rs b/state-chain/pallets/cf-lp/src/migrations/remove_expiries.rs new file mode 100644 index 0000000000..b5c20e8fd7 --- /dev/null +++ b/state-chain/pallets/cf-lp/src/migrations/remove_expiries.rs @@ -0,0 +1,35 @@ +use crate::*; +use frame_support::{dispatch::Vec, traits::OnRuntimeUpgrade}; +use sp_std::marker::PhantomData; + +pub struct Migration(PhantomData); + +mod old { + + use super::*; + + use cf_primitives::ChannelId; + use frame_support::pallet_prelude::ValueQuery; + + #[frame_support::storage_alias] + pub type SwapTTL = StorageValue, BlockNumberFor, ValueQuery>; + + #[frame_support::storage_alias] + pub type SwapChannelExpiries = StorageMap< + Pallet, + Twox64Concat, + BlockNumberFor, + Vec<(ChannelId, ForeignChainAddress)>, + ValueQuery, + >; +} + +impl OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let _ = old::SwapChannelExpiries::::drain().collect::>(); + + let _ = old::SwapTTL::::take(); + + Weight::zero() + } +} diff --git a/state-chain/pallets/cf-lp/src/tests.rs b/state-chain/pallets/cf-lp/src/tests.rs index 8b1731dcea..57b99deb44 100644 --- a/state-chain/pallets/cf-lp/src/tests.rs +++ b/state-chain/pallets/cf-lp/src/tests.rs @@ -1,20 +1,11 @@ -use crate::{mock::*, FreeBalances, LiquidityChannelExpiries, LiquidityRefundAddress, LpTTL}; +use crate::{mock::*, FreeBalances, LiquidityRefundAddress}; -use cf_chains::{ - address::{AddressConverter, EncodedAddress}, - AnyChain, ForeignChainAddress, -}; +use 
cf_chains::{address::EncodedAddress, ForeignChainAddress}; use cf_primitives::{AccountId, Asset, ForeignChain}; use cf_test_utilities::assert_events_match; -use cf_traits::{ - mocks::{ - address_converter::MockAddressConverter, - deposit_handler::{LpChannel, MockDepositHandler}, - }, - SetSafeMode, -}; -use frame_support::{assert_noop, assert_ok, error::BadOrigin, traits::Hooks}; +use cf_traits::SetSafeMode; +use frame_support::{assert_noop, assert_ok, error::BadOrigin}; #[test] fn egress_chain_and_asset_must_match() { @@ -118,75 +109,6 @@ fn cannot_deposit_and_withdrawal_during_safe_mode() { }); } -#[test] -fn deposit_channel_expires() { - new_test_ext().execute_with(|| { - assert_ok!(LiquidityProvider::register_liquidity_refund_address( - RuntimeOrigin::signed(LP_ACCOUNT.into()), - EncodedAddress::Eth(Default::default()), - )); - // Expiry = current (1) + ttl - let expiry = LpTTL::::get() + 1; - let asset = Asset::Eth; - assert_ok!(LiquidityProvider::request_liquidity_deposit_address( - RuntimeOrigin::signed(LP_ACCOUNT.into()), - asset, - )); - - let (channel_id, deposit_address) = assert_events_match!(Test, RuntimeEvent::LiquidityProvider(crate::Event::LiquidityDepositAddressReady { - asset: event_asset, - channel_id, - deposit_address, - expiry_block, - account_id, - }) if expiry_block == expiry && event_asset == asset && account_id == LP_ACCOUNT.into() => (channel_id, deposit_address)); - let lp_channel = LpChannel { - deposit_address: MockAddressConverter::try_from_encoded_address(deposit_address.clone()).unwrap(), - source_asset: asset, - lp_account: LP_ACCOUNT.into(), - expiry, - }; - - assert_eq!( - LiquidityChannelExpiries::::get(expiry), - vec![(channel_id, MockAddressConverter::try_from_encoded_address(deposit_address.clone()).unwrap())] - ); - assert_eq!( - MockDepositHandler::::get_liquidity_channels(), - vec![lp_channel.clone()] - ); - - // Does not expire until expiry - LiquidityProvider::on_initialize(expiry - 1); - assert_eq!( - 
LiquidityChannelExpiries::::get(expiry), - vec![(channel_id, MockAddressConverter::try_from_encoded_address(deposit_address.clone()).unwrap())] - ); - assert_eq!( - MockDepositHandler::::get_liquidity_channels(), - vec![lp_channel] - ); - - // Expire the address on the expiry block - LiquidityProvider::on_initialize(expiry); - - assert_eq!(LiquidityChannelExpiries::::get(expiry), vec![]); - System::assert_last_event(RuntimeEvent::LiquidityProvider( - crate::Event::::LiquidityDepositAddressExpired { address: deposit_address }, - )); - assert!(MockDepositHandler::::get_liquidity_channels().is_empty()); - }); -} - -#[test] -fn can_set_lp_ttl() { - new_test_ext().execute_with(|| { - assert_eq!(LpTTL::::get(), 1_200); - assert_ok!(LiquidityProvider::set_lp_ttl(RuntimeOrigin::root(), 10)); - assert_eq!(LpTTL::::get(), 10); - }); -} - #[test] fn can_register_and_deregister_liquidity_refund_address() { new_test_ext().execute_with(|| { diff --git a/state-chain/pallets/cf-swapping/Cargo.toml b/state-chain/pallets/cf-swapping/Cargo.toml index f018f19a52..8911154184 100644 --- a/state-chain/pallets/cf-swapping/Cargo.toml +++ b/state-chain/pallets/cf-swapping/Cargo.toml @@ -16,6 +16,7 @@ targets = ['x86_64-unknown-linux-gnu'] # Internal dependencies cf-chains = { path = '../../chains', default-features = false } cf-primitives = { path = '../../primitives', default-features = false } +cf-runtime-upgrade-utilities = { path = '../../runtime-upgrade-utilities', default-features = false } cf-traits = { path = '../../traits', default-features = false } log = { version = '0.4.16', default-features = false } @@ -48,6 +49,7 @@ default = ['std'] std = [ 'cf-chains/std', 'cf-primitives/std', + 'cf-runtime-upgrade-utilities/std', 'cf-traits/std', 'codec/std', 'frame-benchmarking/std', @@ -68,4 +70,7 @@ runtime-benchmarks = [ 'frame-system/runtime-benchmarks', 'pallet-cf-account-roles/runtime-benchmarks', ] -try-runtime = ['frame-support/try-runtime'] +try-runtime = [ + 
'cf-runtime-upgrade-utilities/try-runtime', + 'frame-support/try-runtime', +] diff --git a/state-chain/pallets/cf-swapping/src/benchmarking.rs b/state-chain/pallets/cf-swapping/src/benchmarking.rs index 0440e2cb7f..ba5ce5a8b2 100644 --- a/state-chain/pallets/cf-swapping/src/benchmarking.rs +++ b/state-chain/pallets/cf-swapping/src/benchmarking.rs @@ -123,41 +123,6 @@ benchmarks! { )]); } - on_initialize { - let a in 1..100; - let caller: T::AccountId = whitelisted_caller(); - ::OnNewAccount::on_new_account(&caller); - T::AccountRoleRegistry::register_as_broker(&caller).unwrap(); - let origin = RawOrigin::Signed(caller); - for i in 0..a { - let call = Call::::request_swap_deposit_address{ - source_asset: Asset::Usdc, - destination_asset: Asset::Eth, - destination_address: EncodedAddress::Eth(Default::default()), - broker_commission_bps: Default::default(), - channel_metadata: None, - }; - call.dispatch_bypass_filter(origin.clone().into())?; - } - let expiry = SwapTTL::::get() + frame_system::Pallet::::current_block_number(); - assert!(!SwapChannelExpiries::::get(expiry).is_empty()); - }: { - Pallet::::on_initialize(expiry); - } verify { - assert!(SwapChannelExpiries::::get(expiry).is_empty()); - } - - set_swap_ttl { - let ttl = BlockNumberFor::::from(1_000u32); - let call = Call::::set_swap_ttl { - ttl - }; - }: { - let _ = call.dispatch_bypass_filter(::EnsureGovernance::try_successful_origin().unwrap()); - } verify { - assert_eq!(crate::SwapTTL::::get(), ttl); - } - set_minimum_swap_amount { let asset = Asset::Eth; let amount = 1_000; diff --git a/state-chain/pallets/cf-swapping/src/lib.rs b/state-chain/pallets/cf-swapping/src/lib.rs index 488a68918c..7f8bca1cef 100644 --- a/state-chain/pallets/cf-swapping/src/lib.rs +++ b/state-chain/pallets/cf-swapping/src/lib.rs @@ -7,16 +7,19 @@ use cf_primitives::{ Asset, AssetAmount, ChannelId, ForeignChain, SwapLeg, TransactionHash, STABLE_ASSET, }; use cf_traits::{impl_pallet_safe_mode, liquidity::SwappingApi, CcmHandler, 
DepositApi}; -use frame_support::{pallet_prelude::*, storage::with_storage_layer}; +use frame_support::{ + pallet_prelude::*, + sp_runtime::{ + traits::{Get, Saturating}, + DispatchError, Permill, + }, + storage::with_storage_layer, + traits::OnRuntimeUpgrade, +}; use frame_system::pallet_prelude::*; pub use pallet::*; use sp_arithmetic::{helpers_128bit::multiply_by_rational_with_rounding, traits::Zero, Rounding}; -use sp_runtime::{ - traits::{BlockNumberProvider, Get, Saturating}, - DispatchError, Permill, -}; use sp_std::{collections::btree_map::BTreeMap, vec, vec::Vec}; - #[cfg(test)] mod mock; @@ -25,9 +28,12 @@ mod tests; mod benchmarking; +pub mod migrations; pub mod weights; pub use weights::WeightInfo; +pub const PALLET_VERSION: StorageVersion = StorageVersion::new(1); + const BASIS_POINTS_PER_MILLION: u32 = 100; #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, TypeInfo, MaxEncodedLen)] @@ -199,6 +205,7 @@ pub mod pallet { } #[pallet::pallet] + #[pallet::storage_version(PALLET_VERSION)] #[pallet::without_storage_info] pub struct Pallet(PhantomData); @@ -223,23 +230,10 @@ pub mod pallet { #[pallet::storage] pub type CcmGasBudget = StorageMap<_, Twox64Concat, u64, (Asset, AssetAmount)>; - /// Stores the swap TTL in blocks. - #[pallet::storage] - pub type SwapTTL = StorageValue<_, BlockNumberFor, ValueQuery>; - /// Storage for storing CCMs pending assets to be swapped. #[pallet::storage] pub(crate) type PendingCcms = StorageMap<_, Twox64Concat, u64, CcmSwap>; - /// For a given block number, stores the list of swap channels that expire at that block. - #[pallet::storage] - pub type SwapChannelExpiries = StorageMap< - _, - Twox64Concat, - BlockNumberFor, - Vec<(ChannelId, ForeignChainAddress)>, - ValueQuery, - >; /// Tracks the outputs of Ccm swaps. 
#[pallet::storage] pub(crate) type CcmOutputs = StorageMap<_, Twox64Concat, u64, CcmSwapOutput>; @@ -262,7 +256,6 @@ pub mod pallet { SwapDepositAddressReady { deposit_address: EncodedAddress, destination_address: EncodedAddress, - expiry_block: BlockNumberFor, source_asset: Asset, destination_asset: Asset, channel_id: ChannelId, @@ -313,13 +306,6 @@ pub mod pallet { ccm_id: u64, egress_id: EgressId, }, - SwapDepositAddressExpired { - deposit_address: EncodedAddress, - channel_id: ChannelId, - }, - SwapTtlSet { - ttl: BlockNumberFor, - }, CcmDepositReceived { ccm_id: u64, principal_swap_id: Option, @@ -370,14 +356,13 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { - pub swap_ttl: BlockNumberFor, pub minimum_swap_amounts: Vec<(Asset, AssetAmount)>, + pub _phantom: PhantomData, } #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { - SwapTTL::::put(self.swap_ttl); for (asset, min) in &self.minimum_swap_amounts { MinimumSwapAmount::::insert(asset, min); } @@ -386,27 +371,12 @@ pub mod pallet { impl Default for GenesisConfig { fn default() -> Self { - // 1200 = 2 hours (6 sec per block) - Self { swap_ttl: BlockNumberFor::::from(1_200u32), minimum_swap_amounts: vec![] } + Self { minimum_swap_amounts: vec![], _phantom: PhantomData } } } #[pallet::hooks] impl Hooks> for Pallet { - /// Clean up expired deposit channels - fn on_initialize(n: BlockNumberFor) -> Weight { - let expired = SwapChannelExpiries::::take(n); - let expired_count = expired.len(); - for (channel_id, address) in expired { - T::DepositHandler::expire_channel(address.clone()); - Self::deposit_event(Event::::SwapDepositAddressExpired { - deposit_address: T::AddressConverter::to_encoded_address(address), - channel_id, - }); - } - T::WeightInfo::on_initialize(expired_count as u32) - } - /// Execute all swaps in the SwapQueue fn on_finalize(_n: BlockNumberFor) { if !T::SafeMode::get().swaps_enabled { @@ -495,6 +465,20 @@ pub mod pallet { } } } + + fn 
on_runtime_upgrade() -> Weight { + migrations::PalletMigration::::on_runtime_upgrade() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, DispatchError> { + migrations::PalletMigration::::pre_upgrade() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), DispatchError> { + migrations::PalletMigration::::post_upgrade(state) + } } #[pallet::call] @@ -528,9 +512,6 @@ pub mod pallet { ); } - let expiry_block = frame_system::Pallet::::current_block_number() - .saturating_add(SwapTTL::::get()); - let (channel_id, deposit_address) = T::DepositHandler::request_swap_deposit_address( source_asset, destination_asset, @@ -538,15 +519,11 @@ pub mod pallet { broker_commission_bps, broker, channel_metadata.clone(), - expiry_block, )?; - SwapChannelExpiries::::append(expiry_block, (channel_id, deposit_address.clone())); - Self::deposit_event(Event::::SwapDepositAddressReady { deposit_address: T::AddressConverter::to_encoded_address(deposit_address), destination_address, - expiry_block, source_asset, destination_asset, channel_id, @@ -684,23 +661,6 @@ pub mod pallet { Ok(()) } - /// Sets the lifetime of swap channels. - /// - /// Requires Governance. - /// - /// ## Events - /// - /// - [On update](Event::SwapTtlSet) - #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::set_swap_ttl())] - pub fn set_swap_ttl(origin: OriginFor, ttl: BlockNumberFor) -> DispatchResult { - T::EnsureGovernance::ensure_origin(origin)?; - SwapTTL::::set(ttl); - - Self::deposit_event(Event::::SwapTtlSet { ttl }); - Ok(()) - } - /// Sets the Minimum swap amount allowed for an asset. /// /// Requires Governance. 
diff --git a/state-chain/pallets/cf-swapping/src/migrations.rs b/state-chain/pallets/cf-swapping/src/migrations.rs new file mode 100644 index 0000000000..78943f89a1 --- /dev/null +++ b/state-chain/pallets/cf-swapping/src/migrations.rs @@ -0,0 +1,6 @@ +pub mod remove_expiries; + +use cf_runtime_upgrade_utilities::VersionedMigration; + +pub type PalletMigration = + (VersionedMigration, remove_expiries::Migration, 0, 1>,); diff --git a/state-chain/pallets/cf-swapping/src/migrations/remove_expiries.rs b/state-chain/pallets/cf-swapping/src/migrations/remove_expiries.rs new file mode 100644 index 0000000000..d4071992d8 --- /dev/null +++ b/state-chain/pallets/cf-swapping/src/migrations/remove_expiries.rs @@ -0,0 +1,34 @@ +use crate::*; +use frame_support::traits::OnRuntimeUpgrade; +use sp_std::marker::PhantomData; + +pub struct Migration(PhantomData); + +mod old { + + use super::*; + + use frame_support::pallet_prelude::ValueQuery; + + #[frame_support::storage_alias] + pub type SwapTTL = StorageValue, BlockNumberFor, ValueQuery>; + + #[frame_support::storage_alias] + pub type SwapChannelExpiries = StorageMap< + Pallet, + Twox64Concat, + BlockNumberFor, + Vec<(ChannelId, ForeignChainAddress)>, + ValueQuery, + >; +} + +impl OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let _ = old::SwapChannelExpiries::::drain().collect::>(); + + let _ = old::SwapTTL::::take(); + + Weight::zero() + } +} diff --git a/state-chain/pallets/cf-swapping/src/mock.rs b/state-chain/pallets/cf-swapping/src/mock.rs index 11c57c0a83..3779906737 100644 --- a/state-chain/pallets/cf-swapping/src/mock.rs +++ b/state-chain/pallets/cf-swapping/src/mock.rs @@ -1,3 +1,5 @@ +use core::marker::PhantomData; + use crate::{self as pallet_cf_swapping, PalletSafeMode, WeightInfo}; use cf_chains::AnyChain; use cf_primitives::{Asset, AssetAmount}; @@ -112,10 +114,6 @@ impl WeightInfo for MockWeightInfo { Weight::from_parts(100, 0) } - fn set_swap_ttl() -> Weight { - 
Weight::from_parts(100, 0) - } - fn set_minimum_swap_amount() -> Weight { Weight::from_parts(100, 0) } @@ -137,7 +135,7 @@ cf_test_utilities::impl_test_helpers! { Test, RuntimeGenesisConfig { system: Default::default(), - swapping: SwappingConfig { swap_ttl: 5, minimum_swap_amounts: vec![] }, + swapping: SwappingConfig { minimum_swap_amounts: vec![], _phantom: PhantomData }, }, || { >::register_as_broker(&ALICE).unwrap(); diff --git a/state-chain/pallets/cf-swapping/src/tests.rs b/state-chain/pallets/cf-swapping/src/tests.rs index 2e9943a019..4f08a0fb31 100644 --- a/state-chain/pallets/cf-swapping/src/tests.rs +++ b/state-chain/pallets/cf-swapping/src/tests.rs @@ -2,7 +2,7 @@ use crate::{ mock::{RuntimeEvent, *}, CcmFailReason, CcmGasBudget, CcmIdCounter, CcmOutputs, CcmSwap, CcmSwapOutput, CollectedRejectedFunds, EarnedBrokerFees, Error, Event, MinimumSwapAmount, Pallet, PendingCcms, - Swap, SwapChannelExpiries, SwapOrigin, SwapQueue, SwapTTL, SwapType, + Swap, SwapOrigin, SwapQueue, SwapType, }; use cf_chains::{ address::{to_encoded_address, AddressConverter, EncodedAddress, ForeignChainAddress}, @@ -11,11 +11,10 @@ use cf_chains::{ AnyChain, CcmChannelMetadata, CcmDepositMetadata, }; use cf_primitives::{Asset, AssetAmount, ForeignChain, NetworkEnvironment}; -use cf_test_utilities::{assert_event_sequence, assert_events_match}; +use cf_test_utilities::assert_event_sequence; use cf_traits::{ mocks::{ address_converter::MockAddressConverter, - deposit_handler::{MockDepositHandler, SwapChannel}, egress_handler::{MockEgressHandler, MockEgressParameter}, }, CcmHandler, SetSafeMode, SwapDepositHandler, SwappingApi, @@ -23,7 +22,6 @@ use cf_traits::{ use frame_support::{assert_noop, assert_ok, sp_std::iter}; use frame_support::traits::Hooks; -use sp_runtime::traits::BlockNumberProvider; // Returns some test data fn generate_test_swaps() -> Vec { @@ -229,12 +227,11 @@ fn expect_swap_id_to_be_emitted() { RuntimeEvent::Swapping(Event::SwapDepositAddressReady { 
deposit_address: EncodedAddress::Eth(..), destination_address: EncodedAddress::Eth(..), - expiry_block, source_asset: Asset::Eth, destination_asset: Asset::Usdc, channel_id: 0, .. - }) if expiry_block == SwapTTL::::get() + System::current_block_number(), + }), RuntimeEvent::Swapping(Event::SwapScheduled { swap_id: 1, source_asset: Asset::Eth, @@ -246,7 +243,9 @@ fn expect_swap_id_to_be_emitted() { channel_id: 1, deposit_block_height: 0 }, - swap_type: SwapType::Swap(ForeignChainAddress::Eth(..)), broker_commission: _ }), + swap_type: SwapType::Swap(ForeignChainAddress::Eth(..)), + broker_commission: _ + }), RuntimeEvent::Swapping(Event::SwapExecuted { swap_id: 1, .. }), RuntimeEvent::Swapping(Event::SwapEgressScheduled { swap_id: 1, @@ -315,78 +314,6 @@ fn can_swap_using_witness_origin() { }); } -#[test] -fn swap_expires() { - new_test_ext().execute_with(|| { - let expiry = SwapTTL::::get() + 1; - assert_eq!(expiry, 6); // Expiry = current(1) + TTL (5) - assert_ok!(Swapping::request_swap_deposit_address( - RuntimeOrigin::signed(ALICE), - Asset::Eth, - Asset::Usdc, - EncodedAddress::Eth(Default::default()), - 0, - None - )); - - let deposit_address = assert_events_match!(Test, RuntimeEvent::Swapping(Event::SwapDepositAddressReady { - deposit_address, - .. - }) => deposit_address); - let swap_channel = SwapChannel { - deposit_address: MockAddressConverter::try_from_encoded_address(deposit_address).unwrap(), - source_asset: Asset::Eth, - destination_asset: Asset::Usdc, - destination_address: ForeignChainAddress::Eth(Default::default()), - broker_commission_bps: 0, - broker_id: ALICE, - channel_metadata: None, - expiry, - }; - - assert_eq!( - SwapChannelExpiries::::get(expiry), - vec![(0, ForeignChainAddress::Eth(Default::default()))] - ); - assert_eq!( - MockDepositHandler::::get_swap_channels(), - vec![swap_channel.clone()] - ); - - // Does not expire until expiry block. 
- Swapping::on_initialize(expiry - 1); - assert_eq!( - SwapChannelExpiries::::get(expiry), - vec![(0, ForeignChainAddress::Eth(Default::default()))] - ); - assert_eq!( - MockDepositHandler::::get_swap_channels(), - vec![swap_channel] - ); - - Swapping::on_initialize(6); - assert_eq!(SwapChannelExpiries::::get(6), vec![]); - System::assert_last_event(RuntimeEvent::Swapping( - Event::::SwapDepositAddressExpired { - deposit_address: EncodedAddress::Eth(Default::default()), - channel_id: 0, - }, - )); - assert!( - MockDepositHandler::::get_swap_channels().is_empty() - ); - }); -} - -#[test] -fn can_set_swap_ttl() { - new_test_ext().execute_with(|| { - assert_eq!(crate::SwapTTL::::get(), 5); - assert_ok!(Swapping::set_swap_ttl(RuntimeOrigin::root(), 10)); - assert_eq!(crate::SwapTTL::::get(), 10); - }); -} - #[test] fn reject_invalid_ccm_deposit() { new_test_ext().execute_with(|| { diff --git a/state-chain/pallets/cf-swapping/src/weights.rs b/state-chain/pallets/cf-swapping/src/weights.rs index e3a61e8c1a..8614b2a5e3 100644 --- a/state-chain/pallets/cf-swapping/src/weights.rs +++ b/state-chain/pallets/cf-swapping/src/weights.rs @@ -36,7 +36,6 @@ pub trait WeightInfo { fn schedule_swap_from_contract() -> Weight; fn ccm_deposit() -> Weight; fn on_initialize(a: u32, ) -> Weight; - fn set_swap_ttl() -> Weight; fn set_minimum_swap_amount() -> Weight; } @@ -48,7 +47,6 @@ impl WeightInfo for PalletWeight { // Storage: EthereumIngressEgress ChannelIdCounter (r:1 w:1) // Storage: Environment EthereumVaultAddress (r:1 w:0) // Storage: Swapping SwapTTL (r:1 w:0) - // Storage: Swapping SwapChannelExpiries (r:1 w:1) // Storage: EthereumIngressEgress ChannelActions (r:0 w:1) // Storage: EthereumIngressEgress FetchParamDetails (r:0 w:1) // Storage: EthereumIngressEgress AddressStatus (r:0 w:1) @@ -91,7 +89,6 @@ impl WeightInfo for PalletWeight { .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(5)) } - // Storage: Swapping SwapChannelExpiries (r:1 
w:1) // Storage: EthereumIngressEgress AddressStatus (r:1 w:0) // Storage: EthereumIngressEgress DepositAddressDetailsLookup (r:1 w:1) // Storage: EthereumIngressEgress ChannelActions (r:0 w:1) @@ -106,12 +103,6 @@ impl WeightInfo for PalletWeight { .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(a.into()))) } - // Storage: Swapping SwapTTL (r:0 w:1) - fn set_swap_ttl() -> Weight { - // Minimum execution time: 13_000 nanoseconds. - Weight::from_parts(14_000_000, 0) - .saturating_add(T::DbWeight::get().writes(1)) - } // Storage: AccountRoles SwappingEnabled (r:1 w:0) // Storage: AccountRoles AccountRoles (r:1 w:1) fn register_as_broker() -> Weight { @@ -133,7 +124,6 @@ impl WeightInfo for () { // Storage: EthereumIngressEgress ChannelIdCounter (r:1 w:1) // Storage: Environment EthereumVaultAddress (r:1 w:0) // Storage: Swapping SwapTTL (r:1 w:0) - // Storage: Swapping SwapChannelExpiries (r:1 w:1) // Storage: EthereumIngressEgress ChannelActions (r:0 w:1) // Storage: EthereumIngressEgress FetchParamDetails (r:0 w:1) // Storage: EthereumIngressEgress AddressStatus (r:0 w:1) @@ -176,7 +166,6 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3)) .saturating_add(RocksDbWeight::get().writes(5)) } - // Storage: Swapping SwapChannelExpiries (r:1 w:1) // Storage: EthereumIngressEgress AddressStatus (r:1 w:0) // Storage: EthereumIngressEgress DepositAddressDetailsLookup (r:1 w:1) // Storage: EthereumIngressEgress ChannelActions (r:0 w:1) @@ -191,12 +180,6 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes(1)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(a.into()))) } - // Storage: Swapping SwapTTL (r:0 w:1) - fn set_swap_ttl() -> Weight { - // Minimum execution time: 13_000 nanoseconds. 
- Weight::from_parts(14_000_000, 0) - .saturating_add(RocksDbWeight::get().writes(1)) - } // Storage: AccountRoles SwappingEnabled (r:1 w:0) // Storage: AccountRoles AccountRoles (r:1 w:1) fn register_as_broker() -> Weight { diff --git a/state-chain/runtime/src/chainflip.rs b/state-chain/runtime/src/chainflip.rs index 4bde2f0d4e..5804088bbb 100644 --- a/state-chain/runtime/src/chainflip.rs +++ b/state-chain/runtime/src/chainflip.rs @@ -460,7 +460,6 @@ macro_rules! impl_deposit_api_for_anychain { fn request_liquidity_deposit_address( lp_account: Self::AccountId, source_asset: Asset, - expiry: Self::BlockNumber, ) -> Result<(ChannelId, ForeignChainAddress), DispatchError> { match source_asset.into() { $( @@ -468,7 +467,6 @@ macro_rules! impl_deposit_api_for_anychain { $pallet::request_liquidity_deposit_address( lp_account, source_asset.try_into().unwrap(), - expiry, ), )+ } @@ -481,7 +479,6 @@ macro_rules! impl_deposit_api_for_anychain { broker_commission_bps: BasisPoints, broker_id: Self::AccountId, channel_metadata: Option, - expiry: Self::BlockNumber, ) -> Result<(ChannelId, ForeignChainAddress), DispatchError> { match source_asset.into() { $( @@ -492,23 +489,10 @@ macro_rules! impl_deposit_api_for_anychain { broker_commission_bps, broker_id, channel_metadata, - expiry, ), )+ } } - - fn expire_channel(address: ForeignChainAddress) { - match address.chain() { - $( - ForeignChain::$chain => { - <$pallet as DepositApi<$chain>>::expire_channel( - address.try_into().expect("Checked for address compatibility") - ); - }, - )+ - } - } } } } diff --git a/state-chain/runtime/src/constants.rs b/state-chain/runtime/src/constants.rs index 4edad8d06b..ba49101312 100644 --- a/state-chain/runtime/src/constants.rs +++ b/state-chain/runtime/src/constants.rs @@ -101,8 +101,6 @@ pub mod common { /// See https://github.com/chainflip-io/chainflip-backend/issues/1629 pub const TX_FEE_MULTIPLIER: FlipBalance = 10_000; - /// Default supply update interval is 24 hours. 
- pub mod eth { use cf_chains::{eth::Ethereum, Chain}; diff --git a/state-chain/traits/src/lib.rs b/state-chain/traits/src/lib.rs index b87c948922..02f53997b5 100644 --- a/state-chain/traits/src/lib.rs +++ b/state-chain/traits/src/lib.rs @@ -640,7 +640,6 @@ pub trait DepositApi { fn request_liquidity_deposit_address( lp_account: Self::AccountId, source_asset: C::ChainAsset, - expiry: Self::BlockNumber, ) -> Result<(ChannelId, ForeignChainAddress), DispatchError>; /// Issues a channel id and deposit address for a new swap. @@ -651,11 +650,7 @@ pub trait DepositApi { broker_commission_bps: BasisPoints, broker_id: Self::AccountId, channel_metadata: Option, - expiry: Self::BlockNumber, ) -> Result<(ChannelId, ForeignChainAddress), DispatchError>; - - /// Expires a channel. - fn expire_channel(address: C::ChainAccount); } pub trait AccountRoleRegistry { diff --git a/state-chain/traits/src/mocks/deposit_handler.rs b/state-chain/traits/src/mocks/deposit_handler.rs index 185d3f8e56..27b6e19a00 100644 --- a/state-chain/traits/src/mocks/deposit_handler.rs +++ b/state-chain/traits/src/mocks/deposit_handler.rs @@ -30,7 +30,6 @@ pub struct SwapChannel { pub broker_commission_bps: BasisPoints, pub broker_id: ::AccountId, pub channel_metadata: Option, - pub expiry: BlockNumberFor, } #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, TypeInfo)] @@ -38,7 +37,6 @@ pub struct LpChannel { pub deposit_address: ForeignChainAddress, pub source_asset: ::ChainAsset, pub lp_account: ::AccountId, - pub expiry: BlockNumberFor, } impl MockDepositHandler { @@ -85,7 +83,6 @@ impl DepositApi for MockDepositHandler { fn request_liquidity_deposit_address( lp_account: Self::AccountId, source_asset: ::ChainAsset, - expiry: Self::BlockNumber, ) -> Result<(cf_primitives::ChannelId, ForeignChainAddress), sp_runtime::DispatchError> { let (channel_id, deposit_address) = Self::get_new_deposit_address(SwapOrLp::Lp, source_asset); @@ -98,7 +95,6 @@ impl DepositApi for MockDepositHandler { deposit_address: 
deposit_address.clone(), source_asset, lp_account, - expiry, }); } }); @@ -112,7 +108,6 @@ impl DepositApi for MockDepositHandler { broker_commission_bps: BasisPoints, broker_id: Self::AccountId, channel_metadata: Option, - expiry: Self::BlockNumber, ) -> Result<(cf_primitives::ChannelId, ForeignChainAddress), sp_runtime::DispatchError> { let (channel_id, deposit_address) = Self::get_new_deposit_address(SwapOrLp::Swap, source_asset); @@ -129,29 +124,9 @@ impl DepositApi for MockDepositHandler { broker_commission_bps, broker_id, channel_metadata, - expiry, }); }; }); Ok((channel_id, deposit_address)) } - - fn expire_channel(address: ::ChainAccount) { - ::mutate_value( - b"SWAP_INGRESS_CHANNELS", - |storage: &mut Option>>| { - if let Some(inner) = storage.as_mut() { - inner.retain(|x| x.deposit_address != address.clone().into()); - } - }, - ); - ::mutate_value( - b"LP_INGRESS_CHANNELS", - |storage: &mut Option>>| { - if let Some(inner) = storage.as_mut() { - inner.retain(|x| x.deposit_address != address.clone().into()); - } - }, - ); - } }