diff --git a/.github/workflows/upgrade-test.yml b/.github/workflows/upgrade-test.yml
index 3560996900..a387ae2a68 100644
--- a/.github/workflows/upgrade-test.yml
+++ b/.github/workflows/upgrade-test.yml
@@ -96,8 +96,9 @@ jobs:
       if: inputs.run-job
       run: |
         mkdir -p upgrade-from-bins; (
          cd upgrade-from-bins
-          curl https://artifacts.chainflip.io/${{ env.OLD_VERSION }}/chainflip-backend-bin.zip --output chainflip-backend-bin.zip
-          unzip chainflip-backend-bin.zip -d .
+          curl https://artifacts.chainflip.io/${{ env.OLD_VERSION }}/chainflip-backend-bin.zip --output chainflip-backend-bin.zip
+          unzip chainflip-backend-bin.zip -d .
+          mv ./artifacts/chainflip-backend-bin/* .
         ); ls -l upgrade-from-bins
     - name: Permissions for latest binaries 🛡️
diff --git a/Cargo.lock b/Cargo.lock
index 066bc7b03d..db99252b4b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1327,7 +1327,7 @@ dependencies = [
 
 [[package]]
 name = "cf-engine-dylib"
-version = "1.7.0"
+version = "1.8.0"
 dependencies = [
  "chainflip-engine",
  "engine-proc-macros",
@@ -1567,7 +1567,7 @@ dependencies = [
 
 [[package]]
 name = "chainflip-api"
-version = "1.7.0"
+version = "1.8.0"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -1614,7 +1614,7 @@ dependencies = [
 
 [[package]]
 name = "chainflip-broker-api"
-version = "1.7.0"
+version = "1.8.0"
 dependencies = [
  "anyhow",
  "cf-chains",
@@ -1638,7 +1638,7 @@ dependencies = [
 
 [[package]]
 name = "chainflip-cli"
-version = "1.7.0"
+version = "1.8.0"
 dependencies = [
  "anyhow",
  "bigdecimal",
@@ -1659,7 +1659,7 @@ dependencies = [
 
 [[package]]
 name = "chainflip-engine"
-version = "1.7.0"
+version = "1.8.0"
 dependencies = [
  "anyhow",
  "async-broadcast",
@@ -1796,7 +1796,7 @@ dependencies = [
 
 [[package]]
 name = "chainflip-lp-api"
-version = "1.7.0"
+version = "1.8.0"
 dependencies = [
  "anyhow",
  "cf-primitives",
@@ -1823,7 +1823,7 @@ dependencies = [
 
 [[package]]
 name = "chainflip-node"
-version = "1.7.0"
+version = "1.8.0"
 dependencies = [
  "cf-chains",
  "cf-primitives",
@@ -3213,7 +3213,7 @@ checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d"
 
 [[package]]
 name = "engine-proc-macros"
-version = "1.7.0"
+version = "1.8.0"
 dependencies = [
  "engine-upgrade-utils",
  "proc-macro2",
@@ -3223,7 +3223,7 @@ dependencies = [
 
 [[package]]
 name = "engine-runner"
-version = "1.7.0"
+version = "1.8.0"
 dependencies = [
  "anyhow",
  "assert_cmd",
@@ -13040,7 +13040,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
 
 [[package]]
 name = "state-chain-runtime"
-version = "1.7.0"
+version = "1.8.0"
 dependencies = [
  "bitvec",
  "cf-amm",
diff --git a/api/bin/chainflip-broker-api/Cargo.toml b/api/bin/chainflip-broker-api/Cargo.toml
index 4ab0d5ec54..6778ad49bd 100644
--- a/api/bin/chainflip-broker-api/Cargo.toml
+++ b/api/bin/chainflip-broker-api/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 authors = ["Chainflip team "]
 name = "chainflip-broker-api"
-version = "1.7.0"
+version = "1.8.0"
 edition = "2021"
 
 [package.metadata.deb]
diff --git a/api/bin/chainflip-cli/Cargo.toml b/api/bin/chainflip-cli/Cargo.toml
index c2c8c428a9..9af9a73682 100644
--- a/api/bin/chainflip-cli/Cargo.toml
+++ b/api/bin/chainflip-cli/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Chainflip team "]
 edition = "2021"
 build = "build.rs"
 name = "chainflip-cli"
-version = "1.7.0"
+version = "1.8.0"
 
 [lints]
 workspace = true
diff --git a/api/bin/chainflip-lp-api/Cargo.toml b/api/bin/chainflip-lp-api/Cargo.toml
index 62ef1e1e23..a35ca3f8bf 100644
--- a/api/bin/chainflip-lp-api/Cargo.toml
+++ b/api/bin/chainflip-lp-api/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 authors = ["Chainflip team "]
 name = "chainflip-lp-api"
-version = "1.7.0"
+version = "1.8.0"
 edition = "2021"
 
 [package.metadata.deb]
diff --git a/api/lib/Cargo.toml b/api/lib/Cargo.toml
index 9bd10e457b..5239ee5ed2 100644
--- a/api/lib/Cargo.toml
+++ b/api/lib/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "chainflip-api"
-version = "1.7.0"
+version = "1.8.0"
 edition = "2021"
 
 [lints]
diff --git a/engine-dylib/Cargo.toml b/engine-dylib/Cargo.toml
index 6116eb0123..7d4212447d 100644
--- a/engine-dylib/Cargo.toml
+++ b/engine-dylib/Cargo.toml
@@ -3,11 +3,11 @@ authors = ["Chainflip team "]
 build = "build.rs"
 edition = "2021"
 name = "cf-engine-dylib"
-version = "1.7.0"
+version = "1.8.0"
 
 [lib]
 crate-type = ["cdylib"]
-name = "chainflip_engine_v1_7_0"
+name = "chainflip_engine_v1_8_0"
 path = "src/lib.rs"
 
 [dependencies]
diff --git a/engine-proc-macros/Cargo.toml b/engine-proc-macros/Cargo.toml
index 9c3fc510b6..8b4e1f011b 100644
--- a/engine-proc-macros/Cargo.toml
+++ b/engine-proc-macros/Cargo.toml
@@ -5,7 +5,7 @@ edition = "2021"
 name = "engine-proc-macros"
 # The version here is the version that will be used for the generated code, and therefore will be the
 # suffix of the generated engine entrypoint. TODO: Fix this.
-version = "1.7.0"
+version = "1.8.0"
 
 [lib]
 proc-macro = true
diff --git a/engine-runner-bin/Cargo.toml b/engine-runner-bin/Cargo.toml
index 63061b2627..e3080252c6 100644
--- a/engine-runner-bin/Cargo.toml
+++ b/engine-runner-bin/Cargo.toml
@@ -2,7 +2,7 @@
 name = "engine-runner"
 description = "The central runner for the chainflip engine, it requires two shared library versions to run."
 # NB: When updating this version, you must update the debian assets appropriately too.
-version = "1.7.0"
+version = "1.8.0"
 authors = ["Chainflip team "]
 build = "build.rs"
 edition = "2021"
@@ -22,19 +22,19 @@ assets = [
   # to specify this. We do this in the `chainflip-engine.service` files, so the user does not need to set it
   # manually.
   [
-    "target/release/libchainflip_engine_v1_7_0.so",
+    "target/release/libchainflip_engine_v1_8_0.so",
     # This is the path where the engine dylib is searched for on linux.
     # As set in the build.rs file.
-    "usr/lib/chainflip-engine/libchainflip_engine_v1_7_0.so",
+    "usr/lib/chainflip-engine/libchainflip_engine_v1_8_0.so",
     "755",
   ],
   # The old version gets put into target/release by the package github actions workflow.
   # It downloads the correct version from the releases page.
   [
-    "target/release/libchainflip_engine_v1_6_8.so",
+    "target/release/libchainflip_engine_v1_7_0.so",
     # This is the path where the engine dylib is searched for on linux.
     # As set in the build.rs file.
-    "usr/lib/chainflip-engine/libchainflip_engine_v1_6_8.so",
+    "usr/lib/chainflip-engine/libchainflip_engine_v1_7_0.so",
     "755",
   ],
 ]
diff --git a/engine-runner-bin/src/main.rs b/engine-runner-bin/src/main.rs
index 1e31f52008..5b0fb96ad5 100644
--- a/engine-runner-bin/src/main.rs
+++ b/engine-runner-bin/src/main.rs
@@ -2,7 +2,7 @@ use engine_upgrade_utils::{CStrArray, NEW_VERSION, OLD_VERSION};
 
 // Declare the entrypoints into each version of the engine
 mod old {
-    #[engine_proc_macros::link_engine_library_version("1.6.8")]
+    #[engine_proc_macros::link_engine_library_version("1.7.0")]
     extern "C" {
         pub fn cfe_entrypoint(
             c_args: engine_upgrade_utils::CStrArray,
@@ -12,7 +12,7 @@ mod old {
 }
 
 mod new {
-    #[engine_proc_macros::link_engine_library_version("1.7.0")]
+    #[engine_proc_macros::link_engine_library_version("1.8.0")]
     extern "C" {
         fn cfe_entrypoint(
             c_args: engine_upgrade_utils::CStrArray,
diff --git a/engine-upgrade-utils/src/lib.rs b/engine-upgrade-utils/src/lib.rs
index 7bbb1ccb97..0587a49d6a 100644
--- a/engine-upgrade-utils/src/lib.rs
+++ b/engine-upgrade-utils/src/lib.rs
@@ -10,8 +10,8 @@ pub mod build_helpers;
 // rest of the places the version needs changing on build using the build scripts in each of the
 // relevant crates.
 // Should also check that the compatibility function below `args_compatible_with_old` is correct.
-pub const OLD_VERSION: &str = "1.6.8";
-pub const NEW_VERSION: &str = "1.7.0";
+pub const OLD_VERSION: &str = "1.7.0";
+pub const NEW_VERSION: &str = "1.8.0";
 
 pub const ENGINE_LIB_PREFIX: &str = "chainflip_engine_v";
 pub const ENGINE_ENTRYPOINT_PREFIX: &str = "cfe_entrypoint_v";
diff --git a/engine/Cargo.toml b/engine/Cargo.toml
index a625a955ba..9be3fd7bed 100644
--- a/engine/Cargo.toml
+++ b/engine/Cargo.toml
@@ -3,7 +3,7 @@ authors = ["Chainflip team "]
 build = "build.rs"
 edition = "2021"
 name = "chainflip-engine"
-version = "1.7.0"
+version = "1.8.0"
 
 [lib]
 crate-type = ["lib"]
diff --git a/state-chain/cf-integration-tests/src/lib.rs b/state-chain/cf-integration-tests/src/lib.rs
index 0f5c09ffa1..0c456ad8d5 100644
--- a/state-chain/cf-integration-tests/src/lib.rs
+++ b/state-chain/cf-integration-tests/src/lib.rs
@@ -11,7 +11,6 @@ mod authorities;
 mod funding;
 mod genesis;
 mod governance;
-mod migrations;
 mod new_epoch;
 mod solana;
 mod swapping;
diff --git a/state-chain/cf-integration-tests/src/migrations/serialize_solana_broadcast.rs b/state-chain/cf-integration-tests/src/migrations/serialize_solana_broadcast.rs
deleted file mode 100644
index 377e52446d..0000000000
--- a/state-chain/cf-integration-tests/src/migrations/serialize_solana_broadcast.rs
+++ /dev/null
@@ -1,154 +0,0 @@
-use cf_chains::sol::{
-    sol_tx_core::{CompiledInstruction, MessageHeader},
-    SolMessage, SolPubkey, SolSignature,
-};
-
-use cf_chains::sol::{SolHash, SolanaTransactionData};
-use genesis::with_test_defaults;
-use sp_runtime::AccountId32;
-
-use frame_support::traits::UncheckedOnRuntimeUpgrade;
-use pallet_cf_broadcast::BroadcastData;
-use state_chain_runtime::{
-    migrations::serialize_solana_broadcast::{self, old, SerializeSolanaBroadcastMigration},
-    SolanaInstance,
-};
-
-use crate::*;
-
-use cf_chains::sol::SolTransaction;
-
-// Test data pulled from `state-chain/chains/src/sol/sol_tx_core.rs`
-#[test]
-fn test_migration() {
-    with_test_defaults().build().execute_with(|| {
-        let tx: SolTransaction = SolTransaction {
-            signatures: vec![
-                SolSignature(hex_literal::hex!(
"d1144b223b6b600de4b2d96bdceb03573a3e9781953e4c668c57e505f017859d96543243b4d904dc2f02f2f5ab5db7ba4551c7e015e64078add4674ac2e7460c" - )), - ], - message: SolMessage { - header: MessageHeader { - num_required_signatures: 1, - num_readonly_signed_accounts: 0, - num_readonly_unsigned_accounts: 8, - }, - account_keys: vec![ - SolPubkey(hex_literal::hex!( - "2e8944a76efbece296221e736627f4528a947578263a1172a9786410702d2ef2" - )), - SolPubkey(hex_literal::hex!( - "22020a74fd97df45db96d2bbf4e485ccbec56945155ff8f668856be26c9de4a9" - )), - SolPubkey(hex_literal::hex!( - "79c03bceb9ddea819e956b2b332e87fbbf49fc8968df78488e88cfaa366f3036" - )), - SolPubkey(hex_literal::hex!( - "8cd28baa84f2067bbdf24513c2d44e44bf408f2e6da6e60762e3faa4a62a0adb" - )), - SolPubkey(hex_literal::hex!( - "8d9871ed5fb2ee05765af23b7cabcc0d6b08ed370bb9f616a0d4dea40a25f870" - )), - SolPubkey(hex_literal::hex!( - "b5b9d633289c8fd72fb05f33349bf4cc44e82add5d865311ae346d7c9a67b7dd" - )), - SolPubkey(hex_literal::hex!( - "f53a2f4350451db5595a75e231519bc2758798f72550e57487722e7cbe954dbc" - )), - SolPubkey(hex_literal::hex!( - "0000000000000000000000000000000000000000000000000000000000000000" - )), - SolPubkey(hex_literal::hex!( - "0306466fe5211732ffecadba72c39be7bc8ce5bbc5f7126b2c439b3a40000000" - )), - SolPubkey(hex_literal::hex!( - "06a7d517192c568ee08a845f73d29788cf035c3145b21ab344d8062ea9400000" - )), - SolPubkey(hex_literal::hex!( - "06ddf6e1d765a193d9cbe146ceeb79ac1cb485ed5f5b37913a8cf5857eff00a9" - )), - SolPubkey(hex_literal::hex!( - "0fb9ba52b1f09445f1e3a7508d59f0797923acf744fbe2da303fb06da859ee87" - )), - SolPubkey(hex_literal::hex!( - "72b5d2051d300b10b74314b7e25ace9998ca66eb2c7fbc10ef130dd67028293c" - )), - SolPubkey(hex_literal::hex!( - "a140fd3d05766f0087d57bf99df05731e894392ffcc8e8d7e960ba73c09824aa" - )), - SolPubkey(hex_literal::hex!( - "a1e031c8bc9bec3b610cf7b36eb3bf3aa40237c9e5be2c7893878578439eb00b" - )), - ], - recent_blockhash: SolHash(hex_literal::hex!( - "f7f02ac4729abaa97c01aa6526ba909c3bcb16c7f47c7e13dfdc5a1b15f647b4" - )) - .into(), - instructions: vec![ - CompiledInstruction { - program_id_index: 7, - accounts: hex_literal::hex!("030900").to_vec(), - data: hex_literal::hex!("04000000").to_vec(), - }, - CompiledInstruction { - program_id_index: 8, - accounts: vec![], - data: hex_literal::hex!("030a00000000000000").to_vec(), - }, - CompiledInstruction { - program_id_index: 8, - accounts: vec![], - data: hex_literal::hex!("0233620100").to_vec(), - }, - CompiledInstruction { - program_id_index: 12, - accounts: hex_literal::hex!("0e00040507").to_vec(), - data: hex_literal::hex!("8e24658f6c59298c080000000100000000000000ff").to_vec(), - }, - CompiledInstruction { - program_id_index: 12, - accounts: hex_literal::hex!("0e000d01020b0a0607").to_vec(), - data: hex_literal::hex!("494710642cb0c646080000000200000000000000ff06").to_vec(), - }, - ], - }, - }; - - old::AwaitingBroadcast::insert( - 22, - old::SolanaBroadcastData { - broadcast_id: 22, - transaction_payload: tx, - threshold_signature_payload: SolMessage::default(), - transaction_out_id: SolSignature::default(), - nominee: Some(AccountId32::from([11; 32])), - }, - ); - - let state = serialize_solana_broadcast::pre_upgrade_check().unwrap(); - SerializeSolanaBroadcastMigration::on_runtime_upgrade(); - serialize_solana_broadcast::post_upgrade_check(state).unwrap(); - - let expected_serialized_tx = 
hex_literal::hex!("01d1144b223b6b600de4b2d96bdceb03573a3e9781953e4c668c57e505f017859d96543243b4d904dc2f02f2f5ab5db7ba4551c7e015e64078add4674ac2e7460c0100080f2e8944a76efbece296221e736627f4528a947578263a1172a9786410702d2ef222020a74fd97df45db96d2bbf4e485ccbec56945155ff8f668856be26c9de4a979c03bceb9ddea819e956b2b332e87fbbf49fc8968df78488e88cfaa366f30368cd28baa84f2067bbdf24513c2d44e44bf408f2e6da6e60762e3faa4a62a0adb8d9871ed5fb2ee05765af23b7cabcc0d6b08ed370bb9f616a0d4dea40a25f870b5b9d633289c8fd72fb05f33349bf4cc44e82add5d865311ae346d7c9a67b7ddf53a2f4350451db5595a75e231519bc2758798f72550e57487722e7cbe954dbc00000000000000000000000000000000000000000000000000000000000000000306466fe5211732ffecadba72c39be7bc8ce5bbc5f7126b2c439b3a4000000006a7d517192c568ee08a845f73d29788cf035c3145b21ab344d8062ea940000006ddf6e1d765a193d9cbe146ceeb79ac1cb485ed5f5b37913a8cf5857eff00a90fb9ba52b1f09445f1e3a7508d59f0797923acf744fbe2da303fb06da859ee8772b5d2051d300b10b74314b7e25ace9998ca66eb2c7fbc10ef130dd67028293ca140fd3d05766f0087d57bf99df05731e894392ffcc8e8d7e960ba73c09824aaa1e031c8bc9bec3b610cf7b36eb3bf3aa40237c9e5be2c7893878578439eb00bf7f02ac4729abaa97c01aa6526ba909c3bcb16c7f47c7e13dfdc5a1b15f647b40507030309000404000000080009030a0000000000000008000502336201000c050e00040507158e24658f6c59298c080000000100000000000000ff0c090e000d01020b0a060716494710642cb0c646080000000200000000000000ff06").to_vec(); - - let mut broadcast_iter = - pallet_cf_broadcast::AwaitingBroadcast::::iter(); - let (first_broadcast_id, first_broadcast_data) = broadcast_iter.next().unwrap(); - assert!(broadcast_iter.next().is_none()); - - assert_eq!(first_broadcast_id, 22); - assert_eq!( - first_broadcast_data, - BroadcastData { - broadcast_id: 22, - transaction_payload: SolanaTransactionData { - serialized_transaction: expected_serialized_tx, - }, - threshold_signature_payload: SolMessage::default(), - transaction_out_id: SolSignature::default(), - nominee: Some(AccountId32::from([11; 32])), - } - ); - }); -} diff --git a/state-chain/node/Cargo.toml b/state-chain/node/Cargo.toml index 816cfb682d..7e6fca33d0 100644 --- a/state-chain/node/Cargo.toml +++ b/state-chain/node/Cargo.toml @@ -8,7 +8,7 @@ license = "" name = "chainflip-node" publish = false repository = "https://github.com/chainflip-io/chainflip-backend" -version = "1.7.0" +version = "1.8.0" [[bin]] name = "chainflip-node" diff --git a/state-chain/pallets/cf-broadcast/src/migrations.rs b/state-chain/pallets/cf-broadcast/src/migrations.rs index 405df46959..180e17e3c7 100644 --- a/state-chain/pallets/cf-broadcast/src/migrations.rs +++ b/state-chain/pallets/cf-broadcast/src/migrations.rs @@ -1,27 +1,4 @@ use crate::Pallet; use cf_runtime_utilities::PlaceholderMigration; -use frame_support::migrations::VersionedMigration; -mod initialize_broadcast_timeout_storage; -mod migrate_timeouts; -pub mod remove_aborted_broadcasts; - -pub type PalletMigration = ( - VersionedMigration< - 6, - 7, - initialize_broadcast_timeout_storage::Migration, - Pallet, - ::DbWeight, - >, - VersionedMigration< - 7, - 8, - migrate_timeouts::Migration, - Pallet, - ::DbWeight, - >, - PlaceholderMigration<8, Pallet>, - // Migration 8->9 is SerializeSolanaBroadcastMigration in runtime lib. - // Migration 9->10 is SolanaEgressSuccessWitnessMigration in runtime lib. 
-); +pub type PalletMigration = (PlaceholderMigration<10, Pallet>,); diff --git a/state-chain/pallets/cf-broadcast/src/migrations/initialize_broadcast_timeout_storage.rs b/state-chain/pallets/cf-broadcast/src/migrations/initialize_broadcast_timeout_storage.rs deleted file mode 100644 index a7ae59716a..0000000000 --- a/state-chain/pallets/cf-broadcast/src/migrations/initialize_broadcast_timeout_storage.rs +++ /dev/null @@ -1,92 +0,0 @@ -use frame_support::{traits::UncheckedOnRuntimeUpgrade, weights::Weight}; -use old::maybe_get_timeout_for_type; - -use crate::*; - -// Constants copied from `runtime/src/constants.rs`, -// in order to use same timeout values as given in `node/src/chain_spec.rs` -pub const MILLISECONDS_PER_BLOCK_ETHEREUM: u32 = 14 * 1000; -pub const MILLISECONDS_PER_BLOCK_POLKADOT: u32 = 6 * 1000; -pub const MILLISECONDS_PER_BLOCK_ARBITRUM: u32 = 250; -pub const MILLISECONDS_PER_BLOCK_SOLANA: u32 = 400; - -pub const BLOCKS_PER_MINUTE_ETHEREUM: u32 = 60000 / MILLISECONDS_PER_BLOCK_ETHEREUM; -pub const BLOCKS_PER_MINUTE_POLKADOT: u32 = 60000 / MILLISECONDS_PER_BLOCK_POLKADOT; -pub const BLOCKS_PER_MINUTE_ARBITRUM: u32 = 60000 / MILLISECONDS_PER_BLOCK_ARBITRUM; -pub const BLOCKS_PER_MINUTE_SOLANA: u32 = 60000 / MILLISECONDS_PER_BLOCK_SOLANA; - -mod old { - use cf_primitives::BlockNumber; - - use super::*; - - // Same timeout values as previously defined in `#[pallet::constant]`s - // and same as currently used in `node/src/chain_spec.rs` - pub const ETHEREUM_BROADCAST_TIMEOUT: BlockNumber = 5 * BLOCKS_PER_MINUTE_ETHEREUM; // note, due to rounding, this is effectively ~4.7 min - pub const POLKADOT_BROADCAST_TIMEOUT: BlockNumber = 4 * BLOCKS_PER_MINUTE_POLKADOT; - pub const BITCOIN_BROADCAST_TIMEOUT: BlockNumber = 9; - pub const ARBITRUM_BROADCAST_TIMEOUT: BlockNumber = 2 * BLOCKS_PER_MINUTE_ARBITRUM; - pub const SOLANA_BROADCAST_TIMEOUT: BlockNumber = 4 * BLOCKS_PER_MINUTE_SOLANA; - - // For testing purposes we also have to set the timeout for the mock configuration, - // following `BROADCAST_EXPIRY_BLOCKS` in `mock.rs` - pub const MOCK_ETHEREUM_BROADCAST_TIMEOUT: BlockNumber = 4; - - pub fn maybe_get_timeout_for_type, I: 'static>( - ) -> Option> { - // Choose timeout value based on statically defined chain name. - // It should be the same as the previously used constants. - let timeout: ChainBlockNumberFor = match T::TargetChain::NAME { - "Ethereum" => old::ETHEREUM_BROADCAST_TIMEOUT, - "Polkadot" => old::POLKADOT_BROADCAST_TIMEOUT, - "Bitcoin" => old::BITCOIN_BROADCAST_TIMEOUT, - "Arbitrum" => old::ARBITRUM_BROADCAST_TIMEOUT, - "Solana" => old::SOLANA_BROADCAST_TIMEOUT, - "MockEthereum" => old::MOCK_ETHEREUM_BROADCAST_TIMEOUT, - _ => return None, // skip migration for unexpected chain name - } - .into(); - Some(timeout) - } -} - -pub struct Migration, I: 'static>(PhantomData<(T, I)>); - -impl, I: 'static> UncheckedOnRuntimeUpgrade for Migration { - fn on_runtime_upgrade() -> Weight { - if let Some(timeout) = maybe_get_timeout_for_type::() { - BroadcastTimeout::::set(timeout); - } - - Weight::zero() - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), DispatchError> { - assert_eq!(BroadcastTimeout::::get(), maybe_get_timeout_for_type::().unwrap()); - Ok(()) - } -} - -#[cfg(test)] -mod migration_tests { - - #[test] - fn test_migration() { - use super::*; - use crate::mock::*; - - new_test_ext().execute_with(|| { - // Perform runtime migration. 
- super::Migration::::on_runtime_upgrade(); - #[cfg(feature = "try-runtime")] - super::Migration::::post_upgrade(vec![]).unwrap(); - - // Storage is initialized correctly - assert_eq!( - crate::BroadcastTimeout::::get(), - maybe_get_timeout_for_type::().unwrap() - ); - }); - } -} diff --git a/state-chain/pallets/cf-broadcast/src/migrations/migrate_timeouts.rs b/state-chain/pallets/cf-broadcast/src/migrations/migrate_timeouts.rs deleted file mode 100644 index 0b91ffcd96..0000000000 --- a/state-chain/pallets/cf-broadcast/src/migrations/migrate_timeouts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use frame_support::{ - pallet_prelude::ValueQuery, traits::UncheckedOnRuntimeUpgrade, weights::Weight, -}; - -use crate::*; - -mod old { - use super::*; - - #[frame_support::storage_alias] - pub type Timeouts, I: 'static> = StorageMap< - Pallet, - Twox64Concat, - BlockNumberFor, - BTreeSet<(BroadcastId, ::ValidatorId)>, - ValueQuery, - >; -} - -pub struct Migration, I: 'static>(PhantomData<(T, I)>); - -impl, I: 'static> UncheckedOnRuntimeUpgrade for Migration { - fn on_runtime_upgrade() -> Weight { - // Instead of trying to translate the previous timeout into external chain blocks, - // we simply reset the remaining timeout duration to the new `BroadcastTimeout` value. - let new_timeout = T::ChainTracking::get_block_height() + BroadcastTimeout::::get(); - for (_, timeouts) in old::Timeouts::::drain() { - for (broadcast_id, nominee) in timeouts { - Timeouts::::append((new_timeout, broadcast_id, nominee)) - } - } - Weight::zero() - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, DispatchError> { - let mut timeouts = Vec::new(); - for (_, old_broadcast_ids) in old::Timeouts::::iter() { - for (old_broadcast_id, old_nominee) in old_broadcast_ids { - timeouts.push((old_broadcast_id, old_nominee)) - } - } - let data: MigrationData = MigrationData { - timeouts, - target_chainblock: T::ChainTracking::get_block_height() + - BroadcastTimeout::::get(), - }; - Ok(data.encode()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), DispatchError> { - let data = MigrationData::::decode(&mut &state[..]).unwrap(); - let new_timeouts = Timeouts::::get(); - - // We don't know whether the timeout is set to exactly the `new_timeout` value or a higher - // one, because between getting the current block height in `pre_upgrade` and in - // `on_runtime_upgrade` some time might have passed. 
- for (broadcast_id, nominee) in data.timeouts { - let (new_timeout, _, _) = new_timeouts - .iter() - .find(|(_, id, nom)| (id, nom) == (&broadcast_id, &nominee)) - .unwrap(); - assert!(*new_timeout >= data.target_chainblock); - } - - // Make sure that the old map is empty - assert!(old::Timeouts::::iter().next().is_none()); - - Ok(()) - } -} - -#[derive(Encode, Decode)] -pub struct MigrationData, I: 'static> { - pub timeouts: Vec<(BroadcastId, ::ValidatorId)>, - pub target_chainblock: ChainBlockNumberFor, -} - -#[cfg(test)] -mod migration_tests { - #[test] - fn test_migration() { - use super::*; - use crate::mock::*; - - new_test_ext().execute_with(|| { - let target = frame_system::Pallet::::block_number() + - BroadcastTimeout::::get(); - - // Create a few timeouts to migrate - old::Timeouts::::set(target, BTreeSet::from([(0, 100), (1, 101), (3, 102)])); - old::Timeouts::::set(target + 1, BTreeSet::from([(4, 103), (5, 104)])); - - #[cfg(feature = "try-runtime")] - let state = super::Migration::::pre_upgrade().unwrap(); - - // increment block height - let new_height = >::ChainTracking::get_block_height() + 20; - >::ChainTracking::set_block_height(new_height); - - // Perform runtime migration. - super::Migration::::on_runtime_upgrade(); - - #[cfg(feature = "try-runtime")] - super::Migration::::post_upgrade(state).unwrap(); - }); - } -} diff --git a/state-chain/pallets/cf-broadcast/src/migrations/remove_aborted_broadcasts.rs b/state-chain/pallets/cf-broadcast/src/migrations/remove_aborted_broadcasts.rs deleted file mode 100644 index 3161b13277..0000000000 --- a/state-chain/pallets/cf-broadcast/src/migrations/remove_aborted_broadcasts.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::*; - -// Highest stale aborted broadcasts as of 3/10/2024: -// Mainnet -pub const ETHEREUM_MAX_ABORTED_BROADCAST_BERGHAIN: BroadcastId = 11592; -pub const ARBITRUM_MAX_ABORTED_BROADCAST_BERGHAIN: BroadcastId = 426; -// Perseverance testnet -pub const ETHEREUM_MAX_ABORTED_BROADCAST_PERSEVERANCE: BroadcastId = 1609; -pub const ARBITRUM_MAX_ABORTED_BROADCAST_PERSEVERANCE: BroadcastId = 665; -pub const POLKADOT_MAX_ABORTED_BROADCAST_PERSEVERANCE: BroadcastId = 634; - -pub fn remove_stale_and_all_older, I: 'static>(latest_stale_broadcast: BroadcastId) { - AbortedBroadcasts::::mutate(|aborted| { - aborted.retain(|id| id > &latest_stale_broadcast); - }); -} - -#[cfg(feature = "try-runtime")] -pub fn assert_removed, I: 'static>(latest_stale_broadcast: BroadcastId) { - let aborted_broadcasts = AbortedBroadcasts::::get(); - if let Some(first) = aborted_broadcasts.first() { - assert!(*first > latest_stale_broadcast, "Aborted broadcast {first} was not removed"); - } -} diff --git a/state-chain/pallets/cf-elections/src/lib.rs b/state-chain/pallets/cf-elections/src/lib.rs index bb8560dd33..a294226fff 100644 --- a/state-chain/pallets/cf-elections/src/lib.rs +++ b/state-chain/pallets/cf-elections/src/lib.rs @@ -109,7 +109,6 @@ pub mod electoral_system; pub mod electoral_systems; -pub mod migrations; mod mock; mod tests; pub mod vote_storage; diff --git a/state-chain/pallets/cf-elections/src/migrations.rs b/state-chain/pallets/cf-elections/src/migrations.rs deleted file mode 100644 index 22c9b6fdc3..0000000000 --- a/state-chain/pallets/cf-elections/src/migrations.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod change_old; diff --git a/state-chain/pallets/cf-elections/src/migrations/change_old.rs b/state-chain/pallets/cf-elections/src/migrations/change_old.rs deleted file mode 100644 index 7730c9040b..0000000000 --- 
a/state-chain/pallets/cf-elections/src/migrations/change_old.rs +++ /dev/null @@ -1,152 +0,0 @@ -use crate::{ - electoral_system::{ - AuthorityVoteOf, ConsensusVotes, ElectionIdentifierOf, ElectionReadAccess, - ElectionWriteAccess, ElectoralSystem, ElectoralWriteAccess, VotePropertiesOf, - }, - electoral_systems::monotonic_change::OnChangeHook, - vote_storage::{self, VoteStorage}, - CorruptStorageError, -}; -use cf_utilities::success_threshold_from_share_count; -use frame_support::{ - pallet_prelude::{MaybeSerializeDeserialize, Member}, - Parameter, -}; -use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; - -/// This electoral system detects if a value changes. The SC can request that it detects if a -/// particular value, the instance of which is specified by an identifier, has changed from some -/// specified value. Once a change is detected and gains consensus the hook is called and the system -/// will stop trying to detect changes for that identifier. -/// -/// `Settings` can be used by governance to provide information to authorities about exactly how -/// they should `vote`. -/// -/// Authorities only need to vote if their observed value is different than the one specified in the -/// `ElectionProperties`. -pub struct Change { - _phantom: core::marker::PhantomData<(Identifier, Value, Settings, Hook, ValidatorId)>, -} - -impl< - Identifier: Member + Parameter + Ord, - Value: Member + Parameter + Eq + Ord, - Settings: Member + Parameter + MaybeSerializeDeserialize + Eq, - Hook: OnChangeHook + 'static, - ValidatorId: Member + Parameter + Ord + MaybeSerializeDeserialize, - > Change -{ - pub fn watch_for_change>( - electoral_access: &mut ElectoralAccess, - identifier: Identifier, - previous_value: Value, - ) -> Result<(), CorruptStorageError> { - electoral_access.new_election((), (identifier, previous_value), ())?; - Ok(()) - } -} -impl< - Identifier: Member + Parameter + Ord, - Value: Member + Parameter + Eq + Ord, - Settings: Member + Parameter + MaybeSerializeDeserialize + Eq, - Hook: OnChangeHook + 'static, - ValidatorId: Member + Parameter + Ord + MaybeSerializeDeserialize, - > ElectoralSystem for Change -{ - type ValidatorId = ValidatorId; - type ElectoralUnsynchronisedState = (); - type ElectoralUnsynchronisedStateMapKey = (); - type ElectoralUnsynchronisedStateMapValue = (); - - type ElectoralUnsynchronisedSettings = (); - type ElectoralSettings = Settings; - type ElectionIdentifierExtra = (); - type ElectionProperties = (Identifier, Value); - type ElectionState = (); - type Vote = vote_storage::bitmap::Bitmap; - type Consensus = Value; - type OnFinalizeContext = (); - type OnFinalizeReturn = (); - - fn generate_vote_properties( - _election_identifier: ElectionIdentifierOf, - _previous_vote: Option<(VotePropertiesOf, AuthorityVoteOf)>, - _vote: &::PartialVote, - ) -> Result, CorruptStorageError> { - Ok(()) - } - - fn is_vote_desired>( - _election_identifier: ElectionIdentifierOf, - _election_access: &ElectionAccess, - _current_vote: Option<(VotePropertiesOf, AuthorityVoteOf)>, - ) -> Result { - Ok(true) - } - - fn is_vote_needed( - (_, _, current_vote): ( - VotePropertiesOf, - ::PartialVote, - AuthorityVoteOf, - ), - (_, proposed_vote): ( - ::PartialVote, - ::Vote, - ), - ) -> bool { - match current_vote { - AuthorityVoteOf::::Vote(current_vote) => current_vote != proposed_vote, - // Could argue for either true or false. If the `PartialVote` is never reconstructed and - // becomes invalid, then this function will be bypassed and the vote will be considered - // needed. 
So false is safe, and true will likely result in unneeded voting. - _ => false, - } - } - - fn on_finalize>( - electoral_access: &mut ElectoralAccess, - election_identifiers: Vec>, - _context: &Self::OnFinalizeContext, - ) -> Result { - for election_identifier in election_identifiers { - let mut election_access = electoral_access.election_mut(election_identifier)?; - if let Some(value) = election_access.check_consensus()?.has_consensus() { - let (identifier, previous_value) = election_access.properties()?; - if previous_value != value { - election_access.delete(); - Hook::on_change(identifier, value); - } - } - } - - Ok(()) - } - - fn check_consensus>( - _election_identifier: ElectionIdentifierOf, - _election_access: &ElectionAccess, - _previous_consensus: Option<&Self::Consensus>, - consensus_votes: ConsensusVotes, - ) -> Result, CorruptStorageError> { - let num_authorities = consensus_votes.num_authorities(); - let active_votes = consensus_votes.active_votes(); - let num_active_votes = active_votes.len() as u32; - let success_threshold = success_threshold_from_share_count(num_authorities); - Ok(if num_active_votes >= success_threshold { - let mut counts = BTreeMap::new(); - for vote in active_votes { - counts.entry(vote).and_modify(|count| *count += 1).or_insert(1); - } - counts.iter().find_map(|(vote, count)| { - if *count >= success_threshold { - Some(vote.clone()) - } else { - None - } - }) - } else { - None - }) - } -} diff --git a/state-chain/pallets/cf-ingress-egress/src/migrations.rs b/state-chain/pallets/cf-ingress-egress/src/migrations.rs index 8eea0e3350..663cda034f 100644 --- a/state-chain/pallets/cf-ingress-egress/src/migrations.rs +++ b/state-chain/pallets/cf-ingress-egress/src/migrations.rs @@ -1,15 +1,4 @@ use crate::Pallet; use cf_runtime_utilities::PlaceholderMigration; -use frame_support::migrations::VersionedMigration; -mod add_owner_to_channel_details; -pub type PalletMigration = ( - VersionedMigration< - 15, - 16, - add_owner_to_channel_details::Migration, - Pallet, - ::DbWeight, - >, - PlaceholderMigration<16, Pallet>, -); +pub type PalletMigration = (PlaceholderMigration<16, Pallet>,); diff --git a/state-chain/pallets/cf-ingress-egress/src/migrations/add_owner_to_channel_details.rs b/state-chain/pallets/cf-ingress-egress/src/migrations/add_owner_to_channel_details.rs deleted file mode 100644 index 8dd56dffd8..0000000000 --- a/state-chain/pallets/cf-ingress-egress/src/migrations/add_owner_to_channel_details.rs +++ /dev/null @@ -1,183 +0,0 @@ -use frame_support::{traits::UncheckedOnRuntimeUpgrade, weights::Weight}; - -use crate::*; -mod old { - use super::*; - - #[derive(Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, TypeInfo)] - pub enum ChannelAction { - Swap { - destination_asset: Asset, - destination_address: ForeignChainAddress, - broker_fees: Beneficiaries, - refund_params: Option, - dca_params: Option, - }, - LiquidityProvision { - lp_account: AccountId, - }, - CcmTransfer { - destination_asset: Asset, - destination_address: ForeignChainAddress, - broker_fees: Beneficiaries, - channel_metadata: CcmChannelMetadata, - refund_params: Option, - dca_params: Option, - }, - } - - #[derive(CloneNoBound, RuntimeDebug, PartialEq, Eq, Encode, Decode, TypeInfo)] - #[scale_info(skip_type_params(T, I))] - pub struct DepositChannelDetails, I: 'static> { - pub deposit_channel: DepositChannel, - pub opened_at: TargetChainBlockNumber, - pub expires_at: TargetChainBlockNumber, - pub action: ChannelAction, - pub boost_fee: BasisPoints, - pub boost_status: BoostStatus>, - } - 
- #[frame_support::storage_alias] - pub type DepositChannelLookup, I: 'static> = StorageMap< - Pallet, - Twox64Concat, - TargetChainAccount, - DepositChannelDetails, - OptionQuery, - >; -} - -pub struct Migration, I: 'static>(PhantomData<(T, I)>); - -impl, I: 'static> UncheckedOnRuntimeUpgrade for Migration { - fn on_runtime_upgrade() -> Weight { - DepositChannelLookup::::translate( - |_account, channel_details: old::DepositChannelDetails| { - let dummy_account = T::AccountId::decode(&mut &[0u8; 32][..]).unwrap(); - let channel_action = match channel_details.action { - old::ChannelAction::LiquidityProvision { lp_account, .. } => - ChannelAction::LiquidityProvision { lp_account, refund_address: None }, - old::ChannelAction::Swap { - destination_asset, - destination_address, - broker_fees, - refund_params, - dca_params, - } => ChannelAction::Swap { - destination_asset, - destination_address, - broker_fees, - refund_params, - dca_params, - }, - old::ChannelAction::CcmTransfer { - destination_asset, - destination_address, - broker_fees, - channel_metadata, - refund_params, - dca_params, - } => ChannelAction::CcmTransfer { - destination_asset, - destination_address, - broker_fees, - channel_metadata, - refund_params, - dca_params, - }, - }; - let new_channel_details = DepositChannelDetails { - owner: dummy_account, - deposit_channel: channel_details.deposit_channel, - opened_at: channel_details.opened_at, - expires_at: channel_details.expires_at, - action: channel_action, - boost_fee: channel_details.boost_fee, - boost_status: channel_details.boost_status, - }; - Some(new_channel_details) - }, - ); - Weight::zero() - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, DispatchError> { - let number_of_old_channels: u32 = - old::DepositChannelLookup::::iter().collect::>().len() as u32; - Ok(number_of_old_channels.encode()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), DispatchError> { - let number_of_old_channels = u32::decode(&mut &state[..]).unwrap(); - let number_of_new_channels = - DepositChannelLookup::::iter().collect::>().len() as u32; - assert_eq!(number_of_old_channels, number_of_new_channels); - Ok(()) - } -} - -#[cfg(test)] -mod migration_tests { - use super::*; - use crate::mock_eth::*; - - #[test] - fn test_migration() { - use cf_chains::evm::DeploymentStatus; - new_test_ext().execute_with(|| { - let channel_id = 1u64; - let address = sp_core::H160([1u8; 20]); - let asset = cf_chains::assets::eth::Asset::Eth; - let deployment_state = DeploymentStatus::Deployed; - let lp_account = 5u64; - let opened_at = 1u64; - let expires_at = 2u64; - let action = ChannelAction::LiquidityProvision { lp_account, refund_address: None }; - let boost_fee = 1; - let boost_status = BoostStatus::NotBoosted; - - old::DepositChannelLookup::::insert( - address, - old::DepositChannelDetails { - deposit_channel: DepositChannel { - asset, - channel_id, - address, - state: deployment_state, - }, - opened_at, - expires_at, - action: old::ChannelAction::LiquidityProvision { lp_account }, - boost_fee, - boost_status, - }, - ); - assert_eq!(old::DepositChannelLookup::::iter().count(), 1); - - #[cfg(feature = "try-runtime")] - let state = super::Migration::::pre_upgrade().unwrap(); - super::Migration::::on_runtime_upgrade(); - - #[cfg(feature = "try-runtime")] - super::Migration::::post_upgrade(state).unwrap(); - - assert_eq!(DepositChannelLookup::::iter().count(), 1); - - let migrated_deposit_channel = DepositChannelLookup::::get(address) - .expect("to have a 
channel in storage"); - - assert_eq!(migrated_deposit_channel.owner, 0); - assert_eq!(old::DepositChannelLookup::::iter().count(), 0); - - assert_eq!(migrated_deposit_channel.deposit_channel.asset, asset); - assert_eq!(migrated_deposit_channel.deposit_channel.channel_id, channel_id); - assert_eq!(migrated_deposit_channel.deposit_channel.address, address); - assert_eq!(migrated_deposit_channel.deposit_channel.state, deployment_state); - assert_eq!(migrated_deposit_channel.opened_at, opened_at); - assert_eq!(migrated_deposit_channel.expires_at, expires_at); - assert_eq!(migrated_deposit_channel.action, action); - }); - } -} diff --git a/state-chain/pallets/cf-validator/src/migrations.rs b/state-chain/pallets/cf-validator/src/migrations.rs index 57ec3b02ad..63df77caf2 100644 --- a/state-chain/pallets/cf-validator/src/migrations.rs +++ b/state-chain/pallets/cf-validator/src/migrations.rs @@ -1,16 +1,4 @@ use crate::Pallet; use cf_runtime_utilities::PlaceholderMigration; -use frame_support::migrations::VersionedMigration; -mod delete_old_epoch_data; - -pub type PalletMigration = ( - VersionedMigration< - 3, - 4, - delete_old_epoch_data::Migration, - Pallet, - ::DbWeight, - >, - PlaceholderMigration<4, Pallet>, -); +pub type PalletMigration = (PlaceholderMigration<4, Pallet>,); diff --git a/state-chain/pallets/cf-validator/src/migrations/delete_old_epoch_data.rs b/state-chain/pallets/cf-validator/src/migrations/delete_old_epoch_data.rs deleted file mode 100644 index c9dac2f798..0000000000 --- a/state-chain/pallets/cf-validator/src/migrations/delete_old_epoch_data.rs +++ /dev/null @@ -1,118 +0,0 @@ -use frame_support::{traits::UncheckedOnRuntimeUpgrade, weights::Weight}; - -use crate::*; - -pub struct Migration(PhantomData); - -impl UncheckedOnRuntimeUpgrade for Migration { - fn on_runtime_upgrade() -> Weight { - pub fn delete_all_old( - iter: Iter, - remove: Remove, - relevant: Relevant, - ) where - Iter: Fn() -> IterRes, - IterRes: Iterator, - Relevant: Fn(Item) -> Option, - Remove: Fn(Index), - { - let mut old_indices = Vec::new(); - for item in iter() { - if let Some(index) = relevant(item) { - old_indices.push(index); - } - } - for index in old_indices { - remove(index); - } - } - - let epoch = LastExpiredEpoch::::get(); - - delete_all_old( - HistoricalAuthorities::::iter, - HistoricalAuthorities::::remove, - |(e, _)| if e <= epoch { Some(e) } else { None }, - ); - delete_all_old(HistoricalBonds::::iter, HistoricalBonds::::remove, |(e, _)| { - if e <= epoch { - Some(e) - } else { - None - } - }); - delete_all_old( - AuthorityIndex::::iter, - |(e1, e2)| AuthorityIndex::::remove(e1, e2), - |(e, e2, _)| if e <= epoch { Some((e, e2)) } else { None }, - ); - - Weight::zero() - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, DispatchError> { - Ok(vec![]) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), DispatchError> { - let epoch = LastExpiredEpoch::::get(); - - assert!(!HistoricalAuthorities::::iter().any(|(e, _)| e <= epoch)); - assert!(!HistoricalBonds::::iter().any(|(e, _)| e <= epoch)); - assert!(!AuthorityIndex::::iter().any(|(e, _, _)| e <= epoch)); - - Ok(()) - } -} - -#[cfg(test)] -mod migration_tests { - #[test] - fn test_migration() { - use super::*; - use crate::mock::*; - - new_test_ext().execute_with(|| { - let last_expired_epoch = 1000; - LastExpiredEpoch::::set(last_expired_epoch); - - // create some test values - HistoricalAuthorities::::set(last_expired_epoch - 2, vec![1, 2, 3]); - 
HistoricalAuthorities::::set(last_expired_epoch - 1, vec![4, 5]); - HistoricalAuthorities::::set(last_expired_epoch, vec![6, 7, 8, 9]); - HistoricalAuthorities::::set(last_expired_epoch + 1, vec![10, 11]); - - HistoricalBonds::::set(last_expired_epoch - 2, 100); - HistoricalBonds::::set(last_expired_epoch - 1, 101); - HistoricalBonds::::set(last_expired_epoch, 102); - HistoricalBonds::::set(last_expired_epoch + 1, 103); - - AuthorityIndex::::set(last_expired_epoch - 2, 1, Some(1)); - AuthorityIndex::::set(last_expired_epoch - 2, 2, Some(2)); - AuthorityIndex::::set(last_expired_epoch - 2, 3, Some(3)); - AuthorityIndex::::set(last_expired_epoch - 1, 1, Some(1)); - AuthorityIndex::::set(last_expired_epoch - 1, 2, Some(2)); - AuthorityIndex::::set(last_expired_epoch, 3, Some(1)); - AuthorityIndex::::set(last_expired_epoch, 1, Some(2)); - AuthorityIndex::::set(last_expired_epoch + 1, 2, Some(1)); - AuthorityIndex::::set(last_expired_epoch + 2, 3, Some(2)); - - #[cfg(feature = "try-runtime")] - let state = super::Migration::::pre_upgrade().unwrap(); - - // Perform runtime migration. - super::Migration::::on_runtime_upgrade(); - - #[cfg(feature = "try-runtime")] - super::Migration::::post_upgrade(state).unwrap(); - - // ensure that data which is not expired is kept - assert_eq!(HistoricalAuthorities::::get(last_expired_epoch + 1), vec![10, 11]); - assert_eq!(HistoricalBonds::::get(last_expired_epoch + 1), 103); - assert_eq!(AuthorityIndex::::get(last_expired_epoch + 1, 2), Some(1)); - assert_eq!(AuthorityIndex::::get(last_expired_epoch + 2, 3), Some(2)); - }); - } -} diff --git a/state-chain/runtime/Cargo.toml b/state-chain/runtime/Cargo.toml index 64ee2d7877..b70460b1a6 100644 --- a/state-chain/runtime/Cargo.toml +++ b/state-chain/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "state-chain-runtime" -version = "1.7.0" +version = "1.8.0" authors = ["Chainflip Team "] edition = "2021" homepage = "https://chainflip.io" diff --git a/state-chain/runtime/src/chainflip/solana_elections.rs b/state-chain/runtime/src/chainflip/solana_elections.rs index 04367dc753..19eb845ab5 100644 --- a/state-chain/runtime/src/chainflip/solana_elections.rs +++ b/state-chain/runtime/src/chainflip/solana_elections.rs @@ -55,337 +55,6 @@ pub type SolanaElectoralSystem = Composite< SolanaElectionHooks, >; -pub mod old { - use super::*; - use crate::Weight; - use bitvec::prelude::*; - use cf_primitives::EpochIndex; - use frame_support::{ - pallet_prelude::{OptionQuery, StorageDoubleMap}, - traits::{OnRuntimeUpgrade, StorageInstance}, - Identity, Twox64Concat, - }; - use pallet_cf_elections::{ - electoral_system::ElectionIdentifierOf, vote_storage::VoteStorage, ConsensusHistory, - SharedDataHash, UniqueMonotonicIdentifier, - }; - - pub type SolanaNonceTrackingOld = pallet_cf_elections::migrations::change_old::Change< - SolAddress, - SolHash, - (), - SolanaNonceTrackingHook, - ::ValidatorId, - >; - pub type SolanaElectoralSystem = Composite< - ( - SolanaBlockHeightTracking, - SolanaFeeTracking, - SolanaIngressTracking, - SolanaNonceTrackingOld, - SolanaEgressWitnessing, - SolanaLiveness, - ), - ::ValidatorId, - SolanaElectionHooksOld, - >; - pub struct SolanaElectionHooksOld; - impl - Hooks< - SolanaBlockHeightTracking, - SolanaFeeTracking, - SolanaIngressTracking, - SolanaNonceTrackingOld, - SolanaEgressWitnessing, - SolanaLiveness, - > for SolanaElectionHooksOld - { - type OnFinalizeContext = (); - type OnFinalizeReturn = (); - - fn on_finalize< - GenericElectoralAccess, - BlockHeightTranslator: Translator, - 
FeeTranslator: Translator, - IngressTranslator: Translator, - OldNonceTrackingTranslator: Translator, - EgressWitnessingTranslator: Translator, - LivenessTranslator: Translator, - >( - generic_electoral_access: &mut GenericElectoralAccess, - ( - block_height_translator, - fee_translator, - ingress_translator, - old_nonce_translator, - egress_witnessing_translator, - liveness_translator, - ): ( - BlockHeightTranslator, - FeeTranslator, - IngressTranslator, - OldNonceTrackingTranslator, - EgressWitnessingTranslator, - LivenessTranslator, - ), - ( - block_height_identifiers, - fee_identifiers, - ingress_identifiers, - old_nonce_identifiers, - egress_witnessing_identifiers, - liveness_identifiers, - ): ( - Vec< - ElectionIdentifier< - ::ElectionIdentifierExtra, - >, - >, - Vec< - ElectionIdentifier< - ::ElectionIdentifierExtra, - >, - >, - Vec< - ElectionIdentifier< - ::ElectionIdentifierExtra, - >, - >, - Vec< - ElectionIdentifier< - ::ElectionIdentifierExtra, - >, - >, - Vec< - ElectionIdentifier< - ::ElectionIdentifierExtra, - >, - >, - Vec< - ElectionIdentifier< - ::ElectionIdentifierExtra, - >, - >, - ), - _context: &Self::OnFinalizeContext, - ) -> Result { - let block_height = SolanaBlockHeightTracking::on_finalize( - &mut block_height_translator.translate_electoral_access(generic_electoral_access), - block_height_identifiers, - &(), - )?; - SolanaLiveness::on_finalize( - &mut liveness_translator.translate_electoral_access(generic_electoral_access), - liveness_identifiers, - &(crate::System::block_number(), block_height), - )?; - SolanaFeeTracking::on_finalize( - &mut fee_translator.translate_electoral_access(generic_electoral_access), - fee_identifiers, - &(), - )?; - SolanaEgressWitnessing::on_finalize( - &mut egress_witnessing_translator - .translate_electoral_access(generic_electoral_access), - egress_witnessing_identifiers, - &(), - )?; - SolanaIngressTracking::on_finalize( - &mut ingress_translator.translate_electoral_access(generic_electoral_access), - ingress_identifiers, - &block_height, - )?; - old::SolanaNonceTrackingOld::on_finalize( - &mut old_nonce_translator.translate_electoral_access(generic_electoral_access), - old_nonce_identifiers, - &(), - )?; - Ok(()) - } - } - #[derive(Encode, Decode, TypeInfo, Clone)] - struct ElectionBitmapComponents { - epoch: EpochIndex, - #[allow(clippy::type_complexity)] - bitmaps: Vec<( - <::Vote as VoteStorage>::BitmapComponent, - BitVec, - )>, //sp_core::H256, BitVec)>, - } - #[derive(PartialEq, Eq, Clone, Debug, Encode, Decode, TypeInfo, Default)] - pub struct ReferenceDetails { - pub count: u32, - pub created: u32, - pub expires: u32, - } - pub struct Migration; - impl OnRuntimeUpgrade for Migration { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::DispatchError> { - let election_identifiers = frame_support::migration::storage_key_iter::< - ElectionIdentifierOf, - ::ElectionProperties, - Twox64Concat - >(b"SolanaElections", b"ElectionProperties") - .filter(|(_, value)| { - matches!(value, pallet_cf_elections::electoral_systems::composite::tuple_6_impls::CompositeElectionProperties::D(_)) - }) - .map(|(key, value)| { - log::info!("Old {:?}: {:?}",key, value); - key - }) - .collect::>>(); - log::info!("Number of elections: {:?}", election_identifiers.len()); - Ok((election_identifiers.len() as u32).encode()) - } - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), sp_runtime::DispatchError> { - let previous_number_election = u32::decode(&mut &state[..]).unwrap(); - log::info!("Post 
upgrade number of election old state: {:?}", previous_number_election); - log::info!( - "Post upgrade number of unavailable nonces: {:?}", - pallet_cf_environment::SolanaUnavailableNonceAccounts::::iter_keys() - .collect::>() - .len() as u32 - ); - - assert!( - previous_number_election == - pallet_cf_environment::SolanaUnavailableNonceAccounts::::iter_keys() - .collect::>() - .len() as u32 - ); - let election_identifiers = frame_support::migration::storage_key_iter::< - ElectionIdentifierOf, - ::ElectionProperties, - Twox64Concat - >(b"SolanaElections", b"ElectionProperties") - .filter(|(_, value)| { - matches!(value, pallet_cf_elections::electoral_systems::composite::tuple_6_impls::CompositeElectionProperties::D(_)) - }) - .map(|(key, _)| { - key - }) - .collect::>>(); - log::info!("Post upgrade number of elections: {:?}", election_identifiers.len() as u32); - assert!(previous_number_election == election_identifiers.len() as u32); - - Ok(()) - } - fn on_runtime_upgrade() -> frame_support::weights::Weight { - let election_identifiers = frame_support::migration::storage_key_iter::< - ElectionIdentifierOf, - ::ElectionProperties, - Twox64Concat - >(b"SolanaElections", b"ElectionProperties") - .filter(|(_, value)| { - matches!(value, pallet_cf_elections::electoral_systems::composite::tuple_6_impls::CompositeElectionProperties::D(_)) - }) - .map(|(key, value)| { - log::info!("During Upgrade {:?}: {:?}",key, value); - key - }) - .collect::>>(); - - for election_identifier in election_identifiers { - //Removing BitmapComponents - let bitmap = frame_support::storage::migration::take_storage_item::< - _, - ElectionBitmapComponents, - Twox64Concat, - >( - b"SolanaElections", - b"BitmapComponents", - election_identifier.unique_monotonic(), - ); - if bitmap.is_some() { - log::info!("Bitmap {:?}", bitmap.clone().unwrap().bitmaps); - //If they have some data, remove the SharedDataRederenceCount as well - for (bitmap_component, _) in bitmap.unwrap().bitmaps { - <::Vote as VoteStorage>::visit_shared_data_references_in_bitmap_component( - &bitmap_component, - |shared_data_hash| { - struct StoragePrefix; - impl StorageInstance for StoragePrefix{ - const STORAGE_PREFIX: &'static str = "SharedDataReferenceCount"; - fn pallet_prefix() -> &'static str { - "SolanaElections" - } - } - let hashed_key_and_prefix = StorageDoubleMap::< - StoragePrefix, - Identity, - SharedDataHash, - Twox64Concat, - UniqueMonotonicIdentifier, - (), - OptionQuery, - >::hashed_key_for(shared_data_hash, election_identifier.unique_monotonic()); - let reference: core::option::Option = frame_support::storage::unhashed::take::(&hashed_key_and_prefix); - log::info!("References {:?}", reference); - let shared_data = - frame_support::storage::migration::take_storage_item::< - _, - <::Vote as VoteStorage>::SharedData, - Identity, - >(b"SolanaElections", b"SharedData", shared_data_hash); - log::info!("SharedData {:?}", shared_data); - } - ); - } - } - let properties = - frame_support::storage::migration::take_storage_item::< - _, - ::ElectionProperties, - Twox64Concat, - >(b"SolanaElections", b"ElectionProperties", election_identifier); - log::info!("Properties {:?}", properties); - - let consensus_history = frame_support::storage::migration::take_storage_item::< - _, - ConsensusHistory<::Consensus>, - Twox64Concat, - >( - b"SolanaElections", - b"ElectionConsensusHistory", - election_identifier.unique_monotonic(), - ); - log::info!("Consensus history {:?}", consensus_history); - - let settings = 
frame_support::storage::migration::take_storage_item::< - _, - ::ElectoralSettings, - Twox64Concat, - >( - b"SolanaElections", - b"ElectoralSettings", - election_identifier.unique_monotonic(), - ); - log::info!("Settings {:?}", settings); - - let consensus_history_uptodate = - frame_support::storage::migration::take_storage_item::< - _, - EpochIndex, - Twox64Concat, - >( - b"SolanaElections", - b"ElectionConsensusHistoryUpToDate", - election_identifier.unique_monotonic(), - ); - log::info!("Consensus history up to date {:?}", consensus_history_uptodate); - } - for (key, value) in - pallet_cf_environment::SolanaUnavailableNonceAccounts::::iter() - { - log::info!("Creating a new election for nonce: {:?}, {:?}", key, value); - let _ = SolanaNonceTrackingTrigger::watch_for_nonce_change(key, value); - } - Weight::zero() - } - } -} const LIVENESS_CHECK_DURATION: BlockNumberFor = 10; /// Creates an initial state to initialize the pallet with. diff --git a/state-chain/runtime/src/lib.rs b/state-chain/runtime/src/lib.rs index e1568b449c..c70a746158 100644 --- a/state-chain/runtime/src/lib.rs +++ b/state-chain/runtime/src/lib.rs @@ -18,7 +18,6 @@ use crate::{ }, Offence, }, - migrations::serialize_solana_broadcast::SerializeSolanaBroadcastMigration, monitoring_apis::{ ActivateKeysBroadcastIds, AuthoritiesInfo, BtcUtxos, EpochState, ExternalChainsBlockHeight, FeeImbalance, FlipSupply, LastRuntimeUpgradeInfo, MonitoringData, OpenDepositChannels, @@ -51,19 +50,14 @@ use cf_chains::{ Arbitrum, Bitcoin, DefaultRetryPolicy, ForeignChain, Polkadot, Solana, TransactionBuilder, }; use cf_primitives::{BroadcastId, EpochIndex, NetworkEnvironment, STABLE_ASSET}; -use cf_runtime_utilities::NoopRuntimeUpgrade; use cf_traits::{ AdjustedFeeEstimationApi, AssetConverter, BalanceApi, DummyEgressSuccessWitnesser, DummyIngressSource, GetBlockHeight, NoLimit, SwapLimits, SwapLimitsProvider, }; use codec::{alloc::string::ToString, Decode, Encode}; use core::ops::Range; -use frame_support::{derive_impl, instances::*, migrations::VersionedMigration}; +use frame_support::{derive_impl, instances::*}; pub use frame_system::Call as SystemCall; -use migrations::{ - add_liveness_electoral_system_solana::LivenessSettingsMigration, - solana_egress_success_witness::SolanaEgressSuccessWitnessMigration, -}; use pallet_cf_governance::GovCallHash; use pallet_cf_ingress_egress::{ ChannelAction, DepositWitness, IngressOrEgress, OwedAmount, TargetChainAsset, @@ -202,7 +196,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("chainflip-node"), impl_name: create_runtime_str!("chainflip-node"), authoring_version: 1, - spec_version: 170, + spec_version: 180, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 12, @@ -1236,10 +1230,9 @@ type AllMigrations = ( // UPGRADE pallet_cf_environment::migrations::VersionUpdate, PalletMigrations, - MigrationsForV1_7, + MigrationsForV1_8, migrations::housekeeping::Migration, migrations::reap_old_accounts::Migration, - chainflip::solana_elections::old::Migration, ); /// All the pallet-specific migrations and migrations that depend on pallet migration order. 
Do not @@ -1281,60 +1274,7 @@ type PalletMigrations = ( pallet_cf_cfe_interface::migrations::PalletMigration, ); -type MigrationsForV1_7 = ( - // Only the Solana Transaction type has changed - VersionedMigration< - 8, - 9, - SerializeSolanaBroadcastMigration, - pallet_cf_broadcast::Pallet, - DbWeight, - >, - // For clearing all Solana Egress Success election votes, and migrating Solana ApiCall to the - // newer version. - VersionedMigration< - 9, - 10, - SolanaEgressSuccessWitnessMigration, - pallet_cf_broadcast::Pallet, - DbWeight, - >, - VersionedMigration< - 8, - 10, - NoopRuntimeUpgrade, - pallet_cf_broadcast::Pallet, - DbWeight, - >, - VersionedMigration< - 8, - 10, - NoopRuntimeUpgrade, - pallet_cf_broadcast::Pallet, - DbWeight, - >, - VersionedMigration< - 8, - 10, - NoopRuntimeUpgrade, - pallet_cf_broadcast::Pallet, - DbWeight, - >, - VersionedMigration< - 8, - 10, - NoopRuntimeUpgrade, - pallet_cf_broadcast::Pallet, - DbWeight, - >, - VersionedMigration< - 0, - 1, - LivenessSettingsMigration, - pallet_cf_elections::Pallet, - DbWeight, - >, -); +type MigrationsForV1_8 = (); #[cfg(feature = "runtime-benchmarks")] #[macro_use] diff --git a/state-chain/runtime/src/migrations.rs b/state-chain/runtime/src/migrations.rs index 03536d059b..187dd5164c 100644 --- a/state-chain/runtime/src/migrations.rs +++ b/state-chain/runtime/src/migrations.rs @@ -1,7 +1,4 @@ //! Chainflip runtime storage migrations. -pub mod add_liveness_electoral_system_solana; pub mod housekeeping; pub mod reap_old_accounts; -pub mod serialize_solana_broadcast; -pub mod solana_egress_success_witness; diff --git a/state-chain/runtime/src/migrations/add_liveness_electoral_system_solana.rs b/state-chain/runtime/src/migrations/add_liveness_electoral_system_solana.rs deleted file mode 100644 index 66ed9254c9..0000000000 --- a/state-chain/runtime/src/migrations/add_liveness_electoral_system_solana.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::*; -use frame_support::{pallet_prelude::Weight, storage::unhashed, traits::UncheckedOnRuntimeUpgrade}; -use frame_system::pallet_prelude::BlockNumberFor; - -use pallet_cf_elections::{electoral_system::ElectoralSystem, Config, ElectoralSettings}; -#[cfg(feature = "try-runtime")] -use sp_runtime::DispatchError; - -use codec::{Decode, Encode}; - -pub struct LivenessSettingsMigration; - -const LIVENESS_CHECK_DURATION: BlockNumberFor = 10; - -// Because the Liveness electoral system is added to the end, and the rest of its types are the same -// we can simply append the encoded bytes to the raw storage. 
-impl UncheckedOnRuntimeUpgrade for LivenessSettingsMigration { - fn on_runtime_upgrade() -> Weight { - for key in ElectoralSettings::::iter_keys() { - let mut raw_storage_at_key = unhashed::get_raw(&ElectoralSettings::< - Runtime, - SolanaInstance, - >::hashed_key_for(key)) - .expect("We just got the keys directly from the storage"); - raw_storage_at_key.extend(LIVENESS_CHECK_DURATION.encode()); - ElectoralSettings::::insert(key, <>::ElectoralSystem as ElectoralSystem>::ElectoralSettings::decode(&mut &raw_storage_at_key[..]).unwrap()); - } - - Weight::zero() - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), DispatchError> { - for (.., liveness_duration) in ElectoralSettings::::iter_values() { - assert_eq!(liveness_duration, LIVENESS_CHECK_DURATION); - } - Ok(()) - } -} diff --git a/state-chain/runtime/src/migrations/housekeeping.rs b/state-chain/runtime/src/migrations/housekeeping.rs index 2c86f6d68c..9802fbf125 100644 --- a/state-chain/runtime/src/migrations/housekeeping.rs +++ b/state-chain/runtime/src/migrations/housekeeping.rs @@ -1,8 +1,6 @@ use crate::Runtime; -use cf_chains::instances::{ArbitrumInstance, EthereumInstance, PolkadotInstance}; use cf_runtime_utilities::genesis_hashes; use frame_support::{traits::OnRuntimeUpgrade, weights::Weight}; -use pallet_cf_broadcast::migrations::remove_aborted_broadcasts; #[cfg(feature = "try-runtime")] use sp_runtime::DispatchError; #[cfg(feature = "try-runtime")] @@ -14,25 +12,10 @@ impl OnRuntimeUpgrade for Migration { fn on_runtime_upgrade() -> Weight { match genesis_hashes::genesis_hash::() { genesis_hashes::BERGHAIN => { - log::info!("🧹 Housekeeping, removing stale aborted broadcasts"); - remove_aborted_broadcasts::remove_stale_and_all_older::( - remove_aborted_broadcasts::ETHEREUM_MAX_ABORTED_BROADCAST_BERGHAIN, - ); - remove_aborted_broadcasts::remove_stale_and_all_older::( - remove_aborted_broadcasts::ARBITRUM_MAX_ABORTED_BROADCAST_BERGHAIN, - ); + log::info!("🧹 No housekeeping required for Berghain."); }, genesis_hashes::PERSEVERANCE => { - log::info!("🧹 Housekeeping, removing stale aborted broadcasts"); - remove_aborted_broadcasts::remove_stale_and_all_older::( - remove_aborted_broadcasts::ETHEREUM_MAX_ABORTED_BROADCAST_PERSEVERANCE, - ); - remove_aborted_broadcasts::remove_stale_and_all_older::( - remove_aborted_broadcasts::ARBITRUM_MAX_ABORTED_BROADCAST_PERSEVERANCE, - ); - remove_aborted_broadcasts::remove_stale_and_all_older::( - remove_aborted_broadcasts::POLKADOT_MAX_ABORTED_BROADCAST_PERSEVERANCE, - ); + log::info!("🧹 No housekeeping required for Perseverance."); }, genesis_hashes::SISYPHOS => { log::info!("🧹 No housekeeping required for Sisyphos."); @@ -45,37 +28,6 @@ impl OnRuntimeUpgrade for Migration { #[cfg(feature = "try-runtime")] fn post_upgrade(_state: Vec) -> Result<(), DispatchError> { - match genesis_hashes::genesis_hash::() { - genesis_hashes::BERGHAIN => { - log::info!( - "Housekeeping post_upgrade, checking stale aborted broadcasts are removed." - ); - remove_aborted_broadcasts::assert_removed::( - remove_aborted_broadcasts::ETHEREUM_MAX_ABORTED_BROADCAST_BERGHAIN, - ); - remove_aborted_broadcasts::assert_removed::( - remove_aborted_broadcasts::ARBITRUM_MAX_ABORTED_BROADCAST_BERGHAIN, - ); - }, - genesis_hashes::PERSEVERANCE => { - log::info!( - "Housekeeping post_upgrade, checking stale aborted broadcasts are removed." 
-				);
-				remove_aborted_broadcasts::assert_removed::<Runtime, EthereumInstance>(
-					remove_aborted_broadcasts::ETHEREUM_MAX_ABORTED_BROADCAST_PERSEVERANCE,
-				);
-				remove_aborted_broadcasts::assert_removed::<Runtime, ArbitrumInstance>(
-					remove_aborted_broadcasts::ARBITRUM_MAX_ABORTED_BROADCAST_PERSEVERANCE,
-				);
-				remove_aborted_broadcasts::assert_removed::<Runtime, PolkadotInstance>(
-					remove_aborted_broadcasts::POLKADOT_MAX_ABORTED_BROADCAST_PERSEVERANCE,
-				);
-			},
-			genesis_hashes::SISYPHOS => {
-				log::info!("Skipping housekeeping post_upgrade for Sisyphos.");
-			},
-			_ => {},
-		}
 		Ok(())
 	}
 }
diff --git a/state-chain/runtime/src/migrations/serialize_solana_broadcast.rs b/state-chain/runtime/src/migrations/serialize_solana_broadcast.rs
deleted file mode 100644
index e13dc3967f..0000000000
--- a/state-chain/runtime/src/migrations/serialize_solana_broadcast.rs
+++ /dev/null
@@ -1,83 +0,0 @@
-use frame_support::traits::UncheckedOnRuntimeUpgrade;
-use pallet_cf_broadcast::BroadcastData;
-
-use crate::*;
-use frame_support::pallet_prelude::Weight;
-use sp_runtime::DispatchError;
-
-use cf_chains::sol::{SolTransaction, SolanaTransactionData};
-use codec::{Decode, Encode};
-
-pub mod old {
-	use cf_chains::sol::{SolMessage, SolSignature};
-	use cf_primitives::BroadcastId;
-	use frame_support::{pallet_prelude::OptionQuery, Twox64Concat};
-
-	use super::*;
-
-	#[derive(PartialEq, Eq, Encode, Decode)]
-	pub struct SolanaBroadcastData {
-		pub broadcast_id: BroadcastId,
-		pub transaction_payload: SolTransaction,
-		pub threshold_signature_payload: SolMessage,
-		pub transaction_out_id: SolSignature,
-		pub nominee: Option<<Runtime as frame_system::Config>::AccountId>,
-	}
-
-	#[frame_support::storage_alias]
-	pub type AwaitingBroadcast =
-		StorageMap;
-}
-
-pub struct SerializeSolanaBroadcastMigration;
-
-// Tests for this migration are in:
-// state-chain/cf-integration-tests/src/migrations/serialize_solana_broadcast.rs
-impl UncheckedOnRuntimeUpgrade for SerializeSolanaBroadcastMigration {
-	#[cfg(feature = "try-runtime")]
-	fn pre_upgrade() -> Result<Vec<u8>, DispatchError> {
-		pre_upgrade_check()
-	}
-
-	fn on_runtime_upgrade() -> Weight {
-		pallet_cf_broadcast::AwaitingBroadcast::<Runtime, SolanaInstance>::translate_values::<
-			old::SolanaBroadcastData,
-			_,
-		>(|old_sol_broadcast_data| {
-			Some(BroadcastData::<Runtime, SolanaInstance> {
-				broadcast_id: old_sol_broadcast_data.broadcast_id,
-				transaction_payload: SolanaTransactionData {
-					serialized_transaction: old_sol_broadcast_data
-						.transaction_payload
-						.finalize_and_serialize()
-						.ok()?,
-				},
-				threshold_signature_payload: old_sol_broadcast_data.threshold_signature_payload,
-				transaction_out_id: old_sol_broadcast_data.transaction_out_id,
-				nominee: old_sol_broadcast_data.nominee,
-			})
-		});
-
-		Weight::zero()
-	}
-
-	#[cfg(feature = "try-runtime")]
-	fn post_upgrade(state: Vec<u8>) -> Result<(), DispatchError> {
-		post_upgrade_check(state)
-	}
-}
-
-pub fn pre_upgrade_check() -> Result<Vec<u8>, DispatchError> {
-	Ok((old::AwaitingBroadcast::iter().count() as u64).encode())
-}
-
-pub fn post_upgrade_check(state: Vec<u8>) -> Result<(), DispatchError> {
-	let pre_awaiting_broadcast_count = <u64 as Decode>::decode(&mut state.as_slice())
-		.map_err(|_| DispatchError::from("Failed to decode state"))?;
-
-	let post_awaiting_broadcast_count =
-		pallet_cf_broadcast::AwaitingBroadcast::<Runtime, SolanaInstance>::iter().count() as u64;
-
-	assert_eq!(pre_awaiting_broadcast_count, post_awaiting_broadcast_count);
-	Ok(())
-}
diff --git a/state-chain/runtime/src/migrations/solana_egress_success_witness.rs b/state-chain/runtime/src/migrations/solana_egress_success_witness.rs
deleted file mode 100644
index 8ad41686af..0000000000
--- a/state-chain/runtime/src/migrations/solana_egress_success_witness.rs
+++ /dev/null
@@ -1,150 +0,0 @@
-use crate::{
-	chainflip::solana_elections::SolanaElectoralSystem, Runtime, SolEnvironment, SolanaElections,
-	Weight,
-};
-use cf_chains::{instances::SolanaInstance, sol::api::SolanaApi, TransferAssetParams};
-use frame_support::traits::UncheckedOnRuntimeUpgrade;
-
-use pallet_cf_elections::{
-	access_impls::ElectionAccess, electoral_system::ElectionWriteAccess,
-	electoral_systems::composite::tuple_6_impls::CompositeElectionIdentifierExtra,
-};
-
-use cf_chains::sol::{SolAddress, SolPubkey, SolTransaction};
-
-#[allow(unused_imports)]
-mod try_runtime_import {
-	pub use codec::{Decode, Encode};
-	pub use sp_runtime::DispatchError;
-	pub use sp_std::vec::Vec;
-}
-#[cfg(feature = "try-runtime")]
-use try_runtime_import::*;
-
-pub struct SolanaEgressSuccessWitnessMigration;
-
-mod old {
-	use super::*;
-	use frame_support::pallet_prelude::*;
-
-	#[derive(Encode, Decode, TypeInfo)]
-
-	pub struct SolanaApi {
-		pub call_type: SolanaTransactionType,
-		pub transaction: SolTransaction,
-		pub signer: Option,
-		pub _phantom: PhantomData,
-	}
-	#[derive(Encode, Decode, TypeInfo)]
-	pub enum SolanaTransactionType {
-		BatchFetch,
-		Transfer,
-		RotateAggKey,
-		CcmTransfer,
-		SetGovKeyWithAggKey,
-	}
-
-	pub fn to_new_sol_transaction_type(
-		old: SolanaTransactionType,
-	) -> cf_chains::sol::api::SolanaTransactionType {
-		// Use an invalid address and amount of 0 as fallback.
-		// Only CCMs submitted after the runtime upgrade support Fallback.
-		match old {
-			SolanaTransactionType::BatchFetch =>
-				cf_chains::sol::api::SolanaTransactionType::BatchFetch,
-			SolanaTransactionType::Transfer => cf_chains::sol::api::SolanaTransactionType::Transfer,
-			SolanaTransactionType::RotateAggKey =>
-				cf_chains::sol::api::SolanaTransactionType::RotateAggKey,
-			SolanaTransactionType::CcmTransfer =>
-				cf_chains::sol::api::SolanaTransactionType::CcmTransfer {
-					fallback: TransferAssetParams {
-						asset: cf_chains::assets::sol::Asset::Sol,
-						to: SolPubkey([0x00; 32]).into(),
-						amount: Default::default(),
-					},
-				},
-			SolanaTransactionType::SetGovKeyWithAggKey =>
-				cf_chains::sol::api::SolanaTransactionType::SetGovKeyWithAggKey,
-		}
-	}
-}
-
-impl UncheckedOnRuntimeUpgrade for SolanaEgressSuccessWitnessMigration {
-	fn on_runtime_upgrade() -> Weight {
-		log::info!("🥮 Running Solana Success witnessing migration.");
-
-		// Clear Solana's egress-success votes.
-		let _ =
-			SolanaElections::with_electoral_access_and_identifiers(|_, election_identifiers| {
-				SolanaElectoralSystem::with_identifiers(
-					election_identifiers,
-					|election_identifiers| {
-						// Extract egress-success elections only.
-						let (_, _, _, _, egress_success_election_identifiers, ..) =
-							election_identifiers;
-						egress_success_election_identifiers.into_iter().for_each(
-							|election_identifier| {
-								ElectionAccess::::new(
-									election_identifier
-										.with_extra(CompositeElectionIdentifierExtra::<
-											(),
-											(),
-											u32,
-											(),
-											(),
-											(),
-										>::EE(())),
-								)
-								.clear_votes()
-							},
-						);
-						Ok(())
-					},
-				)
-			});
-
-		// Solana ApiCalls are stored in the broadcaster pallets. Add empty "fallback" info for
-		// existing Ccms.
-		pallet_cf_broadcast::PendingApiCalls::<Runtime, SolanaInstance>::translate_values::<
-			old::SolanaApi,
-			_,
-		>(
-			|old::SolanaApi:: {
-				call_type: old_call_type,
-				transaction,
-				signer,
-				_phantom,
-			}| {
-				Some(SolanaApi:: {
-					call_type: old::to_new_sol_transaction_type(old_call_type),
-					transaction,
-					signer,
-					_phantom,
-				})
-			},
-		);
-		log::info!("Solana elections cleared, storage in Broadcaster pallet migrated.");
-		Weight::zero()
-	}
-
-	#[cfg(feature = "try-runtime")]
-	fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
-		Ok(Encode::encode(
-			&(pallet_cf_broadcast::PendingApiCalls::<Runtime, SolanaInstance>::iter_keys().count()
-				as u32),
-		))
-	}
-
-	#[cfg(feature = "try-runtime")]
-	fn post_upgrade(state: Vec<u8>) -> Result<(), DispatchError> {
-		let pending_solana_calls = u32::decode(&mut &state[..]).unwrap_or_default();
-
-		assert_eq!(
-			pending_solana_calls,
-			pallet_cf_broadcast::PendingApiCalls::<Runtime, SolanaInstance>::iter_keys().count()
-				as u32
-		);
-
-		Ok(())
-	}
-}