From d359108d625e25bfea49104a85e3b40520eb232c Mon Sep 17 00:00:00 2001 From: ron Date: Mon, 11 Nov 2024 10:34:58 +0800 Subject: [PATCH 01/68] Merge from master --- Cargo.lock | 120 +- Cargo.toml | 12 +- .../pallets/inbound-queue-v2/Cargo.toml | 93 + .../pallets/inbound-queue-v2/README.md | 3 + .../inbound-queue-v2/fixtures/Cargo.toml | 34 + .../inbound-queue-v2/fixtures/src/lib.rs | 7 + .../fixtures/src/register_token.rs | 97 + .../fixtures/src/send_token.rs | 95 + .../fixtures/src/send_token_to_penpal.rs | 95 + .../inbound-queue-v2/src/benchmarking/mod.rs | 53 + .../pallets/inbound-queue-v2/src/envelope.rs | 50 + .../pallets/inbound-queue-v2/src/lib.rs | 378 ++++ .../pallets/inbound-queue-v2/src/mock.rs | 362 ++++ .../pallets/inbound-queue-v2/src/test.rs | 245 +++ .../pallets/inbound-queue-v2/src/weights.rs | 31 + .../pallets/inbound-queue/src/lib.rs | 2 +- .../pallets/inbound-queue/src/mock.rs | 2 +- .../pallets/outbound-queue-v2/Cargo.toml | 92 + .../pallets/outbound-queue-v2/README.md | 3 + .../outbound-queue-v2/runtime-api/Cargo.toml | 38 + .../outbound-queue-v2/runtime-api/README.md | 6 + .../outbound-queue-v2/runtime-api/src/lib.rs | 23 + .../pallets/outbound-queue-v2/src/api.rs | 68 + .../outbound-queue-v2/src/benchmarking.rs | 85 + .../pallets/outbound-queue-v2/src/envelope.rs | 47 + .../pallets/outbound-queue-v2/src/lib.rs | 445 +++++ .../pallets/outbound-queue-v2/src/mock.rs | 202 ++ .../src/process_message_impl.rs | 25 + .../src/send_message_impl.rs | 66 + .../pallets/outbound-queue-v2/src/test.rs | 273 +++ .../pallets/outbound-queue-v2/src/types.rs | 23 + .../pallets/outbound-queue-v2/src/weights.rs | 89 + .../pallets/outbound-queue/Cargo.toml | 4 +- .../outbound-queue/merkle-tree/README.md | 4 - .../outbound-queue/runtime-api/Cargo.toml | 4 +- .../outbound-queue/runtime-api/src/lib.rs | 4 +- .../pallets/outbound-queue/src/api.rs | 4 +- .../outbound-queue/src/benchmarking.rs | 2 +- .../pallets/outbound-queue/src/lib.rs | 5 +- .../pallets/outbound-queue/src/mock.rs | 2 +- .../outbound-queue/src/send_message_impl.rs | 4 +- .../pallets/outbound-queue/src/test.rs | 5 +- .../pallets/outbound-queue/src/types.rs | 4 +- bridges/snowbridge/pallets/system/src/lib.rs | 104 +- bridges/snowbridge/pallets/system/src/mock.rs | 6 +- bridges/snowbridge/primitives/core/Cargo.toml | 4 + bridges/snowbridge/primitives/core/src/lib.rs | 3 + .../primitives/core/src/outbound.rs | 475 ----- .../primitives/core/src/outbound/mod.rs | 49 + .../primitives/core/src/outbound/v1.rs | 440 ++++ .../primitives/core/src/outbound/v2.rs | 348 ++++ .../snowbridge/primitives/core/src/reward.rs | 15 + .../merkle-tree/Cargo.toml | 14 +- .../primitives/merkle-tree/README.md | 3 + .../merkle-tree/src/lib.rs | 0 .../snowbridge/primitives/router/Cargo.toml | 3 + .../primitives/router/src/inbound/mod.rs | 458 +---- .../primitives/router/src/inbound/tests.rs | 83 - .../primitives/router/src/inbound/v1.rs | 520 +++++ .../primitives/router/src/inbound/v2.rs | 520 +++++ .../primitives/router/src/outbound/mod.rs | 424 +--- .../primitives/router/src/outbound/tests.rs | 1274 ------------ .../primitives/router/src/outbound/v1.rs | 1703 ++++++++++++++++ .../primitives/router/src/outbound/v2.rs | 1777 +++++++++++++++++ .../runtime/runtime-common/src/tests.rs | 5 +- .../bridges/bridge-hub-westend/Cargo.toml | 1 + .../bridge-hub-westend/src/tests/mod.rs | 1 + .../src/tests/snowbridge.rs | 17 +- .../src/tests/snowbridge_v2.rs | 314 +++ .../asset-hub-westend/src/xcm_config.rs | 29 +- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 + 
.../src/bridge_to_ethereum_config.rs | 6 +- .../src/genesis_config_presets.rs | 1 + .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 5 +- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 12 + .../src/bridge_to_ethereum_config.rs | 74 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 22 +- .../bridge-hub-westend/src/weights/mod.rs | 2 + .../snowbridge_pallet_inbound_queue_v2.rs | 69 + .../snowbridge_pallet_outbound_queue_v2.rs | 98 + .../bridge-hub-westend/src/xcm_config.rs | 9 +- .../bridge-hubs/common/src/message_queue.rs | 14 +- umbrella/Cargo.toml | 8 +- umbrella/src/lib.rs | 4 - 84 files changed, 9322 insertions(+), 2800 deletions(-) create mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/Cargo.toml create mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/README.md create mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/fixtures/Cargo.toml create mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/lib.rs create mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/register_token.rs create mode 100755 bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token.rs create mode 100755 bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token_to_penpal.rs create mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/benchmarking/mod.rs create mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/envelope.rs create mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/lib.rs create mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs create mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/test.rs create mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/weights.rs create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/README.md create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/Cargo.toml create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/README.md create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/src/benchmarking.rs create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/src/envelope.rs create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/src/process_message_impl.rs create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/src/types.rs create mode 100644 bridges/snowbridge/pallets/outbound-queue-v2/src/weights.rs delete mode 100644 bridges/snowbridge/pallets/outbound-queue/merkle-tree/README.md delete mode 100644 bridges/snowbridge/primitives/core/src/outbound.rs create mode 100644 bridges/snowbridge/primitives/core/src/outbound/mod.rs create mode 100644 bridges/snowbridge/primitives/core/src/outbound/v1.rs create mode 100644 bridges/snowbridge/primitives/core/src/outbound/v2.rs create mode 100644 bridges/snowbridge/primitives/core/src/reward.rs rename bridges/snowbridge/{pallets/outbound-queue => primitives}/merkle-tree/Cargo.toml (76%) create mode 100644 bridges/snowbridge/primitives/merkle-tree/README.md rename 
bridges/snowbridge/{pallets/outbound-queue => primitives}/merkle-tree/src/lib.rs (100%) delete mode 100644 bridges/snowbridge/primitives/router/src/inbound/tests.rs create mode 100644 bridges/snowbridge/primitives/router/src/inbound/v1.rs create mode 100644 bridges/snowbridge/primitives/router/src/inbound/v2.rs delete mode 100644 bridges/snowbridge/primitives/router/src/outbound/tests.rs create mode 100644 bridges/snowbridge/primitives/router/src/outbound/v1.rs create mode 100644 bridges/snowbridge/primitives/router/src/outbound/v2.rs create mode 100644 cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs create mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_inbound_queue_v2.rs create mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_outbound_queue_v2.rs diff --git a/Cargo.lock b/Cargo.lock index 1e1c902df0e1..05f30ad94495 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2322,6 +2322,7 @@ dependencies = [ "serde_json", "snowbridge-beacon-primitives", "snowbridge-core", + "snowbridge-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-inbound-queue", @@ -2439,6 +2440,7 @@ dependencies = [ "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", + "snowbridge-pallet-outbound-queue-v2", "snowbridge-pallet-system", "snowbridge-router-primitives", "sp-core 28.0.0", @@ -2514,10 +2516,14 @@ dependencies = [ "serde_json", "snowbridge-beacon-primitives", "snowbridge-core", + "snowbridge-merkle-tree", "snowbridge-outbound-queue-runtime-api", + "snowbridge-outbound-queue-runtime-api-v2", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-inbound-queue", + "snowbridge-pallet-inbound-queue-v2", "snowbridge-pallet-outbound-queue", + "snowbridge-pallet-outbound-queue-v2", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", @@ -15656,7 +15662,6 @@ dependencies = [ "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", - "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", @@ -21137,6 +21142,8 @@ dependencies = [ name = "snowbridge-core" version = "0.2.0" dependencies = [ + "alloy-primitives", + "alloy-sol-types", "ethabi-decode", "frame-support", "frame-system", @@ -21179,6 +21186,21 @@ dependencies = [ "wasm-bindgen-test", ] +[[package]] +name = "snowbridge-merkle-tree" +version = "0.2.0" +dependencies = [ + "array-bytes", + "hex", + "hex-literal", + "parity-scale-codec", + "scale-info", + "sp-core 28.0.0", + "sp-crypto-hashing 0.1.0", + "sp-runtime 31.0.1", + "sp-tracing 16.0.0", +] + [[package]] name = "snowbridge-milagro-bls" version = "1.5.4" @@ -21195,30 +21217,29 @@ dependencies = [ ] [[package]] -name = "snowbridge-outbound-queue-merkle-tree" -version = "0.3.0" +name = "snowbridge-outbound-queue-runtime-api" +version = "0.2.0" dependencies = [ - "array-bytes", - "hex", - "hex-literal", + "frame-support", "parity-scale-codec", - "scale-info", - "sp-core 28.0.0", - "sp-crypto-hashing 0.1.0", - "sp-runtime 31.0.1", - "sp-tracing 16.0.0", + "snowbridge-core", + "snowbridge-merkle-tree", + "sp-api 26.0.0", + "sp-std 14.0.0", ] [[package]] -name = "snowbridge-outbound-queue-runtime-api" +name = "snowbridge-outbound-queue-runtime-api-v2" version = "0.2.0" dependencies 
= [ "frame-support", "parity-scale-codec", + "scale-info", "snowbridge-core", - "snowbridge-outbound-queue-merkle-tree", + "snowbridge-merkle-tree", "sp-api 26.0.0", "sp-std 14.0.0", + "staging-xcm", ] [[package]] @@ -21299,6 +21320,46 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "snowbridge-pallet-inbound-queue-fixtures-v2" +version = "0.10.0" +dependencies = [ + "hex-literal", + "snowbridge-beacon-primitives", + "snowbridge-core", + "sp-core 28.0.0", + "sp-std 14.0.0", +] + +[[package]] +name = "snowbridge-pallet-inbound-queue-v2" +version = "0.2.0" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex-literal", + "log", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "serde", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-pallet-ethereum-client", + "snowbridge-pallet-inbound-queue-fixtures-v2", + "snowbridge-router-primitives", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-keyring", + "sp-runtime 31.0.1", + "sp-std 14.0.0", + "staging-xcm", + "staging-xcm-executor", +] + [[package]] name = "snowbridge-pallet-outbound-queue" version = "0.2.0" @@ -21313,13 +21374,43 @@ dependencies = [ "scale-info", "serde", "snowbridge-core", - "snowbridge-outbound-queue-merkle-tree", + "snowbridge-merkle-tree", + "sp-arithmetic 23.0.0", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-keyring", + "sp-runtime 31.0.1", + "sp-std 14.0.0", +] + +[[package]] +name = "snowbridge-pallet-outbound-queue-v2" +version = "0.2.0" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "bridge-hub-common", + "ethabi-decode", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex-literal", + "pallet-message-queue", + "parity-scale-codec", + "scale-info", + "serde", + "snowbridge-core", + "snowbridge-merkle-tree", + "snowbridge-router-primitives", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-keyring", "sp-runtime 31.0.1", "sp-std 14.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", ] [[package]] @@ -21363,6 +21454,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-std 14.0.0", "staging-xcm", + "staging-xcm-builder", "staging-xcm-executor", ] diff --git a/Cargo.toml b/Cargo.toml index c00276724333..dad503a31fc9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,15 +49,19 @@ members = [ "bridges/snowbridge/pallets/ethereum-client", "bridges/snowbridge/pallets/ethereum-client/fixtures", "bridges/snowbridge/pallets/inbound-queue", + "bridges/snowbridge/pallets/inbound-queue-v2", + "bridges/snowbridge/pallets/inbound-queue-v2/fixtures", "bridges/snowbridge/pallets/inbound-queue/fixtures", "bridges/snowbridge/pallets/outbound-queue", - "bridges/snowbridge/pallets/outbound-queue/merkle-tree", + "bridges/snowbridge/pallets/outbound-queue-v2", + "bridges/snowbridge/pallets/outbound-queue-v2/runtime-api", "bridges/snowbridge/pallets/outbound-queue/runtime-api", "bridges/snowbridge/pallets/system", "bridges/snowbridge/pallets/system/runtime-api", "bridges/snowbridge/primitives/beacon", "bridges/snowbridge/primitives/core", "bridges/snowbridge/primitives/ethereum", + "bridges/snowbridge/primitives/merkle-tree", "bridges/snowbridge/primitives/router", "bridges/snowbridge/runtime/runtime-common", "bridges/snowbridge/runtime/test-common", @@ -1222,13 +1226,17 @@ smoldot-light = { version = "0.9.0", default-features = false } snowbridge-beacon-primitives = { path = "bridges/snowbridge/primitives/beacon", default-features = false } snowbridge-core = { path = 
"bridges/snowbridge/primitives/core", default-features = false } snowbridge-ethereum = { path = "bridges/snowbridge/primitives/ethereum", default-features = false } -snowbridge-outbound-queue-merkle-tree = { path = "bridges/snowbridge/pallets/outbound-queue/merkle-tree", default-features = false } +snowbridge-merkle-tree = { path = "bridges/snowbridge/primitives/merkle-tree", default-features = false } snowbridge-outbound-queue-runtime-api = { path = "bridges/snowbridge/pallets/outbound-queue/runtime-api", default-features = false } +snowbridge-outbound-queue-runtime-api-v2 = { path = "bridges/snowbridge/pallets/outbound-queue-v2/runtime-api", default-features = false } snowbridge-pallet-ethereum-client = { path = "bridges/snowbridge/pallets/ethereum-client", default-features = false } snowbridge-pallet-ethereum-client-fixtures = { path = "bridges/snowbridge/pallets/ethereum-client/fixtures", default-features = false } snowbridge-pallet-inbound-queue = { path = "bridges/snowbridge/pallets/inbound-queue", default-features = false } snowbridge-pallet-inbound-queue-fixtures = { path = "bridges/snowbridge/pallets/inbound-queue/fixtures", default-features = false } +snowbridge-pallet-inbound-queue-fixtures-v2 = { path = "bridges/snowbridge/pallets/inbound-queue-v2/fixtures", default-features = false } +snowbridge-pallet-inbound-queue-v2 = { path = "bridges/snowbridge/pallets/inbound-queue-v2", default-features = false } snowbridge-pallet-outbound-queue = { path = "bridges/snowbridge/pallets/outbound-queue", default-features = false } +snowbridge-pallet-outbound-queue-v2 = { path = "bridges/snowbridge/pallets/outbound-queue-v2", default-features = false } snowbridge-pallet-system = { path = "bridges/snowbridge/pallets/system", default-features = false } snowbridge-router-primitives = { path = "bridges/snowbridge/primitives/router", default-features = false } snowbridge-runtime-common = { path = "bridges/snowbridge/runtime/runtime-common", default-features = false } diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue-v2/Cargo.toml new file mode 100644 index 000000000000..d212b18d2d54 --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/Cargo.toml @@ -0,0 +1,93 @@ +[package] +name = "snowbridge-pallet-inbound-queue-v2" +description = "Snowbridge Inbound Queue Pallet V2" +version = "0.2.0" +authors = ["Snowfork "] +edition.workspace = true +repository.workspace = true +license = "Apache-2.0" +categories = ["cryptography::cryptocurrencies"] + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { optional = true, workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } +log = { workspace = true } +alloy-primitives = { features = ["rlp"], workspace = true } +alloy-sol-types = { workspace = true } + +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } + +xcm = { workspace = true } +xcm-executor = { workspace = true } + +snowbridge-core = { workspace = true } +snowbridge-router-primitives = { workspace = true } +snowbridge-beacon-primitives = { 
workspace = true } +snowbridge-pallet-inbound-queue-fixtures-v2 = { optional = true, workspace = true } + +[dev-dependencies] +frame-benchmarking = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +snowbridge-pallet-ethereum-client = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } + +[features] +default = ["std"] +std = [ + "alloy-primitives/std", + "alloy-sol-types/std", + "codec/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-balances/std", + "scale-info/std", + "serde", + "snowbridge-beacon-primitives/std", + "snowbridge-core/std", + "snowbridge-pallet-inbound-queue-fixtures-v2?/std", + "snowbridge-router-primitives/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "xcm-executor/std", + "xcm/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "hex-literal", + "pallet-balances/runtime-benchmarks", + "snowbridge-core/runtime-benchmarks", + "snowbridge-pallet-ethereum-client/runtime-benchmarks", + "snowbridge-pallet-inbound-queue-fixtures-v2/runtime-benchmarks", + "snowbridge-router-primitives/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "snowbridge-pallet-ethereum-client/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/README.md b/bridges/snowbridge/pallets/inbound-queue-v2/README.md new file mode 100644 index 000000000000..cc2f7c636e68 --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/README.md @@ -0,0 +1,3 @@ +# Ethereum Inbound Queue + +Reads messages from Ethereum and sends them to their intended destination on Polkadot, using XCM.
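[Editor's note, not part of the patch] The inbound-queue-v2 pallet and fixtures added in this patch can be exercised end to end from a unit test. The snippet below is a minimal illustrative sketch only: it assumes the mock runtime helpers (`Test`, `InboundQueue`, `new_tester`, `AccountId`) and the `make_register_token_message` fixture introduced later in this diff, and that the test setup funds the destination parachain's sovereign account the way the pallet's benchmark does.

use frame_support::assert_ok;
use snowbridge_pallet_inbound_queue_fixtures_v2::register_token::make_register_token_message;
use sp_keyring::AccountKeyring;

#[test]
fn submit_register_token_fixture() {
    // `new_tester()` builds the mock externalities defined in this patch's mock.rs.
    crate::mock::new_tester().execute_with(|| {
        // Any signed origin can relay; the mock verifier accepts all proofs.
        let relayer: crate::mock::AccountId = AccountKeyring::Bob.into();
        let fixture = make_register_token_message();

        // `submit` verifies the event proof, decodes the envelope, converts the
        // payload to XCM, burns the teleport fee and forwards the message to the
        // destination parachain (sovereign-account funding assumed).
        assert_ok!(crate::mock::InboundQueue::submit(
            crate::mock::RuntimeOrigin::signed(relayer),
            fixture.message,
        ));
    });
}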
diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/Cargo.toml new file mode 100644 index 000000000000..ea30fdddb553 --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "snowbridge-pallet-inbound-queue-fixtures-v2" +description = "Snowbridge Inbound Queue Test Fixtures V2" +version = "0.10.0" +authors = ["Snowfork "] +edition.workspace = true +repository.workspace = true +license = "Apache-2.0" +categories = ["cryptography::cryptocurrencies"] + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +hex-literal = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +snowbridge-core = { workspace = true } +snowbridge-beacon-primitives = { workspace = true } + +[features] +default = ["std"] +std = [ + "snowbridge-beacon-primitives/std", + "snowbridge-core/std", + "sp-core/std", + "sp-std/std", +] +runtime-benchmarks = [ + "snowbridge-core/runtime-benchmarks", +] diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/lib.rs new file mode 100644 index 000000000000..00adcdfa186a --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/lib.rs @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod register_token; +pub mod send_token; +pub mod send_token_to_penpal; diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/register_token.rs b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/register_token.rs new file mode 100644 index 000000000000..340b2fadfacf --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/register_token.rs @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +// Generated, do not edit! 
+// See ethereum client README.md for instructions to generate + +use hex_literal::hex; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; +use sp_std::vec; + +pub fn make_register_token_message() -> InboundQueueFixture { + InboundQueueFixture { + message: Message { + event_log: Log { + address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), + }, + proof: Proof { + receipt_proof: (vec![ + hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").to_vec(), + hex!("4a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f").to_vec(), + ], vec![ + hex!("f851a09c01dd6d2d8de951c45af23d3ad00829ce021c04d6c8acbe1612d456ee320d4980808080808080a04a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f8080808080808080").to_vec(), + hex!("f9028c30b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), + ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 393, + proposer_index: 4, + parent_root: hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + state_root: hex!("b62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434").into(), + body_root: hex!("04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + hex!("fa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3").into(), + 
hex!("cadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d").into(), + hex!("33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c").into(), + hex!("2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf").into(), + hex!("e1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1").into(), + hex!("aa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97").into(), + hex!("160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f").into(), + hex!("f68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535").into(), + hex!("1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: hex!("751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b").into(), + receipts_root: hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010").into(), + prev_randao: hex!("62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67").into(), + block_number: 393, + gas_limit: 54492273, + gas_used: 199644, + timestamp: 1710552813, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131").into(), + transactions_root: hex!("2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("a6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("d3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da").into(), + ], + } + }, + }, + finalized_header: BeaconHeader { + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), + }, + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), + } +} diff --git 
a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token.rs b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token.rs new file mode 100755 index 000000000000..4075febab59d --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token.rs @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +// Generated, do not edit! +// See ethereum client README.md for instructions to generate + +use hex_literal::hex; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; +use sp_std::vec; + +pub fn make_send_token_message() -> InboundQueueFixture { + InboundQueueFixture { + message: Message { + event_log: Log { + address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + hex!("c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26f").into(), + ], + data: hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").into(), + }, + proof: Proof { + receipt_proof: (vec![ + hex!("f9d844c5b79638609ba385b910fec3b5d891c9d7b189f135f0432f33473de915").to_vec(), + ], vec![ + hex!("f90451822080b9044b02f90447018301bcb6b9010000800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000f9033cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000003e8b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000208eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48f9013c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0c8eaf22f2cb07ba
c4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26fb8c000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").to_vec(), + ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 2321, + proposer_index: 5, + parent_root: hex!("2add14727840d3a5ea061e14baa47030bb81380a65999200d119e73b86411d20").into(), + state_root: hex!("d962981467920bb2b7efa4a7a1baf64745582c3250857f49a957c5dae9a0da39").into(), + body_root: hex!("18e3f7f51a350f371ad35d166f2683b42af51d1836b295e4093be08acb0dcb7a").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("2add14727840d3a5ea061e14baa47030bb81380a65999200d119e73b86411d20").into(), + hex!("48b2e2f5256906a564e5058698f70e3406765fefd6a2edc064bb5fb88aa2ed0a").into(), + hex!("e5ed7c704e845418219b2fda42cd2f3438ffbe4c4b320935ae49439c6189f7a7").into(), + hex!("4a7ce24526b3f571548ad69679e4e260653a1b3b911a344e7f988f25a5c917a7").into(), + hex!("46fc859727ab0d0e8c344011f7d7a4426ccb537bb51363397e56cc7153f56391").into(), + hex!("f496b6f85a7c6c28a9048f2153550a7c5bcb4b23844ed3b87f6baa646124d8a3").into(), + hex!("7318644e474beb46e595a1875acc7444b937f5208065241911d2a71ac50c2de3").into(), + hex!("5cf48519e518ac64286aef5391319782dd38831d5dcc960578a6b9746d5f8cee").into(), + hex!("efb3e50fa39ca9fe7f76adbfa36fa8451ec2fd5d07b22aaf822137c04cf95a76").into(), + hex!("2206cd50750355ffaef4a67634c21168f2b564c58ffd04f33b0dc7af7dab3291").into(), + hex!("1a4014f6c4fcce9949fba74cb0f9e88df086706f9e05560cc9f0926f8c90e373").into(), + hex!("2df7cc0bcf3060be4132c63da7599c2600d9bbadf37ab001f15629bc2255698e").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: hex!("f869dd1c9598043008a3ac2a5d91b3d6c7b0bb3295b3843bc84c083d70b0e604").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("5d7859883dde1eba6c98b20eac18426134b25da2a89e5e360f3343b15e0e0a31").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("f8fbebed4c84d46231bd293bb9fbc9340d5c28c284d99fdaddb77238b8960ae2").into(), + receipts_root: hex!("f9d844c5b79638609ba385b910fec3b5d891c9d7b189f135f0432f33473de915").into(), + logs_bloom: hex!("00800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000").into(), + prev_randao: hex!("15533eeb366c6386bea5aeb8f425871928348c092209e4377f2418a6dedd7fd0").into(), + block_number: 2321, + gas_limit: 30000000, + gas_used: 113846, + timestamp: 1710554741, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("585a07122a30339b03b6481eae67c2d3de2b6b64f9f426230986519bf0f1bdfe").into(), + transactions_root: 
hex!("09cd60ee2207d804397c81f7b7e1e5d3307712b136e5376623a80317a4bdcd7a").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("9d419471a9a4719b40e7607781fbe32d9a7766b79805505c78c0c58133496ba2").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("bee375b8f1bbe4cd0e783c78026c1829ae72741c2dead5cab05d6834c5e5df65").into(), + ], + } + }, + }, + finalized_header: BeaconHeader { + slot: 4032, + proposer_index: 5, + parent_root: hex!("180aaaec59d38c3860e8af203f01f41c9bc41665f4d17916567c80f6cd23e8a2").into(), + state_root: hex!("3341790429ed3bf894cafa3004351d0b99e08baf6c38eb2a54d58e69fd2d19c6").into(), + body_root: hex!("a221e0c695ac7b7d04ce39b28b954d8a682ecd57961d81b44783527c6295f455").into(), + }, + block_roots_root: hex!("5744385ef06f82e67606f49aa29cd162f2e837a68fb7bd82f1fc6155d9f8640f").into(), + } +} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token_to_penpal.rs b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token_to_penpal.rs new file mode 100755 index 000000000000..6a951b568ae5 --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token_to_penpal.rs @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +// Generated, do not edit! +// See ethereum client README.md for instructions to generate + +use hex_literal::hex; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; +use sp_std::vec; + +pub fn make_send_token_to_penpal_message() -> InboundQueueFixture { + InboundQueueFixture { + message: Message { + event_log: Log { + address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + hex!("be323bced46a1a49c8da2ab62ad5e974fd50f1dabaeed70b23ca5bcf14bfe4aa").into(), + ], + data: hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").into(), + }, + proof: Proof { + receipt_proof: (vec![ + hex!("106f1eaeac04e469da0020ad5c8a72af66323638bd3f561a3c8236063202c120").to_vec(), + ], vec![ + 
hex!("f90471822080b9046b02f904670183017d9cb9010000800000000000008000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000001008000000000000000000000001000008000040000000000000000000000000008000080000000000200000000000000000000000000100000000000000000010000000000000020000000000000000000000000000003000000000080018000000000000000000040004000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000f9035cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000007d0b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000201cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07cf9015c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0be323bced46a1a49c8da2ab62ad5e974fd50f1dabaeed70b23ca5bcf14bfe4aab8e000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").to_vec(), + ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 4235, + proposer_index: 4, + parent_root: hex!("1b31e6264c19bcad120e434e0aede892e7d7c8ed80ab505cb593d9a4a16bc566").into(), + state_root: hex!("725f51771a0ecf72c647a283ab814ca088f998eb8c203181496b0b8e01f624fa").into(), + body_root: hex!("6f1c326d192e7e97e21e27b16fd7f000b8fa09b435ff028849927e382302b0ce").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("1b31e6264c19bcad120e434e0aede892e7d7c8ed80ab505cb593d9a4a16bc566").into(), + hex!("335eb186c077fa7053ec96dcc5d34502c997713d2d5bc4eb74842118d8cd5a64").into(), + hex!("326607faf2a7dfc9cfc4b6895f8f3d92a659552deb2c8fd1e892ec00c86c734c").into(), + hex!("4e20002125d7b6504df7c774f3f48e018e1e6762d03489149670a8335bba1425").into(), + hex!("e76af5cd61aade5aec8282b6f1df9046efa756b0466bba5e49032410f7739a1b").into(), + hex!("ee4dcd9527712116380cddafd120484a3bedf867225bbb86850b84decf6da730").into(), + hex!("e4687a07421d3150439a2cd2f09f3b468145d75b359a2e5fa88dfbec51725b15").into(), + hex!("38eaa78978e95759aa9b6f8504a8dbe36151f20ae41907e6a1ea165700ceefcd").into(), + hex!("1c1b071ec6f13e15c47d07d1bfbcc9135d6a6c819e68e7e6078a2007418c1a23").into(), + 
hex!("0b3ad7ad193c691c8c4ba1606ad2a90482cd1d033c7db58cfe739d0e20431e9e").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b2ffec5f2c14640305dd941330f09216c53b99d198e93735a400a6d3a4de191f").into(), + ], + finalized_block_root: hex!("08be7a59e947f08cd95c4ef470758730bf9e3b0db0824cb663ea541c39b0e65c").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("5d1186ae041f58785edb2f01248e95832f2e5e5d6c4eb8f7ff2f58980bfc2de9").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("2a66114d20e93082c8e9b47c8d401a937013487d757c9c2f3123cf43dc1f656d").into(), + receipts_root: hex!("106f1eaeac04e469da0020ad5c8a72af66323638bd3f561a3c8236063202c120").into(), + logs_bloom: hex!("00800000000000008000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000001008000000000000000000000001000008000040000000000000000000000000008000080000000000200000000000000000000000000100000000000000000010000000000000020000000000000000000000000000003000000000080018000000000000000000040004000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000").into(), + prev_randao: hex!("92e063c7e369b74149fdd1d7132ed2f635a19b9d8bff57637b8ee4736576426e").into(), + block_number: 4235, + gas_limit: 30000000, + gas_used: 97692, + timestamp: 1710556655, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("ce24fe3047aa20a8f222cd1d04567c12b39455400d681141962c2130e690953f").into(), + transactions_root: hex!("0c8388731de94771777c60d452077065354d90d6e5088db61fc6a134684195cc").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("99d397fa180078e66cd3a3b77bcb07553052f4e21d447167f3a406f663b14e6a").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("53ddf17147819c1abb918178b0230d965d1bc2c0d389f45e91e54cb1d2d468aa").into(), + ], + } + }, + }, + finalized_header: BeaconHeader { + slot: 4672, + proposer_index: 4, + parent_root: hex!("951233bf9f4bddfb2fa8f54e3bd0c7883779ef850e13e076baae3130dd7732db").into(), + state_root: hex!("4d303003b8cb097cbcc14b0f551ee70dac42de2c1cc2f4acfca7058ca9713291").into(), + body_root: hex!("664d13952b6f369bf4cf3af74d067ec33616eb57ed3a8a403fd5bae4fbf737dd").into(), + }, + block_roots_root: hex!("af71048297c070e6539cf3b9b90ae07d86d363454606bc239734629e6b49b983").into(), + } +} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/benchmarking/mod.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/benchmarking/mod.rs new file mode 100644 index 000000000000..52461a8a7fbe --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/src/benchmarking/mod.rs @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +use crate::Pallet as InboundQueue; +use frame_benchmarking::v2::*; +use frame_support::assert_ok; +use frame_system::RawOrigin; +use 
snowbridge_pallet_inbound_queue_fixtures_v2::register_token::make_register_token_message; + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn submit() -> Result<(), BenchmarkError> { + let caller: T::AccountId = whitelisted_caller(); + + let create_message = make_register_token_message(); + + T::Helper::initialize_storage( + create_message.finalized_header, + create_message.block_roots_root, + ); + + let sovereign_account = sibling_sovereign_account::(1000u32.into()); + + let minimum_balance = T::Token::minimum_balance(); + + // So that the receiving account exists + assert_ok!(T::Token::mint_into(&caller, minimum_balance)); + // Fund the sovereign account (parachain sovereign account) so it can transfer a reward + // fee to the caller account + assert_ok!(T::Token::mint_into( + &sovereign_account, + 3_000_000_000_000u128 + .try_into() + .unwrap_or_else(|_| panic!("unable to cast sovereign account balance")), + )); + + #[block] + { + assert_ok!(InboundQueue::::submit( + RawOrigin::Signed(caller.clone()).into(), + create_message.message, + )); + } + + Ok(()) + } + + impl_benchmark_test_suite!(InboundQueue, crate::mock::new_tester(), crate::mock::Test); +} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/envelope.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/envelope.rs new file mode 100644 index 000000000000..31a8992442d8 --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/src/envelope.rs @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use snowbridge_core::{inbound::Log, ChannelId}; + +use sp_core::{RuntimeDebug, H160, H256}; +use sp_std::prelude::*; + +use alloy_primitives::B256; +use alloy_sol_types::{sol, SolEvent}; + +sol! { + event OutboundMessageAccepted(bytes32 indexed channel_id, uint64 nonce, bytes32 indexed message_id, bytes payload); +} + +/// An inbound message that has had its outer envelope decoded. +#[derive(Clone, RuntimeDebug)] +pub struct Envelope { + /// The address of the outbound queue on Ethereum that emitted this message as an event log + pub gateway: H160, + /// The message Channel + pub channel_id: ChannelId, + /// A nonce for enforcing replay protection and ordering. + pub nonce: u64, + /// An id for tracing the message on its route (has no role in bridge consensus) + pub message_id: H256, + /// The inner payload generated from the source application. + pub payload: Vec, +} + +#[derive(Copy, Clone, RuntimeDebug)] +pub struct EnvelopeDecodeError; + +impl TryFrom<&Log> for Envelope { + type Error = EnvelopeDecodeError; + + fn try_from(log: &Log) -> Result { + let topics: Vec = log.topics.iter().map(|x| B256::from_slice(x.as_ref())).collect(); + + let event = OutboundMessageAccepted::decode_log(topics, &log.data, true) + .map_err(|_| EnvelopeDecodeError)?; + + Ok(Self { + gateway: log.address, + channel_id: ChannelId::from(event.channel_id.as_ref()), + nonce: event.nonce, + message_id: H256::from(event.message_id.as_ref()), + payload: event.payload, + }) + } +} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/lib.rs new file mode 100644 index 000000000000..c26859dcf5d7 --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/src/lib.rs @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Inbound Queue +//! +//! # Overview +//! +//! Receives messages emitted by the Gateway contract on Ethereum, whereupon they are verified, +//! 
translated to XCM, and finally sent to their final destination parachain. +//! +//! The message relayers are rewarded using native currency from the sovereign account of the +//! destination parachain. +//! +//! # Extrinsics +//! +//! ## Governance +//! +//! * [`Call::set_operating_mode`]: Set the operating mode of the pallet. Can be used to disable +//! processing of inbound messages. +//! +//! ## Message Submission +//! +//! * [`Call::submit`]: Submit a message for verification and dispatch the final destination +//! parachain. +#![cfg_attr(not(feature = "std"), no_std)] + +mod envelope; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +pub mod weights; + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod test; + +use codec::{Decode, DecodeAll, Encode}; +use envelope::Envelope; +use frame_support::{ + traits::{ + fungible::{Inspect, Mutate}, + tokens::{Fortitude, Preservation}, + }, + weights::WeightToFee, + PalletError, +}; +use frame_system::ensure_signed; +use scale_info::TypeInfo; +use sp_core::H160; +use sp_runtime::traits::Zero; +use sp_std::vec; +use xcm::prelude::{ + send_xcm, Junction::*, Location, SendError as XcmpSendError, SendXcm, Xcm, XcmContext, XcmHash, +}; +use xcm_executor::traits::TransactAsset; + +use snowbridge_core::{ + inbound::{Message, VerificationError, Verifier}, + sibling_sovereign_account, BasicOperatingMode, Channel, ChannelId, ParaId, PricingParameters, + StaticLookup, +}; +use snowbridge_router_primitives::inbound::v2::{ + ConvertMessage, ConvertMessageError, VersionedMessage, +}; +use sp_runtime::{traits::Saturating, SaturatedConversion, TokenError}; + +pub use weights::WeightInfo; + +#[cfg(feature = "runtime-benchmarks")] +use snowbridge_beacon_primitives::BeaconHeader; + +type BalanceOf = + <::Token as Inspect<::AccountId>>::Balance; + +pub use pallet::*; + +pub const LOG_TARGET: &str = "snowbridge-inbound-queue"; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use sp_core::H256; + + #[pallet::pallet] + pub struct Pallet(_); + + #[cfg(feature = "runtime-benchmarks")] + pub trait BenchmarkHelper { + fn initialize_storage(beacon_header: BeaconHeader, block_roots_root: H256); + } + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The verifier for inbound messages from Ethereum + type Verifier: Verifier; + + /// Message relayers are rewarded with this asset + type Token: Mutate + Inspect; + + /// XCM message sender + type XcmSender: SendXcm; + + // Address of the Gateway contract + #[pallet::constant] + type GatewayAddress: Get; + + /// Convert inbound message to XCM + type MessageConverter: ConvertMessage< + AccountId = Self::AccountId, + Balance = BalanceOf, + >; + + /// Lookup a channel descriptor + type ChannelLookup: StaticLookup; + + /// Lookup pricing parameters + type PricingParameters: Get>>; + + type WeightInfo: WeightInfo; + + #[cfg(feature = "runtime-benchmarks")] + type Helper: BenchmarkHelper; + + /// Convert a weight value into deductible balance type. + type WeightToFee: WeightToFee>; + + /// Convert a length value into deductible balance type + type LengthToFee: WeightToFee>; + + /// The upper limit here only used to estimate delivery cost + type MaxMessageSize: Get; + + /// To withdraw and deposit an asset. 
+ type AssetTransactor: TransactAsset; + } + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A message was received from Ethereum + MessageReceived { + /// The message channel + channel_id: ChannelId, + /// The message nonce + nonce: u64, + /// ID of the XCM message which was forwarded to the final destination parachain + message_id: [u8; 32], + /// Fee burned for the teleport + fee_burned: BalanceOf, + }, + /// Set OperatingMode + OperatingModeChanged { mode: BasicOperatingMode }, + } + + #[pallet::error] + pub enum Error { + /// Message came from an invalid outbound channel on the Ethereum side. + InvalidGateway, + /// Message has an invalid envelope. + InvalidEnvelope, + /// Message has an unexpected nonce. + InvalidNonce, + /// Message has an invalid payload. + InvalidPayload, + /// Message channel is invalid + InvalidChannel, + /// The max nonce for the type has been reached + MaxNonceReached, + /// Cannot convert location + InvalidAccountConversion, + /// Pallet is halted + Halted, + /// Message verification error, + Verification(VerificationError), + /// XCMP send failure + Send(SendError), + /// Message conversion error + ConvertMessage(ConvertMessageError), + } + + #[derive(Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo, PalletError)] + pub enum SendError { + NotApplicable, + NotRoutable, + Transport, + DestinationUnsupported, + ExceedsMaxMessageSize, + MissingArgument, + Fees, + } + + impl From for Error { + fn from(e: XcmpSendError) -> Self { + match e { + XcmpSendError::NotApplicable => Error::::Send(SendError::NotApplicable), + XcmpSendError::Unroutable => Error::::Send(SendError::NotRoutable), + XcmpSendError::Transport(_) => Error::::Send(SendError::Transport), + XcmpSendError::DestinationUnsupported => + Error::::Send(SendError::DestinationUnsupported), + XcmpSendError::ExceedsMaxMessageSize => + Error::::Send(SendError::ExceedsMaxMessageSize), + XcmpSendError::MissingArgument => Error::::Send(SendError::MissingArgument), + XcmpSendError::Fees => Error::::Send(SendError::Fees), + } + } + } + + /// The current nonce for each channel + #[pallet::storage] + pub type Nonce = StorageMap<_, Twox64Concat, ChannelId, u64, ValueQuery>; + + /// The current operating mode of the pallet. 
+ #[pallet::storage] + #[pallet::getter(fn operating_mode)] + pub type OperatingMode = StorageValue<_, BasicOperatingMode, ValueQuery>; + + #[pallet::call] + impl Pallet { + /// Submit an inbound message originating from the Gateway contract on Ethereum + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::submit())] + pub fn submit(origin: OriginFor, message: Message) -> DispatchResult { + let who = ensure_signed(origin)?; + ensure!(!Self::operating_mode().is_halted(), Error::::Halted); + + // submit message to verifier for verification + T::Verifier::verify(&message.event_log, &message.proof) + .map_err(|e| Error::::Verification(e))?; + + // Decode event log into an Envelope + let envelope = + Envelope::try_from(&message.event_log).map_err(|_| Error::::InvalidEnvelope)?; + + // Verify that the message was submitted from the known Gateway contract + ensure!(T::GatewayAddress::get() == envelope.gateway, Error::::InvalidGateway); + + // Retrieve the registered channel for this message + let channel = + T::ChannelLookup::lookup(envelope.channel_id).ok_or(Error::::InvalidChannel)?; + + // Verify message nonce + >::try_mutate(envelope.channel_id, |nonce| -> DispatchResult { + if *nonce == u64::MAX { + return Err(Error::::MaxNonceReached.into()) + } + if envelope.nonce != nonce.saturating_add(1) { + Err(Error::::InvalidNonce.into()) + } else { + *nonce = nonce.saturating_add(1); + Ok(()) + } + })?; + + // Reward relayer from the sovereign account of the destination parachain, only if funds + // are available + let sovereign_account = sibling_sovereign_account::(channel.para_id); + let delivery_cost = Self::calculate_delivery_cost(message.encode().len() as u32); + let amount = T::Token::reducible_balance( + &sovereign_account, + Preservation::Preserve, + Fortitude::Polite, + ) + .min(delivery_cost); + if !amount.is_zero() { + T::Token::transfer(&sovereign_account, &who, amount, Preservation::Preserve)?; + } + + // Decode payload into `VersionedMessage` + let message = VersionedMessage::decode_all(&mut envelope.payload.as_ref()) + .map_err(|_| Error::::InvalidPayload)?; + + // Decode message into XCM + let (xcm, fee) = Self::do_convert(envelope.message_id, message.clone())?; + + log::info!( + target: LOG_TARGET, + "💫 xcm decoded as {:?} with fee {:?}", + xcm, + fee + ); + + // Burning fees for teleport + Self::burn_fees(channel.para_id, fee)?; + + // Attempt to send XCM to a dest parachain + let message_id = Self::send_xcm(xcm, channel.para_id)?; + + Self::deposit_event(Event::MessageReceived { + channel_id: envelope.channel_id, + nonce: envelope.nonce, + message_id, + fee_burned: fee, + }); + + Ok(()) + } + + /// Halt or resume all pallet operations. May only be called by root. 
+ #[pallet::call_index(1)] + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_operating_mode( + origin: OriginFor, + mode: BasicOperatingMode, + ) -> DispatchResult { + ensure_root(origin)?; + OperatingMode::::set(mode); + Self::deposit_event(Event::OperatingModeChanged { mode }); + Ok(()) + } + } + + impl Pallet { + pub fn do_convert( + message_id: H256, + message: VersionedMessage, + ) -> Result<(Xcm<()>, BalanceOf), Error> { + let (xcm, fee) = T::MessageConverter::convert(message_id, message) + .map_err(|e| Error::::ConvertMessage(e))?; + Ok((xcm, fee)) + } + + pub fn send_xcm(xcm: Xcm<()>, dest: ParaId) -> Result> { + let dest = Location::new(1, [Parachain(dest.into())]); + let (xcm_hash, _) = send_xcm::(dest, xcm).map_err(Error::::from)?; + Ok(xcm_hash) + } + + pub fn calculate_delivery_cost(length: u32) -> BalanceOf { + let weight_fee = T::WeightToFee::weight_to_fee(&T::WeightInfo::submit()); + let len_fee = T::LengthToFee::weight_to_fee(&Weight::from_parts(length as u64, 0)); + weight_fee + .saturating_add(len_fee) + .saturating_add(T::PricingParameters::get().rewards.local) + } + + /// Burn the amount of the fee embedded into the XCM for teleports + pub fn burn_fees(para_id: ParaId, fee: BalanceOf) -> DispatchResult { + let dummy_context = + XcmContext { origin: None, message_id: Default::default(), topic: None }; + let dest = Location::new(1, [Parachain(para_id.into())]); + let fees = (Location::parent(), fee.saturated_into::()).into(); + T::AssetTransactor::can_check_out(&dest, &fees, &dummy_context).map_err(|error| { + log::error!( + target: LOG_TARGET, + "XCM asset check out failed with error {:?}", error + ); + TokenError::FundsUnavailable + })?; + T::AssetTransactor::check_out(&dest, &fees, &dummy_context); + T::AssetTransactor::withdraw_asset(&fees, &dest, None).map_err(|error| { + log::error!( + target: LOG_TARGET, + "XCM asset withdraw failed with error {:?}", error + ); + TokenError::FundsUnavailable + })?; + Ok(()) + } + } + + /// API for accessing the delivery cost of a message + impl Get> for Pallet { + fn get() -> BalanceOf { + // Cost here based on MaxMessagePayloadSize(the worst case) + Self::calculate_delivery_cost(T::MaxMessageSize::get()) + } + } +} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs new file mode 100644 index 000000000000..07e0a5564e09 --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +use frame_support::{derive_impl, parameter_types, traits::ConstU32, weights::IdentityFee}; +use hex_literal::hex; +use snowbridge_beacon_primitives::{ + types::deneb, BeaconHeader, ExecutionProof, Fork, ForkVersions, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::{ + gwei, + inbound::{Log, Proof, VerificationError}, + meth, Channel, ChannelId, PricingParameters, Rewards, StaticLookup, TokenId, +}; +use snowbridge_router_primitives::inbound::v2::MessageToXcm; +use sp_core::{H160, H256}; +use sp_runtime::{ + traits::{IdentifyAccount, IdentityLookup, MaybeEquivalence, Verify}, + BuildStorage, FixedU128, MultiSignature, +}; +use sp_std::{convert::From, default::Default}; +use xcm::prelude::*; +use xcm_executor::AssetsInHolding; + +use crate::{self as inbound_queue}; + +use xcm::latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; + +type Block = frame_system::mocking::MockBlock; + 
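For illustration, the `calculate_delivery_cost` helper shown above combines three components: a weight fee for the `submit` extrinsic, a per-byte length fee, and the configured local relayer reward. Below is a minimal standalone sketch of that shape; the constants are assumptions standing in for the runtime's `WeightToFee`, `LengthToFee` and `PricingParameters::rewards.local`, not values taken from this patch.

// Illustrative only: mirrors the shape of `calculate_delivery_cost` above.
// All constants below are assumed placeholder values.
fn example_delivery_cost(message_len: u32) -> u128 {
    let weight_fee: u128 = 70_000_000;        // assumed fee for WeightInfo::submit()
    let fee_per_byte: u128 = 1_000;           // assumed length fee per encoded byte
    let local_reward: u128 = 10_000_000_000;  // assumed rewards.local (the DOT constant in the mock)
    weight_fee
        .saturating_add(fee_per_byte.saturating_mul(message_len as u128))
        .saturating_add(local_reward)
}

Under these assumed numbers a 200-byte message would cost 70_000_000 + 200_000 + 10_000_000_000. As in the `submit` logic above, this is the amount refunded to the relayer from the destination parachain's sovereign account, capped by that account's reducible balance.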
+frame_support::construct_runtime!( + pub enum Test + { + System: frame_system::{Pallet, Call, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + EthereumBeaconClient: snowbridge_pallet_ethereum_client::{Pallet, Call, Storage, Event}, + InboundQueue: inbound_queue::{Pallet, Call, Storage, Event}, + } +); + +pub type Signature = MultiSignature; +pub type AccountId = <::Signer as IdentifyAccount>::AccountId; + +type Balance = u128; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type AccountId = AccountId; + type Lookup = IdentityLookup; + type AccountData = pallet_balances::AccountData; + type Block = Block; +} + +parameter_types! { + pub const ExistentialDeposit: u128 = 1; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type Balance = Balance; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; +} + +parameter_types! { + pub const ChainForkVersions: ForkVersions = ForkVersions{ + genesis: Fork { + version: [0, 0, 0, 1], // 0x00000001 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 0, 1], // 0x01000001 + epoch: 0, + }, + bellatrix: Fork { + version: [2, 0, 0, 1], // 0x02000001 + epoch: 0, + }, + capella: Fork { + version: [3, 0, 0, 1], // 0x03000001 + epoch: 0, + }, + deneb: Fork { + version: [4, 0, 0, 1], // 0x04000001 + epoch: 4294967295, + } + }; +} + +impl snowbridge_pallet_ethereum_client::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ForkVersions = ChainForkVersions; + type FreeHeadersInterval = ConstU32<32>; + type WeightInfo = (); +} + +// Mock verifier +pub struct MockVerifier; + +impl Verifier for MockVerifier { + fn verify(_: &Log, _: &Proof) -> Result<(), VerificationError> { + Ok(()) + } +} + +const GATEWAY_ADDRESS: [u8; 20] = hex!["eda338e4dc46038493b885327842fd3e301cab39"]; + +parameter_types! { + pub const EthereumNetwork: xcm::v3::NetworkId = xcm::v3::NetworkId::Ethereum { chain_id: 11155111 }; + pub const GatewayAddress: H160 = H160(GATEWAY_ADDRESS); + pub const CreateAssetCall: [u8;2] = [53, 0]; + pub const CreateAssetExecutionFee: u128 = 2_000_000_000; + pub const CreateAssetDeposit: u128 = 100_000_000_000; + pub const SendTokenExecutionFee: u128 = 1_000_000_000; + pub const InitialFund: u128 = 1_000_000_000_000; + pub const InboundQueuePalletInstance: u8 = 80; + pub UniversalLocation: InteriorLocation = + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1002)].into(); + pub AssetHubFromEthereum: Location = Location::new(1,[GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)),Parachain(1000)]); +} + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelper for Test { + // not implemented since the MockVerifier is used for tests + fn initialize_storage(_: BeaconHeader, _: H256) {} +} + +// Mock XCM sender that always succeeds +pub struct MockXcmSender; + +impl SendXcm for MockXcmSender { + type Ticket = Xcm<()>; + + fn validate( + dest: &mut Option, + xcm: &mut Option>, + ) -> SendResult { + if let Some(location) = dest { + match location.unpack() { + (_, [Parachain(1001)]) => return Err(XcmpSendError::NotApplicable), + _ => Ok((xcm.clone().unwrap(), Assets::default())), + } + } else { + Ok((xcm.clone().unwrap(), Assets::default())) + } + } + + fn deliver(xcm: Self::Ticket) -> core::result::Result { + let hash = xcm.using_encoded(sp_io::hashing::blake2_256); + Ok(hash) + } +} + +parameter_types! 
{ + pub const OwnParaId: ParaId = ParaId::new(1013); + pub Parameters: PricingParameters = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: gwei(20), + rewards: Rewards { local: DOT, remote: meth(1) }, + multiplier: FixedU128::from_rational(1, 1), + }; +} + +pub const DOT: u128 = 10_000_000_000; + +pub struct MockChannelLookup; +impl StaticLookup for MockChannelLookup { + type Source = ChannelId; + type Target = Channel; + + fn lookup(channel_id: Self::Source) -> Option { + if channel_id != + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into() + { + return None + } + Some(Channel { agent_id: H256::zero(), para_id: ASSET_HUB_PARAID.into() }) + } +} + +pub struct SuccessfulTransactor; +impl TransactAsset for SuccessfulTransactor { + fn can_check_in(_origin: &Location, _what: &Asset, _context: &XcmContext) -> XcmResult { + Ok(()) + } + + fn can_check_out(_dest: &Location, _what: &Asset, _context: &XcmContext) -> XcmResult { + Ok(()) + } + + fn deposit_asset(_what: &Asset, _who: &Location, _context: Option<&XcmContext>) -> XcmResult { + Ok(()) + } + + fn withdraw_asset( + _what: &Asset, + _who: &Location, + _context: Option<&XcmContext>, + ) -> Result { + Ok(AssetsInHolding::default()) + } + + fn internal_transfer_asset( + _what: &Asset, + _from: &Location, + _to: &Location, + _context: &XcmContext, + ) -> Result { + Ok(AssetsInHolding::default()) + } +} + +pub struct MockTokenIdConvert; +impl MaybeEquivalence for MockTokenIdConvert { + fn convert(_id: &TokenId) -> Option { + Some(Location::parent()) + } + fn convert_back(_loc: &Location) -> Option { + None + } +} + +impl inbound_queue::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Verifier = MockVerifier; + type Token = Balances; + type XcmSender = MockXcmSender; + type WeightInfo = (); + type GatewayAddress = GatewayAddress; + type MessageConverter = MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + MockTokenIdConvert, + UniversalLocation, + AssetHubFromEthereum, + >; + type PricingParameters = Parameters; + type ChannelLookup = MockChannelLookup; + #[cfg(feature = "runtime-benchmarks")] + type Helper = Test; + type WeightToFee = IdentityFee; + type LengthToFee = IdentityFee; + type MaxMessageSize = ConstU32<1024>; + type AssetTransactor = SuccessfulTransactor; +} + +pub fn last_events(n: usize) -> Vec { + frame_system::Pallet::::events() + .into_iter() + .rev() + .take(n) + .rev() + .map(|e| e.event) + .collect() +} + +pub fn expect_events(e: Vec) { + assert_eq!(last_events(e.len()), e); +} + +pub fn setup() { + System::set_block_number(1); + Balances::mint_into( + &sibling_sovereign_account::(ASSET_HUB_PARAID.into()), + InitialFund::get(), + ) + .unwrap(); + Balances::mint_into( + &sibling_sovereign_account::(TEMPLATE_PARAID.into()), + InitialFund::get(), + ) + .unwrap(); +} + +pub fn new_tester() -> sp_io::TestExternalities { + let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext: sp_io::TestExternalities = storage.into(); + ext.execute_with(setup); + ext +} + +// Generated from smoketests: +// cd smoketests +// ./make-bindings +// cargo test --test register_token -- --nocapture +pub fn mock_event_log() -> Log { + Log { + // gateway address + address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + // channel id + 
hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + // message id + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + // Nonce + Payload + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e000f000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), + } +} + +pub fn mock_event_log_invalid_channel() -> Log { + Log { + address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + // invalid channel id + hex!("0000000000000000000000000000000000000000000000000000000000000000").into(), + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000001e000f000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d0000").into(), + } +} + +pub fn mock_event_log_invalid_gateway() -> Log { + Log { + // gateway address + address: H160::zero(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + // channel id + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + // message id + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + // Nonce + Payload + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000001e000f000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d0000").into(), + } +} + +pub fn mock_execution_proof() -> ExecutionProof { + ExecutionProof { + header: BeaconHeader::default(), + ancestry_proof: None, + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: Default::default(), + fee_recipient: Default::default(), + state_root: Default::default(), + receipts_root: Default::default(), + logs_bloom: vec![], + prev_randao: Default::default(), + block_number: 0, + gas_limit: 0, + gas_used: 0, + timestamp: 0, + extra_data: vec![], + base_fee_per_gas: Default::default(), + block_hash: Default::default(), + transactions_root: Default::default(), + withdrawals_root: Default::default(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![], + } +} + +pub const ASSET_HUB_PARAID: u32 = 1000u32; +pub const TEMPLATE_PARAID: u32 = 1001u32; diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/test.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/test.rs new file mode 100644 index 000000000000..44f6c0ebc658 --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/src/test.rs @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +use frame_support::{assert_noop, assert_ok}; +use hex_literal::hex; +use snowbridge_core::{inbound::Proof, ChannelId}; +use sp_keyring::AccountKeyring as Keyring; +use sp_runtime::DispatchError; +use sp_std::convert::From; + +use crate::{Error, Event as InboundQueueEvent}; + +use crate::mock::*; + +#[test] +fn test_submit_happy_path() { + 
new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let channel_sovereign = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); + + let origin = RuntimeOrigin::signed(relayer.clone()); + + // Submit message + let message = Message { + event_log: mock_event_log(), + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + + let initial_fund = InitialFund::get(); + assert_eq!(Balances::balance(&relayer), 0); + assert_eq!(Balances::balance(&channel_sovereign), initial_fund); + + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); + expect_events(vec![InboundQueueEvent::MessageReceived { + channel_id: hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539") + .into(), + nonce: 1, + message_id: [ + 183, 243, 1, 130, 170, 254, 104, 45, 116, 181, 146, 237, 14, 139, 138, 89, 43, 166, + 182, 24, 163, 222, 112, 238, 215, 83, 21, 160, 24, 88, 112, 9, + ], + fee_burned: 110000000000, + } + .into()]); + + let delivery_cost = InboundQueue::calculate_delivery_cost(message.encode().len() as u32); + assert!( + Parameters::get().rewards.local < delivery_cost, + "delivery cost exceeds pure reward" + ); + + assert_eq!(Balances::balance(&relayer), delivery_cost, "relayer was rewarded"); + assert!( + Balances::balance(&channel_sovereign) <= initial_fund - delivery_cost, + "sovereign account paid reward" + ); + }); +} + +#[test] +fn test_submit_xcm_invalid_channel() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + + // Deposit funds into sovereign account of parachain 1001 + let sovereign_account = sibling_sovereign_account::(TEMPLATE_PARAID.into()); + println!("account: {}", sovereign_account); + let _ = Balances::mint_into(&sovereign_account, 10000); + + // Submit message + let message = Message { + event_log: mock_event_log_invalid_channel(), + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + assert_noop!( + InboundQueue::submit(origin.clone(), message.clone()), + Error::::InvalidChannel, + ); + }); +} + +#[test] +fn test_submit_with_invalid_gateway() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + + // Deposit funds into sovereign account of Asset Hub (Statemint) + let sovereign_account = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); + let _ = Balances::mint_into(&sovereign_account, 10000); + + // Submit message + let message = Message { + event_log: mock_event_log_invalid_gateway(), + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + assert_noop!( + InboundQueue::submit(origin.clone(), message.clone()), + Error::::InvalidGateway + ); + }); +} + +#[test] +fn test_submit_with_invalid_nonce() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + + // Deposit funds into sovereign account of Asset Hub (Statemint) + let sovereign_account = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); + let _ = Balances::mint_into(&sovereign_account, 10000); + + // Submit message + let message = Message { + event_log: mock_event_log(), + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); + + let nonce: u64 = 
>::get(ChannelId::from(hex!( + "c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539" + ))); + assert_eq!(nonce, 1); + + // Submit the same again + assert_noop!( + InboundQueue::submit(origin.clone(), message.clone()), + Error::::InvalidNonce + ); + }); +} + +#[test] +fn test_submit_no_funds_to_reward_relayers_just_ignore() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + + // Reset balance of sovereign_account to zero first + let sovereign_account = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); + Balances::set_balance(&sovereign_account, 0); + + // Submit message + let message = Message { + event_log: mock_event_log(), + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + // Check submit successfully in case no funds available + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); + }); +} + +#[test] +fn test_set_operating_mode() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + let message = Message { + event_log: mock_event_log(), + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + + assert_ok!(InboundQueue::set_operating_mode( + RuntimeOrigin::root(), + snowbridge_core::BasicOperatingMode::Halted + )); + + assert_noop!(InboundQueue::submit(origin, message), Error::::Halted); + }); +} + +#[test] +fn test_set_operating_mode_root_only() { + new_tester().execute_with(|| { + assert_noop!( + InboundQueue::set_operating_mode( + RuntimeOrigin::signed(Keyring::Bob.into()), + snowbridge_core::BasicOperatingMode::Halted + ), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn test_submit_no_funds_to_reward_relayers_and_ed_preserved() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + + // Reset balance of sovereign account to (ED+1) first + let sovereign_account = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); + Balances::set_balance(&sovereign_account, ExistentialDeposit::get() + 1); + + // Submit message successfully + let message = Message { + event_log: mock_event_log(), + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); + + // Check balance of sovereign account to ED + let amount = Balances::balance(&sovereign_account); + assert_eq!(amount, ExistentialDeposit::get()); + + // Submit another message with nonce set as 2 + let mut event_log = mock_event_log(); + event_log.data[31] = 2; + let message = Message { + event_log, + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); + // Check balance of sovereign account as ED does not change + let amount = Balances::balance(&sovereign_account); + assert_eq!(amount, ExistentialDeposit::get()); + }); +} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/weights.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/weights.rs new file mode 100644 index 000000000000..c2c665f40d9e --- /dev/null +++ b/bridges/snowbridge/pallets/inbound-queue-v2/src/weights.rs @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! 
Autogenerated weights for `snowbridge_inbound_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `macbook pro 14 m2`, CPU: `m2-arm64` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for ethereum_beacon_client. +pub trait WeightInfo { + fn submit() -> Weight; +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn submit() -> Weight { + Weight::from_parts(70_000_000, 0) + .saturating_add(Weight::from_parts(0, 3601)) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(2)) + } +} diff --git a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs index 423b92b9fae0..5814886fe355 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs @@ -61,7 +61,7 @@ use snowbridge_core::{ sibling_sovereign_account, BasicOperatingMode, Channel, ChannelId, ParaId, PricingParameters, StaticLookup, }; -use snowbridge_router_primitives::inbound::{ +use snowbridge_router_primitives::inbound::v1::{ ConvertMessage, ConvertMessageError, VersionedMessage, }; use sp_runtime::{traits::Saturating, SaturatedConversion, TokenError}; diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index 675d4b691593..82862616466d 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -12,7 +12,7 @@ use snowbridge_core::{ inbound::{Log, Proof, VerificationError}, meth, Channel, ChannelId, PricingParameters, Rewards, StaticLookup, TokenId, }; -use snowbridge_router_primitives::inbound::MessageToXcm; +use snowbridge_router_primitives::inbound::v1::MessageToXcm; use sp_core::{H160, H256}; use sp_runtime::{ traits::{IdentifyAccount, IdentityLookup, MaybeEquivalence, Verify}, diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml new file mode 100644 index 000000000000..560192c759f8 --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml @@ -0,0 +1,92 @@ +[package] +name = "snowbridge-pallet-outbound-queue-v2" +description = "Snowbridge Outbound Queue Pallet V2" +version = "0.2.0" +authors = ["Snowfork "] +edition.workspace = true +repository.workspace = true +license = "Apache-2.0" +categories = ["cryptography::cryptocurrencies"] + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { features = ["alloc", "derive"], workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +alloy-primitives = { features = ["rlp"], workspace = true } +alloy-sol-types = { workspace = true } + +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-io = { 
workspace = true } +sp-arithmetic = { workspace = true } + +bridge-hub-common = { workspace = true } + +snowbridge-core = { features = ["serde"], workspace = true } +ethabi = { workspace = true } +hex-literal = { workspace = true, default-features = true } +snowbridge-merkle-tree = { workspace = true } +snowbridge-router-primitives = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { workspace = true } + +[dev-dependencies] +pallet-message-queue = { workspace = true } +sp-keyring = { workspace = true, default-features = true } + +[features] +default = ["std"] +std = [ + "alloy-primitives/std", + "alloy-sol-types/std", + "bridge-hub-common/std", + "codec/std", + "ethabi/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "pallet-message-queue/std", + "scale-info/std", + "serde/std", + "snowbridge-core/std", + "snowbridge-merkle-tree/std", + "snowbridge-router-primitives/std", + "sp-arithmetic/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] +runtime-benchmarks = [ + "bridge-hub-common/runtime-benchmarks", + "frame-benchmarking", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "snowbridge-core/runtime-benchmarks", + "snowbridge-router-primitives/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-message-queue/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/README.md b/bridges/snowbridge/pallets/outbound-queue-v2/README.md new file mode 100644 index 000000000000..19638f90e6a5 --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/README.md @@ -0,0 +1,3 @@ +# Ethereum Outbound Queue + +Sends messages from an origin in the Polkadot ecosystem to Ethereum. 
diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/Cargo.toml new file mode 100644 index 000000000000..14f4a8d18c19 --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "snowbridge-outbound-queue-runtime-api-v2" +description = "Snowbridge Outbound Queue Runtime API V2" +version = "0.2.0" +authors = ["Snowfork "] +edition.workspace = true +repository.workspace = true +license = "Apache-2.0" +categories = ["cryptography::cryptocurrencies"] + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-std = { workspace = true } +sp-api = { workspace = true } +frame-support = { workspace = true } +snowbridge-core = { workspace = true } +snowbridge-merkle-tree = { workspace = true } +xcm = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "scale-info/std", + "snowbridge-core/std", + "snowbridge-merkle-tree/std", + "sp-api/std", + "sp-std/std", + "xcm/std", +] diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/README.md b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/README.md new file mode 100644 index 000000000000..98ae01fb33da --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/README.md @@ -0,0 +1,6 @@ +# Ethereum Outbound Queue Runtime API + +Provides an API: + +- to generate merkle proofs for outbound messages +- calculate delivery fee for delivering messages to Ethereum diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs new file mode 100644 index 000000000000..26ab7872ff11 --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::traits::tokens::Balance as BalanceT; +use snowbridge_core::outbound::{ + v2::{Fee, InboundMessage}, + DryRunError, +}; +use snowbridge_merkle_tree::MerkleProof; +use xcm::prelude::Xcm; + +sp_api::decl_runtime_apis! { + pub trait OutboundQueueApiV2 where Balance: BalanceT + { + /// Generate a merkle proof for a committed message identified by `leaf_index`. + /// The merkle root is stored in the block header as a + /// `sp_runtime::generic::DigestItem::Other` + fn prove_message(leaf_index: u64) -> Option; + + fn dry_run(xcm: Xcm<()>) -> Result<(InboundMessage,Fee),DryRunError>; + } +} diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs new file mode 100644 index 000000000000..f45e15bad647 --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! 
Helpers for implementing runtime api + +use crate::{Config, MessageLeaves}; +use frame_support::storage::StorageStreamIter; +use snowbridge_core::{ + outbound::{ + v2::{CommandWrapper, Fee, GasMeter, InboundMessage, Message}, + DryRunError, + }, + AgentIdOf, +}; +use snowbridge_merkle_tree::{merkle_proof, MerkleProof}; +use snowbridge_router_primitives::outbound::v2::XcmConverter; +use sp_core::Get; +use sp_std::{default::Default, vec::Vec}; +use xcm::{ + latest::Location, + prelude::{Parachain, Xcm}, +}; +use xcm_executor::traits::ConvertLocation; + +pub fn prove_message(leaf_index: u64) -> Option +where + T: Config, +{ + if !MessageLeaves::::exists() { + return None + } + let proof = + merkle_proof::<::Hashing, _>(MessageLeaves::::stream_iter(), leaf_index); + Some(proof) +} + +pub fn dry_run(xcm: Xcm<()>) -> Result<(InboundMessage, Fee), DryRunError> +where + T: Config, +{ + let mut converter = XcmConverter::::new( + &xcm, + T::EthereumNetwork::get(), + AgentIdOf::convert_location(&Location::new(1, Parachain(1000))) + .ok_or(DryRunError::ConvertLocationFailed)?, + ); + + let message: Message = converter.convert().map_err(|_| DryRunError::ConvertXcmFailed)?; + + let fee = Fee::from(crate::Pallet::::calculate_local_fee()); + + let commands: Vec = message + .commands + .into_iter() + .map(|command| CommandWrapper { + kind: command.index(), + gas: T::GasMeter::maximum_dispatch_gas_used_at_most(&command), + payload: command.abi_encode(), + }) + .collect(); + + let committed_message = InboundMessage { + origin: message.origin, + nonce: Default::default(), + commands: commands.try_into().map_err(|_| DryRunError::ConvertXcmFailed)?, + }; + + Ok((committed_message, fee)) +} diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/benchmarking.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/benchmarking.rs new file mode 100644 index 000000000000..f6e02844a58d --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/benchmarking.rs @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +use bridge_hub_common::AggregateMessageOrigin; +use codec::Encode; +use frame_benchmarking::v2::*; +use snowbridge_core::{ + outbound::v1::{Command, Initializer, QueuedMessage}, + ChannelId, +}; +use sp_core::{H160, H256}; + +#[allow(unused_imports)] +use crate::Pallet as OutboundQueue; + +#[benchmarks( + where + ::MaxMessagePayloadSize: Get, +)] +mod benchmarks { + use super::*; + + /// Benchmark for processing a message. + #[benchmark] + fn do_process_message() -> Result<(), BenchmarkError> { + let enqueued_message = QueuedMessage { + id: H256::zero(), + channel_id: ChannelId::from([1; 32]), + command: Command::Upgrade { + impl_address: H160::zero(), + impl_code_hash: H256::zero(), + initializer: Some(Initializer { + params: [7u8; 256].into_iter().collect(), + maximum_required_gas: 200_000, + }), + }, + }; + let origin = AggregateMessageOrigin::Snowbridge([1; 32].into()); + let encoded_enqueued_message = enqueued_message.encode(); + + #[block] + { + let _ = OutboundQueue::::do_process_message(origin, &encoded_enqueued_message); + } + + assert_eq!(MessageLeaves::::decode_len().unwrap(), 1); + + Ok(()) + } + + /// Benchmark for producing final messages commitment + #[benchmark] + fn commit() -> Result<(), BenchmarkError> { + // Assume worst case, where `MaxMessagesPerBlock` messages need to be committed. 
+ for i in 0..T::MaxMessagesPerBlock::get() { + let leaf_data: [u8; 1] = [i as u8]; + let leaf = ::Hashing::hash(&leaf_data); + MessageLeaves::::append(leaf); + } + + #[block] + { + OutboundQueue::::commit(); + } + + Ok(()) + } + + /// Benchmark for producing commitment for a single message + #[benchmark] + fn commit_single() -> Result<(), BenchmarkError> { + let leaf = ::Hashing::hash(&[100; 1]); + MessageLeaves::::append(leaf); + + #[block] + { + OutboundQueue::::commit(); + } + + Ok(()) + } + + impl_benchmark_test_suite!(OutboundQueue, crate::mock::new_tester(), crate::mock::Test,); +} diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/envelope.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/envelope.rs new file mode 100644 index 000000000000..e0f6ba63291c --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/envelope.rs @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use snowbridge_core::inbound::Log; + +use sp_core::{RuntimeDebug, H160}; +use sp_std::prelude::*; + +use alloy_primitives::B256; +use alloy_sol_types::{sol, SolEvent}; + +sol! { + event InboundMessageDispatched(uint64 indexed nonce, bool success, bytes32 indexed reward_address); +} + +/// An inbound message that has had its outer envelope decoded. +#[derive(Clone, RuntimeDebug)] +pub struct Envelope { + /// The address of the outbound queue on Ethereum that emitted this message as an event log + pub gateway: H160, + /// A nonce for enforcing replay protection and ordering. + pub nonce: u64, + /// Delivery status + pub success: bool, + /// The reward address + pub reward_address: [u8; 32], +} + +#[derive(Copy, Clone, RuntimeDebug)] +pub struct EnvelopeDecodeError; + +impl TryFrom<&Log> for Envelope { + type Error = EnvelopeDecodeError; + + fn try_from(log: &Log) -> Result { + let topics: Vec = log.topics.iter().map(|x| B256::from_slice(x.as_ref())).collect(); + + let event = InboundMessageDispatched::decode_log(topics, &log.data, true) + .map_err(|_| EnvelopeDecodeError)?; + + Ok(Self { + gateway: log.address, + nonce: event.nonce, + success: event.success, + reward_address: event.reward_address.clone().into(), + }) + } +} diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs new file mode 100644 index 000000000000..43fde9528f5d --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -0,0 +1,445 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Pallet for committing outbound messages for delivery to Ethereum +//! +//! # Overview +//! +//! Messages come either from sibling parachains via XCM, or BridgeHub itself +//! via the `snowbridge-pallet-system`: +//! +//! 1. `snowbridge_router_primitives::outbound::EthereumBlobExporter::deliver` +//! 2. `snowbridge_pallet_system::Pallet::send` +//! +//! The message submission pipeline works like this: +//! 1. The message is first validated via the implementation for +//! [`snowbridge_core::outbound::SendMessage::validate`] +//! 2. The message is then enqueued for later processing via the implementation for +//! [`snowbridge_core::outbound::SendMessage::deliver`] +//! 3. The underlying message queue is implemented by [`Config::MessageQueue`] +//! 4. The message queue delivers messages back to this pallet via the implementation for +//! [`frame_support::traits::ProcessMessage::process_message`] +//! 5. The message is processed in `Pallet::do_process_message`: a. 
Assigned a nonce b. ABI-encoded, +//! hashed, and stored in the `MessageLeaves` vector +//! 6. At the end of the block, a merkle root is constructed from all the leaves in `MessageLeaves`. +//! 7. This merkle root is inserted into the parachain header as a digest item +//! 8. Offchain relayers are able to relay the message to Ethereum after: a. Generating a merkle +//! proof for the committed message using the `prove_message` runtime API b. Reading the actual +//! message content from the `Messages` vector in storage +//! +//! On the Ethereum side, the message root is ultimately the thing being +//! verified by the Polkadot light client. +//! +//! # Message Priorities +//! +//! The processing of governance commands can never be halted. This effectively +//! allows us to pause processing of normal user messages while still allowing +//! governance commands to be sent to Ethereum. +//! +//! # Fees +//! +//! An upfront fee must be paid for delivering a message. This fee covers several +//! components: +//! 1. The weight of processing the message locally +//! 2. The gas refund paid out to relayers for message submission +//! 3. An additional reward paid out to relayers for message submission +//! +//! Messages are weighed to determine the maximum amount of gas they could +//! consume on Ethereum. Using this upper bound, a final fee can be calculated. +//! +//! The fee calculation also requires the following parameters: +//! * Average ETH/DOT exchange rate over some period +//! * Max fee per unit of gas that bridge is willing to refund relayers for +//! +//! By design, it is expected that governance should manually update these +//! parameters every few weeks using the `set_pricing_parameters` extrinsic in the +//! system pallet. +//! +//! This is an interim measure. Once ETH/DOT liquidity pools are available in the Polkadot network, +//! we'll use them as a source of pricing info, subject to certain safeguards. +//! +//! ## Fee Computation Function +//! +//! ```text +//! LocalFee(Message) = WeightToFee(ProcessMessageWeight(Message)) +//! RemoteFee(Message) = MaxGasRequired(Message) * Params.MaxFeePerGas + Params.Reward +//! RemoteFeeAdjusted(Message) = Params.Multiplier * (RemoteFee(Message) / Params.Ratio("ETH/DOT")) +//! Fee(Message) = LocalFee(Message) + RemoteFeeAdjusted(Message) +//! ``` +//! +//! By design, the computed fee includes a safety factor (the `Multiplier`) to cover +//! unfavourable fluctuations in the ETH/DOT exchange rate. +//! +//! ## Fee Settlement +//! +//! On the remote side, in the gateway contract, the relayer accrues +//! +//! ```text +//! Min(GasPrice, Message.MaxFeePerGas) * GasUsed() + Message.Reward +//! ``` +//! Or in plain english, relayers are refunded for gas consumption, using a +//! price that is a minimum of the actual gas price, or `Message.MaxFeePerGas`. +//! +//! # Extrinsics +//! +//! * [`Call::set_operating_mode`]: Set the operating mode +//! +//! # Runtime API +//! +//! * `prove_message`: Generate a merkle proof for a committed message +//! 
* `calculate_fee`: Calculate the delivery fee for a message +#![cfg_attr(not(feature = "std"), no_std)] +pub mod api; +pub mod envelope; +pub mod process_message_impl; +pub mod send_message_impl; +pub mod types; +pub mod weights; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod test; + +use bridge_hub_common::{AggregateMessageOrigin, CustomDigestItem}; +use codec::Decode; +use envelope::Envelope; +use frame_support::{ + storage::StorageStreamIter, + traits::{tokens::Balance, EnqueueMessage, Get, ProcessMessageError}, + weights::{Weight, WeightToFee}, +}; +use snowbridge_core::{ + inbound::Message as DeliveryMessage, + outbound::v2::{CommandWrapper, Fee, GasMeter, InboundMessage, InboundMessageWrapper, Message}, + BasicOperatingMode, RewardLedger, TokenId, +}; +use snowbridge_merkle_tree::merkle_root; +use sp_core::H256; +use sp_runtime::{ + traits::{BlockNumberProvider, Hash}, + ArithmeticError, DigestItem, +}; +use sp_std::prelude::*; +pub use types::{PendingOrder, ProcessMessageOriginOf}; +pub use weights::WeightInfo; + +pub use pallet::*; + +use alloy_sol_types::SolValue; + +use alloy_primitives::FixedBytes; + +use sp_runtime::traits::TrailingZeroInput; + +use sp_runtime::traits::MaybeEquivalence; + +use xcm::prelude::{Location, NetworkId}; + +use snowbridge_core::inbound::{VerificationError, Verifier}; + +use sp_core::H160; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + type Hashing: Hash; + + type MessageQueue: EnqueueMessage; + + /// Measures the maximum gas used to execute a command on Ethereum + type GasMeter: GasMeter; + + type Balance: Balance + From; + + /// Max bytes in a message payload + #[pallet::constant] + type MaxMessagePayloadSize: Get; + + /// Max number of messages processed per block + #[pallet::constant] + type MaxMessagesPerBlock: Get; + + /// Convert a weight value into a deductible fee based. + type WeightToFee: WeightToFee; + + /// Weight information for extrinsics in this pallet + type WeightInfo: WeightInfo; + + /// The verifier for delivery proof from Ethereum + type Verifier: Verifier; + + /// Address of the Gateway contract + #[pallet::constant] + type GatewayAddress: Get; + + /// Reward leger + type RewardLedger: RewardLedger<::AccountId, Self::Balance>; + + type ConvertAssetId: MaybeEquivalence; + + type EthereumNetwork: Get; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Message has been queued and will be processed in the future + MessageQueued { + /// The message + message: Message, + }, + /// Message will be committed at the end of current block. From now on, to track the + /// progress the message, use the `nonce` of `id`. 
+ MessageAccepted { + /// ID of the message + id: H256, + /// The nonce assigned to this message + nonce: u64, + }, + /// Some messages have been committed + MessagesCommitted { + /// Merkle root of the committed messages + root: H256, + /// number of committed messages + count: u64, + }, + /// Set OperatingMode + OperatingModeChanged { mode: BasicOperatingMode }, + /// Delivery Proof received + MessageDeliveryProofReceived { nonce: u64 }, + } + + #[pallet::error] + pub enum Error { + /// The message is too large + MessageTooLarge, + /// The pallet is halted + Halted, + /// Invalid Channel + InvalidChannel, + /// Invalid Envelope + InvalidEnvelope, + /// Message verification error + Verification(VerificationError), + /// Invalid Gateway + InvalidGateway, + /// No pending nonce + PendingNonceNotExist, + } + + /// Messages to be committed in the current block. This storage value is killed in + /// `on_initialize`, so should never go into block PoV. + /// + /// Is never read in the runtime, only by offchain message relayers. + /// + /// Inspired by the `frame_system::Pallet::Events` storage value + #[pallet::storage] + #[pallet::unbounded] + pub(super) type Messages = StorageValue<_, Vec, ValueQuery>; + + /// Hashes of the ABI-encoded messages in the [`Messages`] storage value. Used to generate a + /// merkle root during `on_finalize`. This storage value is killed in + /// `on_initialize`, so should never go into block PoV. + #[pallet::storage] + #[pallet::unbounded] + #[pallet::getter(fn message_leaves)] + pub(super) type MessageLeaves = StorageValue<_, Vec, ValueQuery>; + + /// The current nonce for the messages + #[pallet::storage] + pub type Nonce = StorageValue<_, u64, ValueQuery>; + + /// The current operating mode of the pallet. + #[pallet::storage] + #[pallet::getter(fn operating_mode)] + pub type OperatingMode = StorageValue<_, BasicOperatingMode, ValueQuery>; + + /// Pending orders to relay + #[pallet::storage] + pub type PendingOrders = + StorageMap<_, Identity, u64, PendingOrder>, OptionQuery>; + + #[pallet::hooks] + impl Hooks> for Pallet + where + T::AccountId: AsRef<[u8]>, + { + fn on_initialize(_: BlockNumberFor) -> Weight { + // Remove storage from previous block + Messages::::kill(); + MessageLeaves::::kill(); + // Reserve some weight for the `on_finalize` handler + T::WeightInfo::commit() + } + + fn on_finalize(_: BlockNumberFor) { + Self::commit(); + } + } + + #[pallet::call] + impl Pallet { + /// Halt or resume all pallet operations. May only be called by root. 
+ #[pallet::call_index(0)] + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_operating_mode( + origin: OriginFor, + mode: BasicOperatingMode, + ) -> DispatchResult { + ensure_root(origin)?; + OperatingMode::::put(mode); + Self::deposit_event(Event::OperatingModeChanged { mode }); + Ok(()) + } + + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::submit_delivery_proof())] + pub fn submit_delivery_proof( + origin: OriginFor, + message: DeliveryMessage, + ) -> DispatchResult { + ensure_signed(origin)?; + ensure!(!Self::operating_mode().is_halted(), Error::::Halted); + + // submit message to verifier for verification + T::Verifier::verify(&message.event_log, &message.proof) + .map_err(|e| Error::::Verification(e))?; + + // Decode event log into an Envelope + let envelope = + Envelope::try_from(&message.event_log).map_err(|_| Error::::InvalidEnvelope)?; + + // Verify that the message was submitted from the known Gateway contract + ensure!(T::GatewayAddress::get() == envelope.gateway, Error::::InvalidGateway); + + let nonce = envelope.nonce; + ensure!(>::contains_key(nonce), Error::::PendingNonceNotExist); + + let order = >::get(nonce).ok_or(Error::::PendingNonceNotExist)?; + let account = T::AccountId::decode(&mut &envelope.reward_address[..]).unwrap_or( + T::AccountId::decode(&mut TrailingZeroInput::zeroes()).expect("zero address"), + ); + // No fee for governance order + if !order.fee.is_zero() { + T::RewardLedger::deposit(account, order.fee.into())?; + } + + >::remove(nonce); + + Self::deposit_event(Event::MessageDeliveryProofReceived { nonce }); + + Ok(()) + } + } + + impl Pallet { + /// Generate a messages commitment and insert it into the header digest + pub(crate) fn commit() { + let count = MessageLeaves::::decode_len().unwrap_or_default() as u64; + if count == 0 { + return + } + + // Create merkle root of messages + let root = merkle_root::<::Hashing, _>(MessageLeaves::::stream_iter()); + + let digest_item: DigestItem = CustomDigestItem::Snowbridge(root).into(); + + // Insert merkle root into the header digest + >::deposit_log(digest_item); + + Self::deposit_event(Event::MessagesCommitted { root, count }); + } + + /// Process a message delivered by the MessageQueue pallet + pub(crate) fn do_process_message( + _: ProcessMessageOriginOf, + mut message: &[u8], + ) -> Result { + use ProcessMessageError::*; + + // Yield if the maximum number of messages has been processed this block. + // This ensures that the weight of `on_finalize` has a known maximum bound. 
+ ensure!( + MessageLeaves::::decode_len().unwrap_or(0) < + T::MaxMessagesPerBlock::get() as usize, + Yield + ); + + // Decode bytes into versioned message + let message: Message = Message::decode(&mut message).map_err(|_| Corrupt)?; + + let nonce = Nonce::::get(); + + let commands: Vec = message + .commands + .into_iter() + .map(|command| CommandWrapper { + kind: command.index(), + gas: T::GasMeter::maximum_dispatch_gas_used_at_most(&command), + payload: command.abi_encode(), + }) + .collect(); + + // Construct the final committed message + let inbound_message = InboundMessage { + origin: message.origin, + nonce, + commands: commands.clone().try_into().map_err(|_| Corrupt)?, + }; + + let committed_message = InboundMessageWrapper { + origin: FixedBytes::from(message.origin.as_fixed_bytes()), + nonce, + commands, + }; + + // ABI-encode and hash the prepared message + let message_abi_encoded = committed_message.abi_encode(); + let message_abi_encoded_hash = ::Hashing::hash(&message_abi_encoded); + + Messages::::append(Box::new(inbound_message)); + MessageLeaves::::append(message_abi_encoded_hash); + + >::try_mutate(nonce, |maybe_locked| -> DispatchResult { + let mut locked = maybe_locked.clone().unwrap_or_else(|| PendingOrder { + nonce, + fee: 0, + block_number: frame_system::Pallet::::current_block_number(), + }); + locked.fee = + locked.fee.checked_add(message.fee).ok_or(ArithmeticError::Overflow)?; + *maybe_locked = Some(locked); + Ok(()) + }) + .map_err(|_| Unsupported)?; + + Nonce::::set(nonce.checked_add(1).ok_or(Unsupported)?); + + Self::deposit_event(Event::MessageAccepted { id: message.id, nonce }); + + Ok(true) + } + + /// The local component of the message processing fees in native currency + pub(crate) fn calculate_local_fee() -> T::Balance { + T::WeightToFee::weight_to_fee( + &T::WeightInfo::do_process_message().saturating_add(T::WeightInfo::commit_single()), + ) + } + } +} diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs new file mode 100644 index 000000000000..353747b23a5f --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use super::*; + +use frame_support::{ + derive_impl, parameter_types, + traits::{Everything, Hooks}, + weights::IdentityFee, + BoundedVec, +}; + +use hex_literal::hex; +use snowbridge_core::{ + gwei, + inbound::{Log, Proof, VerificationError, Verifier}, + meth, + outbound::v2::*, + pricing::{PricingParameters, Rewards}, + ParaId, +}; +use sp_core::{ConstU32, H160, H256}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup, Keccak256}, + AccountId32, BuildStorage, FixedU128, +}; +use sp_std::marker::PhantomData; + +type Block = frame_system::mocking::MockBlock; +type AccountId = AccountId32; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system::{Pallet, Call, Storage, Event}, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event}, + OutboundQueue: crate::{Pallet, Storage, Event}, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type BaseCallFilter = Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type PalletInfo = PalletInfo; + 
type Nonce = u64; + type Block = Block; +} + +parameter_types! { + pub const HeapSize: u32 = 32 * 1024; + pub const MaxStale: u32 = 32; + pub static ServiceWeight: Option = Some(Weight::from_parts(100, 100)); +} + +impl pallet_message_queue::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type MessageProcessor = OutboundQueue; + type Size = u32; + type QueueChangeHandler = (); + type HeapSize = HeapSize; + type MaxStale = MaxStale; + type ServiceWeight = ServiceWeight; + type IdleMaxServiceWeight = (); + type QueuePausedQuery = (); +} + +// Mock verifier +pub struct MockVerifier; + +impl Verifier for MockVerifier { + fn verify(_: &Log, _: &Proof) -> Result<(), VerificationError> { + Ok(()) + } +} + +const GATEWAY_ADDRESS: [u8; 20] = hex!["eda338e4dc46038493b885327842fd3e301cab39"]; +const WETH: [u8; 20] = hex!["C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"]; +const ASSET_HUB_AGENT: [u8; 32] = + hex!["81c5ab2571199e3188135178f3c2c8e2d268be1313d029b30f534fa579b69b79"]; + +parameter_types! { + pub const OwnParaId: ParaId = ParaId::new(1013); + pub Parameters: PricingParameters = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: gwei(20), + rewards: Rewards { local: DOT, remote: meth(1) }, + multiplier: FixedU128::from_rational(4, 3), + }; + pub const GatewayAddress: H160 = H160(GATEWAY_ADDRESS); + pub EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; + +} + +pub const DOT: u128 = 10_000_000_000; +impl crate::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Verifier = MockVerifier; + type GatewayAddress = GatewayAddress; + type Hashing = Keccak256; + type MessageQueue = MessageQueue; + type MaxMessagePayloadSize = ConstU32<1024>; + type MaxMessagesPerBlock = ConstU32<20>; + type GasMeter = ConstantGasMeter; + type Balance = u128; + type WeightToFee = IdentityFee; + type WeightInfo = (); + type RewardLedger = (); + type ConvertAssetId = (); + type EthereumNetwork = EthereumNetwork; +} + +fn setup() { + System::set_block_number(1); +} + +pub fn new_tester() -> sp_io::TestExternalities { + let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext: sp_io::TestExternalities = storage.into(); + ext.execute_with(setup); + ext +} + +pub fn run_to_end_of_next_block() { + // finish current block + MessageQueue::on_finalize(System::block_number()); + OutboundQueue::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + // start next block + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + OutboundQueue::on_initialize(System::block_number()); + MessageQueue::on_initialize(System::block_number()); + // finish next block + MessageQueue::on_finalize(System::block_number()); + OutboundQueue::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); +} + +pub fn mock_governance_message() -> Message +where + T: Config, +{ + let _marker = PhantomData::; // for clippy + + Message { + origin: primary_governance_origin(), + id: Default::default(), + fee: 0, + commands: BoundedVec::try_from(vec![Command::Upgrade { + impl_address: Default::default(), + impl_code_hash: Default::default(), + initializer: None, + }]) + .unwrap(), + } +} + +// Message should fail validation as it is too large +pub fn mock_invalid_governance_message() -> Message +where + T: Config, +{ + let _marker = PhantomData::; // for clippy + + Message { + origin: Default::default(), + id: 
Default::default(), + fee: 0, + commands: BoundedVec::try_from(vec![Command::Upgrade { + impl_address: H160::zero(), + impl_code_hash: H256::zero(), + initializer: Some(Initializer { + params: (0..1000).map(|_| 1u8).collect::>(), + maximum_required_gas: 0, + }), + }]) + .unwrap(), + } +} + +pub fn mock_message(sibling_para_id: u32) -> Message { + Message { + origin: H256::from_low_u64_be(sibling_para_id as u64), + id: H256::from_low_u64_be(1), + fee: 1_000, + commands: BoundedVec::try_from(vec![Command::UnlockNativeToken { + agent_id: H256(ASSET_HUB_AGENT), + token: H160(WETH), + recipient: H160(GATEWAY_ADDRESS), + amount: 1_000_000, + }]) + .unwrap(), + } +} diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/process_message_impl.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/process_message_impl.rs new file mode 100644 index 000000000000..731aa6fa6d5c --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/process_message_impl.rs @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Implementation for [`frame_support::traits::ProcessMessage`] +use super::*; +use crate::weights::WeightInfo; +use frame_support::{ + traits::{ProcessMessage, ProcessMessageError}, + weights::WeightMeter, +}; + +impl ProcessMessage for Pallet { + type Origin = AggregateMessageOrigin; + fn process_message( + message: &[u8], + origin: Self::Origin, + meter: &mut WeightMeter, + _: &mut [u8; 32], + ) -> Result { + let weight = T::WeightInfo::do_process_message(); + if meter.try_consume(weight).is_err() { + return Err(ProcessMessageError::Overweight(weight)) + } + Self::do_process_message(origin, message) + } +} diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs new file mode 100644 index 000000000000..c37cf0dfa530 --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! 
Implementation for [`snowbridge_core::outbound::SendMessage`] +use super::*; +use bridge_hub_common::AggregateMessageOrigin; +use codec::Encode; +use frame_support::{ + ensure, + traits::{EnqueueMessage, Get}, +}; +use snowbridge_core::outbound::{ + v2::{primary_governance_origin, Message, SendMessage}, + SendError, SendMessageFeeProvider, +}; +use sp_core::H256; +use sp_runtime::BoundedVec; + +/// The maximal length of an enqueued message, as determined by the MessageQueue pallet +pub type MaxEnqueuedMessageSizeOf = + <::MessageQueue as EnqueueMessage>::MaxMessageLen; + +impl SendMessage for Pallet +where + T: Config, +{ + type Ticket = Message; + + fn validate( + message: &Message, + ) -> Result<(Self::Ticket, Fee<::Balance>), SendError> { + // The inner payload should not be too large + let payload = message.encode(); + ensure!( + payload.len() < T::MaxMessagePayloadSize::get() as usize, + SendError::MessageTooLarge + ); + + let fee = Fee::from(Self::calculate_local_fee()); + + Ok((message.clone(), fee)) + } + + fn deliver(ticket: Self::Ticket) -> Result { + let origin = AggregateMessageOrigin::SnowbridgeV2(ticket.origin); + + if ticket.origin != primary_governance_origin() { + ensure!(!Self::operating_mode().is_halted(), SendError::Halted); + } + + let message = + BoundedVec::try_from(ticket.encode()).map_err(|_| SendError::MessageTooLarge)?; + + T::MessageQueue::enqueue_message(message.as_bounded_slice(), origin); + Self::deposit_event(Event::MessageQueued { message: ticket.clone() }); + Ok(ticket.id) + } +} + +impl SendMessageFeeProvider for Pallet { + type Balance = T::Balance; + + /// The local component of the message processing fees in native currency + fn local_fee() -> Self::Balance { + Self::calculate_local_fee() + } +} diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs new file mode 100644 index 000000000000..b4d70e37a9e4 --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs @@ -0,0 +1,273 @@ +use alloy_primitives::FixedBytes; +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate::{mock::*, *}; + +use frame_support::{ + assert_err, assert_noop, assert_ok, + traits::{Hooks, ProcessMessage, ProcessMessageError}, + weights::WeightMeter, + BoundedVec, +}; + +use codec::Encode; +use snowbridge_core::{ + outbound::{ + v2::{primary_governance_origin, Command, InboundMessageWrapper, SendMessage}, + SendError, + }, + ChannelId, ParaId, +}; +use sp_core::{hexdisplay::HexDisplay, H256}; + +#[test] +fn submit_messages_and_commit() { + new_tester().execute_with(|| { + for para_id in 1000..1004 { + let message = mock_message(para_id); + let (ticket, _) = OutboundQueue::validate(&message).unwrap(); + assert_ok!(OutboundQueue::deliver(ticket)); + } + + ServiceWeight::set(Some(Weight::MAX)); + run_to_end_of_next_block(); + + assert_eq!(Nonce::::get(), 4); + + let digest = System::digest(); + let digest_items = digest.logs(); + assert!(digest_items.len() == 1 && digest_items[0].as_other().is_some()); + assert_eq!(Messages::::decode_len(), Some(4)); + }); +} + +#[test] +fn submit_message_fail_too_large() { + new_tester().execute_with(|| { + let message = mock_invalid_governance_message::(); + assert_err!(OutboundQueue::validate(&message), SendError::MessageTooLarge); + }); +} + +#[test] +fn commit_exits_early_if_no_processed_messages() { + new_tester().execute_with(|| { + // on_finalize should do nothing, nor should it panic + 
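+        // With `MessageLeaves` empty, the commit path run from `on_finalize` should produce no
+        // `DigestItem::Other` commitment log; the digest-length assertion below relies on this.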
OutboundQueue::on_finalize(System::block_number()); + + let digest = System::digest(); + let digest_items = digest.logs(); + assert_eq!(digest_items.len(), 0); + }); +} + +#[test] +fn process_message_yields_on_max_messages_per_block() { + new_tester().execute_with(|| { + for _ in 0..::MaxMessagesPerBlock::get() { + MessageLeaves::::append(H256::zero()) + } + + let _channel_id: ChannelId = ParaId::from(1000).into(); + let origin = AggregateMessageOrigin::SnowbridgeV2(H256::zero()); + let message = Message { + origin: Default::default(), + id: Default::default(), + fee: 0, + commands: BoundedVec::try_from(vec![Command::Upgrade { + impl_address: Default::default(), + impl_code_hash: Default::default(), + initializer: None, + }]) + .unwrap(), + }; + + let mut meter = WeightMeter::new(); + + assert_noop!( + OutboundQueue::process_message( + message.encode().as_slice(), + origin, + &mut meter, + &mut [0u8; 32] + ), + ProcessMessageError::Yield + ); + }) +} + +#[test] +fn process_message_fails_on_max_nonce_reached() { + new_tester().execute_with(|| { + let sibling_id = 1000; + let _channel_id: ChannelId = ParaId::from(sibling_id).into(); + let origin = AggregateMessageOrigin::SnowbridgeV2(H256::zero()); + let message: Message = mock_message(sibling_id); + + let mut meter = WeightMeter::with_limit(Weight::MAX); + + Nonce::::set(u64::MAX); + + let result = OutboundQueue::process_message( + message.encode().as_slice(), + origin, + &mut meter, + &mut [0u8; 32], + ); + assert_err!(result, ProcessMessageError::Unsupported) + }) +} + +#[test] +fn process_message_fails_on_overweight_message() { + new_tester().execute_with(|| { + let sibling_id = 1000; + let _channel_id: ChannelId = ParaId::from(sibling_id).into(); + let origin = AggregateMessageOrigin::SnowbridgeV2(H256::zero()); + let message: Message = mock_message(sibling_id); + let mut meter = WeightMeter::with_limit(Weight::from_parts(1, 1)); + assert_noop!( + OutboundQueue::process_message( + message.encode().as_slice(), + origin, + &mut meter, + &mut [0u8; 32] + ), + ProcessMessageError::Overweight(::WeightInfo::do_process_message()) + ); + }) +} + +// Governance messages should be able to bypass a halted operating mode +// Other message sends should fail when halted +#[test] +fn submit_upgrade_message_success_when_queue_halted() { + new_tester().execute_with(|| { + // halt the outbound queue + OutboundQueue::set_operating_mode(RuntimeOrigin::root(), BasicOperatingMode::Halted) + .unwrap(); + + // submit a high priority message from bridge_hub should success + let message = mock_governance_message::(); + let (ticket, _) = OutboundQueue::validate(&message).unwrap(); + assert_ok!(OutboundQueue::deliver(ticket)); + + // submit a low priority message from asset_hub will fail as pallet is halted + let message = mock_message(1000); + let (ticket, _) = OutboundQueue::validate(&message).unwrap(); + assert_noop!(OutboundQueue::deliver(ticket), SendError::Halted); + }); +} + +#[test] +fn governance_message_does_not_get_the_chance_to_processed_in_same_block_when_congest_of_low_priority_sibling_messages( +) { + use AggregateMessageOrigin::*; + + let sibling_id: u32 = 1000; + + new_tester().execute_with(|| { + // submit a lot of low priority messages from asset_hub which will need multiple blocks to + // execute(20 messages for each block so 40 required at least 2 blocks) + let max_messages = 40; + for _ in 0..max_messages { + // submit low priority message + let message = mock_message(sibling_id); + let (ticket, _) = 
OutboundQueue::validate(&message).unwrap(); + OutboundQueue::deliver(ticket).unwrap(); + } + + let footprint = + MessageQueue::footprint(SnowbridgeV2(H256::from_low_u64_be(sibling_id as u64))); + assert_eq!(footprint.storage.count, (max_messages) as u64); + + let message = mock_governance_message::(); + let (ticket, _) = OutboundQueue::validate(&message).unwrap(); + OutboundQueue::deliver(ticket).unwrap(); + + // move to next block + ServiceWeight::set(Some(Weight::MAX)); + run_to_end_of_next_block(); + + // first process 20 messages from sibling channel + let footprint = + MessageQueue::footprint(SnowbridgeV2(H256::from_low_u64_be(sibling_id as u64))); + assert_eq!(footprint.storage.count, 40 - 20); + + // and governance message does not have the chance to execute in same block + let footprint = MessageQueue::footprint(SnowbridgeV2(primary_governance_origin())); + assert_eq!(footprint.storage.count, 1); + + // move to next block + ServiceWeight::set(Some(Weight::MAX)); + run_to_end_of_next_block(); + + // now governance message get executed in this block + let footprint = MessageQueue::footprint(SnowbridgeV2(primary_governance_origin())); + assert_eq!(footprint.storage.count, 0); + + // and this time process 19 messages from sibling channel so we have 1 message left + let footprint = + MessageQueue::footprint(SnowbridgeV2(H256::from_low_u64_be(sibling_id as u64))); + assert_eq!(footprint.storage.count, 1); + + // move to the next block, the last 1 message from sibling channel get executed + ServiceWeight::set(Some(Weight::MAX)); + run_to_end_of_next_block(); + let footprint = + MessageQueue::footprint(SnowbridgeV2(H256::from_low_u64_be(sibling_id as u64))); + assert_eq!(footprint.storage.count, 0); + }); +} + +#[test] +fn encode_digest_item_with_correct_index() { + new_tester().execute_with(|| { + let digest_item: DigestItem = CustomDigestItem::Snowbridge(H256::default()).into(); + let enum_prefix = match digest_item { + DigestItem::Other(data) => data[0], + _ => u8::MAX, + }; + assert_eq!(enum_prefix, 0); + }); +} + +#[test] +fn encode_digest_item() { + new_tester().execute_with(|| { + let digest_item: DigestItem = CustomDigestItem::Snowbridge([5u8; 32].into()).into(); + let digest_item_raw = digest_item.encode(); + assert_eq!(digest_item_raw[0], 0); // DigestItem::Other + assert_eq!(digest_item_raw[2], 0); // CustomDigestItem::Snowbridge + assert_eq!( + digest_item_raw, + [ + 0, 132, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5 + ] + ); + }); +} + +#[test] +fn encode_mock_message() { + let message: Message = mock_message(1000); + let commands: Vec = message + .commands + .into_iter() + .map(|command| CommandWrapper { + kind: command.index(), + gas: ::GasMeter::maximum_dispatch_gas_used_at_most(&command), + payload: command.abi_encode(), + }) + .collect(); + + // print the abi-encoded message and decode with solidity test + let committed_message = InboundMessageWrapper { + origin: FixedBytes::from(message.origin.as_fixed_bytes()), + nonce: 1, + commands, + }; + let message_abi_encoded = committed_message.abi_encode(); + println!("{}", HexDisplay::from(&message_abi_encoded)); +} diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/types.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/types.rs new file mode 100644 index 000000000000..db1f567e42fc --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/types.rs @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork 
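As an aside to the `encode_digest_item` expectation above, the three leading bytes `[0, 132, 0]` follow directly from SCALE encoding: `DigestItem::Other` is variant 0, the inner payload is 33 bytes (one `CustomDigestItem` variant byte plus the 32-byte commitment hash) whose single-byte compact length encodes as 33 << 2 = 132, and `CustomDigestItem::Snowbridge` is itself variant 0. A minimal sketch of that arithmetic, using only the values from the test:

    let inner = [&[0u8][..], &[5u8; 32][..]].concat(); // Snowbridge variant byte + 32-byte hash
    assert_eq!(inner.len(), 33);
    let compact_len_byte = (inner.len() as u8) << 2;   // single-byte SCALE compact mode
    assert_eq!(compact_len_byte, 132);
    // Full encoding: [0 /* DigestItem::Other */, 132] followed by `inner`.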
+use super::Pallet; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::traits::ProcessMessage; +use scale_info::TypeInfo; +pub use snowbridge_merkle_tree::MerkleProof; +use sp_runtime::RuntimeDebug; +use sp_std::prelude::*; + +pub type ProcessMessageOriginOf = as ProcessMessage>::Origin; + +/// Pending order +#[derive(Encode, Decode, TypeInfo, Clone, Eq, PartialEq, RuntimeDebug, MaxEncodedLen)] +pub struct PendingOrder { + /// The nonce used to identify the message + pub nonce: u64, + /// The block number in which the message was committed + pub block_number: BlockNumber, + /// The fee + #[codec(compact)] + pub fee: u128, +} diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/weights.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/weights.rs new file mode 100644 index 000000000000..196cc49a4c4d --- /dev/null +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/weights.rs @@ -0,0 +1,89 @@ + +//! Autogenerated weights for `snowbridge-pallet-outbound-queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-19, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `192.168.1.7`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: `1024` + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --pallet=snowbridge-pallet-outbound-queue +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --template +// ../parachain/templates/module-weight-template.hbs +// --output +// ../parachain/pallets/outbound-queue/src/weights.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `snowbridge-pallet-outbound-queue`. +pub trait WeightInfo { + fn do_process_message() -> Weight; + fn commit() -> Weight; + fn commit_single() -> Weight; + fn submit_delivery_proof() -> Weight; +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: EthereumOutboundQueue MessageLeaves (r:1 w:1) + /// Proof Skipped: EthereumOutboundQueue MessageLeaves (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: EthereumOutboundQueue PendingHighPriorityMessageCount (r:1 w:1) + /// Proof: EthereumOutboundQueue PendingHighPriorityMessageCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue Nonce (r:1 w:1) + /// Proof: EthereumOutboundQueue Nonce (max_values: None, max_size: Some(20), added: 2495, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue Messages (r:1 w:1) + /// Proof Skipped: EthereumOutboundQueue Messages (max_values: Some(1), max_size: None, mode: Measured) + fn do_process_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3485` + // Minimum execution time: 39_000_000 picoseconds. 
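+		// `from_parts(ref_time, proof_size)`: the first argument is the benchmarked execution time
+		// in picoseconds, the second the estimated proof (PoV) size in bytes from the summary above.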
+ Weight::from_parts(39_000_000, 3485) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: EthereumOutboundQueue MessageLeaves (r:1 w:0) + /// Proof Skipped: EthereumOutboundQueue MessageLeaves (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: System Digest (r:1 w:1) + /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) + fn commit() -> Weight { + // Proof Size summary in bytes: + // Measured: `1094` + // Estimated: `2579` + // Minimum execution time: 28_000_000 picoseconds. + Weight::from_parts(28_000_000, 2579) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + + fn commit_single() -> Weight { + // Proof Size summary in bytes: + // Measured: `1094` + // Estimated: `2579` + // Minimum execution time: 9_000_000 picoseconds. + Weight::from_parts(9_000_000, 1586) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + + fn submit_delivery_proof() -> Weight { + Weight::from_parts(70_000_000, 0) + .saturating_add(Weight::from_parts(0, 3601)) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(2)) + } +} diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index 78546e258daa..5aa10e69a01e 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -31,7 +31,7 @@ sp-arithmetic = { workspace = true } bridge-hub-common = { workspace = true } snowbridge-core = { features = ["serde"], workspace = true } -snowbridge-outbound-queue-merkle-tree = { workspace = true } +snowbridge-merkle-tree = { workspace = true } ethabi = { workspace = true } [dev-dependencies] @@ -51,7 +51,7 @@ std = [ "scale-info/std", "serde/std", "snowbridge-core/std", - "snowbridge-outbound-queue-merkle-tree/std", + "snowbridge-merkle-tree/std", "sp-arithmetic/std", "sp-core/std", "sp-io/std", diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/README.md b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/README.md deleted file mode 100644 index a3afef1d6713..000000000000 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Snowbridge Outbound Queue Merkle Tree - -This crate implements a simple binary Merkle Tree utilities required for inter-op with Ethereum -bridge & Solidity contract. 
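The `do_process_message` figure above is what the `ProcessMessage` implementation earlier in this patch charges against the message queue's weight meter before dispatching a committed message. A minimal sketch of that gating pattern, using only the `WeightMeter` calls already exercised by the tests (the helper name `charge_before_dispatch` and the weight values are illustrative, not part of the patch):

    use frame_support::{traits::ProcessMessageError, weights::{Weight, WeightMeter}};

    fn charge_before_dispatch(
        meter: &mut WeightMeter,
        required: Weight,
    ) -> Result<(), ProcessMessageError> {
        // Refuse to start work unless the meter can absorb the whole benchmarked weight,
        // mirroring the early `Overweight` return in `process_message` above.
        meter.try_consume(required).map_err(|_| ProcessMessageError::Overweight(required))
    }

    fn main() {
        let required = Weight::from_parts(39_000_000, 3485);
        // A fresh meter (limit `Weight::MAX`) accepts the charge...
        assert!(charge_before_dispatch(&mut WeightMeter::new(), required).is_ok());
        // ...while a nearly exhausted one reports the message as overweight.
        assert!(charge_before_dispatch(
            &mut WeightMeter::with_limit(Weight::from_parts(1, 1)),
            required
        )
        .is_err());
    }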
diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index d35bdde5a81e..f050db9378a9 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -19,7 +19,7 @@ codec = { features = ["derive"], workspace = true } sp-std = { workspace = true } sp-api = { workspace = true } frame-support = { workspace = true } -snowbridge-outbound-queue-merkle-tree = { workspace = true } +snowbridge-merkle-tree = { workspace = true } snowbridge-core = { workspace = true } [features] @@ -28,7 +28,7 @@ std = [ "codec/std", "frame-support/std", "snowbridge-core/std", - "snowbridge-outbound-queue-merkle-tree/std", + "snowbridge-merkle-tree/std", "sp-api/std", "sp-std/std", ] diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs index e6ddaa439352..ecd2de682268 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs @@ -4,10 +4,10 @@ use frame_support::traits::tokens::Balance as BalanceT; use snowbridge_core::{ - outbound::{Command, Fee}, + outbound::v1::{Command, Fee}, PricingParameters, }; -use snowbridge_outbound_queue_merkle_tree::MerkleProof; +use snowbridge_merkle_tree::MerkleProof; sp_api::decl_runtime_apis! { pub trait OutboundQueueApi where Balance: BalanceT diff --git a/bridges/snowbridge/pallets/outbound-queue/src/api.rs b/bridges/snowbridge/pallets/outbound-queue/src/api.rs index b904819b1b18..08f4f1561968 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/api.rs @@ -5,10 +5,10 @@ use crate::{Config, MessageLeaves}; use frame_support::storage::StorageStreamIter; use snowbridge_core::{ - outbound::{Command, Fee, GasMeter}, + outbound::v1::{Command, Fee, GasMeter}, PricingParameters, }; -use snowbridge_outbound_queue_merkle_tree::{merkle_proof, MerkleProof}; +use snowbridge_merkle_tree::{merkle_proof, MerkleProof}; use sp_core::Get; pub fn prove_message(leaf_index: u64) -> Option diff --git a/bridges/snowbridge/pallets/outbound-queue/src/benchmarking.rs b/bridges/snowbridge/pallets/outbound-queue/src/benchmarking.rs index ee5754e86962..0eff490b1ae4 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/benchmarking.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/benchmarking.rs @@ -6,7 +6,7 @@ use bridge_hub_common::AggregateMessageOrigin; use codec::Encode; use frame_benchmarking::v2::*; use snowbridge_core::{ - outbound::{Command, Initializer}, + outbound::v1::{Command, Initializer}, ChannelId, }; use sp_core::{H160, H256}; diff --git a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs index 9b9dbe854a5e..0d43519167af 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs @@ -111,11 +111,10 @@ use frame_support::{ weights::{Weight, WeightToFee}, }; use snowbridge_core::{ - outbound::{Fee, GasMeter, QueuedMessage, VersionedQueuedMessage, ETHER_DECIMALS}, + outbound::v1::{Fee, GasMeter, QueuedMessage, VersionedQueuedMessage, ETHER_DECIMALS}, BasicOperatingMode, ChannelId, }; -use snowbridge_outbound_queue_merkle_tree::merkle_root; -pub use snowbridge_outbound_queue_merkle_tree::MerkleProof; +use snowbridge_merkle_tree::merkle_root; use 
sp_core::{H256, U256}; use sp_runtime::{ traits::{CheckedDiv, Hash}, diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index 0b34893333e4..d7bc4a8bcb5d 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -10,7 +10,7 @@ use frame_support::{ use snowbridge_core::{ gwei, meth, - outbound::*, + outbound::v1::*, pricing::{PricingParameters, Rewards}, ParaId, PRIMARY_GOVERNANCE_CHANNEL, }; diff --git a/bridges/snowbridge/pallets/outbound-queue/src/send_message_impl.rs b/bridges/snowbridge/pallets/outbound-queue/src/send_message_impl.rs index 03be61819973..39b41b1c792a 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/send_message_impl.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/send_message_impl.rs @@ -12,8 +12,8 @@ use frame_support::{ use frame_system::unique; use snowbridge_core::{ outbound::{ - Fee, Message, QueuedMessage, SendError, SendMessage, SendMessageFeeProvider, - VersionedQueuedMessage, + v1::{Fee, Message, QueuedMessage, SendMessage, VersionedQueuedMessage}, + SendError, SendMessageFeeProvider, }, ChannelId, PRIMARY_GOVERNANCE_CHANNEL, }; diff --git a/bridges/snowbridge/pallets/outbound-queue/src/test.rs b/bridges/snowbridge/pallets/outbound-queue/src/test.rs index 4e9ea36e24bc..36227817f368 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/test.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/test.rs @@ -10,7 +10,10 @@ use frame_support::{ use codec::Encode; use snowbridge_core::{ - outbound::{Command, SendError, SendMessage}, + outbound::{ + v1::{Command, SendMessage}, + SendError, + }, ParaId, PricingParameters, Rewards, }; use sp_arithmetic::FixedU128; diff --git a/bridges/snowbridge/pallets/outbound-queue/src/types.rs b/bridges/snowbridge/pallets/outbound-queue/src/types.rs index f65a15d3d2f9..76d32fab462a 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/types.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/types.rs @@ -4,15 +4,13 @@ use codec::{Decode, Encode}; use ethabi::Token; use frame_support::traits::ProcessMessage; use scale_info::TypeInfo; +use snowbridge_core::ChannelId; use sp_core::H256; use sp_runtime::RuntimeDebug; use sp_std::prelude::*; use super::Pallet; -use snowbridge_core::ChannelId; -pub use snowbridge_outbound_queue_merkle_tree::MerkleProof; - pub type ProcessMessageOriginOf = as ProcessMessage>::Origin; pub const LOG_TARGET: &str = "snowbridge-outbound-queue"; diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index eb3da095fe85..52cc28b7de75 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -68,7 +68,11 @@ use frame_support::{ use frame_system::pallet_prelude::*; use snowbridge_core::{ meth, - outbound::{Command, Initializer, Message, OperatingMode, SendError, SendMessage}, + outbound::{ + v1::{Command, Initializer, Message, SendMessage}, + v2::{Command as CommandV2, Message as MessageV2, SendMessage as SendMessageV2}, + OperatingMode, SendError, + }, sibling_sovereign_account, AgentId, AssetMetadata, Channel, ChannelId, ParaId, PricingParameters as PricingParametersRecord, TokenId, TokenIdOf, PRIMARY_GOVERNANCE_CHANNEL, SECONDARY_GOVERNANCE_CHANNEL, @@ -137,7 +141,7 @@ where #[frame_support::pallet] pub mod pallet { use frame_support::dispatch::PostDispatchInfo; - use snowbridge_core::StaticLookup; + use 
snowbridge_core::{outbound::v2::second_governance_origin, StaticLookup}; use sp_core::U256; use super::*; @@ -152,6 +156,8 @@ pub mod pallet { /// Send messages to Ethereum type OutboundQueue: SendMessage>; + type OutboundQueueV2: SendMessageV2>; + /// Origin check for XCM locations that can create agents type SiblingOrigin: EnsureOrigin; @@ -635,6 +641,34 @@ pub mod pallet { pays_fee: Pays::No, }) } + + /// Registers a Polkadot-native token as a wrapped ERC20 token on Ethereum. + /// Privileged. Can only be called by root. + /// + /// Fee required: No + /// + /// - `origin`: Must be root + /// - `location`: Location of the asset (relative to this chain) + /// - `metadata`: Metadata to include in the instantiated ERC20 contract on Ethereum + #[pallet::call_index(11)] + #[pallet::weight(T::WeightInfo::register_token())] + pub fn register_token_v2( + origin: OriginFor, + location: Box, + metadata: AssetMetadata, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + + let location: Location = + (*location).try_into().map_err(|_| Error::::UnsupportedLocationVersion)?; + + Self::do_register_token_v2(&location, metadata, PaysFee::::No)?; + + Ok(PostDispatchInfo { + actual_weight: Some(T::WeightInfo::register_token()), + pays_fee: Pays::No, + }) + } } impl Pallet { @@ -760,6 +794,72 @@ pub mod pallet { Ok(()) } + + pub(crate) fn do_register_token_v2( + location: &Location, + metadata: AssetMetadata, + pays_fee: PaysFee, + ) -> Result<(), DispatchError> { + let ethereum_location = T::EthereumLocation::get(); + // reanchor to Ethereum context + let location = location + .clone() + .reanchored(ðereum_location, &T::UniversalLocation::get()) + .map_err(|_| Error::::LocationConversionFailed)?; + + let token_id = TokenIdOf::convert_location(&location) + .ok_or(Error::::LocationConversionFailed)?; + + if !ForeignToNativeId::::contains_key(token_id) { + NativeToForeignId::::insert(location.clone(), token_id); + ForeignToNativeId::::insert(token_id, location.clone()); + } + + let command = CommandV2::RegisterForeignToken { + token_id, + name: metadata.name.into_inner(), + symbol: metadata.symbol.into_inner(), + decimals: metadata.decimals, + }; + Self::send_v2(second_governance_origin(), command, pays_fee)?; + + Self::deposit_event(Event::::RegisterToken { + location: location.clone().into(), + foreign_token_id: token_id, + }); + + Ok(()) + } + + /// Send `command` to the Gateway on the Channel identified by `channel_id` + fn send_v2(origin: H256, command: CommandV2, pays_fee: PaysFee) -> DispatchResult { + let message = MessageV2 { + origin, + id: Default::default(), + fee: Default::default(), + commands: BoundedVec::try_from(vec![command]).unwrap(), + }; + + let (ticket, fee) = + T::OutboundQueueV2::validate(&message).map_err(|err| Error::::Send(err))?; + + let payment = match pays_fee { + PaysFee::Yes(account) | PaysFee::Partial(account) => Some((account, fee.total())), + PaysFee::No => None, + }; + + if let Some((payer, fee)) = payment { + T::Token::transfer( + &payer, + &T::TreasuryAccount::get(), + fee, + Preservation::Preserve, + )?; + } + + T::OutboundQueueV2::deliver(ticket).map_err(|err| Error::::Send(err))?; + Ok(()) + } } impl StaticLookup for Pallet { diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index 47b089866a53..f20f8886450f 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -11,8 +11,9 @@ use sp_core::H256; use xcm_executor::traits::ConvertLocation; use 
snowbridge_core::{ - gwei, meth, outbound::ConstantGasMeter, sibling_sovereign_account, AgentId, AllowSiblingsOnly, - ParaId, PricingParameters, Rewards, + gwei, meth, + outbound::{v1::ConstantGasMeter, v2::DefaultOutboundQueue}, + sibling_sovereign_account, AgentId, AllowSiblingsOnly, ParaId, PricingParameters, Rewards, }; use sp_runtime::{ traits::{AccountIdConversion, BlakeTwo256, IdentityLookup, Keccak256}, @@ -213,6 +214,7 @@ impl crate::Config for Test { type EthereumLocation = EthereumDestination; #[cfg(feature = "runtime-benchmarks")] type Helper = (); + type OutboundQueueV2 = DefaultOutboundQueue; } // Build genesis storage according to the mock runtime. diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index fa37c795b2d1..0e696f0d2256 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -32,6 +32,8 @@ sp-arithmetic = { workspace = true } snowbridge-beacon-primitives = { workspace = true } ethabi = { workspace = true } +alloy-primitives = { features = ["rlp"], workspace = true } +alloy-sol-types = { workspace = true } [dev-dependencies] hex = { workspace = true, default-features = true } @@ -40,6 +42,8 @@ xcm-executor = { workspace = true, default-features = true } [features] default = ["std"] std = [ + "alloy-primitives/std", + "alloy-sol-types/std", "codec/std", "ethabi/std", "frame-support/std", diff --git a/bridges/snowbridge/primitives/core/src/lib.rs b/bridges/snowbridge/primitives/core/src/lib.rs index 7ad129a52542..88ac8124a15b 100644 --- a/bridges/snowbridge/primitives/core/src/lib.rs +++ b/bridges/snowbridge/primitives/core/src/lib.rs @@ -13,6 +13,7 @@ pub mod location; pub mod operating_mode; pub mod outbound; pub mod pricing; +pub mod reward; pub mod ringbuffer; pub use location::{AgentId, AgentIdOf, TokenId, TokenIdOf}; @@ -37,6 +38,8 @@ pub use operating_mode::BasicOperatingMode; pub use pricing::{PricingParameters, Rewards}; +pub use reward::RewardLedger; + pub fn sibling_sovereign_account(para_id: ParaId) -> T::AccountId where T: frame_system::Config, diff --git a/bridges/snowbridge/primitives/core/src/outbound.rs b/bridges/snowbridge/primitives/core/src/outbound.rs deleted file mode 100644 index 77770761822a..000000000000 --- a/bridges/snowbridge/primitives/core/src/outbound.rs +++ /dev/null @@ -1,475 +0,0 @@ -use codec::{Decode, Encode}; -use frame_support::PalletError; -use scale_info::TypeInfo; -use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; -use sp_core::{RuntimeDebug, H256}; -pub use v1::{AgentExecuteCommand, Command, Initializer, Message, OperatingMode, QueuedMessage}; - -/// Enqueued outbound messages need to be versioned to prevent data corruption -/// or loss after forkless runtime upgrades -#[derive(Encode, Decode, TypeInfo, Clone, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(PartialEq))] -pub enum VersionedQueuedMessage { - V1(QueuedMessage), -} - -impl TryFrom for QueuedMessage { - type Error = (); - fn try_from(x: VersionedQueuedMessage) -> Result { - use VersionedQueuedMessage::*; - match x { - V1(x) => Ok(x), - } - } -} - -impl> From for VersionedQueuedMessage { - fn from(x: T) -> Self { - VersionedQueuedMessage::V1(x.into()) - } -} - -mod v1 { - use crate::{pricing::UD60x18, ChannelId}; - use codec::{Decode, Encode}; - use ethabi::Token; - use scale_info::TypeInfo; - use sp_core::{RuntimeDebug, H160, H256, U256}; - use sp_std::{borrow::ToOwned, vec, vec::Vec}; - - /// A message which can be accepted by implementations 
of `/[`SendMessage`\]` - #[derive(Encode, Decode, TypeInfo, Clone, RuntimeDebug)] - #[cfg_attr(feature = "std", derive(PartialEq))] - pub struct Message { - /// ID for this message. One will be automatically generated if not provided. - /// - /// When this message is created from an XCM message, the ID should be extracted - /// from the `SetTopic` instruction. - /// - /// The ID plays no role in bridge consensus, and is purely meant for message tracing. - pub id: Option, - /// The message channel ID - pub channel_id: ChannelId, - /// The stable ID for a receiving gateway contract - pub command: Command, - } - - /// The operating mode of Channels and Gateway contract on Ethereum. - #[derive(Copy, Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)] - pub enum OperatingMode { - /// Normal operations. Allow sending and receiving messages. - Normal, - /// Reject outbound messages. This allows receiving governance messages but does now allow - /// enqueuing of new messages from the Ethereum side. This can be used to close off an - /// deprecated channel or pause the bridge for upgrade operations. - RejectingOutboundMessages, - } - - /// A command which is executable by the Gateway contract on Ethereum - #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] - #[cfg_attr(feature = "std", derive(PartialEq))] - pub enum Command { - /// Execute a sub-command within an agent for a consensus system in Polkadot - AgentExecute { - /// The ID of the agent - agent_id: H256, - /// The sub-command to be executed - command: AgentExecuteCommand, - }, - /// Upgrade the Gateway contract - Upgrade { - /// Address of the new implementation contract - impl_address: H160, - /// Codehash of the implementation contract - impl_code_hash: H256, - /// Optionally invoke an initializer in the implementation contract - initializer: Option, - }, - /// Create an agent representing a consensus system on Polkadot - CreateAgent { - /// The ID of the agent, derived from the `MultiLocation` of the consensus system on - /// Polkadot - agent_id: H256, - }, - /// Create bidirectional messaging channel to a parachain - CreateChannel { - /// The ID of the channel - channel_id: ChannelId, - /// The agent ID of the parachain - agent_id: H256, - /// Initial operating mode - mode: OperatingMode, - }, - /// Update the configuration of a channel - UpdateChannel { - /// The ID of the channel - channel_id: ChannelId, - /// The new operating mode - mode: OperatingMode, - }, - /// Set the global operating mode of the Gateway contract - SetOperatingMode { - /// The new operating mode - mode: OperatingMode, - }, - /// Transfer ether from an agent contract to a recipient account - TransferNativeFromAgent { - /// The agent ID - agent_id: H256, - /// The recipient of the ether - recipient: H160, - /// The amount to transfer - amount: u128, - }, - /// Set token fees of the Gateway contract - SetTokenTransferFees { - /// The fee(DOT) for the cost of creating asset on AssetHub - create_asset_xcm: u128, - /// The fee(DOT) for the cost of sending asset on AssetHub - transfer_asset_xcm: u128, - /// The fee(Ether) for register token to discourage spamming - register_token: U256, - }, - /// Set pricing parameters - SetPricingParameters { - // ETH/DOT exchange rate - exchange_rate: UD60x18, - // Cost of delivering a message from Ethereum to BridgeHub, in ROC/KSM/DOT - delivery_cost: u128, - // Fee multiplier - multiplier: UD60x18, - }, - /// Transfer ERC20 tokens - TransferNativeToken { - /// ID of the agent - agent_id: H256, - /// Address of 
the ERC20 token - token: H160, - /// The recipient of the tokens - recipient: H160, - /// The amount of tokens to transfer - amount: u128, - }, - /// Register foreign token from Polkadot - RegisterForeignToken { - /// ID for the token - token_id: H256, - /// Name of the token - name: Vec, - /// Short symbol for the token - symbol: Vec, - /// Number of decimal places - decimals: u8, - }, - /// Mint foreign token from Polkadot - MintForeignToken { - /// ID for the token - token_id: H256, - /// The recipient of the newly minted tokens - recipient: H160, - /// The amount of tokens to mint - amount: u128, - }, - } - - impl Command { - /// Compute the enum variant index - pub fn index(&self) -> u8 { - match self { - Command::AgentExecute { .. } => 0, - Command::Upgrade { .. } => 1, - Command::CreateAgent { .. } => 2, - Command::CreateChannel { .. } => 3, - Command::UpdateChannel { .. } => 4, - Command::SetOperatingMode { .. } => 5, - Command::TransferNativeFromAgent { .. } => 6, - Command::SetTokenTransferFees { .. } => 7, - Command::SetPricingParameters { .. } => 8, - Command::TransferNativeToken { .. } => 9, - Command::RegisterForeignToken { .. } => 10, - Command::MintForeignToken { .. } => 11, - } - } - - /// ABI-encode the Command. - pub fn abi_encode(&self) -> Vec { - match self { - Command::AgentExecute { agent_id, command } => - ethabi::encode(&[Token::Tuple(vec![ - Token::FixedBytes(agent_id.as_bytes().to_owned()), - Token::Bytes(command.abi_encode()), - ])]), - Command::Upgrade { impl_address, impl_code_hash, initializer, .. } => - ethabi::encode(&[Token::Tuple(vec![ - Token::Address(*impl_address), - Token::FixedBytes(impl_code_hash.as_bytes().to_owned()), - initializer - .clone() - .map_or(Token::Bytes(vec![]), |i| Token::Bytes(i.params)), - ])]), - Command::CreateAgent { agent_id } => - ethabi::encode(&[Token::Tuple(vec![Token::FixedBytes( - agent_id.as_bytes().to_owned(), - )])]), - Command::CreateChannel { channel_id, agent_id, mode } => - ethabi::encode(&[Token::Tuple(vec![ - Token::FixedBytes(channel_id.as_ref().to_owned()), - Token::FixedBytes(agent_id.as_bytes().to_owned()), - Token::Uint(U256::from((*mode) as u64)), - ])]), - Command::UpdateChannel { channel_id, mode } => - ethabi::encode(&[Token::Tuple(vec![ - Token::FixedBytes(channel_id.as_ref().to_owned()), - Token::Uint(U256::from((*mode) as u64)), - ])]), - Command::SetOperatingMode { mode } => - ethabi::encode(&[Token::Tuple(vec![Token::Uint(U256::from((*mode) as u64))])]), - Command::TransferNativeFromAgent { agent_id, recipient, amount } => - ethabi::encode(&[Token::Tuple(vec![ - Token::FixedBytes(agent_id.as_bytes().to_owned()), - Token::Address(*recipient), - Token::Uint(U256::from(*amount)), - ])]), - Command::SetTokenTransferFees { - create_asset_xcm, - transfer_asset_xcm, - register_token, - } => ethabi::encode(&[Token::Tuple(vec![ - Token::Uint(U256::from(*create_asset_xcm)), - Token::Uint(U256::from(*transfer_asset_xcm)), - Token::Uint(*register_token), - ])]), - Command::SetPricingParameters { exchange_rate, delivery_cost, multiplier } => - ethabi::encode(&[Token::Tuple(vec![ - Token::Uint(exchange_rate.clone().into_inner()), - Token::Uint(U256::from(*delivery_cost)), - Token::Uint(multiplier.clone().into_inner()), - ])]), - Command::TransferNativeToken { agent_id, token, recipient, amount } => - ethabi::encode(&[Token::Tuple(vec![ - Token::FixedBytes(agent_id.as_bytes().to_owned()), - Token::Address(*token), - Token::Address(*recipient), - Token::Uint(U256::from(*amount)), - ])]), - 
Command::RegisterForeignToken { token_id, name, symbol, decimals } => - ethabi::encode(&[Token::Tuple(vec![ - Token::FixedBytes(token_id.as_bytes().to_owned()), - Token::String(name.to_owned()), - Token::String(symbol.to_owned()), - Token::Uint(U256::from(*decimals)), - ])]), - Command::MintForeignToken { token_id, recipient, amount } => - ethabi::encode(&[Token::Tuple(vec![ - Token::FixedBytes(token_id.as_bytes().to_owned()), - Token::Address(*recipient), - Token::Uint(U256::from(*amount)), - ])]), - } - } - } - - /// Representation of a call to the initializer of an implementation contract. - /// The initializer has the following ABI signature: `initialize(bytes)`. - #[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] - pub struct Initializer { - /// ABI-encoded params of type `bytes` to pass to the initializer - pub params: Vec, - /// The initializer is allowed to consume this much gas at most. - pub maximum_required_gas: u64, - } - - /// A Sub-command executable within an agent - #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] - #[cfg_attr(feature = "std", derive(PartialEq))] - pub enum AgentExecuteCommand { - /// Transfer ERC20 tokens - TransferToken { - /// Address of the ERC20 token - token: H160, - /// The recipient of the tokens - recipient: H160, - /// The amount of tokens to transfer - amount: u128, - }, - } - - impl AgentExecuteCommand { - fn index(&self) -> u8 { - match self { - AgentExecuteCommand::TransferToken { .. } => 0, - } - } - - /// ABI-encode the sub-command - pub fn abi_encode(&self) -> Vec { - match self { - AgentExecuteCommand::TransferToken { token, recipient, amount } => - ethabi::encode(&[ - Token::Uint(self.index().into()), - Token::Bytes(ethabi::encode(&[ - Token::Address(*token), - Token::Address(*recipient), - Token::Uint(U256::from(*amount)), - ])), - ]), - } - } - } - - /// Message which is awaiting processing in the MessageQueue pallet - #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] - #[cfg_attr(feature = "std", derive(PartialEq))] - pub struct QueuedMessage { - /// Message ID - pub id: H256, - /// Channel ID - pub channel_id: ChannelId, - /// Command to execute in the Gateway contract - pub command: Command, - } -} - -#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] -#[cfg_attr(feature = "std", derive(PartialEq))] -/// Fee for delivering message -pub struct Fee -where - Balance: BaseArithmetic + Unsigned + Copy, -{ - /// Fee to cover cost of processing the message locally - pub local: Balance, - /// Fee to cover cost processing the message remotely - pub remote: Balance, -} - -impl Fee -where - Balance: BaseArithmetic + Unsigned + Copy, -{ - pub fn total(&self) -> Balance { - self.local.saturating_add(self.remote) - } -} - -impl From<(Balance, Balance)> for Fee -where - Balance: BaseArithmetic + Unsigned + Copy, -{ - fn from((local, remote): (Balance, Balance)) -> Self { - Self { local, remote } - } -} - -/// A trait for sending messages to Ethereum -pub trait SendMessage: SendMessageFeeProvider { - type Ticket: Clone + Encode + Decode; - - /// Validate an outbound message and return a tuple: - /// 1. Ticket for submitting the message - /// 2. 
Delivery fee - fn validate( - message: &Message, - ) -> Result<(Self::Ticket, Fee<::Balance>), SendError>; - - /// Submit the message ticket for eventual delivery to Ethereum - fn deliver(ticket: Self::Ticket) -> Result; -} - -pub trait Ticket: Encode + Decode + Clone { - fn message_id(&self) -> H256; -} - -/// A trait for getting the local costs associated with sending a message. -pub trait SendMessageFeeProvider { - type Balance: BaseArithmetic + Unsigned + Copy; - - /// The local component of the message processing fees in native currency - fn local_fee() -> Self::Balance; -} - -/// Reasons why sending to Ethereum could not be initiated -#[derive(Copy, Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, PalletError, TypeInfo)] -pub enum SendError { - /// Message is too large to be safely executed on Ethereum - MessageTooLarge, - /// The bridge has been halted for maintenance - Halted, - /// Invalid Channel - InvalidChannel, -} - -pub trait GasMeter { - /// All the gas used for submitting a message to Ethereum, minus the cost of dispatching - /// the command within the message - const MAXIMUM_BASE_GAS: u64; - - /// Total gas consumed at most, including verification & dispatch - fn maximum_gas_used_at_most(command: &Command) -> u64 { - Self::MAXIMUM_BASE_GAS + Self::maximum_dispatch_gas_used_at_most(command) - } - - /// Measures the maximum amount of gas a command payload will require to *dispatch*, NOT - /// including validation & verification. - fn maximum_dispatch_gas_used_at_most(command: &Command) -> u64; -} - -/// A meter that assigns a constant amount of gas for the execution of a command -/// -/// The gas figures are extracted from this report: -/// > forge test --match-path test/Gateway.t.sol --gas-report -/// -/// A healthy buffer is added on top of these figures to account for: -/// * The EIP-150 63/64 rule -/// * Future EVM upgrades that may increase gas cost -pub struct ConstantGasMeter; - -impl GasMeter for ConstantGasMeter { - // The base transaction cost, which includes: - // 21_000 transaction cost, roughly worst case 64_000 for calldata, and 100_000 - // for message verification - const MAXIMUM_BASE_GAS: u64 = 185_000; - - fn maximum_dispatch_gas_used_at_most(command: &Command) -> u64 { - match command { - Command::CreateAgent { .. } => 275_000, - Command::CreateChannel { .. } => 100_000, - Command::UpdateChannel { .. } => 50_000, - Command::TransferNativeFromAgent { .. } => 60_000, - Command::SetOperatingMode { .. } => 40_000, - Command::AgentExecute { command, .. } => match command { - // Execute IERC20.transferFrom - // - // Worst-case assumptions are important: - // * No gas refund for clearing storage slot of source account in ERC20 contract - // * Assume dest account in ERC20 contract does not yet have a storage slot - // * ERC20.transferFrom possibly does other business logic besides updating balances - AgentExecuteCommand::TransferToken { .. } => 100_000, - }, - Command::Upgrade { initializer, .. } => { - let initializer_max_gas = match *initializer { - Some(Initializer { maximum_required_gas, .. }) => maximum_required_gas, - None => 0, - }; - // total maximum gas must also include the gas used for updating the proxy before - // the the initializer is called. - 50_000 + initializer_max_gas - }, - Command::SetTokenTransferFees { .. } => 60_000, - Command::SetPricingParameters { .. } => 60_000, - Command::TransferNativeToken { .. } => 100_000, - Command::RegisterForeignToken { .. } => 1_200_000, - Command::MintForeignToken { .. 
} => 100_000, - } - } -} - -impl GasMeter for () { - const MAXIMUM_BASE_GAS: u64 = 1; - - fn maximum_dispatch_gas_used_at_most(_: &Command) -> u64 { - 1 - } -} - -pub const ETHER_DECIMALS: u8 = 18; diff --git a/bridges/snowbridge/primitives/core/src/outbound/mod.rs b/bridges/snowbridge/primitives/core/src/outbound/mod.rs new file mode 100644 index 000000000000..0aa60f479195 --- /dev/null +++ b/bridges/snowbridge/primitives/core/src/outbound/mod.rs @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! # Outbound +//! +//! Common traits and types +use codec::{Decode, Encode}; +use frame_support::PalletError; +use scale_info::TypeInfo; +use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; +use sp_core::RuntimeDebug; + +pub mod v1; +pub mod v2; + +/// The operating mode of Channels and Gateway contract on Ethereum. +#[derive(Copy, Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub enum OperatingMode { + /// Normal operations. Allow sending and receiving messages. + Normal, + /// Reject outbound messages. This allows receiving governance messages but does now allow + /// enqueuing of new messages from the Ethereum side. This can be used to close off an + /// deprecated channel or pause the bridge for upgrade operations. + RejectingOutboundMessages, +} + +/// A trait for getting the local costs associated with sending a message. +pub trait SendMessageFeeProvider { + type Balance: BaseArithmetic + Unsigned + Copy; + + /// The local component of the message processing fees in native currency + fn local_fee() -> Self::Balance; +} + +/// Reasons why sending to Ethereum could not be initiated +#[derive(Copy, Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, PalletError, TypeInfo)] +pub enum SendError { + /// Message is too large to be safely executed on Ethereum + MessageTooLarge, + /// The bridge has been halted for maintenance + Halted, + /// Invalid Channel + InvalidChannel, +} + +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum DryRunError { + ConvertLocationFailed, + ConvertXcmFailed, +} diff --git a/bridges/snowbridge/primitives/core/src/outbound/v1.rs b/bridges/snowbridge/primitives/core/src/outbound/v1.rs new file mode 100644 index 000000000000..037fc21db017 --- /dev/null +++ b/bridges/snowbridge/primitives/core/src/outbound/v1.rs @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! 
# Outbound V1 primitives + +use crate::{ + outbound::{OperatingMode, SendError, SendMessageFeeProvider}, + pricing::UD60x18, + ChannelId, +}; +use codec::{Decode, Encode}; +use ethabi::Token; +use scale_info::TypeInfo; +use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; +use sp_core::{RuntimeDebug, H160, H256, U256}; +use sp_std::{borrow::ToOwned, vec, vec::Vec}; + +/// Enqueued outbound messages need to be versioned to prevent data corruption +/// or loss after forkless runtime upgrades +#[derive(Encode, Decode, TypeInfo, Clone, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(PartialEq))] +pub enum VersionedQueuedMessage { + V1(QueuedMessage), +} + +impl TryFrom for QueuedMessage { + type Error = (); + fn try_from(x: VersionedQueuedMessage) -> Result { + use VersionedQueuedMessage::*; + match x { + V1(x) => Ok(x), + } + } +} + +impl> From for VersionedQueuedMessage { + fn from(x: T) -> Self { + VersionedQueuedMessage::V1(x.into()) + } +} + +/// A message which can be accepted by implementations of `/[`SendMessage`\]` +#[derive(Encode, Decode, TypeInfo, Clone, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(PartialEq))] +pub struct Message { + /// ID for this message. One will be automatically generated if not provided. + /// + /// When this message is created from an XCM message, the ID should be extracted + /// from the `SetTopic` instruction. + /// + /// The ID plays no role in bridge consensus, and is purely meant for message tracing. + pub id: Option, + /// The message channel ID + pub channel_id: ChannelId, + /// The stable ID for a receiving gateway contract + pub command: Command, +} + +/// A command which is executable by the Gateway contract on Ethereum +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(PartialEq))] +pub enum Command { + /// Execute a sub-command within an agent for a consensus system in Polkadot + AgentExecute { + /// The ID of the agent + agent_id: H256, + /// The sub-command to be executed + command: AgentExecuteCommand, + }, + /// Upgrade the Gateway contract + Upgrade { + /// Address of the new implementation contract + impl_address: H160, + /// Codehash of the implementation contract + impl_code_hash: H256, + /// Optionally invoke an initializer in the implementation contract + initializer: Option, + }, + /// Create an agent representing a consensus system on Polkadot + CreateAgent { + /// The ID of the agent, derived from the `MultiLocation` of the consensus system on + /// Polkadot + agent_id: H256, + }, + /// Create bidirectional messaging channel to a parachain + CreateChannel { + /// The ID of the channel + channel_id: ChannelId, + /// The agent ID of the parachain + agent_id: H256, + /// Initial operating mode + mode: OperatingMode, + }, + /// Update the configuration of a channel + UpdateChannel { + /// The ID of the channel + channel_id: ChannelId, + /// The new operating mode + mode: OperatingMode, + }, + /// Set the global operating mode of the Gateway contract + SetOperatingMode { + /// The new operating mode + mode: OperatingMode, + }, + /// Transfer ether from an agent contract to a recipient account + TransferNativeFromAgent { + /// The agent ID + agent_id: H256, + /// The recipient of the ether + recipient: H160, + /// The amount to transfer + amount: u128, + }, + /// Set token fees of the Gateway contract + SetTokenTransferFees { + /// The fee(DOT) for the cost of creating asset on AssetHub + create_asset_xcm: u128, + /// The fee(DOT) for the cost of sending asset on AssetHub + 
transfer_asset_xcm: u128, + /// The fee(Ether) for register token to discourage spamming + register_token: U256, + }, + /// Set pricing parameters + SetPricingParameters { + // ETH/DOT exchange rate + exchange_rate: UD60x18, + // Cost of delivering a message from Ethereum to BridgeHub, in ROC/KSM/DOT + delivery_cost: u128, + // Fee multiplier + multiplier: UD60x18, + }, + /// Transfer ERC20 tokens + TransferNativeToken { + /// ID of the agent + agent_id: H256, + /// Address of the ERC20 token + token: H160, + /// The recipient of the tokens + recipient: H160, + /// The amount of tokens to transfer + amount: u128, + }, + /// Register foreign token from Polkadot + RegisterForeignToken { + /// ID for the token + token_id: H256, + /// Name of the token + name: Vec, + /// Short symbol for the token + symbol: Vec, + /// Number of decimal places + decimals: u8, + }, + /// Mint foreign token from Polkadot + MintForeignToken { + /// ID for the token + token_id: H256, + /// The recipient of the newly minted tokens + recipient: H160, + /// The amount of tokens to mint + amount: u128, + }, +} + +impl Command { + /// Compute the enum variant index + pub fn index(&self) -> u8 { + match self { + Command::AgentExecute { .. } => 0, + Command::Upgrade { .. } => 1, + Command::CreateAgent { .. } => 2, + Command::CreateChannel { .. } => 3, + Command::UpdateChannel { .. } => 4, + Command::SetOperatingMode { .. } => 5, + Command::TransferNativeFromAgent { .. } => 6, + Command::SetTokenTransferFees { .. } => 7, + Command::SetPricingParameters { .. } => 8, + Command::TransferNativeToken { .. } => 9, + Command::RegisterForeignToken { .. } => 10, + Command::MintForeignToken { .. } => 11, + } + } + + /// ABI-encode the Command. + pub fn abi_encode(&self) -> Vec { + match self { + Command::AgentExecute { agent_id, command } => ethabi::encode(&[Token::Tuple(vec![ + Token::FixedBytes(agent_id.as_bytes().to_owned()), + Token::Bytes(command.abi_encode()), + ])]), + Command::Upgrade { impl_address, impl_code_hash, initializer, .. 
} => + ethabi::encode(&[Token::Tuple(vec![ + Token::Address(*impl_address), + Token::FixedBytes(impl_code_hash.as_bytes().to_owned()), + initializer.clone().map_or(Token::Bytes(vec![]), |i| Token::Bytes(i.params)), + ])]), + Command::CreateAgent { agent_id } => + ethabi::encode(&[Token::Tuple(vec![Token::FixedBytes( + agent_id.as_bytes().to_owned(), + )])]), + Command::CreateChannel { channel_id, agent_id, mode } => + ethabi::encode(&[Token::Tuple(vec![ + Token::FixedBytes(channel_id.as_ref().to_owned()), + Token::FixedBytes(agent_id.as_bytes().to_owned()), + Token::Uint(U256::from((*mode) as u64)), + ])]), + Command::UpdateChannel { channel_id, mode } => ethabi::encode(&[Token::Tuple(vec![ + Token::FixedBytes(channel_id.as_ref().to_owned()), + Token::Uint(U256::from((*mode) as u64)), + ])]), + Command::SetOperatingMode { mode } => + ethabi::encode(&[Token::Tuple(vec![Token::Uint(U256::from((*mode) as u64))])]), + Command::TransferNativeFromAgent { agent_id, recipient, amount } => + ethabi::encode(&[Token::Tuple(vec![ + Token::FixedBytes(agent_id.as_bytes().to_owned()), + Token::Address(*recipient), + Token::Uint(U256::from(*amount)), + ])]), + Command::SetTokenTransferFees { + create_asset_xcm, + transfer_asset_xcm, + register_token, + } => ethabi::encode(&[Token::Tuple(vec![ + Token::Uint(U256::from(*create_asset_xcm)), + Token::Uint(U256::from(*transfer_asset_xcm)), + Token::Uint(*register_token), + ])]), + Command::SetPricingParameters { exchange_rate, delivery_cost, multiplier } => + ethabi::encode(&[Token::Tuple(vec![ + Token::Uint(exchange_rate.clone().into_inner()), + Token::Uint(U256::from(*delivery_cost)), + Token::Uint(multiplier.clone().into_inner()), + ])]), + Command::TransferNativeToken { agent_id, token, recipient, amount } => + ethabi::encode(&[Token::Tuple(vec![ + Token::FixedBytes(agent_id.as_bytes().to_owned()), + Token::Address(*token), + Token::Address(*recipient), + Token::Uint(U256::from(*amount)), + ])]), + Command::RegisterForeignToken { token_id, name, symbol, decimals } => + ethabi::encode(&[Token::Tuple(vec![ + Token::FixedBytes(token_id.as_bytes().to_owned()), + Token::String(name.to_owned()), + Token::String(symbol.to_owned()), + Token::Uint(U256::from(*decimals)), + ])]), + Command::MintForeignToken { token_id, recipient, amount } => + ethabi::encode(&[Token::Tuple(vec![ + Token::FixedBytes(token_id.as_bytes().to_owned()), + Token::Address(*recipient), + Token::Uint(U256::from(*amount)), + ])]), + } + } +} + +/// Representation of a call to the initializer of an implementation contract. +/// The initializer has the following ABI signature: `initialize(bytes)`. +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub struct Initializer { + /// ABI-encoded params of type `bytes` to pass to the initializer + pub params: Vec, + /// The initializer is allowed to consume this much gas at most. + pub maximum_required_gas: u64, +} + +/// A Sub-command executable within an agent +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(PartialEq))] +pub enum AgentExecuteCommand { + /// Transfer ERC20 tokens + TransferToken { + /// Address of the ERC20 token + token: H160, + /// The recipient of the tokens + recipient: H160, + /// The amount of tokens to transfer + amount: u128, + }, +} + +impl AgentExecuteCommand { + fn index(&self) -> u8 { + match self { + AgentExecuteCommand::TransferToken { .. 
} => 0,
+ }
+ }
+
+ /// ABI-encode the sub-command
+ pub fn abi_encode(&self) -> Vec<u8> {
+ match self {
+ AgentExecuteCommand::TransferToken { token, recipient, amount } => ethabi::encode(&[
+ Token::Uint(self.index().into()),
+ Token::Bytes(ethabi::encode(&[
+ Token::Address(*token),
+ Token::Address(*recipient),
+ Token::Uint(U256::from(*amount)),
+ ])),
+ ]),
+ }
+ }
+}
+
+/// Message which is awaiting processing in the MessageQueue pallet
+#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
+#[cfg_attr(feature = "std", derive(PartialEq))]
+pub struct QueuedMessage {
+ /// Message ID
+ pub id: H256,
+ /// Channel ID
+ pub channel_id: ChannelId,
+ /// Command to execute in the Gateway contract
+ pub command: Command,
+}
+
+#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
+#[cfg_attr(feature = "std", derive(PartialEq))]
+/// Fee for delivering message
+pub struct Fee<Balance>
+where
+ Balance: BaseArithmetic + Unsigned + Copy,
+{
+ /// Fee to cover cost of processing the message locally
+ pub local: Balance,
+ /// Fee to cover cost of processing the message remotely
+ pub remote: Balance,
+}
+
+impl<Balance> Fee<Balance>
+where
+ Balance: BaseArithmetic + Unsigned + Copy,
+{
+ pub fn total(&self) -> Balance {
+ self.local.saturating_add(self.remote)
+ }
+}
+
+impl<Balance> From<(Balance, Balance)> for Fee<Balance>
+where
+ Balance: BaseArithmetic + Unsigned + Copy,
+{
+ fn from((local, remote): (Balance, Balance)) -> Self {
+ Self { local, remote }
+ }
+}
+
+/// A trait for sending messages to Ethereum
+pub trait SendMessage: SendMessageFeeProvider {
+ type Ticket: Clone + Encode + Decode;
+
+ /// Validate an outbound message and return a tuple:
+ /// 1. Ticket for submitting the message
+ /// 2. Delivery fee
+ fn validate(
+ message: &Message,
+ ) -> Result<(Self::Ticket, Fee<<Self as SendMessageFeeProvider>::Balance>), SendError>;
+
+ /// Submit the message ticket for eventual delivery to Ethereum
+ fn deliver(ticket: Self::Ticket) -> Result<H256, SendError>;
+}
+
+pub trait Ticket: Encode + Decode + Clone {
+ fn message_id(&self) -> H256;
+}
+
+pub trait GasMeter {
+ /// All the gas used for submitting a message to Ethereum, minus the cost of dispatching
+ /// the command within the message
+ const MAXIMUM_BASE_GAS: u64;
+
+ /// Total gas consumed at most, including verification & dispatch
+ fn maximum_gas_used_at_most(command: &Command) -> u64 {
+ Self::MAXIMUM_BASE_GAS + Self::maximum_dispatch_gas_used_at_most(command)
+ }
+
+ /// Measures the maximum amount of gas a command payload will require to *dispatch*, NOT
+ /// including validation & verification.
+ fn maximum_dispatch_gas_used_at_most(command: &Command) -> u64;
+}
+
+/// A meter that assigns a constant amount of gas for the execution of a command
+///
+/// The gas figures are extracted from this report:
+/// > forge test --match-path test/Gateway.t.sol --gas-report
+///
+/// A healthy buffer is added on top of these figures to account for:
+/// * The EIP-150 63/64 rule
+/// * Future EVM upgrades that may increase gas cost
+pub struct ConstantGasMeter;
+
+impl GasMeter for ConstantGasMeter {
+ // The base transaction cost, which includes:
+ // 21_000 transaction cost, roughly worst case 64_000 for calldata, and 100_000
+ // for message verification
+ const MAXIMUM_BASE_GAS: u64 = 185_000;
+
+ fn maximum_dispatch_gas_used_at_most(command: &Command) -> u64 {
+ match command {
+ Command::CreateAgent { .. } => 275_000,
+ Command::CreateChannel { .. } => 100_000,
+ Command::UpdateChannel { .. } => 50_000,
+ Command::TransferNativeFromAgent { .. } => 60_000,
+ Command::SetOperatingMode { .. } => 40_000,
+ Command::AgentExecute { command, .. } => match command {
+ // Execute IERC20.transferFrom
+ //
+ // Worst-case assumptions are important:
+ // * No gas refund for clearing storage slot of source account in ERC20 contract
+ // * Assume dest account in ERC20 contract does not yet have a storage slot
+ // * ERC20.transferFrom possibly does other business logic besides updating balances
+ AgentExecuteCommand::TransferToken { .. } => 100_000,
+ },
+ Command::Upgrade { initializer, .. } => {
+ let initializer_max_gas = match *initializer {
+ Some(Initializer { maximum_required_gas, .. }) => maximum_required_gas,
+ None => 0,
+ };
+ // total maximum gas must also include the gas used for updating the proxy before
+ // the initializer is called.
+ 50_000 + initializer_max_gas
+ },
+ Command::SetTokenTransferFees { .. } => 60_000,
+ Command::SetPricingParameters { .. } => 60_000,
+ Command::TransferNativeToken { .. } => 100_000,
+ Command::RegisterForeignToken { .. } => 1_200_000,
+ Command::MintForeignToken { .. } => 100_000,
+ }
+ }
+}
+
+impl GasMeter for () {
+ const MAXIMUM_BASE_GAS: u64 = 1;
+
+ fn maximum_dispatch_gas_used_at_most(_: &Command) -> u64 {
+ 1
+ }
+}
+
+pub const ETHER_DECIMALS: u8 = 18;
diff --git a/bridges/snowbridge/primitives/core/src/outbound/v2.rs b/bridges/snowbridge/primitives/core/src/outbound/v2.rs
new file mode 100644
index 000000000000..4443a6ea5297
--- /dev/null
+++ b/bridges/snowbridge/primitives/core/src/outbound/v2.rs
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 Snowfork
+//! # Outbound V2 primitives
+
+use crate::outbound::{OperatingMode, SendError, SendMessageFeeProvider};
+use alloy_sol_types::sol;
+use codec::{Decode, Encode};
+use frame_support::{pallet_prelude::ConstU32, BoundedVec};
+use hex_literal::hex;
+use scale_info::TypeInfo;
+use sp_arithmetic::traits::{BaseArithmetic, Unsigned};
+use sp_core::{RuntimeDebug, H160, H256};
+use sp_std::{vec, vec::Vec};
+
+use alloy_primitives::{Address, FixedBytes};
+use alloy_sol_types::SolValue;
+
+sol! {
+ struct InboundMessageWrapper {
+ // origin
+ bytes32 origin;
+ // Message nonce
+ uint64 nonce;
+ // Commands
+ CommandWrapper[] commands;
+ }
+
+ #[derive(Encode, Decode, RuntimeDebug, PartialEq, TypeInfo)]
+ struct CommandWrapper {
+ uint8 kind;
+ uint64 gas;
+ bytes payload;
+ }
+
+ // Payload for Upgrade
+ struct UpgradeParams {
+ // The address of the implementation contract
+ address implAddress;
+ // Codehash of the new implementation contract.
+ bytes32 implCodeHash; + // Parameters used to upgrade storage of the gateway + bytes initParams; + } + + // Payload for CreateAgent + struct CreateAgentParams { + /// @dev The agent ID of the consensus system + bytes32 agentID; + } + + // Payload for SetOperatingMode instruction + struct SetOperatingModeParams { + /// The new operating mode + uint8 mode; + } + + // Payload for NativeTokenUnlock instruction + struct UnlockNativeTokenParams { + // Token address + address token; + // Recipient address + address recipient; + // Amount to unlock + uint128 amount; + } + + // Payload for RegisterForeignToken + struct RegisterForeignTokenParams { + /// @dev The token ID (hash of stable location id of token) + bytes32 foreignTokenID; + /// @dev The name of the token + bytes name; + /// @dev The symbol of the token + bytes symbol; + /// @dev The decimal of the token + uint8 decimals; + } + + // Payload for MintForeignTokenParams instruction + struct MintForeignTokenParams { + // Foreign token ID + bytes32 foreignTokenID; + // Recipient address + address recipient; + // Amount to mint + uint128 amount; + } +} + +#[derive(Encode, Decode, TypeInfo, PartialEq, Clone, RuntimeDebug)] +pub struct InboundMessage { + /// Origin + pub origin: H256, + /// Nonce + pub nonce: u64, + /// Commands + pub commands: BoundedVec>, +} + +pub const MAX_COMMANDS: u32 = 8; + +/// A message which can be accepted by implementations of `/[`SendMessage`\]` +#[derive(Encode, Decode, TypeInfo, PartialEq, Clone, RuntimeDebug)] +pub struct Message { + /// Origin + pub origin: H256, + /// ID + pub id: H256, + /// Fee + pub fee: u128, + /// Commands + pub commands: BoundedVec>, +} + +/// A command which is executable by the Gateway contract on Ethereum +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub enum Command { + /// Upgrade the Gateway contract + Upgrade { + /// Address of the new implementation contract + impl_address: H160, + /// Codehash of the implementation contract + impl_code_hash: H256, + /// Optionally invoke an initializer in the implementation contract + initializer: Option, + }, + /// Create an agent representing a consensus system on Polkadot + CreateAgent { + /// The ID of the agent, derived from the `MultiLocation` of the consensus system on + /// Polkadot + agent_id: H256, + }, + /// Set the global operating mode of the Gateway contract + SetOperatingMode { + /// The new operating mode + mode: OperatingMode, + }, + /// Unlock ERC20 tokens + UnlockNativeToken { + /// ID of the agent + agent_id: H256, + /// Address of the ERC20 token + token: H160, + /// The recipient of the tokens + recipient: H160, + /// The amount of tokens to transfer + amount: u128, + }, + /// Register foreign token from Polkadot + RegisterForeignToken { + /// ID for the token + token_id: H256, + /// Name of the token + name: Vec, + /// Short symbol for the token + symbol: Vec, + /// Number of decimal places + decimals: u8, + }, + /// Mint foreign token from Polkadot + MintForeignToken { + /// ID for the token + token_id: H256, + /// The recipient of the newly minted tokens + recipient: H160, + /// The amount of tokens to mint + amount: u128, + }, +} + +impl Command { + /// Compute the enum variant index + pub fn index(&self) -> u8 { + match self { + Command::Upgrade { .. } => 0, + Command::SetOperatingMode { .. } => 1, + Command::UnlockNativeToken { .. } => 2, + Command::RegisterForeignToken { .. } => 3, + Command::MintForeignToken { .. } => 4, + Command::CreateAgent { .. 
} => 5, + } + } + + /// ABI-encode the Command. + pub fn abi_encode(&self) -> Vec { + match self { + Command::Upgrade { impl_address, impl_code_hash, initializer, .. } => UpgradeParams { + implAddress: Address::from(impl_address.as_fixed_bytes()), + implCodeHash: FixedBytes::from(impl_code_hash.as_fixed_bytes()), + initParams: initializer.clone().map_or(vec![], |i| i.params), + } + .abi_encode(), + Command::CreateAgent { agent_id } => + CreateAgentParams { agentID: FixedBytes::from(agent_id.as_fixed_bytes()) } + .abi_encode(), + Command::SetOperatingMode { mode } => + SetOperatingModeParams { mode: (*mode) as u8 }.abi_encode(), + Command::UnlockNativeToken { token, recipient, amount, .. } => + UnlockNativeTokenParams { + token: Address::from(token.as_fixed_bytes()), + recipient: Address::from(recipient.as_fixed_bytes()), + amount: *amount, + } + .abi_encode(), + Command::RegisterForeignToken { token_id, name, symbol, decimals } => + RegisterForeignTokenParams { + foreignTokenID: FixedBytes::from(token_id.as_fixed_bytes()), + name: name.to_vec(), + symbol: symbol.to_vec(), + decimals: *decimals, + } + .abi_encode(), + Command::MintForeignToken { token_id, recipient, amount } => MintForeignTokenParams { + foreignTokenID: FixedBytes::from(token_id.as_fixed_bytes()), + recipient: Address::from(recipient.as_fixed_bytes()), + amount: *amount, + } + .abi_encode(), + } + } +} + +/// Representation of a call to the initializer of an implementation contract. +/// The initializer has the following ABI signature: `initialize(bytes)`. +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub struct Initializer { + /// ABI-encoded params of type `bytes` to pass to the initializer + pub params: Vec, + /// The initializer is allowed to consume this much gas at most. + pub maximum_required_gas: u64, +} + +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(PartialEq))] +/// Fee for delivering message +pub struct Fee +where + Balance: BaseArithmetic + Unsigned + Copy, +{ + /// Fee to cover cost of processing the message locally + pub local: Balance, +} + +impl Fee +where + Balance: BaseArithmetic + Unsigned + Copy, +{ + pub fn total(&self) -> Balance { + self.local + } +} + +impl From for Fee +where + Balance: BaseArithmetic + Unsigned + Copy, +{ + fn from(local: Balance) -> Self { + Self { local } + } +} + +pub trait SendMessage: SendMessageFeeProvider { + type Ticket: Clone + Encode + Decode; + + /// Validate an outbound message and return a tuple: + /// 1. Ticket for submitting the message + /// 2. Delivery fee + fn validate( + message: &Message, + ) -> Result<(Self::Ticket, Fee<::Balance>), SendError>; + + /// Submit the message ticket for eventual delivery to Ethereum + fn deliver(ticket: Self::Ticket) -> Result; +} + +pub struct DefaultOutboundQueue; +impl SendMessage for DefaultOutboundQueue { + type Ticket = (); + + fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { + Ok(((), Fee { local: Default::default() })) + } + + fn deliver(_: Self::Ticket) -> Result { + Ok(H256::zero()) + } +} + +impl SendMessageFeeProvider for DefaultOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + Default::default() + } +} + +pub trait GasMeter { + /// Measures the maximum amount of gas a command payload will require to *dispatch*, NOT + /// including validation & verification. 
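+ // Illustrative note (not part of this trait): a v2 message can carry several commands, so an
+ // outbound queue implementation would typically meter each command separately when building
+ // the ABI `CommandWrapper { kind, gas, payload }` entries defined above. A minimal sketch,
+ // assuming a pallet config type `T::GasMeter` (hypothetical name):
+ //
+ //   let wrappers: Vec<CommandWrapper> = message.commands.iter()
+ //       .map(|command| CommandWrapper {
+ //           kind: command.index(),
+ //           gas: T::GasMeter::maximum_dispatch_gas_used_at_most(command),
+ //           payload: command.abi_encode().into(),
+ //       })
+ //       .collect();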
+ fn maximum_dispatch_gas_used_at_most(command: &Command) -> u64; +} + +/// A meter that assigns a constant amount of gas for the execution of a command +/// +/// The gas figures are extracted from this report: +/// > forge test --match-path test/Gateway.t.sol --gas-report +/// +/// A healthy buffer is added on top of these figures to account for: +/// * The EIP-150 63/64 rule +/// * Future EVM upgrades that may increase gas cost +pub struct ConstantGasMeter; + +impl GasMeter for ConstantGasMeter { + fn maximum_dispatch_gas_used_at_most(command: &Command) -> u64 { + match command { + Command::CreateAgent { .. } => 275_000, + Command::SetOperatingMode { .. } => 40_000, + Command::Upgrade { initializer, .. } => { + let initializer_max_gas = match *initializer { + Some(Initializer { maximum_required_gas, .. }) => maximum_required_gas, + None => 0, + }; + // total maximum gas must also include the gas used for updating the proxy before + // the the initializer is called. + 50_000 + initializer_max_gas + }, + Command::UnlockNativeToken { .. } => 100_000, + Command::RegisterForeignToken { .. } => 1_200_000, + Command::MintForeignToken { .. } => 100_000, + } + } +} + +impl GasMeter for () { + fn maximum_dispatch_gas_used_at_most(_: &Command) -> u64 { + 1 + } +} + +// Origin for high-priority governance commands +pub fn primary_governance_origin() -> H256 { + hex!("0000000000000000000000000000000000000000000000000000000000000001").into() +} + +// Origin for lower-priority governance commands +pub fn second_governance_origin() -> H256 { + hex!("0000000000000000000000000000000000000000000000000000000000000002").into() +} diff --git a/bridges/snowbridge/primitives/core/src/reward.rs b/bridges/snowbridge/primitives/core/src/reward.rs new file mode 100644 index 000000000000..80e0d9b492d8 --- /dev/null +++ b/bridges/snowbridge/primitives/core/src/reward.rs @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork + +use frame_support::pallet_prelude::DispatchResult; + +pub trait RewardLedger { + // Deposit reward which can later be claimed by `account` + fn deposit(account: AccountId, value: Balance) -> DispatchResult; +} + +impl RewardLedger for () { + fn deposit(_: AccountId, _: Balance) -> DispatchResult { + Ok(()) + } +} diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/primitives/merkle-tree/Cargo.toml similarity index 76% rename from bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml rename to bridges/snowbridge/primitives/merkle-tree/Cargo.toml index 16241428df80..f1e3efd80a3d 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/primitives/merkle-tree/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "snowbridge-outbound-queue-merkle-tree" -description = "Snowbridge Outbound Queue Merkle Tree" -version = "0.3.0" +name = "snowbridge-merkle-tree" +description = "Snowbridge Merkle Tree" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true @@ -11,15 +11,11 @@ categories = ["cryptography::cryptocurrencies"] [lints] workspace = true -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - [dependencies] -codec = { features = ["derive"], workspace = true } +codec = { workspace = true } scale-info = { features = ["derive"], workspace = true } - -sp-core = { workspace = true } sp-runtime = { workspace = true } +sp-core = { workspace = true } [dev-dependencies] hex-literal = { workspace = true, 
default-features = true } diff --git a/bridges/snowbridge/primitives/merkle-tree/README.md b/bridges/snowbridge/primitives/merkle-tree/README.md new file mode 100644 index 000000000000..a9c17ad4f2d1 --- /dev/null +++ b/bridges/snowbridge/primitives/merkle-tree/README.md @@ -0,0 +1,3 @@ +# Merkle-Tree Primitives + +Contains the custom merkle tree implementation optimized for Ethereum. diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/src/lib.rs b/bridges/snowbridge/primitives/merkle-tree/src/lib.rs similarity index 100% rename from bridges/snowbridge/pallets/outbound-queue/merkle-tree/src/lib.rs rename to bridges/snowbridge/primitives/merkle-tree/src/lib.rs diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index ee8d481cec12..664f2dbf7930 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -24,6 +24,7 @@ sp-std = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } +xcm-builder = { workspace = true } snowbridge-core = { workspace = true } @@ -43,6 +44,7 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "xcm-builder/std", "xcm-executor/std", "xcm/std", ] @@ -50,5 +52,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", ] diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index e03560f66e24..abd32aa3897f 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -1,458 +1,16 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork -//! Converts messages from Ethereum to XCM messages +// SPDX-FileCopyrightText: 2021-2022 Parity Technologies (UK) Ltd. -#[cfg(test)] -mod tests; +pub mod v1; +pub mod v2; -use codec::{Decode, Encode}; -use core::marker::PhantomData; -use frame_support::{traits::tokens::Balance as BalanceT, PalletError}; -use scale_info::TypeInfo; -use snowbridge_core::TokenId; -use sp_core::{Get, RuntimeDebug, H160, H256}; -use sp_io::hashing::blake2_256; -use sp_runtime::{traits::MaybeEquivalence, MultiAddress}; -use sp_std::prelude::*; -use xcm::prelude::{Junction::AccountKey20, *}; +use codec::Encode; +use sp_core::blake2_256; +use sp_std::marker::PhantomData; +use xcm::prelude::{AccountKey20, Ethereum, GlobalConsensus, Location}; use xcm_executor::traits::ConvertLocation; -const MINIMUM_DEPOSIT: u128 = 1; - -/// Messages from Ethereum are versioned. This is because in future, -/// we may want to evolve the protocol so that the ethereum side sends XCM messages directly. -/// Instead having BridgeHub transcode the messages into XCM. -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub enum VersionedMessage { - V1(MessageV1), -} - -/// For V1, the ethereum side sends messages which are transcoded into XCM. These messages are -/// self-contained, in that they can be transcoded using only information in the message. 
-#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub struct MessageV1 { - /// EIP-155 chain id of the origin Ethereum network - pub chain_id: u64, - /// The command originating from the Gateway contract - pub command: Command, -} - -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub enum Command { - /// Register a wrapped token on the AssetHub `ForeignAssets` pallet - RegisterToken { - /// The address of the ERC20 token to be bridged over to AssetHub - token: H160, - /// XCM execution fee on AssetHub - fee: u128, - }, - /// Send Ethereum token to AssetHub or another parachain - SendToken { - /// The address of the ERC20 token to be bridged over to AssetHub - token: H160, - /// The destination for the transfer - destination: Destination, - /// Amount to transfer - amount: u128, - /// XCM execution fee on AssetHub - fee: u128, - }, - /// Send Polkadot token back to the original parachain - SendNativeToken { - /// The Id of the token - token_id: TokenId, - /// The destination for the transfer - destination: Destination, - /// Amount to transfer - amount: u128, - /// XCM execution fee on AssetHub - fee: u128, - }, -} - -/// Destination for bridged tokens -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub enum Destination { - /// The funds will be deposited into account `id` on AssetHub - AccountId32 { id: [u8; 32] }, - /// The funds will deposited into the sovereign account of destination parachain `para_id` on - /// AssetHub, Account `id` on the destination parachain will receive the funds via a - /// reserve-backed transfer. See - ForeignAccountId32 { - para_id: u32, - id: [u8; 32], - /// XCM execution fee on final destination - fee: u128, - }, - /// The funds will deposited into the sovereign account of destination parachain `para_id` on - /// AssetHub, Account `id` on the destination parachain will receive the funds via a - /// reserve-backed transfer. See - ForeignAccountId20 { - para_id: u32, - id: [u8; 20], - /// XCM execution fee on final destination - fee: u128, - }, -} - -pub struct MessageToXcm< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, -> where - CreateAssetCall: Get, - CreateAssetDeposit: Get, - Balance: BalanceT, - ConvertAssetId: MaybeEquivalence, - EthereumUniversalLocation: Get, - GlobalAssetHubLocation: Get, -{ - _phantom: PhantomData<( - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - )>, -} - -/// Reason why a message conversion failed. -#[derive(Copy, Clone, TypeInfo, PalletError, Encode, Decode, RuntimeDebug)] -pub enum ConvertMessageError { - /// The message version is not supported for conversion. - UnsupportedVersion, - InvalidDestination, - InvalidToken, - /// The fee asset is not supported for conversion. 
- UnsupportedFeeAsset, - CannotReanchor, -} - -/// convert the inbound message to xcm which will be forwarded to the destination chain -pub trait ConvertMessage { - type Balance: BalanceT + From; - type AccountId; - /// Converts a versioned message into an XCM message and an optional topicID - fn convert( - message_id: H256, - message: VersionedMessage, - ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError>; -} - -pub type CallIndex = [u8; 2]; - -impl< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > ConvertMessage - for MessageToXcm< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > -where - CreateAssetCall: Get, - CreateAssetDeposit: Get, - InboundQueuePalletInstance: Get, - Balance: BalanceT + From, - AccountId: Into<[u8; 32]>, - ConvertAssetId: MaybeEquivalence, - EthereumUniversalLocation: Get, - GlobalAssetHubLocation: Get, -{ - type Balance = Balance; - type AccountId = AccountId; - - fn convert( - message_id: H256, - message: VersionedMessage, - ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError> { - use Command::*; - use VersionedMessage::*; - match message { - V1(MessageV1 { chain_id, command: RegisterToken { token, fee } }) => - Ok(Self::convert_register_token(message_id, chain_id, token, fee)), - V1(MessageV1 { chain_id, command: SendToken { token, destination, amount, fee } }) => - Ok(Self::convert_send_token(message_id, chain_id, token, destination, amount, fee)), - V1(MessageV1 { - chain_id, - command: SendNativeToken { token_id, destination, amount, fee }, - }) => Self::convert_send_native_token( - message_id, - chain_id, - token_id, - destination, - amount, - fee, - ), - } - } -} - -impl< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > - MessageToXcm< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > -where - CreateAssetCall: Get, - CreateAssetDeposit: Get, - InboundQueuePalletInstance: Get, - Balance: BalanceT + From, - AccountId: Into<[u8; 32]>, - ConvertAssetId: MaybeEquivalence, - EthereumUniversalLocation: Get, - GlobalAssetHubLocation: Get, -{ - fn convert_register_token( - message_id: H256, - chain_id: u64, - token: H160, - fee: u128, - ) -> (Xcm<()>, Balance) { - let network = Ethereum { chain_id }; - let xcm_fee: Asset = (Location::parent(), fee).into(); - let deposit: Asset = (Location::parent(), CreateAssetDeposit::get()).into(); - - let total_amount = fee + CreateAssetDeposit::get(); - let total: Asset = (Location::parent(), total_amount).into(); - - let bridge_location = Location::new(2, GlobalConsensus(network)); - - let owner = EthereumLocationsConverterFor::<[u8; 32]>::from_chain_id(&chain_id); - let asset_id = Self::convert_token_address(network, token); - let create_call_index: [u8; 2] = CreateAssetCall::get(); - let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); - - let xcm: Xcm<()> = vec![ - // Teleport required fees. - ReceiveTeleportedAsset(total.into()), - // Pay for execution. - BuyExecution { fees: xcm_fee, weight_limit: Unlimited }, - // Fund the snowbridge sovereign with the required deposit for creation. 
- DepositAsset { assets: Definite(deposit.into()), beneficiary: bridge_location.clone() }, - // This `SetAppendix` ensures that `xcm_fee` not spent by `Transact` will be - // deposited to snowbridge sovereign, instead of being trapped, regardless of - // `Transact` success or not. - SetAppendix(Xcm(vec![ - RefundSurplus, - DepositAsset { assets: AllCounted(1).into(), beneficiary: bridge_location }, - ])), - // Only our inbound-queue pallet is allowed to invoke `UniversalOrigin`. - DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), - // Change origin to the bridge. - UniversalOrigin(GlobalConsensus(network)), - // Call create_asset on foreign assets pallet. - Transact { - origin_kind: OriginKind::Xcm, - call: ( - create_call_index, - asset_id, - MultiAddress::<[u8; 32], ()>::Id(owner), - MINIMUM_DEPOSIT, - ) - .encode() - .into(), - }, - // Forward message id to Asset Hub - SetTopic(message_id.into()), - // Once the program ends here, appendix program will run, which will deposit any - // leftover fee to snowbridge sovereign. - ] - .into(); - - (xcm, total_amount.into()) - } - - fn convert_send_token( - message_id: H256, - chain_id: u64, - token: H160, - destination: Destination, - amount: u128, - asset_hub_fee: u128, - ) -> (Xcm<()>, Balance) { - let network = Ethereum { chain_id }; - let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); - let asset: Asset = (Self::convert_token_address(network, token), amount).into(); - - let (dest_para_id, beneficiary, dest_para_fee) = match destination { - // Final destination is a 32-byte account on AssetHub - Destination::AccountId32 { id } => - (None, Location::new(0, [AccountId32 { network: None, id }]), 0), - // Final destination is a 32-byte account on a sibling of AssetHub - Destination::ForeignAccountId32 { para_id, id, fee } => ( - Some(para_id), - Location::new(0, [AccountId32 { network: None, id }]), - // Total fee needs to cover execution on AssetHub and Sibling - fee, - ), - // Final destination is a 20-byte account on a sibling of AssetHub - Destination::ForeignAccountId20 { para_id, id, fee } => ( - Some(para_id), - Location::new(0, [AccountKey20 { network: None, key: id }]), - // Total fee needs to cover execution on AssetHub and Sibling - fee, - ), - }; - - let total_fees = asset_hub_fee.saturating_add(dest_para_fee); - let total_fee_asset: Asset = (Location::parent(), total_fees).into(); - let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); - - let mut instructions = vec![ - ReceiveTeleportedAsset(total_fee_asset.into()), - BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, - DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), - UniversalOrigin(GlobalConsensus(network)), - ReserveAssetDeposited(asset.clone().into()), - ClearOrigin, - ]; - - match dest_para_id { - Some(dest_para_id) => { - let dest_para_fee_asset: Asset = (Location::parent(), dest_para_fee).into(); - let bridge_location = Location::new(2, GlobalConsensus(network)); - - instructions.extend(vec![ - // After program finishes deposit any leftover assets to the snowbridge - // sovereign. - SetAppendix(Xcm(vec![DepositAsset { - assets: Wild(AllCounted(2)), - beneficiary: bridge_location, - }])), - // Perform a deposit reserve to send to destination chain. - DepositReserveAsset { - assets: Definite(vec![dest_para_fee_asset.clone(), asset].into()), - dest: Location::new(1, [Parachain(dest_para_id)]), - xcm: vec![ - // Buy execution on target. 
- BuyExecution { fees: dest_para_fee_asset, weight_limit: Unlimited }, - // Deposit assets to beneficiary. - DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, - // Forward message id to destination parachain. - SetTopic(message_id.into()), - ] - .into(), - }, - ]); - }, - None => { - instructions.extend(vec![ - // Deposit both asset and fees to beneficiary so the fees will not get - // trapped. Another benefit is when fees left more than ED on AssetHub could be - // used to create the beneficiary account in case it does not exist. - DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, - ]); - }, - } - - // Forward message id to Asset Hub. - instructions.push(SetTopic(message_id.into())); - - // The `instructions` to forward to AssetHub, and the `total_fees` to locally burn (since - // they are teleported within `instructions`). - (instructions.into(), total_fees.into()) - } - - // Convert ERC20 token address to a location that can be understood by Assets Hub. - fn convert_token_address(network: NetworkId, token: H160) -> Location { - Location::new( - 2, - [GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }], - ) - } - - /// Constructs an XCM message destined for AssetHub that withdraws assets from the sovereign - /// account of the Gateway contract and either deposits those assets into a recipient account or - /// forwards the assets to another parachain. - fn convert_send_native_token( - message_id: H256, - chain_id: u64, - token_id: TokenId, - destination: Destination, - amount: u128, - asset_hub_fee: u128, - ) -> Result<(Xcm<()>, Balance), ConvertMessageError> { - let network = Ethereum { chain_id }; - let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); - - let beneficiary = match destination { - // Final destination is a 32-byte account on AssetHub - Destination::AccountId32 { id } => - Ok(Location::new(0, [AccountId32 { network: None, id }])), - // Forwarding to a destination parachain is not allowed for PNA and is validated on the - // Ethereum side. https://github.com/Snowfork/snowbridge/blob/e87ddb2215b513455c844463a25323bb9c01ff36/contracts/src/Assets.sol#L216-L224 - _ => Err(ConvertMessageError::InvalidDestination), - }?; - - let total_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); - - let asset_loc = - ConvertAssetId::convert(&token_id).ok_or(ConvertMessageError::InvalidToken)?; - - let mut reanchored_asset_loc = asset_loc.clone(); - reanchored_asset_loc - .reanchor(&GlobalAssetHubLocation::get(), &EthereumUniversalLocation::get()) - .map_err(|_| ConvertMessageError::CannotReanchor)?; - - let asset: Asset = (reanchored_asset_loc, amount).into(); - - let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); - - let instructions = vec![ - ReceiveTeleportedAsset(total_fee_asset.clone().into()), - BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, - DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), - UniversalOrigin(GlobalConsensus(network)), - WithdrawAsset(asset.clone().into()), - // Deposit both asset and fees to beneficiary so the fees will not get - // trapped. Another benefit is when fees left more than ED on AssetHub could be - // used to create the beneficiary account in case it does not exist. 
- DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, - SetTopic(message_id.into()), - ]; - - // `total_fees` to burn on this chain when sending `instructions` to run on AH (which also - // teleport fees) - Ok((instructions.into(), asset_hub_fee.into())) - } -} - pub struct EthereumLocationsConverterFor(PhantomData); impl ConvertLocation for EthereumLocationsConverterFor where @@ -477,3 +35,5 @@ impl EthereumLocationsConverterFor { (b"ethereum-chain", chain_id, key).using_encoded(blake2_256) } } + +pub type CallIndex = [u8; 2]; diff --git a/bridges/snowbridge/primitives/router/src/inbound/tests.rs b/bridges/snowbridge/primitives/router/src/inbound/tests.rs deleted file mode 100644 index 786aa594f653..000000000000 --- a/bridges/snowbridge/primitives/router/src/inbound/tests.rs +++ /dev/null @@ -1,83 +0,0 @@ -use super::EthereumLocationsConverterFor; -use crate::inbound::CallIndex; -use frame_support::{assert_ok, parameter_types}; -use hex_literal::hex; -use xcm::prelude::*; -use xcm_executor::traits::ConvertLocation; - -const NETWORK: NetworkId = Ethereum { chain_id: 11155111 }; - -parameter_types! { - pub EthereumNetwork: NetworkId = NETWORK; - - pub const CreateAssetCall: CallIndex = [1, 1]; - pub const CreateAssetExecutionFee: u128 = 123; - pub const CreateAssetDeposit: u128 = 891; - pub const SendTokenExecutionFee: u128 = 592; -} - -#[test] -fn test_ethereum_network_converts_successfully() { - let expected_account: [u8; 32] = - hex!("ce796ae65569a670d0c1cc1ac12515a3ce21b5fbf729d63d7b289baad070139d"); - let contract_location = Location::new(2, [GlobalConsensus(NETWORK)]); - - let account = - EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location).unwrap(); - - assert_eq!(account, expected_account); -} - -#[test] -fn test_contract_location_with_network_converts_successfully() { - let expected_account: [u8; 32] = - hex!("9038d35aba0e78e072d29b2d65be9df5bb4d7d94b4609c9cf98ea8e66e544052"); - let contract_location = Location::new( - 2, - [GlobalConsensus(NETWORK), AccountKey20 { network: None, key: [123u8; 20] }], - ); - - let account = - EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location).unwrap(); - - assert_eq!(account, expected_account); -} - -#[test] -fn test_contract_location_with_incorrect_location_fails_convert() { - let contract_location = Location::new(2, [GlobalConsensus(Polkadot), Parachain(1000)]); - - assert_eq!( - EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location), - None, - ); -} - -#[test] -fn test_reanchor_all_assets() { - let ethereum_context: InteriorLocation = [GlobalConsensus(Ethereum { chain_id: 1 })].into(); - let ethereum = Location::new(2, ethereum_context.clone()); - let ah_context: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into(); - let global_ah = Location::new(1, ah_context.clone()); - let assets = vec![ - // DOT - Location::new(1, []), - // GLMR (Some Polkadot parachain currency) - Location::new(1, [Parachain(2004)]), - // AH asset - Location::new(0, [PalletInstance(50), GeneralIndex(42)]), - // KSM - Location::new(2, [GlobalConsensus(Kusama)]), - // KAR (Some Kusama parachain currency) - Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]), - ]; - for asset in assets.iter() { - // reanchor logic in pallet_xcm on AH - let mut reanchored_asset = asset.clone(); - assert_ok!(reanchored_asset.reanchor(ðereum, &ah_context)); - // reanchor back to original location in context of Ethereum - let mut reanchored_asset_with_ethereum_context = 
reanchored_asset.clone(); - assert_ok!(reanchored_asset_with_ethereum_context.reanchor(&global_ah, ðereum_context)); - assert_eq!(reanchored_asset_with_ethereum_context, asset.clone()); - } -} diff --git a/bridges/snowbridge/primitives/router/src/inbound/v1.rs b/bridges/snowbridge/primitives/router/src/inbound/v1.rs new file mode 100644 index 000000000000..73e5f5ada939 --- /dev/null +++ b/bridges/snowbridge/primitives/router/src/inbound/v1.rs @@ -0,0 +1,520 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Converts messages from Ethereum to XCM messages + +use crate::inbound::{CallIndex, EthereumLocationsConverterFor}; +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use frame_support::{traits::tokens::Balance as BalanceT, PalletError}; +use scale_info::TypeInfo; +use snowbridge_core::TokenId; +use sp_core::{Get, RuntimeDebug, H160, H256}; +use sp_runtime::{traits::MaybeEquivalence, MultiAddress}; +use sp_std::prelude::*; +use xcm::prelude::{Junction::AccountKey20, *}; + +const MINIMUM_DEPOSIT: u128 = 1; + +/// Messages from Ethereum are versioned. This is because in future, +/// we may want to evolve the protocol so that the ethereum side sends XCM messages directly. +/// Instead having BridgeHub transcode the messages into XCM. +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub enum VersionedMessage { + V1(MessageV1), +} + +/// For V1, the ethereum side sends messages which are transcoded into XCM. These messages are +/// self-contained, in that they can be transcoded using only information in the message. +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub struct MessageV1 { + /// EIP-155 chain id of the origin Ethereum network + pub chain_id: u64, + /// The command originating from the Gateway contract + pub command: Command, +} + +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub enum Command { + /// Register a wrapped token on the AssetHub `ForeignAssets` pallet + RegisterToken { + /// The address of the ERC20 token to be bridged over to AssetHub + token: H160, + /// XCM execution fee on AssetHub + fee: u128, + }, + /// Send Ethereum token to AssetHub or another parachain + SendToken { + /// The address of the ERC20 token to be bridged over to AssetHub + token: H160, + /// The destination for the transfer + destination: Destination, + /// Amount to transfer + amount: u128, + /// XCM execution fee on AssetHub + fee: u128, + }, + /// Send Polkadot token back to the original parachain + SendNativeToken { + /// The Id of the token + token_id: TokenId, + /// The destination for the transfer + destination: Destination, + /// Amount to transfer + amount: u128, + /// XCM execution fee on AssetHub + fee: u128, + }, +} + +/// Destination for bridged tokens +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub enum Destination { + /// The funds will be deposited into account `id` on AssetHub + AccountId32 { id: [u8; 32] }, + /// The funds will deposited into the sovereign account of destination parachain `para_id` on + /// AssetHub, Account `id` on the destination parachain will receive the funds via a + /// reserve-backed transfer. See + ForeignAccountId32 { + para_id: u32, + id: [u8; 32], + /// XCM execution fee on final destination + fee: u128, + }, + /// The funds will deposited into the sovereign account of destination parachain `para_id` on + /// AssetHub, Account `id` on the destination parachain will receive the funds via a + /// reserve-backed transfer. 
See + ForeignAccountId20 { + para_id: u32, + id: [u8; 20], + /// XCM execution fee on final destination + fee: u128, + }, +} + +pub struct MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, +> where + CreateAssetCall: Get, + CreateAssetDeposit: Get, + Balance: BalanceT, + ConvertAssetId: MaybeEquivalence, + EthereumUniversalLocation: Get, + GlobalAssetHubLocation: Get, +{ + _phantom: PhantomData<( + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + )>, +} + +/// Reason why a message conversion failed. +#[derive(Copy, Clone, TypeInfo, PalletError, Encode, Decode, RuntimeDebug)] +pub enum ConvertMessageError { + /// The message version is not supported for conversion. + UnsupportedVersion, + InvalidDestination, + InvalidToken, + /// The fee asset is not supported for conversion. + UnsupportedFeeAsset, + CannotReanchor, +} + +/// convert the inbound message to xcm which will be forwarded to the destination chain +pub trait ConvertMessage { + type Balance: BalanceT + From; + type AccountId; + /// Converts a versioned message into an XCM message and an optional topicID + fn convert( + message_id: H256, + message: VersionedMessage, + ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError>; +} + +impl< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > ConvertMessage + for MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > +where + CreateAssetCall: Get, + CreateAssetDeposit: Get, + InboundQueuePalletInstance: Get, + Balance: BalanceT + From, + AccountId: Into<[u8; 32]>, + ConvertAssetId: MaybeEquivalence, + EthereumUniversalLocation: Get, + GlobalAssetHubLocation: Get, +{ + type Balance = Balance; + type AccountId = AccountId; + + fn convert( + message_id: H256, + message: VersionedMessage, + ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError> { + use Command::*; + use VersionedMessage::*; + match message { + V1(MessageV1 { chain_id, command: RegisterToken { token, fee } }) => + Ok(Self::convert_register_token(message_id, chain_id, token, fee)), + V1(MessageV1 { chain_id, command: SendToken { token, destination, amount, fee } }) => + Ok(Self::convert_send_token(message_id, chain_id, token, destination, amount, fee)), + V1(MessageV1 { + chain_id, + command: SendNativeToken { token_id, destination, amount, fee }, + }) => Self::convert_send_native_token( + message_id, + chain_id, + token_id, + destination, + amount, + fee, + ), + } + } +} + +impl< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > + MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > +where + CreateAssetCall: Get, + CreateAssetDeposit: Get, + InboundQueuePalletInstance: Get, + Balance: BalanceT + From, + AccountId: Into<[u8; 32]>, + ConvertAssetId: MaybeEquivalence, + EthereumUniversalLocation: Get, + GlobalAssetHubLocation: Get, +{ + fn convert_register_token( + message_id: 
H256, + chain_id: u64, + token: H160, + fee: u128, + ) -> (Xcm<()>, Balance) { + let network = Ethereum { chain_id }; + let xcm_fee: Asset = (Location::parent(), fee).into(); + let deposit: Asset = (Location::parent(), CreateAssetDeposit::get()).into(); + + let total_amount = fee + CreateAssetDeposit::get(); + let total: Asset = (Location::parent(), total_amount).into(); + + let bridge_location = Location::new(2, GlobalConsensus(network)); + + let owner = EthereumLocationsConverterFor::<[u8; 32]>::from_chain_id(&chain_id); + let asset_id = Self::convert_token_address(network, token); + let create_call_index: [u8; 2] = CreateAssetCall::get(); + let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); + + let xcm: Xcm<()> = vec![ + // Teleport required fees. + ReceiveTeleportedAsset(total.into()), + // Pay for execution. + BuyExecution { fees: xcm_fee, weight_limit: Unlimited }, + // Fund the snowbridge sovereign with the required deposit for creation. + DepositAsset { assets: Definite(deposit.into()), beneficiary: bridge_location.clone() }, + // This `SetAppendix` ensures that `xcm_fee` not spent by `Transact` will be + // deposited to snowbridge sovereign, instead of being trapped, regardless of + // `Transact` success or not. + SetAppendix(Xcm(vec![ + RefundSurplus, + DepositAsset { assets: AllCounted(1).into(), beneficiary: bridge_location }, + ])), + // Only our inbound-queue pallet is allowed to invoke `UniversalOrigin`. + DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), + // Change origin to the bridge. + UniversalOrigin(GlobalConsensus(network)), + // Call create_asset on foreign assets pallet. + Transact { + origin_kind: OriginKind::Xcm, + call: ( + create_call_index, + asset_id, + MultiAddress::<[u8; 32], ()>::Id(owner), + MINIMUM_DEPOSIT, + ) + .encode() + .into(), + }, + // Forward message id to Asset Hub + SetTopic(message_id.into()), + // Once the program ends here, appendix program will run, which will deposit any + // leftover fee to snowbridge sovereign. 
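+ // Worked example (figures are hypothetical): with `fee = 40_000_000_000` and
+ // `CreateAssetDeposit::get() = 10_000_000_000`, the program teleports
+ // `total_amount = 50_000_000_000` via `ReceiveTeleportedAsset`, pays execution out of `fee`,
+ // deposits the creation deposit to the bridge sovereign, and the function returns
+ // `total_amount` so the caller can burn that amount locally to back the teleport
+ // (mirroring the note in `convert_send_token` below).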
+ ] + .into(); + + (xcm, total_amount.into()) + } + + fn convert_send_token( + message_id: H256, + chain_id: u64, + token: H160, + destination: Destination, + amount: u128, + asset_hub_fee: u128, + ) -> (Xcm<()>, Balance) { + let network = Ethereum { chain_id }; + let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); + let asset: Asset = (Self::convert_token_address(network, token), amount).into(); + + let (dest_para_id, beneficiary, dest_para_fee) = match destination { + // Final destination is a 32-byte account on AssetHub + Destination::AccountId32 { id } => + (None, Location::new(0, [AccountId32 { network: None, id }]), 0), + // Final destination is a 32-byte account on a sibling of AssetHub + Destination::ForeignAccountId32 { para_id, id, fee } => ( + Some(para_id), + Location::new(0, [AccountId32 { network: None, id }]), + // Total fee needs to cover execution on AssetHub and Sibling + fee, + ), + // Final destination is a 20-byte account on a sibling of AssetHub + Destination::ForeignAccountId20 { para_id, id, fee } => ( + Some(para_id), + Location::new(0, [AccountKey20 { network: None, key: id }]), + // Total fee needs to cover execution on AssetHub and Sibling + fee, + ), + }; + + let total_fees = asset_hub_fee.saturating_add(dest_para_fee); + let total_fee_asset: Asset = (Location::parent(), total_fees).into(); + let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); + + let mut instructions = vec![ + ReceiveTeleportedAsset(total_fee_asset.into()), + BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, + DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), + UniversalOrigin(GlobalConsensus(network)), + ReserveAssetDeposited(asset.clone().into()), + ClearOrigin, + ]; + + match dest_para_id { + Some(dest_para_id) => { + let dest_para_fee_asset: Asset = (Location::parent(), dest_para_fee).into(); + let bridge_location = Location::new(2, GlobalConsensus(network)); + + instructions.extend(vec![ + // After program finishes deposit any leftover assets to the snowbridge + // sovereign. + SetAppendix(Xcm(vec![DepositAsset { + assets: Wild(AllCounted(2)), + beneficiary: bridge_location, + }])), + // Perform a deposit reserve to send to destination chain. + DepositReserveAsset { + assets: Definite(vec![dest_para_fee_asset.clone(), asset].into()), + dest: Location::new(1, [Parachain(dest_para_id)]), + xcm: vec![ + // Buy execution on target. + BuyExecution { fees: dest_para_fee_asset, weight_limit: Unlimited }, + // Deposit assets to beneficiary. + DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, + // Forward message id to destination parachain. + SetTopic(message_id.into()), + ] + .into(), + }, + ]); + }, + None => { + instructions.extend(vec![ + // Deposit both asset and fees to beneficiary so the fees will not get + // trapped. Another benefit is when fees left more than ED on AssetHub could be + // used to create the beneficiary account in case it does not exist. + DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, + ]); + }, + } + + // Forward message id to Asset Hub. + instructions.push(SetTopic(message_id.into())); + + // The `instructions` to forward to AssetHub, and the `total_fees` to locally burn (since + // they are teleported within `instructions`). + (instructions.into(), total_fees.into()) + } + + // Convert ERC20 token address to a location that can be understood by Assets Hub. 
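+ // For illustration (address hypothetical): an ERC20 whose address is the 20-byte value
+ // `[0x11; 20]`, bridged from `Ethereum { chain_id: 1 }`, maps to
+ //   Location::new(2, [GlobalConsensus(Ethereum { chain_id: 1 }),
+ //                     AccountKey20 { network: None, key: [0x11; 20] }])
+ // which is the asset id under which AssetHub tracks the wrapped token in `ForeignAssets`.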
+ fn convert_token_address(network: NetworkId, token: H160) -> Location { + Location::new( + 2, + [GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }], + ) + } + + /// Constructs an XCM message destined for AssetHub that withdraws assets from the sovereign + /// account of the Gateway contract and either deposits those assets into a recipient account or + /// forwards the assets to another parachain. + fn convert_send_native_token( + message_id: H256, + chain_id: u64, + token_id: TokenId, + destination: Destination, + amount: u128, + asset_hub_fee: u128, + ) -> Result<(Xcm<()>, Balance), ConvertMessageError> { + let network = Ethereum { chain_id }; + let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); + + let beneficiary = match destination { + // Final destination is a 32-byte account on AssetHub + Destination::AccountId32 { id } => + Ok(Location::new(0, [AccountId32 { network: None, id }])), + _ => Err(ConvertMessageError::InvalidDestination), + }?; + + let total_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); + + let asset_loc = + ConvertAssetId::convert(&token_id).ok_or(ConvertMessageError::InvalidToken)?; + + let mut reanchored_asset_loc = asset_loc.clone(); + reanchored_asset_loc + .reanchor(&GlobalAssetHubLocation::get(), &EthereumUniversalLocation::get()) + .map_err(|_| ConvertMessageError::CannotReanchor)?; + + let asset: Asset = (reanchored_asset_loc, amount).into(); + + let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); + + let instructions = vec![ + ReceiveTeleportedAsset(total_fee_asset.clone().into()), + BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, + DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), + UniversalOrigin(GlobalConsensus(network)), + WithdrawAsset(asset.clone().into()), + // Deposit both asset and fees to beneficiary so the fees will not get + // trapped. Another benefit is when fees left more than ED on AssetHub could be + // used to create the beneficiary account in case it does not exist. + DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, + SetTopic(message_id.into()), + ]; + + // `total_fees` to burn on this chain when sending `instructions` to run on AH (which also + // teleport fees) + Ok((instructions.into(), asset_hub_fee.into())) + } +} + +#[cfg(test)] +mod tests { + use crate::inbound::{CallIndex, EthereumLocationsConverterFor}; + use frame_support::{assert_ok, parameter_types}; + use hex_literal::hex; + use xcm::prelude::*; + use xcm_executor::traits::ConvertLocation; + + const NETWORK: NetworkId = Ethereum { chain_id: 11155111 }; + + parameter_types! 
{ + pub EthereumNetwork: NetworkId = NETWORK; + + pub const CreateAssetCall: CallIndex = [1, 1]; + pub const CreateAssetExecutionFee: u128 = 123; + pub const CreateAssetDeposit: u128 = 891; + pub const SendTokenExecutionFee: u128 = 592; + } + + #[test] + fn test_contract_location_with_network_converts_successfully() { + let expected_account: [u8; 32] = + hex!("ce796ae65569a670d0c1cc1ac12515a3ce21b5fbf729d63d7b289baad070139d"); + let contract_location = Location::new(2, [GlobalConsensus(NETWORK)]); + + let account = + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location) + .unwrap(); + + assert_eq!(account, expected_account); + } + + #[test] + fn test_contract_location_with_incorrect_location_fails_convert() { + let contract_location = Location::new(2, [GlobalConsensus(Polkadot), Parachain(1000)]); + + assert_eq!( + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location), + None, + ); + } + + #[test] + fn test_reanchor_all_assets() { + let ethereum_context: InteriorLocation = [GlobalConsensus(Ethereum { chain_id: 1 })].into(); + let ethereum = Location::new(2, ethereum_context.clone()); + let ah_context: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into(); + let global_ah = Location::new(1, ah_context.clone()); + let assets = vec![ + // DOT + Location::new(1, []), + // GLMR (Some Polkadot parachain currency) + Location::new(1, [Parachain(2004)]), + // AH asset + Location::new(0, [PalletInstance(50), GeneralIndex(42)]), + // KSM + Location::new(2, [GlobalConsensus(Kusama)]), + // KAR (Some Kusama parachain currency) + Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]), + ]; + for asset in assets.iter() { + // reanchor logic in pallet_xcm on AH + let mut reanchored_asset = asset.clone(); + assert_ok!(reanchored_asset.reanchor(ðereum, &ah_context)); + // reanchor back to original location in context of Ethereum + let mut reanchored_asset_with_ethereum_context = reanchored_asset.clone(); + assert_ok!( + reanchored_asset_with_ethereum_context.reanchor(&global_ah, ðereum_context) + ); + assert_eq!(reanchored_asset_with_ethereum_context, asset.clone()); + } + } +} diff --git a/bridges/snowbridge/primitives/router/src/inbound/v2.rs b/bridges/snowbridge/primitives/router/src/inbound/v2.rs new file mode 100644 index 000000000000..73e5f5ada939 --- /dev/null +++ b/bridges/snowbridge/primitives/router/src/inbound/v2.rs @@ -0,0 +1,520 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Converts messages from Ethereum to XCM messages + +use crate::inbound::{CallIndex, EthereumLocationsConverterFor}; +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use frame_support::{traits::tokens::Balance as BalanceT, PalletError}; +use scale_info::TypeInfo; +use snowbridge_core::TokenId; +use sp_core::{Get, RuntimeDebug, H160, H256}; +use sp_runtime::{traits::MaybeEquivalence, MultiAddress}; +use sp_std::prelude::*; +use xcm::prelude::{Junction::AccountKey20, *}; + +const MINIMUM_DEPOSIT: u128 = 1; + +/// Messages from Ethereum are versioned. This is because in future, +/// we may want to evolve the protocol so that the ethereum side sends XCM messages directly. +/// Instead having BridgeHub transcode the messages into XCM. +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub enum VersionedMessage { + V1(MessageV1), +} + +/// For V1, the ethereum side sends messages which are transcoded into XCM. 
These messages are +/// self-contained, in that they can be transcoded using only information in the message. +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub struct MessageV1 { + /// EIP-155 chain id of the origin Ethereum network + pub chain_id: u64, + /// The command originating from the Gateway contract + pub command: Command, +} + +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub enum Command { + /// Register a wrapped token on the AssetHub `ForeignAssets` pallet + RegisterToken { + /// The address of the ERC20 token to be bridged over to AssetHub + token: H160, + /// XCM execution fee on AssetHub + fee: u128, + }, + /// Send Ethereum token to AssetHub or another parachain + SendToken { + /// The address of the ERC20 token to be bridged over to AssetHub + token: H160, + /// The destination for the transfer + destination: Destination, + /// Amount to transfer + amount: u128, + /// XCM execution fee on AssetHub + fee: u128, + }, + /// Send Polkadot token back to the original parachain + SendNativeToken { + /// The Id of the token + token_id: TokenId, + /// The destination for the transfer + destination: Destination, + /// Amount to transfer + amount: u128, + /// XCM execution fee on AssetHub + fee: u128, + }, +} + +/// Destination for bridged tokens +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub enum Destination { + /// The funds will be deposited into account `id` on AssetHub + AccountId32 { id: [u8; 32] }, + /// The funds will deposited into the sovereign account of destination parachain `para_id` on + /// AssetHub, Account `id` on the destination parachain will receive the funds via a + /// reserve-backed transfer. See + ForeignAccountId32 { + para_id: u32, + id: [u8; 32], + /// XCM execution fee on final destination + fee: u128, + }, + /// The funds will deposited into the sovereign account of destination parachain `para_id` on + /// AssetHub, Account `id` on the destination parachain will receive the funds via a + /// reserve-backed transfer. See + ForeignAccountId20 { + para_id: u32, + id: [u8; 20], + /// XCM execution fee on final destination + fee: u128, + }, +} + +pub struct MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, +> where + CreateAssetCall: Get, + CreateAssetDeposit: Get, + Balance: BalanceT, + ConvertAssetId: MaybeEquivalence, + EthereumUniversalLocation: Get, + GlobalAssetHubLocation: Get, +{ + _phantom: PhantomData<( + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + )>, +} + +/// Reason why a message conversion failed. +#[derive(Copy, Clone, TypeInfo, PalletError, Encode, Decode, RuntimeDebug)] +pub enum ConvertMessageError { + /// The message version is not supported for conversion. + UnsupportedVersion, + InvalidDestination, + InvalidToken, + /// The fee asset is not supported for conversion. 
+ UnsupportedFeeAsset, + CannotReanchor, +} + +/// convert the inbound message to xcm which will be forwarded to the destination chain +pub trait ConvertMessage { + type Balance: BalanceT + From; + type AccountId; + /// Converts a versioned message into an XCM message and an optional topicID + fn convert( + message_id: H256, + message: VersionedMessage, + ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError>; +} + +impl< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > ConvertMessage + for MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > +where + CreateAssetCall: Get, + CreateAssetDeposit: Get, + InboundQueuePalletInstance: Get, + Balance: BalanceT + From, + AccountId: Into<[u8; 32]>, + ConvertAssetId: MaybeEquivalence, + EthereumUniversalLocation: Get, + GlobalAssetHubLocation: Get, +{ + type Balance = Balance; + type AccountId = AccountId; + + fn convert( + message_id: H256, + message: VersionedMessage, + ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError> { + use Command::*; + use VersionedMessage::*; + match message { + V1(MessageV1 { chain_id, command: RegisterToken { token, fee } }) => + Ok(Self::convert_register_token(message_id, chain_id, token, fee)), + V1(MessageV1 { chain_id, command: SendToken { token, destination, amount, fee } }) => + Ok(Self::convert_send_token(message_id, chain_id, token, destination, amount, fee)), + V1(MessageV1 { + chain_id, + command: SendNativeToken { token_id, destination, amount, fee }, + }) => Self::convert_send_native_token( + message_id, + chain_id, + token_id, + destination, + amount, + fee, + ), + } + } +} + +impl< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > + MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > +where + CreateAssetCall: Get, + CreateAssetDeposit: Get, + InboundQueuePalletInstance: Get, + Balance: BalanceT + From, + AccountId: Into<[u8; 32]>, + ConvertAssetId: MaybeEquivalence, + EthereumUniversalLocation: Get, + GlobalAssetHubLocation: Get, +{ + fn convert_register_token( + message_id: H256, + chain_id: u64, + token: H160, + fee: u128, + ) -> (Xcm<()>, Balance) { + let network = Ethereum { chain_id }; + let xcm_fee: Asset = (Location::parent(), fee).into(); + let deposit: Asset = (Location::parent(), CreateAssetDeposit::get()).into(); + + let total_amount = fee + CreateAssetDeposit::get(); + let total: Asset = (Location::parent(), total_amount).into(); + + let bridge_location = Location::new(2, GlobalConsensus(network)); + + let owner = EthereumLocationsConverterFor::<[u8; 32]>::from_chain_id(&chain_id); + let asset_id = Self::convert_token_address(network, token); + let create_call_index: [u8; 2] = CreateAssetCall::get(); + let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); + + let xcm: Xcm<()> = vec![ + // Teleport required fees. + ReceiveTeleportedAsset(total.into()), + // Pay for execution. + BuyExecution { fees: xcm_fee, weight_limit: Unlimited }, + // Fund the snowbridge sovereign with the required deposit for creation. 
+ DepositAsset { assets: Definite(deposit.into()), beneficiary: bridge_location.clone() }, + // This `SetAppendix` ensures that `xcm_fee` not spent by `Transact` will be + // deposited to snowbridge sovereign, instead of being trapped, regardless of + // `Transact` success or not. + SetAppendix(Xcm(vec![ + RefundSurplus, + DepositAsset { assets: AllCounted(1).into(), beneficiary: bridge_location }, + ])), + // Only our inbound-queue pallet is allowed to invoke `UniversalOrigin`. + DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), + // Change origin to the bridge. + UniversalOrigin(GlobalConsensus(network)), + // Call create_asset on foreign assets pallet. + Transact { + origin_kind: OriginKind::Xcm, + call: ( + create_call_index, + asset_id, + MultiAddress::<[u8; 32], ()>::Id(owner), + MINIMUM_DEPOSIT, + ) + .encode() + .into(), + }, + // Forward message id to Asset Hub + SetTopic(message_id.into()), + // Once the program ends here, appendix program will run, which will deposit any + // leftover fee to snowbridge sovereign. + ] + .into(); + + (xcm, total_amount.into()) + } + + fn convert_send_token( + message_id: H256, + chain_id: u64, + token: H160, + destination: Destination, + amount: u128, + asset_hub_fee: u128, + ) -> (Xcm<()>, Balance) { + let network = Ethereum { chain_id }; + let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); + let asset: Asset = (Self::convert_token_address(network, token), amount).into(); + + let (dest_para_id, beneficiary, dest_para_fee) = match destination { + // Final destination is a 32-byte account on AssetHub + Destination::AccountId32 { id } => + (None, Location::new(0, [AccountId32 { network: None, id }]), 0), + // Final destination is a 32-byte account on a sibling of AssetHub + Destination::ForeignAccountId32 { para_id, id, fee } => ( + Some(para_id), + Location::new(0, [AccountId32 { network: None, id }]), + // Total fee needs to cover execution on AssetHub and Sibling + fee, + ), + // Final destination is a 20-byte account on a sibling of AssetHub + Destination::ForeignAccountId20 { para_id, id, fee } => ( + Some(para_id), + Location::new(0, [AccountKey20 { network: None, key: id }]), + // Total fee needs to cover execution on AssetHub and Sibling + fee, + ), + }; + + let total_fees = asset_hub_fee.saturating_add(dest_para_fee); + let total_fee_asset: Asset = (Location::parent(), total_fees).into(); + let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); + + let mut instructions = vec![ + ReceiveTeleportedAsset(total_fee_asset.into()), + BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, + DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), + UniversalOrigin(GlobalConsensus(network)), + ReserveAssetDeposited(asset.clone().into()), + ClearOrigin, + ]; + + match dest_para_id { + Some(dest_para_id) => { + let dest_para_fee_asset: Asset = (Location::parent(), dest_para_fee).into(); + let bridge_location = Location::new(2, GlobalConsensus(network)); + + instructions.extend(vec![ + // After program finishes deposit any leftover assets to the snowbridge + // sovereign. + SetAppendix(Xcm(vec![DepositAsset { + assets: Wild(AllCounted(2)), + beneficiary: bridge_location, + }])), + // Perform a deposit reserve to send to destination chain. + DepositReserveAsset { + assets: Definite(vec![dest_para_fee_asset.clone(), asset].into()), + dest: Location::new(1, [Parachain(dest_para_id)]), + xcm: vec![ + // Buy execution on target. 
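+							// (`dest_para_fee_asset` is included in the `Definite` assets forwarded
+							// by this `DepositReserveAsset`, so it is available to pay for
+							// execution here.)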
+							BuyExecution { fees: dest_para_fee_asset, weight_limit: Unlimited },
+							// Deposit assets to beneficiary.
+							DepositAsset { assets: Wild(AllCounted(2)), beneficiary },
+							// Forward message id to destination parachain.
+							SetTopic(message_id.into()),
+						]
+						.into(),
+					},
+				]);
+			},
+			None => {
+				instructions.extend(vec![
+					// Deposit both the asset and the fees to the beneficiary so the fees will not
+					// get trapped. A further benefit is that any fees left over above the ED on
+					// AssetHub can be used to create the beneficiary account if it does not yet
+					// exist.
+					DepositAsset { assets: Wild(AllCounted(2)), beneficiary },
+				]);
+			},
+		}
+
+		// Forward message id to Asset Hub.
+		instructions.push(SetTopic(message_id.into()));
+
+		// Return the `instructions` to forward to AssetHub, and the `total_fees` to burn locally
+		// (the fees are teleported within `instructions`).
+		(instructions.into(), total_fees.into())
+	}
+
+	// Convert an ERC20 token address to a location that can be understood by Asset Hub.
+	fn convert_token_address(network: NetworkId, token: H160) -> Location {
+		Location::new(
+			2,
+			[GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }],
+		)
+	}
+
+	/// Constructs an XCM message destined for AssetHub that withdraws assets from the sovereign
+	/// account of the Gateway contract and deposits those assets into the recipient account on
+	/// AssetHub.
+	fn convert_send_native_token(
+		message_id: H256,
+		chain_id: u64,
+		token_id: TokenId,
+		destination: Destination,
+		amount: u128,
+		asset_hub_fee: u128,
+	) -> Result<(Xcm<()>, Balance), ConvertMessageError> {
+		let network = Ethereum { chain_id };
+		let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into();
+
+		let beneficiary = match destination {
+			// Final destination is a 32-byte account on AssetHub
+			Destination::AccountId32 { id } =>
+				Ok(Location::new(0, [AccountId32 { network: None, id }])),
+			_ => Err(ConvertMessageError::InvalidDestination),
+		}?;
+
+		let total_fee_asset: Asset = (Location::parent(), asset_hub_fee).into();
+
+		let asset_loc =
+			ConvertAssetId::convert(&token_id).ok_or(ConvertMessageError::InvalidToken)?;
+
+		let mut reanchored_asset_loc = asset_loc.clone();
+		reanchored_asset_loc
+			.reanchor(&GlobalAssetHubLocation::get(), &EthereumUniversalLocation::get())
+			.map_err(|_| ConvertMessageError::CannotReanchor)?;
+
+		let asset: Asset = (reanchored_asset_loc, amount).into();
+
+		let inbound_queue_pallet_index = InboundQueuePalletInstance::get();
+
+		let instructions = vec![
+			ReceiveTeleportedAsset(total_fee_asset.clone().into()),
+			BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited },
+			DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()),
+			UniversalOrigin(GlobalConsensus(network)),
+			WithdrawAsset(asset.clone().into()),
+			// Deposit both the asset and the fees to the beneficiary so the fees will not get
+			// trapped. A further benefit is that any fees left over above the ED on AssetHub can
+			// be used to create the beneficiary account if it does not yet exist.
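+			// (`AllCounted(2)` matches both the withdrawn token and any remaining fee asset.)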
+			DepositAsset { assets: Wild(AllCounted(2)), beneficiary },
+			SetTopic(message_id.into()),
+		];
+
+		// The fee (`asset_hub_fee`) is burnt on this chain when sending `instructions` to run on
+		// AH, since it is teleported within `instructions`.
+		Ok((instructions.into(), asset_hub_fee.into()))
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use crate::inbound::{CallIndex, EthereumLocationsConverterFor};
+	use frame_support::{assert_ok, parameter_types};
+	use hex_literal::hex;
+	use xcm::prelude::*;
+	use xcm_executor::traits::ConvertLocation;
+
+	const NETWORK: NetworkId = Ethereum { chain_id: 11155111 };
+
+	parameter_types! {
+		pub EthereumNetwork: NetworkId = NETWORK;
+
+		pub const CreateAssetCall: CallIndex = [1, 1];
+		pub const CreateAssetExecutionFee: u128 = 123;
+		pub const CreateAssetDeposit: u128 = 891;
+		pub const SendTokenExecutionFee: u128 = 592;
+	}
+
+	#[test]
+	fn test_contract_location_with_network_converts_successfully() {
+		let expected_account: [u8; 32] =
+			hex!("ce796ae65569a670d0c1cc1ac12515a3ce21b5fbf729d63d7b289baad070139d");
+		let contract_location = Location::new(2, [GlobalConsensus(NETWORK)]);
+
+		let account =
+			EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location)
+				.unwrap();
+
+		assert_eq!(account, expected_account);
+	}
+
+	#[test]
+	fn test_contract_location_with_incorrect_location_fails_convert() {
+		let contract_location = Location::new(2, [GlobalConsensus(Polkadot), Parachain(1000)]);
+
+		assert_eq!(
+			EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location),
+			None,
+		);
+	}
+
+	#[test]
+	fn test_reanchor_all_assets() {
+		let ethereum_context: InteriorLocation = [GlobalConsensus(Ethereum { chain_id: 1 })].into();
+		let ethereum = Location::new(2, ethereum_context.clone());
+		let ah_context: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into();
+		let global_ah = Location::new(1, ah_context.clone());
+		let assets = vec![
+			// DOT
+			Location::new(1, []),
+			// GLMR (Some Polkadot parachain currency)
+			Location::new(1, [Parachain(2004)]),
+			// AH asset
+			Location::new(0, [PalletInstance(50), GeneralIndex(42)]),
+			// KSM
+			Location::new(2, [GlobalConsensus(Kusama)]),
+			// KAR (Some Kusama parachain currency)
+			Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]),
+		];
+		for asset in assets.iter() {
+			// reanchor logic in pallet_xcm on AH
+			let mut reanchored_asset = asset.clone();
+			assert_ok!(reanchored_asset.reanchor(&ethereum, &ah_context));
+			// reanchor back to original location in context of Ethereum
+			let mut reanchored_asset_with_ethereum_context = reanchored_asset.clone();
+			assert_ok!(
+				reanchored_asset_with_ethereum_context.reanchor(&global_ah, &ethereum_context)
+			);
+			assert_eq!(reanchored_asset_with_ethereum_context, asset.clone());
+		}
+	}
+}
diff --git a/bridges/snowbridge/primitives/router/src/outbound/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/mod.rs
index 3b5dbdb77c89..22756b222812 100644
--- a/bridges/snowbridge/primitives/router/src/outbound/mod.rs
+++ b/bridges/snowbridge/primitives/router/src/outbound/mod.rs
@@ -1,423 +1,5 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: 2023 Snowfork
-//!
Converts XCM messages into simpler commands that can be processed by the Gateway contract - -#[cfg(test)] -mod tests; - -use core::slice::Iter; - -use codec::{Decode, Encode}; - -use frame_support::{ensure, traits::Get}; -use snowbridge_core::{ - outbound::{AgentExecuteCommand, Command, Message, SendMessage}, - AgentId, ChannelId, ParaId, TokenId, TokenIdOf, -}; -use sp_core::{H160, H256}; -use sp_runtime::traits::MaybeEquivalence; -use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; -use xcm::prelude::*; -use xcm_executor::traits::{ConvertLocation, ExportXcm}; - -pub struct EthereumBlobExporter< - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, ->( - PhantomData<( - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, - )>, -); - -impl - ExportXcm - for EthereumBlobExporter< - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, - > -where - UniversalLocation: Get, - EthereumNetwork: Get, - OutboundQueue: SendMessage, - AgentHashedDescription: ConvertLocation, - ConvertAssetId: MaybeEquivalence, -{ - type Ticket = (Vec, XcmHash); - - fn validate( - network: NetworkId, - _channel: u32, - universal_source: &mut Option, - destination: &mut Option, - message: &mut Option>, - ) -> SendResult { - let expected_network = EthereumNetwork::get(); - let universal_location = UniversalLocation::get(); - - if network != expected_network { - log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched bridge network {network:?}."); - return Err(SendError::NotApplicable) - } - - // Cloning destination to avoid modifying the value so subsequent exporters can use it. - let dest = destination.clone().take().ok_or(SendError::MissingArgument)?; - if dest != Here { - log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched remote destination {dest:?}."); - return Err(SendError::NotApplicable) - } - - // Cloning universal_source to avoid modifying the value so subsequent exporters can use it. - let (local_net, local_sub) = universal_source.clone() - .take() - .ok_or_else(|| { - log::error!(target: "xcm::ethereum_blob_exporter", "universal source not provided."); - SendError::MissingArgument - })? - .split_global() - .map_err(|()| { - log::error!(target: "xcm::ethereum_blob_exporter", "could not get global consensus from universal source '{universal_source:?}'."); - SendError::NotApplicable - })?; - - if Ok(local_net) != universal_location.global_consensus() { - log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched relay network {local_net:?}."); - return Err(SendError::NotApplicable) - } - - let para_id = match local_sub.as_slice() { - [Parachain(para_id)] => *para_id, - _ => { - log::error!(target: "xcm::ethereum_blob_exporter", "could not get parachain id from universal source '{local_sub:?}'."); - return Err(SendError::NotApplicable) - }, - }; - - let source_location = Location::new(1, local_sub.clone()); - - let agent_id = match AgentHashedDescription::convert_location(&source_location) { - Some(id) => id, - None => { - log::error!(target: "xcm::ethereum_blob_exporter", "unroutable due to not being able to create agent id. 
'{source_location:?}'"); - return Err(SendError::NotApplicable) - }, - }; - - let message = message.take().ok_or_else(|| { - log::error!(target: "xcm::ethereum_blob_exporter", "xcm message not provided."); - SendError::MissingArgument - })?; - - let mut converter = - XcmConverter::::new(&message, expected_network, agent_id); - let (command, message_id) = converter.convert().map_err(|err|{ - log::error!(target: "xcm::ethereum_blob_exporter", "unroutable due to pattern matching error '{err:?}'."); - SendError::Unroutable - })?; - - let channel_id: ChannelId = ParaId::from(para_id).into(); - - let outbound_message = Message { id: Some(message_id.into()), channel_id, command }; - - // validate the message - let (ticket, fee) = OutboundQueue::validate(&outbound_message).map_err(|err| { - log::error!(target: "xcm::ethereum_blob_exporter", "OutboundQueue validation of message failed. {err:?}"); - SendError::Unroutable - })?; - - // convert fee to Asset - let fee = Asset::from((Location::parent(), fee.total())).into(); - - Ok(((ticket.encode(), message_id), fee)) - } - - fn deliver(blob: (Vec, XcmHash)) -> Result { - let ticket: OutboundQueue::Ticket = OutboundQueue::Ticket::decode(&mut blob.0.as_ref()) - .map_err(|_| { - log::trace!(target: "xcm::ethereum_blob_exporter", "undeliverable due to decoding error"); - SendError::NotApplicable - })?; - - let message_id = OutboundQueue::deliver(ticket).map_err(|_| { - log::error!(target: "xcm::ethereum_blob_exporter", "OutboundQueue submit of message failed"); - SendError::Transport("other transport error") - })?; - - log::info!(target: "xcm::ethereum_blob_exporter", "message delivered {message_id:#?}."); - Ok(message_id.into()) - } -} - -/// Errors that can be thrown to the pattern matching step. -#[derive(PartialEq, Debug)] -enum XcmConverterError { - UnexpectedEndOfXcm, - EndOfXcmMessageExpected, - WithdrawAssetExpected, - DepositAssetExpected, - NoReserveAssets, - FilterDoesNotConsumeAllAssets, - TooManyAssets, - ZeroAssetTransfer, - BeneficiaryResolutionFailed, - AssetResolutionFailed, - InvalidFeeAsset, - SetTopicExpected, - ReserveAssetDepositedExpected, - InvalidAsset, - UnexpectedInstruction, -} - -macro_rules! match_expression { - ($expression:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $value:expr $(,)?) => { - match $expression { - $( $pattern )|+ $( if $guard )? => Some($value), - _ => None, - } - }; -} - -struct XcmConverter<'a, ConvertAssetId, Call> { - iter: Peekable>>, - ethereum_network: NetworkId, - agent_id: AgentId, - _marker: PhantomData, -} -impl<'a, ConvertAssetId, Call> XcmConverter<'a, ConvertAssetId, Call> -where - ConvertAssetId: MaybeEquivalence, -{ - fn new(message: &'a Xcm, ethereum_network: NetworkId, agent_id: AgentId) -> Self { - Self { - iter: message.inner().iter().peekable(), - ethereum_network, - agent_id, - _marker: Default::default(), - } - } - - fn convert(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> { - let result = match self.peek() { - Ok(ReserveAssetDeposited { .. }) => self.make_mint_foreign_token_command(), - // Get withdraw/deposit and make native tokens create message. - Ok(WithdrawAsset { .. }) => self.make_unlock_native_token_command(), - Err(e) => Err(e), - _ => return Err(XcmConverterError::UnexpectedInstruction), - }?; - - // All xcm instructions must be consumed before exit. 
- if self.next().is_ok() { - return Err(XcmConverterError::EndOfXcmMessageExpected) - } - - Ok(result) - } - - fn make_unlock_native_token_command( - &mut self, - ) -> Result<(Command, [u8; 32]), XcmConverterError> { - use XcmConverterError::*; - - // Get the reserve assets from WithdrawAsset. - let reserve_assets = - match_expression!(self.next()?, WithdrawAsset(reserve_assets), reserve_assets) - .ok_or(WithdrawAssetExpected)?; - - // Check if clear origin exists and skip over it. - if match_expression!(self.peek(), Ok(ClearOrigin), ()).is_some() { - let _ = self.next(); - } - - // Get the fee asset item from BuyExecution or continue parsing. - let fee_asset = match_expression!(self.peek(), Ok(BuyExecution { fees, .. }), fees); - if fee_asset.is_some() { - let _ = self.next(); - } - - let (deposit_assets, beneficiary) = match_expression!( - self.next()?, - DepositAsset { assets, beneficiary }, - (assets, beneficiary) - ) - .ok_or(DepositAssetExpected)?; - - // assert that the beneficiary is AccountKey20. - let recipient = match_expression!( - beneficiary.unpack(), - (0, [AccountKey20 { network, key }]) - if self.network_matches(network), - H160(*key) - ) - .ok_or(BeneficiaryResolutionFailed)?; - - // Make sure there are reserved assets. - if reserve_assets.len() == 0 { - return Err(NoReserveAssets) - } - - // Check the the deposit asset filter matches what was reserved. - if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) { - return Err(FilterDoesNotConsumeAllAssets) - } - - // We only support a single asset at a time. - ensure!(reserve_assets.len() == 1, TooManyAssets); - let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; - - // Fees are collected on AH, up front and directly from the user, to cover the - // complete cost of the transfer. Any additional fees provided in the XCM program are - // refunded to the beneficiary. We only validate the fee here if its provided to make sure - // the XCM program is well formed. Another way to think about this from an XCM perspective - // would be that the user offered to pay X amount in fees, but we charge 0 of that X amount - // (no fee) and refund X to the user. - if let Some(fee_asset) = fee_asset { - // The fee asset must be the same as the reserve asset. - if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun { - return Err(InvalidFeeAsset) - } - } - - let (token, amount) = match reserve_asset { - Asset { id: AssetId(inner_location), fun: Fungible(amount) } => - match inner_location.unpack() { - (0, [AccountKey20 { network, key }]) if self.network_matches(network) => - Some((H160(*key), *amount)), - _ => None, - }, - _ => None, - } - .ok_or(AssetResolutionFailed)?; - - // transfer amount must be greater than 0. - ensure!(amount > 0, ZeroAssetTransfer); - - // Check if there is a SetTopic and skip over it if found. 
- let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; - - Ok(( - Command::AgentExecute { - agent_id: self.agent_id, - command: AgentExecuteCommand::TransferToken { token, recipient, amount }, - }, - *topic_id, - )) - } - - fn next(&mut self) -> Result<&'a Instruction, XcmConverterError> { - self.iter.next().ok_or(XcmConverterError::UnexpectedEndOfXcm) - } - - fn peek(&mut self) -> Result<&&'a Instruction, XcmConverterError> { - self.iter.peek().ok_or(XcmConverterError::UnexpectedEndOfXcm) - } - - fn network_matches(&self, network: &Option) -> bool { - if let Some(network) = network { - *network == self.ethereum_network - } else { - true - } - } - - /// Convert the xcm for Polkadot-native token from AH into the Command - /// To match transfers of Polkadot-native tokens, we expect an input of the form: - /// # ReserveAssetDeposited - /// # ClearOrigin - /// # BuyExecution - /// # DepositAsset - /// # SetTopic - fn make_mint_foreign_token_command( - &mut self, - ) -> Result<(Command, [u8; 32]), XcmConverterError> { - use XcmConverterError::*; - - // Get the reserve assets. - let reserve_assets = - match_expression!(self.next()?, ReserveAssetDeposited(reserve_assets), reserve_assets) - .ok_or(ReserveAssetDepositedExpected)?; - - // Check if clear origin exists and skip over it. - if match_expression!(self.peek(), Ok(ClearOrigin), ()).is_some() { - let _ = self.next(); - } - - // Get the fee asset item from BuyExecution or continue parsing. - let fee_asset = match_expression!(self.peek(), Ok(BuyExecution { fees, .. }), fees); - if fee_asset.is_some() { - let _ = self.next(); - } - - let (deposit_assets, beneficiary) = match_expression!( - self.next()?, - DepositAsset { assets, beneficiary }, - (assets, beneficiary) - ) - .ok_or(DepositAssetExpected)?; - - // assert that the beneficiary is AccountKey20. - let recipient = match_expression!( - beneficiary.unpack(), - (0, [AccountKey20 { network, key }]) - if self.network_matches(network), - H160(*key) - ) - .ok_or(BeneficiaryResolutionFailed)?; - - // Make sure there are reserved assets. - if reserve_assets.len() == 0 { - return Err(NoReserveAssets) - } - - // Check the the deposit asset filter matches what was reserved. - if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) { - return Err(FilterDoesNotConsumeAllAssets) - } - - // We only support a single asset at a time. - ensure!(reserve_assets.len() == 1, TooManyAssets); - let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; - - // Fees are collected on AH, up front and directly from the user, to cover the - // complete cost of the transfer. Any additional fees provided in the XCM program are - // refunded to the beneficiary. We only validate the fee here if its provided to make sure - // the XCM program is well formed. Another way to think about this from an XCM perspective - // would be that the user offered to pay X amount in fees, but we charge 0 of that X amount - // (no fee) and refund X to the user. - if let Some(fee_asset) = fee_asset { - // The fee asset must be the same as the reserve asset. - if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun { - return Err(InvalidFeeAsset) - } - } - - let (asset_id, amount) = match reserve_asset { - Asset { id: AssetId(inner_location), fun: Fungible(amount) } => - Some((inner_location.clone(), *amount)), - _ => None, - } - .ok_or(AssetResolutionFailed)?; - - // transfer amount must be greater than 0. 
- ensure!(amount > 0, ZeroAssetTransfer); - - let token_id = TokenIdOf::convert_location(&asset_id).ok_or(InvalidAsset)?; - - let expected_asset_id = ConvertAssetId::convert(&token_id).ok_or(InvalidAsset)?; - - ensure!(asset_id == expected_asset_id, InvalidAsset); - - // Check if there is a SetTopic and skip over it if found. - let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; - - Ok((Command::MintForeignToken { token_id, recipient, amount }, *topic_id)) - } -} +// SPDX-FileCopyrightText: 2021-2022 Parity Technologies (UK) Ltd. +pub mod v1; +pub mod v2; diff --git a/bridges/snowbridge/primitives/router/src/outbound/tests.rs b/bridges/snowbridge/primitives/router/src/outbound/tests.rs deleted file mode 100644 index 44f81ce31b3a..000000000000 --- a/bridges/snowbridge/primitives/router/src/outbound/tests.rs +++ /dev/null @@ -1,1274 +0,0 @@ -use frame_support::parameter_types; -use hex_literal::hex; -use snowbridge_core::{ - outbound::{Fee, SendError, SendMessageFeeProvider}, - AgentIdOf, -}; -use sp_std::default::Default; -use xcm::{ - latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, - prelude::SendError as XcmSendError, -}; - -use super::*; - -parameter_types! { - const MaxMessageSize: u32 = u32::MAX; - const RelayNetwork: NetworkId = Polkadot; - UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(1013)].into(); - const BridgedNetwork: NetworkId = Ethereum{ chain_id: 1 }; - const NonBridgedNetwork: NetworkId = Ethereum{ chain_id: 2 }; -} - -struct MockOkOutboundQueue; -impl SendMessage for MockOkOutboundQueue { - type Ticket = (); - - fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { - Ok(((), Fee { local: 1, remote: 1 })) - } - - fn deliver(_: Self::Ticket) -> Result { - Ok(H256::zero()) - } -} - -impl SendMessageFeeProvider for MockOkOutboundQueue { - type Balance = u128; - - fn local_fee() -> Self::Balance { - 1 - } -} -struct MockErrOutboundQueue; -impl SendMessage for MockErrOutboundQueue { - type Ticket = (); - - fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { - Err(SendError::MessageTooLarge) - } - - fn deliver(_: Self::Ticket) -> Result { - Err(SendError::MessageTooLarge) - } -} - -impl SendMessageFeeProvider for MockErrOutboundQueue { - type Balance = u128; - - fn local_fee() -> Self::Balance { - 1 - } -} - -pub struct MockTokenIdConvert; -impl MaybeEquivalence for MockTokenIdConvert { - fn convert(_id: &TokenId) -> Option { - Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) - } - fn convert_back(_loc: &Location) -> Option { - None - } -} - -#[test] -fn exporter_validate_with_unknown_network_yields_not_applicable() { - let network = Ethereum { chain_id: 1337 }; - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = None; - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); -} - -#[test] -fn exporter_validate_with_invalid_destination_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = None; - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - 
MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); -} - -#[test] -fn exporter_validate_with_x8_destination_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = Some( - [OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild] - .into(), - ); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); -} - -#[test] -fn exporter_validate_without_universal_source_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); -} - -#[test] -fn exporter_validate_without_global_universal_location_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = Here.into(); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); -} - -#[test] -fn exporter_validate_without_global_bridge_location_yields_not_applicable() { - let network = NonBridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = Here.into(); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); -} - -#[test] -fn exporter_validate_with_remote_universal_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Kusama), Parachain(1000)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); -} - -#[test] -fn exporter_validate_without_para_id_in_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = Some(GlobalConsensus(Polkadot).into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - 
BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); -} - -#[test] -fn exporter_validate_complex_para_id_in_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000), PalletInstance(12)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); -} - -#[test] -fn exporter_validate_without_xcm_message_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); -} - -#[test] -fn exporter_validate_with_max_target_fee_yields_unroutable() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; - let fees: Assets = vec![fee.clone()].into(); - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let mut message: Option> = Some( - vec![ - WithdrawAsset(fees), - BuyExecution { fees: fee, weight_limit: Unlimited }, - WithdrawAsset(assets), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: Some(network), key: beneficiary_address } - .into(), - }, - SetTopic([0; 32]), - ] - .into(), - ); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - - assert_eq!(result, Err(XcmSendError::Unroutable)); -} - -#[test] -fn exporter_validate_with_unparsable_xcm_yields_unroutable() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let channel: u32 = 0; - let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; - let fees: Assets = vec![fee.clone()].into(); - - let mut message: Option> = - Some(vec![WithdrawAsset(fees), BuyExecution { fees: fee, weight_limit: Unlimited }].into()); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut 
universal_source, &mut destination, &mut message); - - assert_eq!(result, Err(XcmSendError::Unroutable)); -} - -#[test] -fn exporter_validate_xcm_success_case_1() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let fee = assets.clone().get(0).unwrap().clone(); - let filter: AssetFilter = assets.clone().into(); - - let mut message: Option> = Some( - vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(), - ); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - - assert!(result.is_ok()); -} - -#[test] -fn exporter_deliver_with_submit_failure_yields_unroutable() { - let result = EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockErrOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::deliver((hex!("deadbeef").to_vec(), XcmHash::default())); - assert_eq!(result, Err(XcmSendError::Transport("other transport error"))) -} - -#[test] -fn xcm_converter_convert_success() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let expected_payload = Command::AgentExecute { - agent_id: Default::default(), - command: AgentExecuteCommand::TransferToken { - token: token_address.into(), - recipient: beneficiary_address.into(), - amount: 1000, - }, - }; - let result = converter.convert(); - assert_eq!(result, Ok((expected_payload, [0; 32]))); -} - -#[test] -fn xcm_converter_convert_without_buy_execution_yields_success() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: 
beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let expected_payload = Command::AgentExecute { - agent_id: Default::default(), - command: AgentExecuteCommand::TransferToken { - token: token_address.into(), - recipient: beneficiary_address.into(), - amount: 1000, - }, - }; - let result = converter.convert(); - assert_eq!(result, Ok((expected_payload, [0; 32]))); -} - -#[test] -fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(All); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let expected_payload = Command::AgentExecute { - agent_id: Default::default(), - command: AgentExecuteCommand::TransferToken { - token: token_address.into(), - recipient: beneficiary_address.into(), - amount: 1000, - }, - }; - let result = converter.convert(); - assert_eq!(result, Ok((expected_payload, [0; 32]))); -} - -#[test] -fn xcm_converter_convert_with_fees_less_than_reserve_yields_success() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); - let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(500) }; - - let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); - - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee_asset, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let expected_payload = Command::AgentExecute { - agent_id: Default::default(), - command: AgentExecuteCommand::TransferToken { - token: token_address.into(), - recipient: beneficiary_address.into(), - amount: 1000, - }, - }; - let result = converter.convert(); - assert_eq!(result, Ok((expected_payload, [0; 32]))); -} - -#[test] -fn xcm_converter_convert_without_set_topic_yields_set_topic_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - 
WithdrawAsset(assets.clone()), - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - ClearTopic, - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::SetTopicExpected)); -} - -#[test] -fn xcm_converter_convert_with_partial_message_yields_unexpected_end_of_xcm() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let message: Xcm<()> = vec![WithdrawAsset(assets)].into(); - - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); -} - -#[test] -fn xcm_converter_with_different_fee_asset_fails() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let asset_location = [AccountKey20 { network: None, key: token_address }].into(); - let fee_asset = - Asset { id: AssetId(Location { parents: 0, interior: Here }), fun: Fungible(1000) }; - - let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); - - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee_asset, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); -} - -#[test] -fn xcm_converter_with_fees_greater_than_reserve_fails() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); - let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(1001) }; - - let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); - - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee_asset, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); -} - -#[test] -fn xcm_converter_convert_with_empty_xcm_yields_unexpected_end_of_xcm() { - let network = BridgedNetwork::get(); - - let message: Xcm<()> = vec![].into(); - - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let 
result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); -} - -#[test] -fn xcm_converter_convert_with_extra_instructions_yields_end_of_xcm_message_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ClearError, - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::EndOfXcmMessageExpected)); -} - -#[test] -fn xcm_converter_convert_without_withdraw_asset_yields_withdraw_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::UnexpectedInstruction)); -} - -#[test] -fn xcm_converter_convert_without_withdraw_asset_yields_deposit_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::DepositAssetExpected)); -} - -#[test] -fn xcm_converter_convert_without_assets_yields_no_reserve_assets() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![].into(); - let filter: AssetFilter = assets.clone().into(); - - let fee = Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }; - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, 
- DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::NoReserveAssets)); -} - -#[test] -fn xcm_converter_convert_with_two_assets_yields_too_many_assets() { - let network = BridgedNetwork::get(); - - let token_address_1: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let token_address_2: [u8; 20] = hex!("1100000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![ - Asset { - id: AssetId(AccountKey20 { network: None, key: token_address_1 }.into()), - fun: Fungible(1000), - }, - Asset { - id: AssetId(AccountKey20 { network: None, key: token_address_2 }.into()), - fun: Fungible(500), - }, - ] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::TooManyAssets)); -} - -#[test] -fn xcm_converter_convert_without_consuming_filter_yields_filter_does_not_consume_all_assets() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(0)); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::FilterDoesNotConsumeAllAssets)); -} - -#[test] -fn xcm_converter_convert_with_zero_amount_asset_yields_zero_asset_transfer() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(0), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = 
converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::ZeroAssetTransfer)); -} - -#[test] -fn xcm_converter_convert_non_ethereum_asset_yields_asset_resolution_failed() { - let network = BridgedNetwork::get(); - - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([GlobalConsensus(Polkadot), Parachain(1000), GeneralIndex(0)].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); -} - -#[test] -fn xcm_converter_convert_non_ethereum_chain_asset_yields_asset_resolution_failed() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId( - AccountKey20 { network: Some(Ethereum { chain_id: 2 }), key: token_address }.into(), - ), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); -} - -#[test] -fn xcm_converter_convert_non_ethereum_chain_yields_asset_resolution_failed() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId( - [AccountKey20 { network: Some(NonBridgedNetwork::get()), key: token_address }].into(), - ), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); -} - -#[test] -fn xcm_converter_convert_with_non_ethereum_beneficiary_yields_beneficiary_resolution_failed() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - - let beneficiary_address: [u8; 32] = - hex!("2000000000000000000000000000000000000000000000000000000000000000"); - - let 
assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: [ - GlobalConsensus(Polkadot), - Parachain(1000), - AccountId32 { network: Some(Polkadot), id: beneficiary_address }, - ] - .into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); -} - -#[test] -fn xcm_converter_convert_with_non_ethereum_chain_beneficiary_yields_beneficiary_resolution_failed() -{ - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { - network: Some(Ethereum { chain_id: 2 }), - key: beneficiary_address, - } - .into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); -} - -#[test] -fn test_describe_asset_hub() { - let legacy_location: Location = Location::new(0, [Parachain(1000)]); - let legacy_agent_id = AgentIdOf::convert_location(&legacy_location).unwrap(); - assert_eq!( - legacy_agent_id, - hex!("72456f48efed08af20e5b317abf8648ac66e86bb90a411d9b0b713f7364b75b4").into() - ); - let location: Location = Location::new(1, [Parachain(1000)]); - let agent_id = AgentIdOf::convert_location(&location).unwrap(); - assert_eq!( - agent_id, - hex!("81c5ab2571199e3188135178f3c2c8e2d268be1313d029b30f534fa579b69b79").into() - ) -} - -#[test] -fn test_describe_here() { - let location: Location = Location::new(0, []); - let agent_id = AgentIdOf::convert_location(&location).unwrap(); - assert_eq!( - agent_id, - hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into() - ) -} - -#[test] -fn xcm_converter_transfer_native_token_success() { - let network = BridgedNetwork::get(); - - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let amount = 1000000; - let asset_location = Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]); - let token_id = TokenIdOf::convert_location(&asset_location).unwrap(); - - let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - ReserveAssetDeposited(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - 
SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let expected_payload = - Command::MintForeignToken { recipient: beneficiary_address.into(), amount, token_id }; - let result = converter.convert(); - assert_eq!(result, Ok((expected_payload, [0; 32]))); -} - -#[test] -fn xcm_converter_transfer_native_token_with_invalid_location_will_fail() { - let network = BridgedNetwork::get(); - - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let amount = 1000000; - // Invalid asset location from a different consensus - let asset_location = - Location { parents: 2, interior: [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))].into() }; - - let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - ReserveAssetDeposited(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::InvalidAsset)); -} - -#[test] -fn exporter_validate_with_invalid_dest_does_not_alter_destination() { - let network = BridgedNetwork::get(); - let destination: InteriorLocation = Parachain(1000).into(); - - let universal_source: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let fee = assets.clone().get(0).unwrap().clone(); - let filter: AssetFilter = assets.clone().into(); - let msg: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut msg_wrapper: Option> = Some(msg.clone()); - let mut dest_wrapper = Some(destination.clone()); - let mut universal_source_wrapper = Some(universal_source.clone()); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate( - network, channel, &mut universal_source_wrapper, &mut dest_wrapper, &mut msg_wrapper - ); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - - // ensure mutable variables are not changed - assert_eq!(Some(destination), dest_wrapper); - assert_eq!(Some(msg), msg_wrapper); - assert_eq!(Some(universal_source), universal_source_wrapper); -} - -#[test] -fn exporter_validate_with_invalid_universal_source_does_not_alter_universal_source() { - let network = BridgedNetwork::get(); - let destination: InteriorLocation = Here.into(); - - let universal_source: InteriorLocation = - [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)].into(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = 
hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let fee = assets.clone().get(0).unwrap().clone(); - let filter: AssetFilter = assets.clone().into(); - let msg: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut msg_wrapper: Option> = Some(msg.clone()); - let mut dest_wrapper = Some(destination.clone()); - let mut universal_source_wrapper = Some(universal_source.clone()); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate( - network, channel, &mut universal_source_wrapper, &mut dest_wrapper, &mut msg_wrapper - ); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - - // ensure mutable variables are not changed - assert_eq!(Some(destination), dest_wrapper); - assert_eq!(Some(msg), msg_wrapper); - assert_eq!(Some(universal_source), universal_source_wrapper); -} diff --git a/bridges/snowbridge/primitives/router/src/outbound/v1.rs b/bridges/snowbridge/primitives/router/src/outbound/v1.rs new file mode 100644 index 000000000000..f952d5c613f9 --- /dev/null +++ b/bridges/snowbridge/primitives/router/src/outbound/v1.rs @@ -0,0 +1,1703 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Converts XCM messages into simpler commands that can be processed by the Gateway contract + +use core::slice::Iter; + +use codec::{Decode, Encode}; + +use frame_support::{ensure, traits::Get}; +use snowbridge_core::{ + outbound::v1::{AgentExecuteCommand, Command, Message, SendMessage}, + AgentId, ChannelId, ParaId, TokenId, TokenIdOf, +}; +use sp_core::{H160, H256}; +use sp_runtime::traits::MaybeEquivalence; +use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; +use xcm::prelude::*; +use xcm_executor::traits::{ConvertLocation, ExportXcm}; + +pub struct EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, +>( + PhantomData<( + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + )>, +); + +impl + ExportXcm + for EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + > +where + UniversalLocation: Get, + EthereumNetwork: Get, + OutboundQueue: SendMessage, + AgentHashedDescription: ConvertLocation, + ConvertAssetId: MaybeEquivalence, +{ + type Ticket = (Vec, XcmHash); + + fn validate( + network: NetworkId, + _channel: u32, + universal_source: &mut Option, + destination: &mut Option, + message: &mut Option>, + ) -> SendResult { + let expected_network = EthereumNetwork::get(); + let universal_location = UniversalLocation::get(); + + if network != expected_network { + log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched bridge network {network:?}."); + return Err(SendError::NotApplicable) + } + + // Cloning destination to avoid modifying the value so subsequent exporters can use it. 
+ let dest = destination.clone().take().ok_or(SendError::MissingArgument)?; + if dest != Here { + log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched remote destination {dest:?}."); + return Err(SendError::NotApplicable) + } + + // Cloning universal_source to avoid modifying the value so subsequent exporters can use it. + let (local_net, local_sub) = universal_source.clone() + .take() + .ok_or_else(|| { + log::error!(target: "xcm::ethereum_blob_exporter", "universal source not provided."); + SendError::MissingArgument + })? + .split_global() + .map_err(|()| { + log::error!(target: "xcm::ethereum_blob_exporter", "could not get global consensus from universal source '{universal_source:?}'."); + SendError::NotApplicable + })?; + + if Ok(local_net) != universal_location.global_consensus() { + log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched relay network {local_net:?}."); + return Err(SendError::NotApplicable) + } + + let para_id = match local_sub.as_slice() { + [Parachain(para_id)] => *para_id, + _ => { + log::error!(target: "xcm::ethereum_blob_exporter", "could not get parachain id from universal source '{local_sub:?}'."); + return Err(SendError::NotApplicable) + }, + }; + + let source_location = Location::new(1, local_sub.clone()); + + let agent_id = match AgentHashedDescription::convert_location(&source_location) { + Some(id) => id, + None => { + log::error!(target: "xcm::ethereum_blob_exporter", "unroutable due to not being able to create agent id. '{source_location:?}'"); + return Err(SendError::NotApplicable) + }, + }; + + let message = message.clone().ok_or_else(|| { + log::error!(target: "xcm::ethereum_blob_exporter", "xcm message not provided."); + SendError::MissingArgument + })?; + + let mut converter = + XcmConverter::::new(&message, expected_network, agent_id); + let (command, message_id) = converter.convert().map_err(|err|{ + log::error!(target: "xcm::ethereum_blob_exporter", "unroutable due to pattern matching error '{err:?}'."); + SendError::Unroutable + })?; + + let channel_id: ChannelId = ParaId::from(para_id).into(); + + let outbound_message = Message { id: Some(message_id.into()), channel_id, command }; + + // validate the message + let (ticket, fee) = OutboundQueue::validate(&outbound_message).map_err(|err| { + log::error!(target: "xcm::ethereum_blob_exporter", "OutboundQueue validation of message failed. {err:?}"); + SendError::Unroutable + })?; + + // convert fee to Asset + let fee = Asset::from((Location::parent(), fee.total())).into(); + + Ok(((ticket.encode(), message_id), fee)) + } + + fn deliver(blob: (Vec, XcmHash)) -> Result { + let ticket: OutboundQueue::Ticket = OutboundQueue::Ticket::decode(&mut blob.0.as_ref()) + .map_err(|_| { + log::trace!(target: "xcm::ethereum_blob_exporter", "undeliverable due to decoding error"); + SendError::NotApplicable + })?; + + let message_id = OutboundQueue::deliver(ticket).map_err(|_| { + log::error!(target: "xcm::ethereum_blob_exporter", "OutboundQueue submit of message failed"); + SendError::Transport("other transport error") + })?; + + log::info!(target: "xcm::ethereum_blob_exporter", "message delivered {message_id:#?}."); + Ok(message_id.into()) + } +} + +/// Errors that can be thrown to the pattern matching step. 
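+/// For example, a `DepositAsset` whose beneficiary is not a local `AccountKey20` resolves to
+/// `BeneficiaryResolutionFailed`, and a `BuyExecution` fee that does not match the reserve asset
+/// resolves to `InvalidFeeAsset`; the unit tests below exercise these cases.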
+#[derive(PartialEq, Debug)]
+enum XcmConverterError {
+    UnexpectedEndOfXcm,
+    EndOfXcmMessageExpected,
+    WithdrawAssetExpected,
+    DepositAssetExpected,
+    NoReserveAssets,
+    FilterDoesNotConsumeAllAssets,
+    TooManyAssets,
+    ZeroAssetTransfer,
+    BeneficiaryResolutionFailed,
+    AssetResolutionFailed,
+    InvalidFeeAsset,
+    SetTopicExpected,
+    ReserveAssetDepositedExpected,
+    InvalidAsset,
+    UnexpectedInstruction,
+}
+
+macro_rules! match_expression {
+    ($expression:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $value:expr $(,)?) => {
+        match $expression {
+            $( $pattern )|+ $( if $guard )? => Some($value),
+            _ => None,
+        }
+    };
+}
+
+struct XcmConverter<'a, ConvertAssetId, Call> {
+    iter: Peekable<Iter<'a, Instruction<Call>>>,
+    ethereum_network: NetworkId,
+    agent_id: AgentId,
+    _marker: PhantomData<Call>,
+}
+impl<'a, ConvertAssetId, Call> XcmConverter<'a, ConvertAssetId, Call>
+where
+    ConvertAssetId: MaybeEquivalence<TokenId, Location>,
+{
+    fn new(message: &'a Xcm<Call>, ethereum_network: NetworkId, agent_id: AgentId) -> Self {
+        Self {
+            iter: message.inner().iter().peekable(),
+            ethereum_network,
+            agent_id,
+            _marker: Default::default(),
+        }
+    }
+
+    fn convert(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> {
+        let result = match self.peek() {
+            Ok(ReserveAssetDeposited { .. }) => self.send_native_tokens_message(),
+            // Get withdraw/deposit and make native tokens create message.
+            Ok(WithdrawAsset { .. }) => self.send_tokens_message(),
+            Err(e) => Err(e),
+            _ => return Err(XcmConverterError::UnexpectedInstruction),
+        }?;
+
+        // All xcm instructions must be consumed before exit.
+        if self.next().is_ok() {
+            return Err(XcmConverterError::EndOfXcmMessageExpected)
+        }
+
+        Ok(result)
+    }
+
+    fn send_tokens_message(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> {
+        use XcmConverterError::*;
+
+        // Get the reserve assets from WithdrawAsset.
+        let reserve_assets =
+            match_expression!(self.next()?, WithdrawAsset(reserve_assets), reserve_assets)
+                .ok_or(WithdrawAssetExpected)?;
+
+        // Check if clear origin exists and skip over it.
+        if match_expression!(self.peek(), Ok(ClearOrigin), ()).is_some() {
+            let _ = self.next();
+        }
+
+        // Get the fee asset item from BuyExecution or continue parsing.
+        let fee_asset = match_expression!(self.peek(), Ok(BuyExecution { fees, .. }), fees);
+        if fee_asset.is_some() {
+            let _ = self.next();
+        }
+
+        let (deposit_assets, beneficiary) = match_expression!(
+            self.next()?,
+            DepositAsset { assets, beneficiary },
+            (assets, beneficiary)
+        )
+        .ok_or(DepositAssetExpected)?;
+
+        // assert that the beneficiary is AccountKey20.
+        let recipient = match_expression!(
+            beneficiary.unpack(),
+            (0, [AccountKey20 { network, key }])
+                if self.network_matches(network),
+            H160(*key)
+        )
+        .ok_or(BeneficiaryResolutionFailed)?;
+
+        // Make sure there are reserved assets.
+        if reserve_assets.len() == 0 {
+            return Err(NoReserveAssets)
+        }
+
+        // Check that the deposit asset filter matches what was reserved.
+        if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) {
+            return Err(FilterDoesNotConsumeAllAssets)
+        }
+
+        // We only support a single asset at a time.
+        ensure!(reserve_assets.len() == 1, TooManyAssets);
+        let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?;
+
+        // If there was a fee specified verify it.
+        if let Some(fee_asset) = fee_asset {
+            // The fee asset must be the same as the reserve asset.
+            if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun {
+                return Err(InvalidFeeAsset)
+            }
+        }
+
+        let (token, amount) = match reserve_asset {
+            Asset { id: AssetId(inner_location), fun: Fungible(amount) } =>
+                match inner_location.unpack() {
+                    (0, [AccountKey20 { network, key }]) if self.network_matches(network) =>
+                        Some((H160(*key), *amount)),
+                    _ => None,
+                },
+            _ => None,
+        }
+        .ok_or(AssetResolutionFailed)?;
+
+        // transfer amount must be greater than 0.
+        ensure!(amount > 0, ZeroAssetTransfer);
+
+        // Check if there is a SetTopic.
+        let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?;
+
+        Ok((
+            Command::AgentExecute {
+                agent_id: self.agent_id,
+                command: AgentExecuteCommand::TransferToken { token, recipient, amount },
+            },
+            *topic_id,
+        ))
+    }
+
+    fn next(&mut self) -> Result<&'a Instruction<Call>, XcmConverterError> {
+        self.iter.next().ok_or(XcmConverterError::UnexpectedEndOfXcm)
+    }
+
+    fn peek(&mut self) -> Result<&&'a Instruction<Call>, XcmConverterError> {
+        self.iter.peek().ok_or(XcmConverterError::UnexpectedEndOfXcm)
+    }
+
+    fn network_matches(&self, network: &Option<NetworkId>) -> bool {
+        if let Some(network) = network {
+            *network == self.ethereum_network
+        } else {
+            true
+        }
+    }
+
+    /// Convert the xcm for Polkadot-native token from AH into the Command
+    /// To match transfers of Polkadot-native tokens, we expect an input of the form:
+    /// # ReserveAssetDeposited
+    /// # ClearOrigin
+    /// # BuyExecution
+    /// # DepositAsset
+    /// # SetTopic
+    fn send_native_tokens_message(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> {
+        use XcmConverterError::*;
+
+        // Get the reserve assets.
+        let reserve_assets =
+            match_expression!(self.next()?, ReserveAssetDeposited(reserve_assets), reserve_assets)
+                .ok_or(ReserveAssetDepositedExpected)?;
+
+        // Check if clear origin exists and skip over it.
+        if match_expression!(self.peek(), Ok(ClearOrigin), ()).is_some() {
+            let _ = self.next();
+        }
+
+        // Get the fee asset item from BuyExecution or continue parsing.
+        let fee_asset = match_expression!(self.peek(), Ok(BuyExecution { fees, .. }), fees);
+        if fee_asset.is_some() {
+            let _ = self.next();
+        }
+
+        let (deposit_assets, beneficiary) = match_expression!(
+            self.next()?,
+            DepositAsset { assets, beneficiary },
+            (assets, beneficiary)
+        )
+        .ok_or(DepositAssetExpected)?;
+
+        // assert that the beneficiary is AccountKey20.
+        let recipient = match_expression!(
+            beneficiary.unpack(),
+            (0, [AccountKey20 { network, key }])
+                if self.network_matches(network),
+            H160(*key)
+        )
+        .ok_or(BeneficiaryResolutionFailed)?;
+
+        // Make sure there are reserved assets.
+        if reserve_assets.len() == 0 {
+            return Err(NoReserveAssets)
+        }
+
+        // Check that the deposit asset filter matches what was reserved.
+        if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) {
+            return Err(FilterDoesNotConsumeAllAssets)
+        }
+
+        // We only support a single asset at a time.
+        ensure!(reserve_assets.len() == 1, TooManyAssets);
+        let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?;
+
+        // If there was a fee specified verify it.
+        if let Some(fee_asset) = fee_asset {
+            // The fee asset must be the same as the reserve asset.
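+            // For example, mirroring the fee tests below
+            // (`xcm_converter_convert_with_fees_less_than_reserve_yields_success` and
+            // `xcm_converter_with_fees_greater_than_reserve_fails`): with a reserve of 1000 units,
+            // a fee of 500 in the same asset passes, while a fee of 1001, or a fee in a different
+            // asset id, is rejected as `InvalidFeeAsset`.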
+ if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun { + return Err(InvalidFeeAsset) + } + } + + let (asset_id, amount) = match reserve_asset { + Asset { id: AssetId(inner_location), fun: Fungible(amount) } => + Some((inner_location.clone(), *amount)), + _ => None, + } + .ok_or(AssetResolutionFailed)?; + + // transfer amount must be greater than 0. + ensure!(amount > 0, ZeroAssetTransfer); + + let token_id = TokenIdOf::convert_location(&asset_id).ok_or(InvalidAsset)?; + + let expected_asset_id = ConvertAssetId::convert(&token_id).ok_or(InvalidAsset)?; + + ensure!(asset_id == expected_asset_id, InvalidAsset); + + // Check if there is a SetTopic. + let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; + + Ok((Command::MintForeignToken { token_id, recipient, amount }, *topic_id)) + } +} + +#[cfg(test)] +mod tests { + use frame_support::parameter_types; + use hex_literal::hex; + use snowbridge_core::{ + outbound::{v1::Fee, SendError, SendMessageFeeProvider}, + AgentIdOf, + }; + use sp_std::default::Default; + use xcm::{ + latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, + prelude::SendError as XcmSendError, + }; + + use super::*; + + parameter_types! { + const MaxMessageSize: u32 = u32::MAX; + const RelayNetwork: NetworkId = Polkadot; + UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(1013)].into(); + const BridgedNetwork: NetworkId = Ethereum{ chain_id: 1 }; + const NonBridgedNetwork: NetworkId = Ethereum{ chain_id: 2 }; + } + + struct MockOkOutboundQueue; + impl SendMessage for MockOkOutboundQueue { + type Ticket = (); + + fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { + Ok(((), Fee { local: 1, remote: 1 })) + } + + fn deliver(_: Self::Ticket) -> Result { + Ok(H256::zero()) + } + } + + impl SendMessageFeeProvider for MockOkOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } + } + struct MockErrOutboundQueue; + impl SendMessage for MockErrOutboundQueue { + type Ticket = (); + + fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { + Err(SendError::MessageTooLarge) + } + + fn deliver(_: Self::Ticket) -> Result { + Err(SendError::MessageTooLarge) + } + } + + impl SendMessageFeeProvider for MockErrOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } + } + + pub struct MockTokenIdConvert; + impl MaybeEquivalence for MockTokenIdConvert { + fn convert(_id: &TokenId) -> Option { + Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) + } + fn convert_back(_loc: &Location) -> Option { + None + } + } + + #[test] + fn exporter_validate_with_unknown_network_yields_not_applicable() { + let network = Ethereum { chain_id: 1337 }; + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_with_invalid_destination_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + 
BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_with_x8_destination_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = Some( + [ + OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, + OnlyChild, + ] + .into(), + ); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_without_universal_source_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_without_global_universal_location_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_without_global_bridge_location_yields_not_applicable() { + let network = NonBridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_with_remote_universal_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Kusama), Parachain(1000)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_without_para_id_in_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Some(GlobalConsensus(Polkadot).into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + 
EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_complex_para_id_in_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000), PalletInstance(12)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_without_xcm_message_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_with_max_target_fee_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; + let fees: Assets = vec![fee.clone()].into(); + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(fees), + BuyExecution { fees: fee, weight_limit: Unlimited }, + WithdrawAsset(assets), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: Some(network), key: beneficiary_address } + .into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert_eq!(result, Err(XcmSendError::Unroutable)); + } + + #[test] + fn exporter_validate_with_unparsable_xcm_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let channel: u32 = 0; + let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; + let fees: Assets = vec![fee.clone()].into(); + + let mut message: Option> = Some( + vec![WithdrawAsset(fees), BuyExecution { fees: fee, weight_limit: Unlimited }].into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, 
+ MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert_eq!(result, Err(XcmSendError::Unroutable)); + } + + #[test] + fn exporter_validate_xcm_success_case_1() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert!(result.is_ok()); + } + + #[test] + fn exporter_deliver_with_submit_failure_yields_unroutable() { + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockErrOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::deliver((hex!("deadbeef").to_vec(), XcmHash::default())); + assert_eq!(result, Err(XcmSendError::Transport("other transport error"))) + } + + #[test] + fn xcm_converter_convert_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = Command::AgentExecute { + agent_id: Default::default(), + command: AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); + } + + #[test] + fn xcm_converter_convert_without_buy_execution_yields_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + 
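+            // `ClearOrigin` and `BuyExecution` are deliberately omitted here: the converter only
+            // skips them when they are present, so the transfer is still accepted without them.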
DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = Command::AgentExecute { + agent_id: Default::default(), + command: AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); + } + + #[test] + fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(All); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = Command::AgentExecute { + agent_id: Default::default(), + command: AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); + } + + #[test] + fn xcm_converter_convert_with_fees_less_than_reserve_yields_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); + let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(500) }; + + let assets: Assets = + vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); + + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = Command::AgentExecute { + agent_id: Default::default(), + command: AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); + } + + #[test] + fn xcm_converter_convert_without_set_topic_yields_set_topic_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + 
.into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + ClearTopic, + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::SetTopicExpected)); + } + + #[test] + fn xcm_converter_convert_with_partial_message_yields_unexpected_end_of_xcm() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let message: Xcm<()> = vec![WithdrawAsset(assets)].into(); + + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); + } + + #[test] + fn xcm_converter_with_different_fee_asset_fails() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location = [AccountKey20 { network: None, key: token_address }].into(); + let fee_asset = + Asset { id: AssetId(Location { parents: 0, interior: Here }), fun: Fungible(1000) }; + + let assets: Assets = + vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); + + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); + } + + #[test] + fn xcm_converter_with_fees_greater_than_reserve_fails() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); + let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(1001) }; + + let assets: Assets = + vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); + + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); + } + + #[test] + fn xcm_converter_convert_with_empty_xcm_yields_unexpected_end_of_xcm() { + let network = BridgedNetwork::get(); + + let message: Xcm<()> = 
vec![].into(); + + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); + } + + #[test] + fn xcm_converter_convert_with_extra_instructions_yields_end_of_xcm_message_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ClearError, + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::EndOfXcmMessageExpected)); + } + + #[test] + fn xcm_converter_convert_without_withdraw_asset_yields_withdraw_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedInstruction)); + } + + #[test] + fn xcm_converter_convert_without_withdraw_asset_yields_deposit_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::DepositAssetExpected)); + } + + #[test] + fn xcm_converter_convert_without_assets_yields_no_reserve_assets() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![].into(); + let filter: AssetFilter = assets.clone().into(); + + let fee = Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }; + + let 
message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::NoReserveAssets)); + } + + #[test] + fn xcm_converter_convert_with_two_assets_yields_too_many_assets() { + let network = BridgedNetwork::get(); + + let token_address_1: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let token_address_2: [u8; 20] = hex!("1100000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![ + Asset { + id: AssetId(AccountKey20 { network: None, key: token_address_1 }.into()), + fun: Fungible(1000), + }, + Asset { + id: AssetId(AccountKey20 { network: None, key: token_address_2 }.into()), + fun: Fungible(500), + }, + ] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::TooManyAssets)); + } + + #[test] + fn xcm_converter_convert_without_consuming_filter_yields_filter_does_not_consume_all_assets() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(0)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::FilterDoesNotConsumeAllAssets)); + } + + #[test] + fn xcm_converter_convert_with_zero_amount_asset_yields_zero_asset_transfer() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(0), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + 
SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::ZeroAssetTransfer)); + } + + #[test] + fn xcm_converter_convert_non_ethereum_asset_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([GlobalConsensus(Polkadot), Parachain(1000), GeneralIndex(0)].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); + } + + #[test] + fn xcm_converter_convert_non_ethereum_chain_asset_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId( + AccountKey20 { network: Some(Ethereum { chain_id: 2 }), key: token_address }.into(), + ), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); + } + + #[test] + fn xcm_converter_convert_non_ethereum_chain_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId( + [AccountKey20 { network: Some(NonBridgedNetwork::get()), key: token_address }] + .into(), + ), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); + } + + #[test] + fn xcm_converter_convert_with_non_ethereum_beneficiary_yields_beneficiary_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = 
hex!("1000000000000000000000000000000000000000"); + + let beneficiary_address: [u8; 32] = + hex!("2000000000000000000000000000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: [ + GlobalConsensus(Polkadot), + Parachain(1000), + AccountId32 { network: Some(Polkadot), id: beneficiary_address }, + ] + .into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); + } + + #[test] + fn xcm_converter_convert_with_non_ethereum_chain_beneficiary_yields_beneficiary_resolution_failed( + ) { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { + network: Some(Ethereum { chain_id: 2 }), + key: beneficiary_address, + } + .into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); + } + + #[test] + fn test_describe_asset_hub() { + let legacy_location: Location = Location::new(0, [Parachain(1000)]); + let legacy_agent_id = AgentIdOf::convert_location(&legacy_location).unwrap(); + assert_eq!( + legacy_agent_id, + hex!("72456f48efed08af20e5b317abf8648ac66e86bb90a411d9b0b713f7364b75b4").into() + ); + let location: Location = Location::new(1, [Parachain(1000)]); + let agent_id = AgentIdOf::convert_location(&location).unwrap(); + assert_eq!( + agent_id, + hex!("81c5ab2571199e3188135178f3c2c8e2d268be1313d029b30f534fa579b69b79").into() + ) + } + + #[test] + fn test_describe_here() { + let location: Location = Location::new(0, []); + let agent_id = AgentIdOf::convert_location(&location).unwrap(); + assert_eq!( + agent_id, + hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into() + ) + } + + #[test] + fn xcm_converter_transfer_native_token_success() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let amount = 1000000; + let asset_location = Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]); + let token_id = TokenIdOf::convert_location(&asset_location).unwrap(); + + let assets: Assets = + vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + ReserveAssetDeposited(assets.clone()), + ClearOrigin, + BuyExecution { 
fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = + Command::MintForeignToken { recipient: beneficiary_address.into(), amount, token_id }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); + } + + #[test] + fn xcm_converter_transfer_native_token_with_invalid_location_will_fail() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let amount = 1000000; + // Invalid asset location from a different consensus + let asset_location = Location { + parents: 2, + interior: [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))].into(), + }; + + let assets: Assets = + vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + ReserveAssetDeposited(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::InvalidAsset)); + } + + #[test] + fn exporter_validate_with_invalid_dest_does_not_alter_destination() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Parachain(1000).into(); + + let universal_source: InteriorLocation = + [GlobalConsensus(Polkadot), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut dest_wrapper = Some(destination.clone()); + let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate( + network, + channel, + &mut universal_source_wrapper, + &mut dest_wrapper, + &mut msg_wrapper, + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); + } + + #[test] + fn exporter_validate_with_invalid_universal_source_does_not_alter_universal_source() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Here.into(); + + let universal_source: InteriorLocation = + 
[GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut dest_wrapper = Some(destination.clone()); + let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate( + network, + channel, + &mut universal_source_wrapper, + &mut dest_wrapper, + &mut msg_wrapper, + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); + } +} diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2.rs b/bridges/snowbridge/primitives/router/src/outbound/v2.rs new file mode 100644 index 000000000000..4476c913aa00 --- /dev/null +++ b/bridges/snowbridge/primitives/router/src/outbound/v2.rs @@ -0,0 +1,1777 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! 
Converts XCM messages into simpler commands that can be processed by the Gateway contract + +use codec::{Decode, Encode}; +use core::slice::Iter; +use sp_std::ops::ControlFlow; + +use frame_support::{ + ensure, + traits::{Contains, Get, ProcessMessageError}, + BoundedVec, +}; +use snowbridge_core::{ + outbound::v2::{Command, Message, SendMessage}, + AgentId, TokenId, TokenIdOf, TokenIdOf as LocationIdOf, +}; +use sp_core::{H160, H256}; +use sp_runtime::traits::MaybeEquivalence; +use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; +use xcm::prelude::*; +use xcm_builder::{CreateMatcher, ExporterFor, MatchXcm}; +use xcm_executor::traits::{ConvertLocation, ExportXcm}; + +const TARGET: &'static str = "xcm::ethereum_blob_exporter::v2"; + +pub struct EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, +>( + PhantomData<( + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + )>, +); + +impl + ExportXcm + for EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + > +where + UniversalLocation: Get, + EthereumNetwork: Get, + OutboundQueue: SendMessage, + AgentHashedDescription: ConvertLocation, + ConvertAssetId: MaybeEquivalence, +{ + type Ticket = (Vec, XcmHash); + + fn validate( + network: NetworkId, + _channel: u32, + universal_source: &mut Option, + destination: &mut Option, + message: &mut Option>, + ) -> SendResult { + let expected_network = EthereumNetwork::get(); + let universal_location = UniversalLocation::get(); + + if network != expected_network { + log::trace!(target: TARGET, "skipped due to unmatched bridge network {network:?}."); + return Err(SendError::NotApplicable) + } + + // Cloning destination to avoid modifying the value so subsequent exporters can use it. + let dest = destination.clone().ok_or(SendError::MissingArgument)?; + if dest != Here { + log::trace!(target: TARGET, "skipped due to unmatched remote destination {dest:?}."); + return Err(SendError::NotApplicable) + } + + // Cloning universal_source to avoid modifying the value so subsequent exporters can use it. + let (local_net, local_sub) = universal_source.clone() + .ok_or_else(|| { + log::error!(target: TARGET, "universal source not provided."); + SendError::MissingArgument + })? + .split_global() + .map_err(|()| { + log::error!(target: TARGET, "could not get global consensus from universal source '{universal_source:?}'."); + SendError::NotApplicable + })?; + + if Ok(local_net) != universal_location.global_consensus() { + log::trace!(target: TARGET, "skipped due to unmatched relay network {local_net:?}."); + return Err(SendError::NotApplicable) + } + + let source_location = Location::new(1, local_sub.clone()); + + let agent_id = match AgentHashedDescription::convert_location(&source_location) { + Some(id) => id, + None => { + log::error!(target: TARGET, "unroutable due to not being able to create agent id. '{source_location:?}'"); + return Err(SendError::NotApplicable) + }, + }; + + let message = message.clone().ok_or_else(|| { + log::error!(target: TARGET, "xcm message not provided."); + SendError::MissingArgument + })?; + + // Inspect AliasOrigin as V2 message + let mut instructions = message.clone().0; + let result = instructions.matcher().match_next_inst_while( + |_| true, + |inst| { + return match inst { + AliasOrigin(..) 
=> Err(ProcessMessageError::Yield), + _ => Ok(ControlFlow::Continue(())), + } + }, + ); + ensure!(result.is_err(), SendError::NotApplicable); + + let mut converter = + XcmConverter::::new(&message, expected_network, agent_id); + let message = converter.convert().map_err(|err| { + log::error!(target: TARGET, "unroutable due to pattern matching error '{err:?}'."); + SendError::Unroutable + })?; + + // validate the message + let (ticket, _) = OutboundQueue::validate(&message).map_err(|err| { + log::error!(target: TARGET, "OutboundQueue validation of message failed. {err:?}"); + SendError::Unroutable + })?; + + Ok(((ticket.encode(), XcmHash::from(message.id)), Assets::default())) + } + + fn deliver(blob: (Vec, XcmHash)) -> Result { + let ticket: OutboundQueue::Ticket = OutboundQueue::Ticket::decode(&mut blob.0.as_ref()) + .map_err(|_| { + log::trace!(target: TARGET, "undeliverable due to decoding error"); + SendError::NotApplicable + })?; + + let message_id = OutboundQueue::deliver(ticket).map_err(|_| { + log::error!(target: TARGET, "OutboundQueue submit of message failed"); + SendError::Transport("other transport error") + })?; + + log::info!(target: TARGET, "message delivered {message_id:#?}."); + Ok(message_id.into()) + } +} + +/// Errors that can be thrown to the pattern matching step. +#[derive(PartialEq, Debug)] +pub enum XcmConverterError { + UnexpectedEndOfXcm, + EndOfXcmMessageExpected, + WithdrawAssetExpected, + DepositAssetExpected, + NoReserveAssets, + FilterDoesNotConsumeAllAssets, + TooManyAssets, + ZeroAssetTransfer, + BeneficiaryResolutionFailed, + AssetResolutionFailed, + InvalidFeeAsset, + SetTopicExpected, + ReserveAssetDepositedExpected, + InvalidAsset, + UnexpectedInstruction, + TooManyCommands, + AliasOriginExpected, + InvalidOrigin, +} + +macro_rules! match_expression { + ($expression:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $value:expr $(,)?) => { + match $expression { + $( $pattern )|+ $( if $guard )? => Some($value), + _ => None, + } + }; +} + +pub struct XcmConverter<'a, ConvertAssetId, Call> { + iter: Peekable>>, + message: Vec>, + ethereum_network: NetworkId, + agent_id: AgentId, + _marker: PhantomData, +} +impl<'a, ConvertAssetId, Call> XcmConverter<'a, ConvertAssetId, Call> +where + ConvertAssetId: MaybeEquivalence, +{ + pub fn new(message: &'a Xcm, ethereum_network: NetworkId, agent_id: AgentId) -> Self { + Self { + message: message.clone().inner().into(), + iter: message.inner().iter().peekable(), + ethereum_network, + agent_id, + _marker: Default::default(), + } + } + + pub fn convert(&mut self) -> Result { + let result = match self.jump_to() { + // PNA + Ok(ReserveAssetDeposited { .. }) => self.send_native_tokens_message(), + // ENA + Ok(WithdrawAsset { .. }) => self.send_tokens_message(), + Err(e) => Err(e), + _ => return Err(XcmConverterError::UnexpectedInstruction), + }?; + + // All xcm instructions must be consumed before exit. 
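+ // Anything left over after the recognised pattern has been consumed means the message
+ // does not match one of the two supported shapes, so reject it rather than convert it partially.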
+ if self.next().is_ok() { + return Err(XcmConverterError::EndOfXcmMessageExpected) + } + + Ok(result) + } + + /// Convert the XCM for an Ethereum-native token from AH into the Message which will be executed + /// on the Ethereum Gateway contract. We expect an input of the form: + /// # WithdrawAsset(WETH_FEE) + /// # PayFees(WETH_FEE) + /// # WithdrawAsset(ENA) + /// # AliasOrigin(Origin) + /// # DepositAsset(ENA) + /// # SetTopic + fn send_tokens_message(&mut self) -> Result<Message, XcmConverterError> { + use XcmConverterError::*; + + // Get fee amount + let fee_amount = self.extract_remote_fee()?; + + // Get the reserve assets from WithdrawAsset. + let reserve_assets = + match_expression!(self.next()?, WithdrawAsset(reserve_assets), reserve_assets) + .ok_or(WithdrawAssetExpected)?; + + // Check AliasOrigin. + let origin_loc = match_expression!(self.next()?, AliasOrigin(origin), origin) + .ok_or(AliasOriginExpected)?; + let origin = LocationIdOf::convert_location(&origin_loc).ok_or(InvalidOrigin)?; + + let (deposit_assets, beneficiary) = match_expression!( + self.next()?, + DepositAsset { assets, beneficiary }, + (assets, beneficiary) + ) + .ok_or(DepositAssetExpected)?; + + // Assert that the beneficiary is an AccountKey20. + let recipient = match_expression!( + beneficiary.unpack(), + (0, [AccountKey20 { network, key }]) + if self.network_matches(network), + H160(*key) + ) + .ok_or(BeneficiaryResolutionFailed)?; + + // Make sure there are reserved assets. + if reserve_assets.len() == 0 { + return Err(NoReserveAssets) + } + + // Check that the deposit asset filter matches what was reserved. + if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) { + return Err(FilterDoesNotConsumeAllAssets) + } + + // We only support a single asset at a time. + ensure!(reserve_assets.len() == 1, TooManyAssets); + let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; + + // Only fungible assets are allowed. + let (token, amount) = match reserve_asset { + Asset { id: AssetId(inner_location), fun: Fungible(amount) } => + match inner_location.unpack() { + (0, [AccountKey20 { network, key }]) if self.network_matches(network) => + Some((H160(*key), *amount)), + _ => None, + }, + _ => None, + } + .ok_or(AssetResolutionFailed)?; + + // The transfer amount must be greater than 0. + ensure!(amount > 0, ZeroAssetTransfer); + + // Ensure SetTopic exists. + let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; + + let message = Message { + id: (*topic_id).into(), + origin, + fee: fee_amount, + commands: BoundedVec::try_from(vec![Command::UnlockNativeToken { + agent_id: self.agent_id, + token, + recipient, + amount, + }]) + .map_err(|_| TooManyCommands)?, + }; + + Ok(message) + } + + fn next(&mut self) -> Result<&'a Instruction<Call>, XcmConverterError> { + self.iter.next().ok_or(XcmConverterError::UnexpectedEndOfXcm) + } + + fn network_matches(&self, network: &Option<NetworkId>) -> bool { + if let Some(network) = network { + *network == self.ethereum_network + } else { + true + } + } + + /// Convert the XCM for a Polkadot-native token from AH into the Message which will be executed + /// on the Ethereum Gateway contract. We expect an input of the form: + /// # WithdrawAsset(WETH) + /// # PayFees(WETH) + /// # ReserveAssetDeposited(PNA) + /// # AliasOrigin(Origin) + /// # DepositAsset(PNA) + /// # SetTopic + fn send_native_tokens_message(&mut self) -> Result<Message, XcmConverterError> { + use XcmConverterError::*; + + // Get fee amount + let fee_amount = self.extract_remote_fee()?; + + // Get the reserve assets.
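+ // For Polkadot-native assets the transferred amount arrives via `ReserveAssetDeposited`,
+ // whereas the Ethereum-native path above matches `WithdrawAsset` instead.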
+ let reserve_assets = + match_expression!(self.next()?, ReserveAssetDeposited(reserve_assets), reserve_assets) + .ok_or(ReserveAssetDepositedExpected)?; + + // Check AliasOrigin. + let origin_loc = match_expression!(self.next()?, AliasOrigin(origin), origin) + .ok_or(AliasOriginExpected)?; + let origin = LocationIdOf::convert_location(&origin_loc).ok_or(InvalidOrigin)?; + + let (deposit_assets, beneficiary) = match_expression!( + self.next()?, + DepositAsset { assets, beneficiary }, + (assets, beneficiary) + ) + .ok_or(DepositAssetExpected)?; + + // Assert that the beneficiary is an AccountKey20. + let recipient = match_expression!( + beneficiary.unpack(), + (0, [AccountKey20 { network, key }]) + if self.network_matches(network), + H160(*key) + ) + .ok_or(BeneficiaryResolutionFailed)?; + + // Make sure there are reserved assets. + if reserve_assets.len() == 0 { + return Err(NoReserveAssets) + } + + // Check that the deposit asset filter matches what was reserved. + if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) { + return Err(FilterDoesNotConsumeAllAssets) + } + + // We only support a single asset at a time. + ensure!(reserve_assets.len() == 1, TooManyAssets); + let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; + + // Only fungible assets are allowed. + let (asset_id, amount) = match reserve_asset { + Asset { id: AssetId(inner_location), fun: Fungible(amount) } => + Some((inner_location.clone(), *amount)), + _ => None, + } + .ok_or(AssetResolutionFailed)?; + + // The transfer amount must be greater than 0. + ensure!(amount > 0, ZeroAssetTransfer); + + // Ensure the PNA is already registered. + let token_id = TokenIdOf::convert_location(&asset_id).ok_or(InvalidAsset)?; + let expected_asset_id = ConvertAssetId::convert(&token_id).ok_or(InvalidAsset)?; + ensure!(asset_id == expected_asset_id, InvalidAsset); + + // Ensure SetTopic exists. + let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; + + let message = Message { + origin, + fee: fee_amount, + id: (*topic_id).into(), + commands: BoundedVec::try_from(vec![Command::MintForeignToken { + token_id, + recipient, + amount, + }]) + .map_err(|_| TooManyCommands)?, + }; + + Ok(message) + } + + /// Skip the fee instructions and jump to the primary asset instruction. + fn jump_to(&mut self) -> Result<&Instruction<Call>, XcmConverterError> { + ensure!(self.message.len() > 3, XcmConverterError::UnexpectedEndOfXcm); + self.message.get(2).ok_or(XcmConverterError::UnexpectedEndOfXcm) + } + + /// Extract the fee asset item from PayFees(V5). + fn extract_remote_fee(&mut self) -> Result<u128, XcmConverterError> { + use XcmConverterError::*; + let _ = match_expression!(self.next()?, WithdrawAsset(fee), fee) + .ok_or(WithdrawAssetExpected)?; + let fee_asset = + match_expression!(self.next()?, PayFees { asset: fee }, fee).ok_or(InvalidFeeAsset)?; + // TODO: Validate that the fee asset is WETH. + let fee_amount = match fee_asset { + Asset { id: _, fun: Fungible(amount) } => Some(*amount), + _ => None, + } + .ok_or(AssetResolutionFailed)?; + Ok(fee_amount) + } +} + +/// An adapter for the implementation of `ExporterFor`, which attempts to find the +/// `(bridge_location, payment)` for the requested `network`, `remote_location` and `xcm` +/// in the provided `T` table containing various exporters.
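+/// A minimal wiring sketch, assuming a hypothetical `BridgeTable` of exporters; the
+/// concrete table and alias names below are illustrative only and not part of this change:
+///
+/// ```ignore
+/// // Only XCM programs that carry `AliasOrigin` (i.e. Snowbridge V2 traffic) are routed
+/// // through the exporters configured in `BridgeTable`.
+/// type SnowbridgeV2Exporter =
+///     XcmFilterExporter<xcm_builder::NetworkExportTable<BridgeTable>, XcmForSnowbridgeV2>;
+/// ```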
+pub struct XcmFilterExporter(core::marker::PhantomData<(T, M)>); +impl>> ExporterFor for XcmFilterExporter { + fn exporter_for( + network: &NetworkId, + remote_location: &InteriorLocation, + xcm: &Xcm<()>, + ) -> Option<(Location, Option)> { + // check the XCM + if !M::contains(xcm) { + return None + } + // check `network` and `remote_location` + T::exporter_for(network, remote_location, xcm) + } +} + +/// Xcm for SnowbridgeV2 which requires XCMV5 +pub struct XcmForSnowbridgeV2; +impl Contains> for XcmForSnowbridgeV2 { + fn contains(xcm: &Xcm<()>) -> bool { + let mut instructions = xcm.clone().0; + let result = instructions.matcher().match_next_inst_while( + |_| true, + |inst| { + return match inst { + AliasOrigin(..) => Err(ProcessMessageError::Yield), + _ => Ok(ControlFlow::Continue(())), + } + }, + ); + result.is_err() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::parameter_types; + use hex_literal::hex; + use snowbridge_core::{ + outbound::{v2::Fee, SendError, SendMessageFeeProvider}, + AgentIdOf, + }; + use sp_std::default::Default; + use xcm::{ + latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, + prelude::SendError as XcmSendError, + }; + + parameter_types! { + const MaxMessageSize: u32 = u32::MAX; + const RelayNetwork: NetworkId = Polkadot; + UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(1013)].into(); + const BridgedNetwork: NetworkId = Ethereum{ chain_id: 1 }; + const NonBridgedNetwork: NetworkId = Ethereum{ chain_id: 2 }; + } + + struct MockOkOutboundQueue; + impl SendMessage for MockOkOutboundQueue { + type Ticket = (); + + fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { + Ok(((), Fee { local: 1 })) + } + + fn deliver(_: Self::Ticket) -> Result { + Ok(H256::zero()) + } + } + + impl SendMessageFeeProvider for MockOkOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } + } + struct MockErrOutboundQueue; + impl SendMessage for MockErrOutboundQueue { + type Ticket = (); + + fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { + Err(SendError::MessageTooLarge) + } + + fn deliver(_: Self::Ticket) -> Result { + Err(SendError::MessageTooLarge) + } + } + + impl SendMessageFeeProvider for MockErrOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } + } + + pub struct MockTokenIdConvert; + impl MaybeEquivalence for MockTokenIdConvert { + fn convert(_id: &TokenId) -> Option { + Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) + } + fn convert_back(_loc: &Location) -> Option { + None + } + } + + #[test] + fn exporter_validate_with_unknown_network_yields_not_applicable() { + let network = Ethereum { chain_id: 1337 }; + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_with_invalid_destination_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + 
MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_with_x8_destination_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = Some( + [ + OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, + OnlyChild, + ] + .into(), + ); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_without_universal_source_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_without_global_universal_location_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_without_global_bridge_location_yields_not_applicable() { + let network = NonBridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_with_remote_universal_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Kusama), Parachain(1000)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_without_para_id_in_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Some(GlobalConsensus(Polkadot).into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + 
UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_complex_para_id_in_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000), PalletInstance(12)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_without_xcm_message_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_with_max_target_fee_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; + let fees: Assets = vec![fee.clone()].into(); + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(fees), + BuyExecution { fees: fee.clone(), weight_limit: Unlimited }, + ExpectAsset(fee.into()), + WithdrawAsset(assets), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: Some(network), key: beneficiary_address } + .into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_with_unparsable_xcm_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let channel: u32 = 0; + let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; + let fees: Assets = vec![fee.clone()].into(); + + let mut message: Option> = Some( + vec![WithdrawAsset(fees), BuyExecution { fees: fee, weight_limit: Unlimited }].into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + 
MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_xcm_success_case_1() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee.clone() }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert!(result.is_ok()); + } + + #[test] + fn exporter_deliver_with_submit_failure_yields_unroutable() { + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockErrOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::deliver((hex!("deadbeef").to_vec(), XcmHash::default())); + assert_eq!(result, Err(XcmSendError::Transport("other transport error"))) + } + + #[test] + fn xcm_converter_convert_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: assets.get(0).unwrap().clone() }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert!(result.is_ok()); + } + + #[test] + fn xcm_converter_convert_without_buy_execution_yields_invalid_fee_asset() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: 
None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); + } + + #[test] + fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(All); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = Command::UnlockNativeToken { + agent_id: Default::default(), + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }; + let expected_message = Message { + id: [0; 32].into(), + origin: H256::zero(), + fee: 1000, + commands: BoundedVec::try_from(vec![expected_payload]).unwrap(), + }; + let result = converter.convert(); + assert_eq!(result, Ok(expected_message)); + } + + #[test] + fn xcm_converter_convert_with_fees_less_than_reserve_yields_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); + let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(500) }; + + let assets: Assets = + vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); + + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset.clone(), weight_limit: Unlimited }, + ExpectAsset(fee_asset.into()), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = Command::UnlockNativeToken { + agent_id: Default::default(), + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }; + let expected_message = Message { + id: [0; 32].into(), + origin: H256::zero(), + fee: 500, + commands: BoundedVec::try_from(vec![expected_payload]).unwrap(), + }; + let result = converter.convert(); + assert_eq!(result, Ok(expected_message)); + } + + #[test] + fn xcm_converter_convert_without_set_topic_yields_set_topic_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = 
assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + ClearTopic, + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::SetTopicExpected)); + } + + #[test] + fn xcm_converter_convert_with_partial_message_yields_unexpected_end_of_xcm() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let message: Xcm<()> = vec![WithdrawAsset(assets)].into(); + + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); + } + + #[test] + fn xcm_converter_with_different_fee_asset_succeed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location = [AccountKey20 { network: None, key: token_address }].into(); + let fee_asset = + Asset { id: AssetId(Location { parents: 0, interior: Here }), fun: Fungible(1000) }; + + let assets: Assets = + vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); + + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.is_ok(), true); + } + + #[test] + fn xcm_converter_with_fees_greater_than_reserve_succeed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); + let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(1001) }; + + let assets: Assets = + vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); + + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.is_ok(), true); + } + + #[test] + fn xcm_converter_convert_with_empty_xcm_yields_unexpected_end_of_xcm() { + let network = BridgedNetwork::get(); + + let message: Xcm<()> = vec![].into(); + + let mut converter = + XcmConverter::::new(&message, network, Default::default()); 
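+ // The empty program fails the minimum-length guard in `jump_to`, so conversion
+ // must error out before any instruction matching takes place.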
+ + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); + } + + #[test] + fn xcm_converter_convert_with_extra_instructions_yields_end_of_xcm_message_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ClearError, + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::EndOfXcmMessageExpected)); + } + + #[test] + fn xcm_converter_convert_without_withdraw_asset_yields_withdraw_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedInstruction)); + } + + #[test] + fn xcm_converter_convert_without_withdraw_asset_yields_deposit_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::DepositAssetExpected)); + } + + #[test] + fn xcm_converter_convert_without_assets_yields_no_reserve_assets() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![].into(); + let filter: AssetFilter = assets.clone().into(); + + let fee = Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }; + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, 
weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::NoReserveAssets)); + } + + #[test] + fn xcm_converter_convert_with_two_assets_yields_too_many_assets() { + let network = BridgedNetwork::get(); + + let token_address_1: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let token_address_2: [u8; 20] = hex!("1100000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![ + Asset { + id: AssetId(AccountKey20 { network: None, key: token_address_1 }.into()), + fun: Fungible(1000), + }, + Asset { + id: AssetId(AccountKey20 { network: None, key: token_address_2 }.into()), + fun: Fungible(500), + }, + ] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::TooManyAssets)); + } + + #[test] + fn xcm_converter_convert_without_consuming_filter_yields_filter_does_not_consume_all_assets() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(0)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::FilterDoesNotConsumeAllAssets)); + } + + #[test] + fn xcm_converter_convert_with_zero_amount_asset_yields_zero_asset_transfer() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(0), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, 
Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::ZeroAssetTransfer)); + } + + #[test] + fn xcm_converter_convert_non_ethereum_asset_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([GlobalConsensus(Polkadot), Parachain(1000), GeneralIndex(0)].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); + } + + #[test] + fn xcm_converter_convert_non_ethereum_chain_asset_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId( + AccountKey20 { network: Some(Ethereum { chain_id: 2 }), key: token_address }.into(), + ), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); + } + + #[test] + fn xcm_converter_convert_non_ethereum_chain_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId( + [AccountKey20 { network: Some(NonBridgedNetwork::get()), key: token_address }] + .into(), + ), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); + } + + #[test] + fn xcm_converter_convert_with_non_ethereum_beneficiary_yields_beneficiary_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let beneficiary_address: [u8; 32] = + 
hex!("2000000000000000000000000000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: [ + GlobalConsensus(Polkadot), + Parachain(1000), + AccountId32 { network: Some(Polkadot), id: beneficiary_address }, + ] + .into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); + } + + #[test] + fn xcm_converter_convert_with_non_ethereum_chain_beneficiary_yields_beneficiary_resolution_failed( + ) { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { + network: Some(Ethereum { chain_id: 2 }), + key: beneficiary_address, + } + .into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); + } + + #[test] + fn test_describe_asset_hub() { + let legacy_location: Location = Location::new(0, [Parachain(1000)]); + let legacy_agent_id = AgentIdOf::convert_location(&legacy_location).unwrap(); + assert_eq!( + legacy_agent_id, + hex!("72456f48efed08af20e5b317abf8648ac66e86bb90a411d9b0b713f7364b75b4").into() + ); + let location: Location = Location::new(1, [Parachain(1000)]); + let agent_id = AgentIdOf::convert_location(&location).unwrap(); + assert_eq!( + agent_id, + hex!("81c5ab2571199e3188135178f3c2c8e2d268be1313d029b30f534fa579b69b79").into() + ) + } + + #[test] + fn test_describe_here() { + let location: Location = Location::new(0, []); + let agent_id = AgentIdOf::convert_location(&location).unwrap(); + assert_eq!( + agent_id, + hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into() + ) + } + + #[test] + fn xcm_converter_transfer_native_token_success() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let amount = 1000000; + let asset_location = Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]); + let token_id = TokenIdOf::convert_location(&asset_location).unwrap(); + + let assets: Assets = + vec![Asset { id: AssetId(asset_location.clone()), fun: Fungible(amount) }].into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + ReserveAssetDeposited(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + ExpectAsset(Asset { 
id: AssetId(asset_location), fun: Fungible(amount) }.into()), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = + Command::MintForeignToken { recipient: beneficiary_address.into(), amount, token_id }; + let expected_message = Message { + id: [0; 32].into(), + origin: H256::zero(), + fee: 1000000, + commands: BoundedVec::try_from(vec![expected_payload]).unwrap(), + }; + let result = converter.convert(); + assert_eq!(result, Ok(expected_message)); + } + + #[test] + fn xcm_converter_transfer_native_token_with_invalid_location_will_fail() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let amount = 1000000; + // Invalid asset location from a different consensus + let asset_location = Location { + parents: 2, + interior: [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))].into(), + }; + + let assets: Assets = + vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + ReserveAssetDeposited(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::InvalidAsset)); + } + + #[test] + fn exporter_validate_with_invalid_dest_does_not_alter_destination() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Parachain(1000).into(); + + let universal_source: InteriorLocation = + [GlobalConsensus(Polkadot), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut dest_wrapper = Some(destination.clone()); + let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate( + network, + channel, + &mut universal_source_wrapper, + &mut dest_wrapper, + &mut msg_wrapper, + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); + } + + #[test] + fn 
exporter_validate_with_invalid_universal_source_does_not_alter_universal_source() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Here.into(); + + let universal_source: InteriorLocation = + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut dest_wrapper = Some(destination.clone()); + let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate( + network, + channel, + &mut universal_source_wrapper, + &mut dest_wrapper, + &mut msg_wrapper, + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); + } +} diff --git a/bridges/snowbridge/runtime/runtime-common/src/tests.rs b/bridges/snowbridge/runtime/runtime-common/src/tests.rs index 8d9a88f42933..dea5ad5411c2 100644 --- a/bridges/snowbridge/runtime/runtime-common/src/tests.rs +++ b/bridges/snowbridge/runtime/runtime-common/src/tests.rs @@ -1,6 +1,9 @@ use crate::XcmExportFeeToSibling; use frame_support::{parameter_types, sp_runtime::testing::H256}; -use snowbridge_core::outbound::{Fee, Message, SendError, SendMessage, SendMessageFeeProvider}; +use snowbridge_core::outbound::{ + v1::{Fee, Message, SendMessage}, + SendError, SendMessageFeeProvider, +}; use xcm::prelude::{ Asset, Assets, Here, Kusama, Location, NetworkId, Parachain, XcmContext, XcmError, XcmHash, XcmResult, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index b87f25ac0f01..7f2f42792ec0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -51,3 +51,4 @@ snowbridge-pallet-system = { workspace = true } snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-pallet-inbound-queue = { workspace = true } snowbridge-pallet-inbound-queue-fixtures = { workspace = true } +snowbridge-pallet-outbound-queue-v2 = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs index 6c1cdb98e8b2..cd826e3bfb29 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs +++ 
b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs @@ -20,6 +20,7 @@ mod claim_assets; mod register_bridged_assets; mod send_xcm; mod snowbridge; +mod snowbridge_v2; mod teleport; mod transact; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs index ffa60a4f52e7..6921f0e870f2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs @@ -22,7 +22,8 @@ use hex_literal::hex; use rococo_westend_system_emulated_network::asset_hub_westend_emulated_chain::genesis::AssetHubWestendAssetOwner; use snowbridge_core::{outbound::OperatingMode, AssetMetadata, TokenIdOf}; use snowbridge_router_primitives::inbound::{ - Command, Destination, EthereumLocationsConverterFor, MessageV1, VersionedMessage, + v1::{Command, Destination, MessageV1, VersionedMessage}, + EthereumLocationsConverterFor, }; use sp_core::H256; use testnet_parachains_constants::westend::snowbridge::EthereumNetwork; @@ -256,7 +257,6 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { }); BridgeHubWestend::execute_with(|| { - use bridge_hub_westend_runtime::xcm_config::TreasuryAccount; type RuntimeEvent = ::RuntimeEvent; // Check that the transfer token back to Ethereum message was queue in the Ethereum // Outbound Queue @@ -265,21 +265,12 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { vec![RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageQueued{ .. }) => {},] ); let events = BridgeHubWestend::events(); - // Check that the local fee was credited to the Snowbridge sovereign account - assert!( - events.iter().any(|event| matches!( - event, - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) - if *who == TreasuryAccount::get().into() && *amount == 5071000000 - )), - "Snowbridge sovereign takes local fee." - ); // Check that the remote fee was credited to the AssetHub sovereign account assert!( events.iter().any(|event| matches!( event, - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) - if *who == assethub_sovereign && *amount == 2680000000000, + RuntimeEvent::Balances(pallet_balances::Event::Minted { who,.. }) + if *who == assethub_sovereign )), "AssetHub sovereign takes remote fee." ); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs new file mode 100644 index 000000000000..8ded64c512ec --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs @@ -0,0 +1,314 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +use crate::imports::*; +use bridge_hub_westend_runtime::EthereumInboundQueue; +use hex_literal::hex; +use snowbridge_core::AssetMetadata; +use snowbridge_router_primitives::inbound::{ + v1::{Command, Destination, MessageV1, VersionedMessage}, + EthereumLocationsConverterFor, +}; +use sp_runtime::MultiAddress; +use testnet_parachains_constants::westend::snowbridge::EthereumNetwork; +use xcm::v5::AssetTransferFilter; +use xcm_executor::traits::ConvertLocation; + +const INITIAL_FUND: u128 = 5_000_000_000_000; +pub const CHAIN_ID: u64 = 11155111; +pub const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); +const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); +const XCM_FEE: u128 = 100_000_000_000; +const TOKEN_AMOUNT: u128 = 100_000_000_000; + +#[test] +fn send_weth_from_asset_hub_to_ethereum() { + let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); + let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); + let weth_asset_location: Location = + (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + + BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); + + AssetHubWestend::execute_with(|| { + type RuntimeOrigin = ::RuntimeOrigin; + + assert_ok!(::ForeignAssets::force_create( + RuntimeOrigin::root(), + weth_asset_location.clone().try_into().unwrap(), + assethub_sovereign.clone().into(), + false, + 1, + )); + + assert!(::ForeignAssets::asset_exists( + weth_asset_location.clone().try_into().unwrap(), + )); + }); + + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::SendToken { + token: WETH.into(), + destination: Destination::AccountId32 { id: AssetHubWestendReceiver::get().into() }, + amount: TOKEN_AMOUNT, + fee: XCM_FEE, + }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert([0; 32].into(), message).unwrap(); + let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()).unwrap(); + + // Check that the send token message was sent using xcm + assert_expected_events!( + BridgeHubWestend, + vec![RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) =>{},] + ); + }); + + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + // Check that AssetHub has issued the foreign asset + assert_expected_events!( + AssetHubWestend, + vec![RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},] + ); + + // Local fee amount(in DOT) should cover + // 1. execution cost on AH + // 2. delivery cost to BH + // 3. 
execution cost on BH
+		let local_fee_amount = 200_000_000_000;
+		// Remote fee amount (in WETH) should cover execution cost on Ethereum
+		let remote_fee_amount = 4_000_000_000;
+		let local_fee_asset =
+			Asset { id: AssetId(Location::parent()), fun: Fungible(local_fee_amount) };
+		let remote_fee_asset =
+			Asset { id: AssetId(weth_asset_location.clone()), fun: Fungible(remote_fee_amount) };
+		let reserve_asset = Asset {
+			id: AssetId(weth_asset_location.clone()),
+			fun: Fungible(TOKEN_AMOUNT - remote_fee_amount),
+		};
+		let assets = vec![
+			Asset { id: weth_asset_location.clone().into(), fun: Fungible(TOKEN_AMOUNT) },
+			local_fee_asset.clone(),
+		];
+		let destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]);
+
+		let beneficiary = Location::new(
+			0,
+			[AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }],
+		);
+
+		let xcm_on_bh = Xcm(vec![DepositAsset { assets: Wild(AllCounted(2)), beneficiary }]);
+
+		let xcms = VersionedXcm::from(Xcm(vec![
+			WithdrawAsset(assets.clone().into()),
+			PayFees { asset: local_fee_asset.clone() },
+			InitiateTransfer {
+				destination,
+				remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite(
+					remote_fee_asset.clone().into(),
+				))),
+				preserve_origin: true,
+				assets: vec![AssetTransferFilter::ReserveWithdraw(Definite(
+					reserve_asset.clone().into(),
+				))],
+				remote_xcm: xcm_on_bh,
+			},
+		]));
+
+		// Send the WETH back to Ethereum
+		::PolkadotXcm::execute(
+			RuntimeOrigin::signed(AssetHubWestendReceiver::get()),
+			bx!(xcms),
+			Weight::from(8_000_000_000),
+		)
+		.unwrap();
+	});
+
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeEvent = ::RuntimeEvent;
+		// Check that the transfer token back to Ethereum message was queued in the Ethereum
+		// Outbound Queue
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageQueued{ .. }) => {},]
+		);
+		let events = BridgeHubWestend::events();
+		// Check that the remote fee was credited to the AssetHub sovereign account
+		assert!(
+			events.iter().any(|event| matches!(
+				event,
+				RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. })
+				if *who == assethub_sovereign
+			)),
+			"AssetHub sovereign takes remote fee."
+		);
+	});
+}
+
+#[test]
+fn transfer_relay_token() {
+	let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(
+		BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()),
+	);
+	BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]);
+
+	let asset_id: Location = Location { parents: 1, interior: [].into() };
+	let _expected_asset_id: Location = Location {
+		parents: 1,
+		interior: [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))].into(),
+	};
+
+	let ethereum_sovereign: AccountId =
+		EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&Location::new(
+			2,
+			[GlobalConsensus(EthereumNetwork::get())],
+		))
+		.unwrap()
+		.into();
+
+	// Register token
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeOrigin = ::RuntimeOrigin;
+		type RuntimeEvent = ::RuntimeEvent;
+
+		assert_ok!(::Balances::force_set_balance(
+			RuntimeOrigin::root(),
+			sp_runtime::MultiAddress::Id(BridgeHubWestendSender::get()),
+			INITIAL_FUND * 10,
+		));
+
+		assert_ok!(::EthereumSystem::register_token(
+			RuntimeOrigin::root(),
+			Box::new(VersionedLocation::from(asset_id.clone())),
+			AssetMetadata {
+				name: "wnd".as_bytes().to_vec().try_into().unwrap(),
+				symbol: "wnd".as_bytes().to_vec().try_into().unwrap(),
+				decimals: 12,
+			},
+		));
+		// Check that a message was sent to Ethereum to register the token
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::RegisterToken { .. }) => {},]
+		);
+	});
+
+	// Send token to Ethereum
+	AssetHubWestend::execute_with(|| {
+		type RuntimeOrigin = ::RuntimeOrigin;
+		type RuntimeEvent = ::RuntimeEvent;
+
+		let weth_asset_location: Location =
+			(Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH })
+				.into();
+
+		assert_ok!(::ForeignAssets::force_create(
+			RuntimeOrigin::root(),
+			weth_asset_location.clone().try_into().unwrap(),
+			assethub_sovereign.clone().into(),
+			false,
+			1,
+		));
+
+		assert_ok!(::ForeignAssets::mint(
+			RuntimeOrigin::signed(assethub_sovereign.clone().into()),
+			weth_asset_location.clone().try_into().unwrap(),
+			MultiAddress::Id(AssetHubWestendSender::get()),
+			TOKEN_AMOUNT,
+		));
+
+		// Local fee amount (in DOT) should cover
+		// 1. execution cost on AH
+		// 2. delivery cost to BH
+		// 3. execution cost on BH
+		let local_fee_amount = 200_000_000_000;
+		// Remote fee amount (in WETH) should cover execution cost on Ethereum
+		let remote_fee_amount = 4_000_000_000;
+
+		let local_fee_asset =
+			Asset { id: AssetId(Location::parent()), fun: Fungible(local_fee_amount) };
+		let remote_fee_asset =
+			Asset { id: AssetId(weth_asset_location.clone()), fun: Fungible(remote_fee_amount) };
+
+		let assets = vec![
+			Asset {
+				id: AssetId(Location::parent()),
+				fun: Fungible(TOKEN_AMOUNT + local_fee_amount),
+			},
+			remote_fee_asset.clone(),
+		];
+
+		let destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]);
+
+		let beneficiary = Location::new(
+			0,
+			[AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }],
+		);
+
+		let xcm_on_bh = Xcm(vec![DepositAsset { assets: Wild(AllCounted(2)), beneficiary }]);
+
+		let xcms = VersionedXcm::from(Xcm(vec![
+			WithdrawAsset(assets.clone().into()),
+			PayFees { asset: local_fee_asset.clone() },
+			InitiateTransfer {
+				destination,
+				remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite(
+					remote_fee_asset.clone().into(),
+				))),
+				preserve_origin: true,
+				assets: vec![AssetTransferFilter::ReserveDeposit(Definite(
+					Asset { id: AssetId(Location::parent()), fun: Fungible(TOKEN_AMOUNT) }.into(),
+				))],
+				remote_xcm: xcm_on_bh,
+			},
+		]));
+
+		// Send the native token (WND) to Ethereum
+		::PolkadotXcm::execute(
+			RuntimeOrigin::signed(AssetHubWestendSender::get()),
+			bx!(xcms),
+			Weight::from(8_000_000_000),
+		)
+		.unwrap();
+
+		// Check that the native asset was transferred to the reserve account (the Ethereum sovereign)
+		let events = AssetHubWestend::events();
+		assert!(
+			events.iter().any(|event| matches!(
+				event,
+				RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount})
+				if *who == ethereum_sovereign.clone() && *amount == TOKEN_AMOUNT,
+			)),
+			"native token reserved to the Ethereum sovereign account."
+		);
+	});
+
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeEvent = ::RuntimeEvent;
+
+		// Check that the transfer token back to Ethereum message was queued in the Ethereum
+		// Outbound Queue
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageQueued{ .. }) => {},]
+		);
+	});
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
index 88ccd42dff7f..0537a5a41e13 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
@@ -496,7 +496,10 @@ pub type XcmRouter = WithUniqueTopic<(
 	// Router which wraps and sends xcm to BridgeHub to be delivered to the Ethereum
 	// GlobalConsensus
 	SovereignPaidRemoteExporter<
-		bridging::to_ethereum::EthereumNetworkExportTable,
+		(
+			bridging::to_ethereum::EthereumNetworkExportTableV2,
+			bridging::to_ethereum::EthereumNetworkExportTable,
+		),
 		XcmpQueue,
 		UniversalLocation,
 	>,
@@ -656,7 +659,9 @@ pub mod bridging {
 			/// Needs to be more than fee calculated from DefaultFeeConfig FeeConfigRecord in snowbridge:parachain/pallets/outbound-queue/src/lib.rs
 			/// Polkadot uses 10 decimals, Kusama,Rococo,Westend 12 decimals.
pub const DefaultBridgeHubEthereumBaseFee: Balance = 2_750_872_500_000; + pub const DefaultBridgeHubEthereumBaseFeeV2: Balance = 100_000_000_000; pub storage BridgeHubEthereumBaseFee: Balance = DefaultBridgeHubEthereumBaseFee::get(); + pub storage BridgeHubEthereumBaseFeeV2: Balance = DefaultBridgeHubEthereumBaseFeeV2::get(); pub SiblingBridgeHubWithEthereumInboundQueueInstance: Location = Location::new( 1, [ @@ -679,6 +684,18 @@ pub mod bridging { ), ]; + pub BridgeTableV2: sp_std::vec::Vec = sp_std::vec![ + NetworkExportTableItem::new( + EthereumNetwork::get(), + Some(sp_std::vec![Junctions::Here]), + SiblingBridgeHub::get(), + Some(( + XcmBridgeHubRouterFeeAssetId::get(), + BridgeHubEthereumBaseFeeV2::get(), + ).into()) + ), + ]; + /// Universal aliases pub UniversalAliases: BTreeSet<(Location, Junction)> = BTreeSet::from_iter( sp_std::vec![ @@ -689,8 +706,18 @@ pub mod bridging { pub EthereumBridgeTable: sp_std::vec::Vec = sp_std::vec::Vec::new().into_iter() .chain(BridgeTable::get()) .collect(); + + pub EthereumBridgeTableV2: sp_std::vec::Vec = sp_std::vec::Vec::new().into_iter() + .chain(BridgeTableV2::get()) + .collect(); } + pub type EthereumNetworkExportTableV2 = + snowbridge_router_primitives::outbound::v2::XcmFilterExporter< + xcm_builder::NetworkExportTable, + snowbridge_router_primitives::outbound::v2::XcmForSnowbridgeV2, + >; + pub type EthereumNetworkExportTable = xcm_builder::NetworkExportTable; pub type EthereumAssetFromEthereum = diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 4af8a9f43850..99f82fd8bed4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -112,6 +112,7 @@ snowbridge-pallet-ethereum-client = { workspace = true } snowbridge-pallet-inbound-queue = { workspace = true } snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-outbound-queue-runtime-api = { workspace = true } +snowbridge-merkle-tree = { workspace = true } snowbridge-router-primitives = { workspace = true } snowbridge-runtime-common = { workspace = true } @@ -189,6 +190,7 @@ std = [ "serde_json/std", "snowbridge-beacon-primitives/std", "snowbridge-core/std", + "snowbridge-merkle-tree/std", "snowbridge-outbound-queue-runtime-api/std", "snowbridge-pallet-ethereum-client/std", "snowbridge-pallet-inbound-queue/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs index be7005b5379a..a405bd5b002b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs @@ -24,7 +24,7 @@ use crate::{ use parachains_common::{AccountId, Balance}; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{gwei, meth, AllowSiblingsOnly, PricingParameters, Rewards}; -use snowbridge_router_primitives::{inbound::MessageToXcm, outbound::EthereumBlobExporter}; +use snowbridge_router_primitives::{inbound::v1::MessageToXcm, outbound::v1::EthereumBlobExporter}; use sp_core::H160; use testnet_parachains_constants::rococo::{ currency::*, @@ -37,6 +37,7 @@ use crate::xcm_config::RelayNetwork; use benchmark_helpers::DoNothingRouter; use frame_support::{parameter_types, weights::ConstantMultiplier}; use 
pallet_xcm::EnsureXcm; +use snowbridge_core::outbound::v2::DefaultOutboundQueue; use sp_runtime::{ traits::{ConstU32, ConstU8, Keccak256}, FixedU128, @@ -107,7 +108,7 @@ impl snowbridge_pallet_outbound_queue::Config for Runtime { type Decimals = ConstU8<12>; type MaxMessagePayloadSize = ConstU32<2048>; type MaxMessagesPerBlock = ConstU32<32>; - type GasMeter = snowbridge_core::outbound::ConstantGasMeter; + type GasMeter = snowbridge_core::outbound::v1::ConstantGasMeter; type Balance = Balance; type WeightToFee = WeightToFee; type WeightInfo = crate::weights::snowbridge_pallet_outbound_queue::WeightInfo; @@ -191,6 +192,7 @@ impl snowbridge_pallet_system::Config for Runtime { type InboundDeliveryCost = EthereumInboundQueue; type UniversalLocation = UniversalLocation; type EthereumLocation = EthereumLocation; + type OutboundQueueV2 = DefaultOutboundQueue; } #[cfg(feature = "runtime-benchmarks")] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs index 98e2450ee832..679d33f3456a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs @@ -26,6 +26,7 @@ use testnet_parachains_constants::rococo::xcm_version::SAFE_XCM_VERSION; use xcm::latest::WESTEND_GENESIS_HASH; const BRIDGE_HUB_ROCOCO_ED: Balance = ExistentialDeposit::get(); +use xcm::latest::WESTEND_GENESIS_HASH; fn bridge_hub_rococo_genesis( invulnerables: Vec<(AccountId, AuraId)>, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index ff7af475f5e2..9d24395e8f59 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -92,7 +92,7 @@ pub use sp_runtime::BuildStorage; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use rococo_runtime_constants::system_parachain::{ASSET_HUB_ID, BRIDGE_HUB_ID}; use snowbridge_core::{ - outbound::{Command, Fee}, + outbound::v1::{Command, Fee}, AgentId, PricingParameters, }; use xcm::{latest::prelude::*, prelude::*}; @@ -421,6 +421,7 @@ impl pallet_message_queue::Config for Runtime { RuntimeCall, >, EthereumOutboundQueue, + EthereumOutboundQueue, >; type Size = u32; // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: @@ -1002,7 +1003,7 @@ impl_runtime_apis! 
{ } impl snowbridge_outbound_queue_runtime_api::OutboundQueueApi for Runtime { - fn prove_message(leaf_index: u64) -> Option { + fn prove_message(leaf_index: u64) -> Option { snowbridge_pallet_outbound_queue::api::prove_message::(leaf_index) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 637e7c710640..ed94e442c2f7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -114,6 +114,10 @@ snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-router-primitives = { workspace = true } snowbridge-runtime-common = { workspace = true } +snowbridge-pallet-inbound-queue-v2 = { workspace = true } +snowbridge-pallet-outbound-queue-v2 = { workspace = true } +snowbridge-outbound-queue-runtime-api-v2 = { workspace = true } +snowbridge-merkle-tree = { workspace = true } [dev-dependencies] @@ -185,9 +189,13 @@ std = [ "serde_json/std", "snowbridge-beacon-primitives/std", "snowbridge-core/std", + "snowbridge-merkle-tree/std", + "snowbridge-outbound-queue-runtime-api-v2/std", "snowbridge-outbound-queue-runtime-api/std", "snowbridge-pallet-ethereum-client/std", + "snowbridge-pallet-inbound-queue-v2/std", "snowbridge-pallet-inbound-queue/std", + "snowbridge-pallet-outbound-queue-v2/std", "snowbridge-pallet-outbound-queue/std", "snowbridge-pallet-system/std", "snowbridge-router-primitives/std", @@ -248,7 +256,9 @@ runtime-benchmarks = [ "polkadot-runtime-common/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", "snowbridge-pallet-ethereum-client/runtime-benchmarks", + "snowbridge-pallet-inbound-queue-v2/runtime-benchmarks", "snowbridge-pallet-inbound-queue/runtime-benchmarks", + "snowbridge-pallet-outbound-queue-v2/runtime-benchmarks", "snowbridge-pallet-outbound-queue/runtime-benchmarks", "snowbridge-pallet-system/runtime-benchmarks", "snowbridge-router-primitives/runtime-benchmarks", @@ -288,7 +298,9 @@ try-runtime = [ "parachain-info/try-runtime", "polkadot-runtime-common/try-runtime", "snowbridge-pallet-ethereum-client/try-runtime", + "snowbridge-pallet-inbound-queue-v2/try-runtime", "snowbridge-pallet-inbound-queue/try-runtime", + "snowbridge-pallet-outbound-queue-v2/try-runtime", "snowbridge-pallet-outbound-queue/try-runtime", "snowbridge-pallet-system/try-runtime", "sp-runtime/try-runtime", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index 94921fd8af9a..e25caed95a02 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -19,13 +19,16 @@ use crate::XcmRouter; use crate::{ xcm_config, xcm_config::{TreasuryAccount, UniversalLocation}, - Balances, EthereumInboundQueue, EthereumOutboundQueue, EthereumSystem, MessageQueue, Runtime, - RuntimeEvent, TransactionByteFee, + Balances, EthereumInboundQueue, EthereumOutboundQueue, EthereumOutboundQueueV2, EthereumSystem, + MessageQueue, Runtime, RuntimeEvent, TransactionByteFee, }; use parachains_common::{AccountId, Balance}; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{gwei, meth, AllowSiblingsOnly, PricingParameters, Rewards}; -use 
snowbridge_router_primitives::{inbound::MessageToXcm, outbound::EthereumBlobExporter}; +use snowbridge_router_primitives::{ + inbound::{v1::MessageToXcm, v2::MessageToXcm as MessageToXcmV2}, + outbound::{v1::EthereumBlobExporter, v2::EthereumBlobExporter as EthereumBlobExporterV2}, +}; use sp_core::H160; use testnet_parachains_constants::westend::{ currency::*, @@ -55,6 +58,14 @@ pub type SnowbridgeExporter = EthereumBlobExporter< EthereumSystem, >; +pub type SnowbridgeExporterV2 = EthereumBlobExporterV2< + UniversalLocation, + EthereumNetwork, + snowbridge_pallet_outbound_queue_v2::Pallet, + snowbridge_core::AgentIdOf, + EthereumSystem, +>; + // Ethereum Bridge parameter_types! { pub storage EthereumGatewayAddress: H160 = H160(hex_literal::hex!("EDa338E4dC46038493b885327842fD3E301CaB39")); @@ -102,6 +113,36 @@ impl snowbridge_pallet_inbound_queue::Config for Runtime { type AssetTransactor = ::AssetTransactor; } +impl snowbridge_pallet_inbound_queue_v2::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Verifier = snowbridge_pallet_ethereum_client::Pallet; + type Token = Balances; + #[cfg(not(feature = "runtime-benchmarks"))] + type XcmSender = XcmRouter; + #[cfg(feature = "runtime-benchmarks")] + type XcmSender = DoNothingRouter; + type ChannelLookup = EthereumSystem; + type GatewayAddress = EthereumGatewayAddress; + #[cfg(feature = "runtime-benchmarks")] + type Helper = Runtime; + type MessageConverter = MessageToXcmV2< + CreateAssetCall, + CreateAssetDeposit, + ConstU8, + AccountId, + Balance, + EthereumSystem, + EthereumUniversalLocation, + AssetHubFromEthereum, + >; + type WeightToFee = WeightToFee; + type LengthToFee = ConstantMultiplier; + type MaxMessageSize = ConstU32<2048>; + type WeightInfo = crate::weights::snowbridge_pallet_inbound_queue_v2::WeightInfo; + type PricingParameters = EthereumSystem; + type AssetTransactor = ::AssetTransactor; +} + impl snowbridge_pallet_outbound_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Hashing = Keccak256; @@ -109,7 +150,7 @@ impl snowbridge_pallet_outbound_queue::Config for Runtime { type Decimals = ConstU8<12>; type MaxMessagePayloadSize = ConstU32<2048>; type MaxMessagesPerBlock = ConstU32<32>; - type GasMeter = snowbridge_core::outbound::ConstantGasMeter; + type GasMeter = snowbridge_core::outbound::v1::ConstantGasMeter; type Balance = Balance; type WeightToFee = WeightToFee; type WeightInfo = crate::weights::snowbridge_pallet_outbound_queue::WeightInfo; @@ -117,6 +158,23 @@ impl snowbridge_pallet_outbound_queue::Config for Runtime { type Channels = EthereumSystem; } +impl snowbridge_pallet_outbound_queue_v2::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Hashing = Keccak256; + type MessageQueue = MessageQueue; + type MaxMessagePayloadSize = ConstU32<2048>; + type MaxMessagesPerBlock = ConstU32<32>; + type GasMeter = snowbridge_core::outbound::v2::ConstantGasMeter; + type Balance = Balance; + type WeightToFee = WeightToFee; + type Verifier = snowbridge_pallet_ethereum_client::Pallet; + type GatewayAddress = EthereumGatewayAddress; + type WeightInfo = crate::weights::snowbridge_pallet_outbound_queue_v2::WeightInfo; + type RewardLedger = (); + type ConvertAssetId = EthereumSystem; + type EthereumNetwork = EthereumNetwork; +} + #[cfg(any(feature = "std", feature = "fast-runtime", feature = "runtime-benchmarks", test))] parameter_types! 
{ pub const ChainForkVersions: ForkVersions = ForkVersions { @@ -190,6 +248,7 @@ impl snowbridge_pallet_system::Config for Runtime { type InboundDeliveryCost = EthereumInboundQueue; type UniversalLocation = UniversalLocation; type EthereumLocation = EthereumLocation; + type OutboundQueueV2 = EthereumOutboundQueueV2; } #[cfg(feature = "runtime-benchmarks")] @@ -198,6 +257,7 @@ pub mod benchmark_helpers { use codec::Encode; use snowbridge_beacon_primitives::BeaconHeader; use snowbridge_pallet_inbound_queue::BenchmarkHelper; + use snowbridge_pallet_inbound_queue_v2::BenchmarkHelper as BenchmarkHelperV2; use sp_core::H256; use xcm::latest::{Assets, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}; @@ -207,6 +267,12 @@ pub mod benchmark_helpers { } } + impl BenchmarkHelperV2 for Runtime { + fn initialize_storage(beacon_header: BeaconHeader, block_roots_root: H256) { + EthereumBeaconClient::store_finalized_header(beacon_header, block_roots_root).unwrap(); + } + } + pub struct DoNothingRouter; impl SendXcm for DoNothingRouter { type Ticket = Xcm<()>; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 065400016791..fa9231796b59 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -99,7 +99,7 @@ use parachains_common::{ AVERAGE_ON_INITIALIZE_RATIO, NORMAL_DISPATCH_RATIO, }; use snowbridge_core::{ - outbound::{Command, Fee}, + outbound::v1::{Command, Fee}, AgentId, PricingParameters, }; use testnet_parachains_constants::westend::{consensus::*, currency::*, fee::WeightToFee, time::*}; @@ -107,6 +107,10 @@ use xcm::VersionedLocation; use westend_runtime_constants::system_parachain::{ASSET_HUB_ID, BRIDGE_HUB_ID}; +use snowbridge_core::outbound::v2::{Fee as FeeV2, InboundMessage}; + +use snowbridge_core::outbound::DryRunError; + /// The address format for describing accounts. pub type Address = MultiAddress; @@ -398,6 +402,7 @@ impl pallet_message_queue::Config for Runtime { RuntimeCall, >, EthereumOutboundQueue, + EthereumOutboundQueueV2, >; type Size = u32; // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: @@ -563,6 +568,8 @@ construct_runtime!( EthereumOutboundQueue: snowbridge_pallet_outbound_queue = 81, EthereumBeaconClient: snowbridge_pallet_ethereum_client = 82, EthereumSystem: snowbridge_pallet_system = 83, + EthereumInboundQueueV2: snowbridge_pallet_inbound_queue_v2 = 84, + EthereumOutboundQueueV2: snowbridge_pallet_outbound_queue_v2 = 85, // Message Queue. Importantly, is registered last so that messages are processed after // the `on_initialize` hooks of bridging pallets. @@ -621,6 +628,8 @@ mod benches { [snowbridge_pallet_outbound_queue, EthereumOutboundQueue] [snowbridge_pallet_system, EthereumSystem] [snowbridge_pallet_ethereum_client, EthereumBeaconClient] + [snowbridge_pallet_inbound_queue_v2, EthereumInboundQueueV2] + [snowbridge_pallet_outbound_queue_v2, EthereumOutboundQueueV2] ); } @@ -890,7 +899,7 @@ impl_runtime_apis! { } impl snowbridge_outbound_queue_runtime_api::OutboundQueueApi for Runtime { - fn prove_message(leaf_index: u64) -> Option { + fn prove_message(leaf_index: u64) -> Option { snowbridge_pallet_outbound_queue::api::prove_message::(leaf_index) } @@ -899,6 +908,15 @@ impl_runtime_apis! 
{ } } + impl snowbridge_outbound_queue_runtime_api_v2::OutboundQueueApiV2 for Runtime { + fn prove_message(leaf_index: u64) -> Option { + snowbridge_pallet_outbound_queue_v2::api::prove_message::(leaf_index) + } + fn dry_run(xcm: Xcm<()>) -> Result<(InboundMessage,FeeV2),DryRunError> { + snowbridge_pallet_outbound_queue_v2::api::dry_run::(xcm) + } + } + impl snowbridge_system_runtime_api::ControlApi for Runtime { fn agent_id(location: VersionedLocation) -> Option { snowbridge_pallet_system::api::agent_id::(location) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs index c1c5c337aca8..cba49ab186c5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs @@ -47,7 +47,9 @@ pub mod xcm; pub mod snowbridge_pallet_ethereum_client; pub mod snowbridge_pallet_inbound_queue; +pub mod snowbridge_pallet_inbound_queue_v2; pub mod snowbridge_pallet_outbound_queue; +pub mod snowbridge_pallet_outbound_queue_v2; pub mod snowbridge_pallet_system; pub use block_weights::constants::BlockExecutionWeight; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_inbound_queue_v2.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_inbound_queue_v2.rs new file mode 100644 index 000000000000..7844816f903f --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_inbound_queue_v2.rs @@ -0,0 +1,69 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `snowbridge_pallet_inbound_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-09-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `macbook pro 14 m2`, CPU: `m2-arm64` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --pallet=snowbridge_inbound_queue +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --steps +// 50 +// --repeat +// 20 +// --output +// ./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_inbound_queue.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `snowbridge_pallet_inbound_queue`. 
+pub struct WeightInfo(PhantomData); +impl snowbridge_pallet_inbound_queue_v2::WeightInfo for WeightInfo { + /// Storage: EthereumInboundQueue PalletOperatingMode (r:1 w:0) + /// Proof: EthereumInboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: EthereumBeaconClient ExecutionHeaders (r:1 w:0) + /// Proof: EthereumBeaconClient ExecutionHeaders (max_values: None, max_size: Some(136), added: 2611, mode: MaxEncodedLen) + /// Storage: EthereumInboundQueue Nonce (r:1 w:1) + /// Proof: EthereumInboundQueue Nonce (max_values: None, max_size: Some(20), added: 2495, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `800` + // Estimated: `7200` + // Minimum execution time: 200_000_000 picoseconds. + Weight::from_parts(200_000_000, 0) + .saturating_add(Weight::from_parts(0, 7200)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(6)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_outbound_queue_v2.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_outbound_queue_v2.rs new file mode 100644 index 000000000000..6cde71d9cdec --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_outbound_queue_v2.rs @@ -0,0 +1,98 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `snowbridge_outbound_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-20, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `192.168.1.13`, CPU: `` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 + +// Executed Command: +// ../target/release/polkadot-parachain +// benchmark +// pallet +// --chain=bridge-hub-rococo-dev +// --pallet=snowbridge_outbound_queue +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --output +// ../parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_outbound_queue.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `snowbridge_outbound_queue`. 
+pub struct WeightInfo(PhantomData); +impl snowbridge_pallet_outbound_queue_v2::WeightInfo for WeightInfo { + /// Storage: EthereumOutboundQueue MessageLeaves (r:1 w:1) + /// Proof Skipped: EthereumOutboundQueue MessageLeaves (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: EthereumOutboundQueue PendingHighPriorityMessageCount (r:1 w:1) + /// Proof: EthereumOutboundQueue PendingHighPriorityMessageCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue Nonce (r:1 w:1) + /// Proof: EthereumOutboundQueue Nonce (max_values: None, max_size: Some(20), added: 2495, mode: MaxEncodedLen) + /// Storage: EthereumOutboundQueue Messages (r:1 w:1) + /// Proof Skipped: EthereumOutboundQueue Messages (max_values: Some(1), max_size: None, mode: Measured) + fn do_process_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3485` + // Minimum execution time: 39_000_000 picoseconds. + Weight::from_parts(39_000_000, 3485) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: EthereumOutboundQueue MessageLeaves (r:1 w:0) + /// Proof Skipped: EthereumOutboundQueue MessageLeaves (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: System Digest (r:1 w:1) + /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) + fn commit() -> Weight { + // Proof Size summary in bytes: + // Measured: `1094` + // Estimated: `2579` + // Minimum execution time: 28_000_000 picoseconds. + Weight::from_parts(28_000_000, 2579) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + + fn commit_single() -> Weight { + // Proof Size summary in bytes: + // Measured: `1094` + // Estimated: `2579` + // Minimum execution time: 9_000_000 picoseconds. + Weight::from_parts(9_000_000, 1586) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + + fn submit_delivery_proof() -> Weight { + // Proof Size summary in bytes: + // Measured: `800` + // Estimated: `7200` + // Minimum execution time: 200_000_000 picoseconds. 
+ Weight::from_parts(200_000_000, 0) + .saturating_add(Weight::from_parts(0, 7200)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(6)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index e692568932fe..856a76792ded 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -210,13 +210,16 @@ impl xcm_executor::Config for XcmConfig { WestendLocation, EthereumNetwork, Self::AssetTransactor, - crate::EthereumOutboundQueue, + crate::EthereumOutboundQueueV2, >, SendXcmFeeToAccount, ), >; - type MessageExporter = - (XcmOverBridgeHubRococo, crate::bridge_to_ethereum_config::SnowbridgeExporter); + type MessageExporter = ( + XcmOverBridgeHubRococo, + crate::bridge_to_ethereum_config::SnowbridgeExporterV2, + crate::bridge_to_ethereum_config::SnowbridgeExporter, + ); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs index 2f5aa76fbdd7..cdc4c741d863 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs @@ -23,6 +23,7 @@ use frame_support::{ use pallet_message_queue::OnQueueChanged; use scale_info::TypeInfo; use snowbridge_core::ChannelId; +use sp_core::H256; use xcm::latest::prelude::{Junction, Location}; /// The aggregate origin of an inbound message. @@ -44,6 +45,7 @@ pub enum AggregateMessageOrigin { /// /// This is used by Snowbridge inbound queue. Snowbridge(ChannelId), + SnowbridgeV2(H256), } impl From for Location { @@ -55,7 +57,7 @@ impl From for Location { Sibling(id) => Location::new(1, Junction::Parachain(id.into())), // NOTE: We don't need this conversion for Snowbridge. However, we have to // implement it anyway as xcm_builder::ProcessXcmMessage requires it. - Snowbridge(_) => Location::default(), + _ => Location::default(), } } } @@ -82,18 +84,19 @@ impl From for AggregateMessageOrigin { } /// Routes messages to either the XCMP or Snowbridge processor. 
-pub struct BridgeHubMessageRouter( - PhantomData<(XcmpProcessor, SnowbridgeProcessor)>, +pub struct BridgeHubMessageRouter( + PhantomData<(XcmpProcessor, SnowbridgeProcessor, SnowbridgeProcessorV2)>, ) where XcmpProcessor: ProcessMessage, SnowbridgeProcessor: ProcessMessage; -impl ProcessMessage - for BridgeHubMessageRouter +impl ProcessMessage + for BridgeHubMessageRouter where XcmpProcessor: ProcessMessage, SnowbridgeProcessor: ProcessMessage, + SnowbridgeProcessorV2: ProcessMessage, { type Origin = AggregateMessageOrigin; @@ -108,6 +111,7 @@ where Here | Parent | Sibling(_) => XcmpProcessor::process_message(message, origin, meter, id), Snowbridge(_) => SnowbridgeProcessor::process_message(message, origin, meter, id), + SnowbridgeV2(_) => SnowbridgeProcessorV2::process_message(message, origin, meter, id), } } } diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 7f50658c4e16..31e7e9fea3a4 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -169,7 +169,6 @@ std = [ "snowbridge-beacon-primitives?/std", "snowbridge-core?/std", "snowbridge-ethereum?/std", - "snowbridge-outbound-queue-merkle-tree?/std", "snowbridge-outbound-queue-runtime-api?/std", "snowbridge-pallet-ethereum-client-fixtures?/std", "snowbridge-pallet-ethereum-client?/std", @@ -541,7 +540,7 @@ with-tracing = [ "sp-tracing?/with-tracing", "sp-tracing?/with-tracing", ] -runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", 
"pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-fixtures", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] +runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", 
"bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-fixtures", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", 
"parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] runtime = [ "frame-benchmarking", "frame-benchmarking-pallet-pov", @@ -1437,11 +1436,6 @@ path = "../bridges/snowbridge/primitives/ethereum" default-features = false optional = true -[dependencies.snowbridge-outbound-queue-merkle-tree] -path = "../bridges/snowbridge/pallets/outbound-queue/merkle-tree" -default-features = false -optional = true - [dependencies.snowbridge-outbound-queue-runtime-api] path = "../bridges/snowbridge/pallets/outbound-queue/runtime-api" default-features = false diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 2216864fad0f..9591e343bc77 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -1188,10 +1188,6 @@ pub use snowbridge_core; #[cfg(feature = "snowbridge-ethereum")] pub use snowbridge_ethereum; -/// Snowbridge Outbound Queue Merkle Tree. -#[cfg(feature = "snowbridge-outbound-queue-merkle-tree")] -pub use snowbridge_outbound_queue_merkle_tree; - /// Snowbridge Outbound Queue Runtime API. 
#[cfg(feature = "snowbridge-outbound-queue-runtime-api")] pub use snowbridge_outbound_queue_runtime_api; From 4d6c67841dae588390661854bf578c2e3c0c1733 Mon Sep 17 00:00:00 2001 From: ron Date: Mon, 11 Nov 2024 11:57:52 +0800 Subject: [PATCH 02/68] Fix v2 tests --- .../primitives/router/src/outbound/v2.rs | 181 ++++++------------ 1 file changed, 54 insertions(+), 127 deletions(-) diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2.rs b/bridges/snowbridge/primitives/router/src/outbound/v2.rs index 4476c913aa00..f1d06fc9662b 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2.rs @@ -918,35 +918,6 @@ mod tests { assert!(result.is_ok()); } - #[test] - fn xcm_converter_convert_without_buy_execution_yields_invalid_fee_asset() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); - } - #[test] fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() { let network = BridgedNetwork::get(); @@ -963,53 +934,9 @@ mod tests { let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let expected_payload = Command::UnlockNativeToken { - agent_id: Default::default(), - token: token_address.into(), - recipient: beneficiary_address.into(), - amount: 1000, - }; - let expected_message = Message { - id: [0; 32].into(), - origin: H256::zero(), - fee: 1000, - commands: BoundedVec::try_from(vec![expected_payload]).unwrap(), - }; - let result = converter.convert(); - assert_eq!(result, Ok(expected_message)); - } - - #[test] - fn xcm_converter_convert_with_fees_less_than_reserve_yields_success() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); - let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(500) }; - - let assets: Assets = - vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); - - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ + PayFees { asset: assets.get(0).unwrap().clone() }, WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee_asset.clone(), weight_limit: Unlimited }, - ExpectAsset(fee_asset.into()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), 
Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1019,20 +946,8 @@ mod tests { .into(); let mut converter = XcmConverter::::new(&message, network, Default::default()); - let expected_payload = Command::UnlockNativeToken { - agent_id: Default::default(), - token: token_address.into(), - recipient: beneficiary_address.into(), - amount: 1000, - }; - let expected_message = Message { - id: [0; 32].into(), - origin: H256::zero(), - fee: 500, - commands: BoundedVec::try_from(vec![expected_payload]).unwrap(), - }; let result = converter.convert(); - assert_eq!(result, Ok(expected_message)); + assert_eq!(result.is_ok(), true); } #[test] @@ -1051,7 +966,9 @@ mod tests { let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + PayFees { asset: assets.get(0).unwrap().clone() }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1101,8 +1018,9 @@ mod tests { let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1133,8 +1051,9 @@ mod tests { let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1177,8 +1096,9 @@ mod tests { let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + PayFees { asset: assets.get(0).unwrap().clone() }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1239,8 +1159,9 @@ mod tests { let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + PayFees { asset: assets.get(0).unwrap().clone() }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), SetTopic([0; 32]), ] .into(); @@ -1269,8 +1190,9 @@ mod tests { let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, + PayFees { asset: fee.clone() }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1308,8 +1230,9 @@ mod tests { let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + PayFees { asset: assets.get(0).unwrap().clone() }, + 
WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1340,8 +1263,9 @@ mod tests { let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + PayFees { asset: assets.get(0).unwrap().clone() }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1372,8 +1296,9 @@ mod tests { let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + PayFees { asset: assets.get(0).unwrap().clone() }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1402,9 +1327,10 @@ mod tests { let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: assets.get(0).unwrap().clone() }, WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1436,9 +1362,10 @@ mod tests { let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: assets.get(0).unwrap().clone() }, WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1471,9 +1398,10 @@ mod tests { let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: assets.get(0).unwrap().clone() }, WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1504,17 +1432,14 @@ mod tests { .into(); let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: assets.get(0).unwrap().clone() }, WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, - beneficiary: [ - GlobalConsensus(Polkadot), - Parachain(1000), - AccountId32 { network: Some(Polkadot), id: beneficiary_address }, - ] - .into(), + beneficiary: AccountId32 { network: Some(Polkadot), id: beneficiary_address } + .into(), }, SetTopic([0; 32]), ] @@ -1543,8 +1468,9 @@ mod tests { let message: Xcm<()> = vec![ 
WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + PayFees { asset: assets.get(0).unwrap().clone() }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { @@ -1604,10 +1530,10 @@ mod tests { let filter: AssetFilter = assets.clone().into(); let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: assets.get(0).unwrap().clone() }, ReserveAssetDeposited(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - ExpectAsset(Asset { id: AssetId(asset_location), fun: Fungible(amount) }.into()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), @@ -1621,7 +1547,7 @@ mod tests { Command::MintForeignToken { recipient: beneficiary_address.into(), amount, token_id }; let expected_message = Message { id: [0; 32].into(), - origin: H256::zero(), + origin: hex!("aa16eddac8725928eaeda4aae518bf10d02bee80382517d21464a5cdf8d1d8e1").into(), fee: 1000000, commands: BoundedVec::try_from(vec![expected_payload]).unwrap(), }; @@ -1647,9 +1573,10 @@ mod tests { let filter: AssetFilter = assets.clone().into(); let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: assets.get(0).unwrap().clone() }, ReserveAssetDeposited(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), From a72aa5a57dedecbb499bc672f2264e50faa0d977 Mon Sep 17 00:00:00 2001 From: ron Date: Mon, 11 Nov 2024 15:35:49 +0800 Subject: [PATCH 03/68] Fix compile --- .../bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs index 679d33f3456a..98e2450ee832 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs @@ -26,7 +26,6 @@ use testnet_parachains_constants::rococo::xcm_version::SAFE_XCM_VERSION; use xcm::latest::WESTEND_GENESIS_HASH; const BRIDGE_HUB_ROCOCO_ED: Balance = ExistentialDeposit::get(); -use xcm::latest::WESTEND_GENESIS_HASH; fn bridge_hub_rococo_genesis( invulnerables: Vec<(AccountId, AuraId)>, From 056fd7f7e3941054a9ac966216203dfdf46e8bfb Mon Sep 17 00:00:00 2001 From: ron Date: Thu, 14 Nov 2024 21:30:32 +0800 Subject: [PATCH 04/68] Rename to OutboundQueueV2Api --- .../snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs index 26ab7872ff11..0e5637d388bf 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs @@ -11,7 +11,7 @@ use 
snowbridge_merkle_tree::MerkleProof; use xcm::prelude::Xcm; sp_api::decl_runtime_apis! { - pub trait OutboundQueueApiV2 where Balance: BalanceT + pub trait OutboundQueueV2Api where Balance: BalanceT { /// Generate a merkle proof for a committed message identified by `leaf_index`. /// The merkle root is stored in the block header as a diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index fa9231796b59..929caadf968b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -908,7 +908,7 @@ impl_runtime_apis! { } } - impl snowbridge_outbound_queue_runtime_api_v2::OutboundQueueApiV2 for Runtime { + impl snowbridge_outbound_queue_runtime_api_v2::OutboundQueueV2Api for Runtime { fn prove_message(leaf_index: u64) -> Option { snowbridge_pallet_outbound_queue_v2::api::prove_message::(leaf_index) } From 49837cfabc3509850553e04f4628ca61d88a103e Mon Sep 17 00:00:00 2001 From: ron Date: Thu, 14 Nov 2024 21:37:27 +0800 Subject: [PATCH 05/68] Return raw balance for dry run --- .../pallets/outbound-queue-v2/runtime-api/src/lib.rs | 2 +- bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs | 4 ++-- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs index 0e5637d388bf..717f0135af0c 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs @@ -18,6 +18,6 @@ sp_api::decl_runtime_apis! { /// `sp_runtime::generic::DigestItem::Other` fn prove_message(leaf_index: u64) -> Option; - fn dry_run(xcm: Xcm<()>) -> Result<(InboundMessage,Fee),DryRunError>; + fn dry_run(xcm: Xcm<()>) -> Result<(InboundMessage,Balance),DryRunError>; } } diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs index f45e15bad647..7b68b587d4dc 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs @@ -33,7 +33,7 @@ where Some(proof) } -pub fn dry_run(xcm: Xcm<()>) -> Result<(InboundMessage, Fee), DryRunError> +pub fn dry_run(xcm: Xcm<()>) -> Result<(InboundMessage, T::Balance), DryRunError> where T: Config, { @@ -46,7 +46,7 @@ where let message: Message = converter.convert().map_err(|_| DryRunError::ConvertXcmFailed)?; - let fee = Fee::from(crate::Pallet::::calculate_local_fee()); + let fee = crate::Pallet::::calculate_local_fee(); let commands: Vec = message .commands diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 929caadf968b..498bd07530db 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -912,7 +912,7 @@ impl_runtime_apis! 
{ fn prove_message(leaf_index: u64) -> Option { snowbridge_pallet_outbound_queue_v2::api::prove_message::(leaf_index) } - fn dry_run(xcm: Xcm<()>) -> Result<(InboundMessage,FeeV2),DryRunError> { + fn dry_run(xcm: Xcm<()>) -> Result<(InboundMessage,Balance),DryRunError> { snowbridge_pallet_outbound_queue_v2::api::dry_run::(xcm) } } From 030d95c579ed93be349586bd54b41abee2277233 Mon Sep 17 00:00:00 2001 From: ron Date: Thu, 14 Nov 2024 22:02:22 +0800 Subject: [PATCH 06/68] Decode account asap --- .../outbound-queue-v2/runtime-api/src/lib.rs | 5 +--- .../pallets/outbound-queue-v2/src/api.rs | 2 +- .../pallets/outbound-queue-v2/src/envelope.rs | 23 +++++++++++++------ .../pallets/outbound-queue-v2/src/lib.rs | 12 ++++------ 4 files changed, 22 insertions(+), 20 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs index 717f0135af0c..08d10ddd9269 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs @@ -3,10 +3,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::traits::tokens::Balance as BalanceT; -use snowbridge_core::outbound::{ - v2::{Fee, InboundMessage}, - DryRunError, -}; +use snowbridge_core::outbound::{v2::InboundMessage, DryRunError}; use snowbridge_merkle_tree::MerkleProof; use xcm::prelude::Xcm; diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs index 7b68b587d4dc..15c224931081 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs @@ -6,7 +6,7 @@ use crate::{Config, MessageLeaves}; use frame_support::storage::StorageStreamIter; use snowbridge_core::{ outbound::{ - v2::{CommandWrapper, Fee, GasMeter, InboundMessage, Message}, + v2::{CommandWrapper, GasMeter, InboundMessage, Message}, DryRunError, }, AgentIdOf, diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/envelope.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/envelope.rs index e0f6ba63291c..744c93deb796 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/envelope.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/envelope.rs @@ -5,8 +5,11 @@ use snowbridge_core::inbound::Log; use sp_core::{RuntimeDebug, H160}; use sp_std::prelude::*; +use crate::Config; use alloy_primitives::B256; use alloy_sol_types::{sol, SolEvent}; +use codec::Decode; +use frame_support::pallet_prelude::{Encode, TypeInfo}; sol! { event InboundMessageDispatched(uint64 indexed nonce, bool success, bytes32 indexed reward_address); @@ -14,7 +17,7 @@ sol! { /// An inbound message that has had its outer envelope decoded. #[derive(Clone, RuntimeDebug)] -pub struct Envelope { +pub struct Envelope { /// The address of the outbound queue on Ethereum that emitted this message as an event log pub gateway: H160, /// A nonce for enforcing replay protection and ordering. 
@@ -22,26 +25,32 @@ pub struct Envelope { /// Delivery status pub success: bool, /// The reward address - pub reward_address: [u8; 32], + pub reward_address: T::AccountId, } -#[derive(Copy, Clone, RuntimeDebug)] -pub struct EnvelopeDecodeError; +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum EnvelopeDecodeError { + DecodeLogFailed, + DecodeAccountFailed, +} -impl TryFrom<&Log> for Envelope { +impl TryFrom<&Log> for Envelope { type Error = EnvelopeDecodeError; fn try_from(log: &Log) -> Result { let topics: Vec = log.topics.iter().map(|x| B256::from_slice(x.as_ref())).collect(); let event = InboundMessageDispatched::decode_log(topics, &log.data, true) - .map_err(|_| EnvelopeDecodeError)?; + .map_err(|_| EnvelopeDecodeError::DecodeLogFailed)?; + + let account = T::AccountId::decode(&mut &event.reward_address[..]) + .map_err(|_| EnvelopeDecodeError::DecodeAccountFailed)?; Ok(Self { gateway: log.address, nonce: event.nonce, success: event.success, - reward_address: event.reward_address.clone().into(), + reward_address: account, }) } } diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index 43fde9528f5d..2526413f1b3c 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -133,8 +133,6 @@ use alloy_sol_types::SolValue; use alloy_primitives::FixedBytes; -use sp_runtime::traits::TrailingZeroInput; - use sp_runtime::traits::MaybeEquivalence; use xcm::prelude::{Location, NetworkId}; @@ -320,8 +318,8 @@ pub mod pallet { .map_err(|e| Error::::Verification(e))?; // Decode event log into an Envelope - let envelope = - Envelope::try_from(&message.event_log).map_err(|_| Error::::InvalidEnvelope)?; + let envelope = Envelope::::try_from(&message.event_log) + .map_err(|_| Error::::InvalidEnvelope)?; // Verify that the message was submitted from the known Gateway contract ensure!(T::GatewayAddress::get() == envelope.gateway, Error::::InvalidGateway); @@ -330,12 +328,10 @@ pub mod pallet { ensure!(>::contains_key(nonce), Error::::PendingNonceNotExist); let order = >::get(nonce).ok_or(Error::::PendingNonceNotExist)?; - let account = T::AccountId::decode(&mut &envelope.reward_address[..]).unwrap_or( - T::AccountId::decode(&mut TrailingZeroInput::zeroes()).expect("zero address"), - ); + // No fee for governance order if !order.fee.is_zero() { - T::RewardLedger::deposit(account, order.fee.into())?; + T::RewardLedger::deposit(envelope.reward_address, order.fee.into())?; } >::remove(nonce); From 206b3000c7dc4e5641a6298d34ca94c294790e36 Mon Sep 17 00:00:00 2001 From: ron Date: Thu, 14 Nov 2024 22:13:54 +0800 Subject: [PATCH 07/68] Revamp comments for V2 --- .../pallets/outbound-queue-v2/src/lib.rs | 54 ++----------------- 1 file changed, 5 insertions(+), 49 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index 2526413f1b3c..e74a91cc517f 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -7,14 +7,14 @@ //! Messages come either from sibling parachains via XCM, or BridgeHub itself //! via the `snowbridge-pallet-system`: //! -//! 1. `snowbridge_router_primitives::outbound::EthereumBlobExporter::deliver` -//! 2. `snowbridge_pallet_system::Pallet::send` +//! 1. `snowbridge_router_primitives::outbound::v2::EthereumBlobExporter::deliver` +//! 2. 
`snowbridge_pallet_system::Pallet::send_v2` //! //! The message submission pipeline works like this: //! 1. The message is first validated via the implementation for -//! [`snowbridge_core::outbound::SendMessage::validate`] +//! [`snowbridge_core::outbound::v2::SendMessage::validate`] //! 2. The message is then enqueued for later processing via the implementation for -//! [`snowbridge_core::outbound::SendMessage::deliver`] +//! [`snowbridge_core::outbound::v2::SendMessage::deliver`] //! 3. The underlying message queue is implemented by [`Config::MessageQueue`] //! 4. The message queue delivers messages back to this pallet via the implementation for //! [`frame_support::traits::ProcessMessage::process_message`] @@ -35,50 +35,6 @@ //! allows us to pause processing of normal user messages while still allowing //! governance commands to be sent to Ethereum. //! -//! # Fees -//! -//! An upfront fee must be paid for delivering a message. This fee covers several -//! components: -//! 1. The weight of processing the message locally -//! 2. The gas refund paid out to relayers for message submission -//! 3. An additional reward paid out to relayers for message submission -//! -//! Messages are weighed to determine the maximum amount of gas they could -//! consume on Ethereum. Using this upper bound, a final fee can be calculated. -//! -//! The fee calculation also requires the following parameters: -//! * Average ETH/DOT exchange rate over some period -//! * Max fee per unit of gas that bridge is willing to refund relayers for -//! -//! By design, it is expected that governance should manually update these -//! parameters every few weeks using the `set_pricing_parameters` extrinsic in the -//! system pallet. -//! -//! This is an interim measure. Once ETH/DOT liquidity pools are available in the Polkadot network, -//! we'll use them as a source of pricing info, subject to certain safeguards. -//! -//! ## Fee Computation Function -//! -//! ```text -//! LocalFee(Message) = WeightToFee(ProcessMessageWeight(Message)) -//! RemoteFee(Message) = MaxGasRequired(Message) * Params.MaxFeePerGas + Params.Reward -//! RemoteFeeAdjusted(Message) = Params.Multiplier * (RemoteFee(Message) / Params.Ratio("ETH/DOT")) -//! Fee(Message) = LocalFee(Message) + RemoteFeeAdjusted(Message) -//! ``` -//! -//! By design, the computed fee includes a safety factor (the `Multiplier`) to cover -//! unfavourable fluctuations in the ETH/DOT exchange rate. -//! -//! ## Fee Settlement -//! -//! On the remote side, in the gateway contract, the relayer accrues -//! -//! ```text -//! Min(GasPrice, Message.MaxFeePerGas) * GasUsed() + Message.Reward -//! ``` -//! Or in plain english, relayers are refunded for gas consumption, using a -//! price that is a minimum of the actual gas price, or `Message.MaxFeePerGas`. -//! //! # Extrinsics //! //! * [`Call::set_operating_mode`]: Set the operating mode @@ -86,7 +42,7 @@ //! # Runtime API //! //! * `prove_message`: Generate a merkle proof for a committed message -//! * `calculate_fee`: Calculate the delivery fee for a message +//! 
* `dry_run`: Convert xcm to InboundMessage #![cfg_attr(not(feature = "std"), no_std)] pub mod api; pub mod envelope; From 8b3e178211ed65bcc253750df8d1dff1914a3527 Mon Sep 17 00:00:00 2001 From: ron Date: Thu, 14 Nov 2024 22:20:30 +0800 Subject: [PATCH 08/68] Custom digest for V2 --- bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs | 2 +- .../parachains/runtimes/bridge-hubs/common/src/digest_item.rs | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index e74a91cc517f..293e532bd778 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -309,7 +309,7 @@ pub mod pallet { // Create merkle root of messages let root = merkle_root::<::Hashing, _>(MessageLeaves::::stream_iter()); - let digest_item: DigestItem = CustomDigestItem::Snowbridge(root).into(); + let digest_item: DigestItem = CustomDigestItem::SnowbridgeV2(root).into(); // Insert merkle root into the header digest >::deposit_log(digest_item); diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/src/digest_item.rs b/cumulus/parachains/runtimes/bridge-hubs/common/src/digest_item.rs index bdfcaedbe82d..5823b15b8d55 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/src/digest_item.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/common/src/digest_item.rs @@ -24,6 +24,9 @@ pub enum CustomDigestItem { #[codec(index = 0)] /// Merkle root of outbound Snowbridge messages. Snowbridge(H256), + #[codec(index = 1)] + /// Merkle root of outbound Snowbridge V2 messages. + SnowbridgeV2(H256), } /// Convert custom application digest item into a concrete digest item From a22f0ac9f49c89ef50e3b41d3a70ff3505cce4e6 Mon Sep 17 00:00:00 2001 From: ron Date: Thu, 14 Nov 2024 22:56:17 +0800 Subject: [PATCH 09/68] Cleanup imports --- .../pallets/outbound-queue-v2/src/lib.rs | 22 +++++-------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index 293e532bd778..01725641c7de 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -60,6 +60,8 @@ mod mock; #[cfg(test)] mod test; +use alloy_primitives::FixedBytes; +use alloy_sol_types::SolValue; use bridge_hub_common::{AggregateMessageOrigin, CustomDigestItem}; use codec::Decode; use envelope::Envelope; @@ -68,35 +70,23 @@ use frame_support::{ traits::{tokens::Balance, EnqueueMessage, Get, ProcessMessageError}, weights::{Weight, WeightToFee}, }; +pub use pallet::*; use snowbridge_core::{ - inbound::Message as DeliveryMessage, + inbound::{Message as DeliveryMessage, VerificationError, Verifier}, outbound::v2::{CommandWrapper, Fee, GasMeter, InboundMessage, InboundMessageWrapper, Message}, BasicOperatingMode, RewardLedger, TokenId, }; use snowbridge_merkle_tree::merkle_root; -use sp_core::H256; +use sp_core::{H160, H256}; use sp_runtime::{ - traits::{BlockNumberProvider, Hash}, + traits::{BlockNumberProvider, Hash, MaybeEquivalence}, ArithmeticError, DigestItem, }; use sp_std::prelude::*; pub use types::{PendingOrder, ProcessMessageOriginOf}; pub use weights::WeightInfo; - -pub use pallet::*; - -use alloy_sol_types::SolValue; - -use alloy_primitives::FixedBytes; - -use sp_runtime::traits::MaybeEquivalence; - use xcm::prelude::{Location, NetworkId}; -use 
snowbridge_core::inbound::{VerificationError, Verifier}; - -use sp_core::H160; - #[frame_support::pallet] pub mod pallet { use super::*; From e346bf6860e99928334d66b1f5ca3cef0849a718 Mon Sep 17 00:00:00 2001 From: Ron Date: Thu, 14 Nov 2024 22:58:27 +0800 Subject: [PATCH 10/68] Update bridges/snowbridge/pallets/outbound-queue-v2/src/types.rs Co-authored-by: Vincent Geddes <117534+vgeddes@users.noreply.github.com> --- bridges/snowbridge/pallets/outbound-queue-v2/src/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/types.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/types.rs index db1f567e42fc..d16cd031e618 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/types.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/types.rs @@ -17,7 +17,7 @@ pub struct PendingOrder { pub nonce: u64, /// The block number in which the message was committed pub block_number: BlockNumber, - /// The fee + /// The fee in Ether provided by the user to incentivize message delivery #[codec(compact)] pub fee: u128, } From 9847bc9759479e6746b97a0292fb6f0d9a369783 Mon Sep 17 00:00:00 2001 From: ron Date: Thu, 14 Nov 2024 23:08:40 +0800 Subject: [PATCH 11/68] Clean up with the insert --- .../pallets/outbound-queue-v2/src/lib.rs | 20 +++++++------------ 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index 01725641c7de..402aba7af7cc 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -80,7 +80,7 @@ use snowbridge_merkle_tree::merkle_root; use sp_core::{H160, H256}; use sp_runtime::{ traits::{BlockNumberProvider, Hash, MaybeEquivalence}, - ArithmeticError, DigestItem, + DigestItem, }; use sp_std::prelude::*; pub use types::{PendingOrder, ProcessMessageOriginOf}; @@ -357,18 +357,12 @@ pub mod pallet { Messages::::append(Box::new(inbound_message)); MessageLeaves::::append(message_abi_encoded_hash); - >::try_mutate(nonce, |maybe_locked| -> DispatchResult { - let mut locked = maybe_locked.clone().unwrap_or_else(|| PendingOrder { - nonce, - fee: 0, - block_number: frame_system::Pallet::::current_block_number(), - }); - locked.fee = - locked.fee.checked_add(message.fee).ok_or(ArithmeticError::Overflow)?; - *maybe_locked = Some(locked); - Ok(()) - }) - .map_err(|_| Unsupported)?; + let order = PendingOrder { + nonce, + fee: message.fee, + block_number: frame_system::Pallet::::current_block_number(), + }; + >::insert(nonce, order); Nonce::::set(nonce.checked_add(1).ok_or(Unsupported)?); From fb3b30c348dc7c31022324313139ae18b09ef237 Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 15 Nov 2024 00:09:54 +0800 Subject: [PATCH 12/68] Remove Fee for V2 --- .../pallets/outbound-queue-v2/src/lib.rs | 2 +- .../src/send_message_impl.rs | 8 +-- bridges/snowbridge/pallets/system/src/lib.rs | 2 +- bridges/snowbridge/pallets/system/src/mock.rs | 31 +++++++++- .../primitives/core/src/outbound/v2.rs | 60 ++----------------- .../primitives/router/src/outbound/v2.rs | 12 ++-- .../src/bridge_to_ethereum_config.rs | 30 +++++++++- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 13 ++-- 8 files changed, 82 insertions(+), 76 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index 402aba7af7cc..9722e0876b9f 100644 --- 
a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -73,7 +73,7 @@ use frame_support::{ pub use pallet::*; use snowbridge_core::{ inbound::{Message as DeliveryMessage, VerificationError, Verifier}, - outbound::v2::{CommandWrapper, Fee, GasMeter, InboundMessage, InboundMessageWrapper, Message}, + outbound::v2::{CommandWrapper, GasMeter, InboundMessage, InboundMessageWrapper, Message}, BasicOperatingMode, RewardLedger, TokenId, }; use snowbridge_merkle_tree::merkle_root; diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs index c37cf0dfa530..97188c9c4bc2 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs @@ -25,9 +25,9 @@ where { type Ticket = Message; - fn validate( - message: &Message, - ) -> Result<(Self::Ticket, Fee<::Balance>), SendError> { + type Balance = T::Balance; + + fn validate(message: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { // The inner payload should not be too large let payload = message.encode(); ensure!( @@ -35,7 +35,7 @@ where SendError::MessageTooLarge ); - let fee = Fee::from(Self::calculate_local_fee()); + let fee = Self::calculate_local_fee(); Ok((message.clone(), fee)) } diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 52cc28b7de75..8a5b0a6edbf9 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -844,7 +844,7 @@ pub mod pallet { T::OutboundQueueV2::validate(&message).map_err(|err| Error::::Send(err))?; let payment = match pays_fee { - PaysFee::Yes(account) | PaysFee::Partial(account) => Some((account, fee.total())), + PaysFee::Yes(account) | PaysFee::Partial(account) => Some((account, fee)), PaysFee::No => None, }; diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index f20f8886450f..53ba8e87c140 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -12,7 +12,11 @@ use xcm_executor::traits::ConvertLocation; use snowbridge_core::{ gwei, meth, - outbound::{v1::ConstantGasMeter, v2::DefaultOutboundQueue}, + outbound::{ + v1::ConstantGasMeter, + v2::{Message, SendMessage}, + SendError as OutboundSendError, SendMessageFeeProvider, + }, sibling_sovereign_account, AgentId, AllowSiblingsOnly, ParaId, PricingParameters, Rewards, }; use sp_runtime::{ @@ -200,6 +204,29 @@ impl BenchmarkHelper for () { } } +pub struct MockOkOutboundQueue; +impl SendMessage for MockOkOutboundQueue { + type Ticket = (); + + type Balance = u128; + + fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), OutboundSendError> { + Ok(((), 1_u128)) + } + + fn deliver(_: Self::Ticket) -> Result { + Ok(H256::zero()) + } +} + +impl SendMessageFeeProvider for MockOkOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } +} + impl crate::Config for Test { type RuntimeEvent = RuntimeEvent; type OutboundQueue = OutboundQueue; @@ -214,7 +241,7 @@ impl crate::Config for Test { type EthereumLocation = EthereumDestination; #[cfg(feature = "runtime-benchmarks")] type Helper = (); - type OutboundQueueV2 = DefaultOutboundQueue; + type OutboundQueueV2 = MockOkOutboundQueue; } // Build genesis storage according to the mock runtime. 
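A minimal sketch of how the `MockOkOutboundQueue` added above can be exercised now that `validate` yields a raw balance instead of a `Fee` wrapper; the test name, the message values, and the import paths are illustrative assumptions rather than part of the pallet's test suite:

    use snowbridge_core::outbound::v2::{Message, SendMessage};
    // Path assumed: the mock lives in the system pallet's mock module.
    use crate::mock::MockOkOutboundQueue;

    #[test]
    fn validate_returns_raw_balance() {
        // Illustrative empty message; field values are not meaningful here.
        let message = Message {
            id: Default::default(),
            origin: Default::default(),
            fee: 0,
            commands: Default::default(),
        };
        // `validate` now returns (ticket, raw local fee) rather than (ticket, Fee { local }).
        let (_ticket, fee) = MockOkOutboundQueue::validate(&message).unwrap();
        assert_eq!(fee, 1_u128);
    }
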
diff --git a/bridges/snowbridge/primitives/core/src/outbound/v2.rs b/bridges/snowbridge/primitives/core/src/outbound/v2.rs index 4443a6ea5297..94015e506285 100644 --- a/bridges/snowbridge/primitives/core/src/outbound/v2.rs +++ b/bridges/snowbridge/primitives/core/src/outbound/v2.rs @@ -2,7 +2,7 @@ // SPDX-FileCopyrightText: 2023 Snowfork //! # Outbound V2 primitives -use crate::outbound::{OperatingMode, SendError, SendMessageFeeProvider}; +use crate::outbound::{OperatingMode, SendError}; use alloy_sol_types::sol; use codec::{Decode, Encode}; use frame_support::{pallet_prelude::ConstU32, BoundedVec}; @@ -230,70 +230,20 @@ pub struct Initializer { pub maximum_required_gas: u64, } -#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] -#[cfg_attr(feature = "std", derive(PartialEq))] -/// Fee for delivering message -pub struct Fee -where - Balance: BaseArithmetic + Unsigned + Copy, -{ - /// Fee to cover cost of processing the message locally - pub local: Balance, -} - -impl Fee -where - Balance: BaseArithmetic + Unsigned + Copy, -{ - pub fn total(&self) -> Balance { - self.local - } -} - -impl From for Fee -where - Balance: BaseArithmetic + Unsigned + Copy, -{ - fn from(local: Balance) -> Self { - Self { local } - } -} - -pub trait SendMessage: SendMessageFeeProvider { +pub trait SendMessage { type Ticket: Clone + Encode + Decode; + type Balance: BaseArithmetic + Unsigned + Copy; + /// Validate an outbound message and return a tuple: /// 1. Ticket for submitting the message /// 2. Delivery fee - fn validate( - message: &Message, - ) -> Result<(Self::Ticket, Fee<::Balance>), SendError>; + fn validate(message: &Message) -> Result<(Self::Ticket, Self::Balance), SendError>; /// Submit the message ticket for eventual delivery to Ethereum fn deliver(ticket: Self::Ticket) -> Result; } -pub struct DefaultOutboundQueue; -impl SendMessage for DefaultOutboundQueue { - type Ticket = (); - - fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { - Ok(((), Fee { local: Default::default() })) - } - - fn deliver(_: Self::Ticket) -> Result { - Ok(H256::zero()) - } -} - -impl SendMessageFeeProvider for DefaultOutboundQueue { - type Balance = u128; - - fn local_fee() -> Self::Balance { - Default::default() - } -} - pub trait GasMeter { /// Measures the maximum amount of gas a command payload will require to *dispatch*, NOT /// including validation & verification. 
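A minimal sketch of a generic caller of the reshaped V2 `SendMessage` trait shown above; the helper name and the idea of handing the raw fee back to the caller are assumptions for illustration, mirroring how the system pallet pairs the fee with the paying account:

    use snowbridge_core::outbound::{
        v2::{Message, SendMessage},
        SendError,
    };
    use sp_core::H256;

    /// Validate, then deliver, returning the message id together with the raw
    /// local fee that the caller is expected to charge.
    fn validate_and_deliver<Queue: SendMessage>(
        message: &Message,
    ) -> Result<(H256, Queue::Balance), SendError> {
        // (ticket, raw local fee): previously this was (ticket, Fee { local }).
        let (ticket, local_fee) = Queue::validate(message)?;
        let message_id = Queue::deliver(ticket)?;
        Ok((message_id, local_fee))
    }
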
diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2.rs b/bridges/snowbridge/primitives/router/src/outbound/v2.rs index f1d06fc9662b..362ab6922979 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2.rs @@ -483,7 +483,7 @@ mod tests { use frame_support::parameter_types; use hex_literal::hex; use snowbridge_core::{ - outbound::{v2::Fee, SendError, SendMessageFeeProvider}, + outbound::{SendError, SendMessageFeeProvider}, AgentIdOf, }; use sp_std::default::Default; @@ -504,8 +504,10 @@ mod tests { impl SendMessage for MockOkOutboundQueue { type Ticket = (); - fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { - Ok(((), Fee { local: 1 })) + type Balance = u128; + + fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { + Ok(((), 1_u128)) } fn deliver(_: Self::Ticket) -> Result { @@ -524,7 +526,9 @@ mod tests { impl SendMessage for MockErrOutboundQueue { type Ticket = (); - fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { + type Balance = u128; + + fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { Err(SendError::MessageTooLarge) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs index a405bd5b002b..3d208dc68208 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs @@ -25,7 +25,7 @@ use parachains_common::{AccountId, Balance}; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{gwei, meth, AllowSiblingsOnly, PricingParameters, Rewards}; use snowbridge_router_primitives::{inbound::v1::MessageToXcm, outbound::v1::EthereumBlobExporter}; -use sp_core::H160; +use sp_core::{H160, H256}; use testnet_parachains_constants::rococo::{ currency::*, fee::WeightToFee, @@ -37,7 +37,10 @@ use crate::xcm_config::RelayNetwork; use benchmark_helpers::DoNothingRouter; use frame_support::{parameter_types, weights::ConstantMultiplier}; use pallet_xcm::EnsureXcm; -use snowbridge_core::outbound::v2::DefaultOutboundQueue; +use snowbridge_core::outbound::{ + v2::{Message, SendMessage}, + SendError, SendMessageFeeProvider, +}; use sp_runtime::{ traits::{ConstU32, ConstU8, Keccak256}, FixedU128, @@ -178,6 +181,29 @@ impl snowbridge_pallet_ethereum_client::Config for Runtime { type WeightInfo = crate::weights::snowbridge_pallet_ethereum_client::WeightInfo; } +pub struct DefaultOutboundQueue; +impl SendMessage for DefaultOutboundQueue { + type Ticket = (); + + type Balance = Balance; + + fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { + Ok(((), Default::default())) + } + + fn deliver(_: Self::Ticket) -> Result { + Ok(H256::zero()) + } +} + +impl SendMessageFeeProvider for DefaultOutboundQueue { + type Balance = Balance; + + fn local_fee() -> Self::Balance { + Default::default() + } +} + impl snowbridge_pallet_system::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OutboundQueue = EthereumOutboundQueue; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 498bd07530db..70a94e8dd630 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -99,17 +99,16 @@ use parachains_common::{ AVERAGE_ON_INITIALIZE_RATIO, NORMAL_DISPATCH_RATIO, }; use snowbridge_core::{ - outbound::v1::{Command, Fee}, + outbound::{ + v1::{Command, Fee}, + v2::InboundMessage, + DryRunError, + }, AgentId, PricingParameters, }; use testnet_parachains_constants::westend::{consensus::*, currency::*, fee::WeightToFee, time::*}; -use xcm::VersionedLocation; - use westend_runtime_constants::system_parachain::{ASSET_HUB_ID, BRIDGE_HUB_ID}; - -use snowbridge_core::outbound::v2::{Fee as FeeV2, InboundMessage}; - -use snowbridge_core::outbound::DryRunError; +use xcm::VersionedLocation; /// The address format for describing accounts. pub type Address = MultiAddress; From 83b6ff851625be9c5a520bdf602607c28e4de668 Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 15 Nov 2024 01:08:43 +0800 Subject: [PATCH 13/68] Reorgnize InboundMessage to abi module --- .../outbound-queue-v2/runtime-api/src/lib.rs | 2 +- .../pallets/outbound-queue-v2/src/api.rs | 5 +- .../pallets/outbound-queue-v2/src/lib.rs | 5 +- .../pallets/outbound-queue-v2/src/test.rs | 2 +- .../primitives/core/src/outbound/v2.rs | 157 ++++++++++-------- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- 6 files changed, 96 insertions(+), 77 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs index 08d10ddd9269..f2c88658c23f 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs @@ -3,7 +3,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::traits::tokens::Balance as BalanceT; -use snowbridge_core::outbound::{v2::InboundMessage, DryRunError}; +use snowbridge_core::outbound::{v2::abi::InboundMessage, DryRunError}; use snowbridge_merkle_tree::MerkleProof; use xcm::prelude::Xcm; diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs index 15c224931081..336979b47a36 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs @@ -6,7 +6,10 @@ use crate::{Config, MessageLeaves}; use frame_support::storage::StorageStreamIter; use snowbridge_core::{ outbound::{ - v2::{CommandWrapper, GasMeter, InboundMessage, Message}, + v2::{ + abi::{CommandWrapper, InboundMessage}, + GasMeter, Message, + }, DryRunError, }, AgentIdOf, diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index 9722e0876b9f..85745101e2f5 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -73,7 +73,10 @@ use frame_support::{ pub use pallet::*; use snowbridge_core::{ inbound::{Message as DeliveryMessage, VerificationError, Verifier}, - outbound::v2::{CommandWrapper, GasMeter, InboundMessage, InboundMessageWrapper, Message}, + outbound::v2::{ + abi::{CommandWrapper, InboundMessage, InboundMessageWrapper}, + GasMeter, Message, + }, BasicOperatingMode, RewardLedger, TokenId, }; use snowbridge_merkle_tree::merkle_root; diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs index b4d70e37a9e4..0c1c2868cb90 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs +++ 
b/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs @@ -13,7 +13,7 @@ use frame_support::{ use codec::Encode; use snowbridge_core::{ outbound::{ - v2::{primary_governance_origin, Command, InboundMessageWrapper, SendMessage}, + v2::{abi::InboundMessageWrapper, primary_governance_origin, Command, SendMessage}, SendError, }, ChannelId, ParaId, diff --git a/bridges/snowbridge/primitives/core/src/outbound/v2.rs b/bridges/snowbridge/primitives/core/src/outbound/v2.rs index 94015e506285..59be8767c2f4 100644 --- a/bridges/snowbridge/primitives/core/src/outbound/v2.rs +++ b/bridges/snowbridge/primitives/core/src/outbound/v2.rs @@ -3,7 +3,6 @@ //! # Outbound V2 primitives use crate::outbound::{OperatingMode, SendError}; -use alloy_sol_types::sol; use codec::{Decode, Encode}; use frame_support::{pallet_prelude::ConstU32, BoundedVec}; use hex_literal::hex; @@ -12,89 +11,103 @@ use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; use sp_core::{RuntimeDebug, H160, H256}; use sp_std::{vec, vec::Vec}; +use crate::outbound::v2::abi::{ + CreateAgentParams, MintForeignTokenParams, RegisterForeignTokenParams, SetOperatingModeParams, + UnlockNativeTokenParams, UpgradeParams, +}; use alloy_primitives::{Address, FixedBytes}; use alloy_sol_types::SolValue; -sol! { - struct InboundMessageWrapper { - // origin - bytes32 origin; - // Message nonce - uint64 nonce; - // Commands - CommandWrapper[] commands; - } +pub mod abi { + use super::MAX_COMMANDS; + use alloy_sol_types::sol; + use codec::{Decode, Encode}; + use frame_support::BoundedVec; + use scale_info::TypeInfo; + use sp_core::{ConstU32, RuntimeDebug, H256}; + use sp_std::vec::Vec; - #[derive(Encode, Decode, RuntimeDebug, PartialEq,TypeInfo)] - struct CommandWrapper { - uint8 kind; - uint64 gas; - bytes payload; - } + sol! { + struct InboundMessageWrapper { + // origin + bytes32 origin; + // Message nonce + uint64 nonce; + // Commands + CommandWrapper[] commands; + } - // Payload for Upgrade - struct UpgradeParams { - // The address of the implementation contract - address implAddress; - // Codehash of the new implementation contract. - bytes32 implCodeHash; - // Parameters used to upgrade storage of the gateway - bytes initParams; - } + #[derive(Encode, Decode, RuntimeDebug, PartialEq,TypeInfo)] + struct CommandWrapper { + uint8 kind; + uint64 gas; + bytes payload; + } - // Payload for CreateAgent - struct CreateAgentParams { - /// @dev The agent ID of the consensus system - bytes32 agentID; - } + // Payload for Upgrade + struct UpgradeParams { + // The address of the implementation contract + address implAddress; + // Codehash of the new implementation contract. 
+ bytes32 implCodeHash; + // Parameters used to upgrade storage of the gateway + bytes initParams; + } - // Payload for SetOperatingMode instruction - struct SetOperatingModeParams { - /// The new operating mode - uint8 mode; - } + // Payload for CreateAgent + struct CreateAgentParams { + /// @dev The agent ID of the consensus system + bytes32 agentID; + } - // Payload for NativeTokenUnlock instruction - struct UnlockNativeTokenParams { - // Token address - address token; - // Recipient address - address recipient; - // Amount to unlock - uint128 amount; - } + // Payload for SetOperatingMode instruction + struct SetOperatingModeParams { + /// The new operating mode + uint8 mode; + } - // Payload for RegisterForeignToken - struct RegisterForeignTokenParams { - /// @dev The token ID (hash of stable location id of token) - bytes32 foreignTokenID; - /// @dev The name of the token - bytes name; - /// @dev The symbol of the token - bytes symbol; - /// @dev The decimal of the token - uint8 decimals; - } + // Payload for NativeTokenUnlock instruction + struct UnlockNativeTokenParams { + // Token address + address token; + // Recipient address + address recipient; + // Amount to unlock + uint128 amount; + } + + // Payload for RegisterForeignToken + struct RegisterForeignTokenParams { + /// @dev The token ID (hash of stable location id of token) + bytes32 foreignTokenID; + /// @dev The name of the token + bytes name; + /// @dev The symbol of the token + bytes symbol; + /// @dev The decimal of the token + uint8 decimals; + } - // Payload for MintForeignTokenParams instruction - struct MintForeignTokenParams { - // Foreign token ID - bytes32 foreignTokenID; - // Recipient address - address recipient; - // Amount to mint - uint128 amount; + // Payload for MintForeignTokenParams instruction + struct MintForeignTokenParams { + // Foreign token ID + bytes32 foreignTokenID; + // Recipient address + address recipient; + // Amount to mint + uint128 amount; + } } -} -#[derive(Encode, Decode, TypeInfo, PartialEq, Clone, RuntimeDebug)] -pub struct InboundMessage { - /// Origin - pub origin: H256, - /// Nonce - pub nonce: u64, - /// Commands - pub commands: BoundedVec>, + #[derive(Encode, Decode, TypeInfo, PartialEq, Clone, RuntimeDebug)] + pub struct InboundMessage { + /// Origin + pub origin: H256, + /// Nonce + pub nonce: u64, + /// Commands + pub commands: BoundedVec>, + } } pub const MAX_COMMANDS: u32 = 8; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 70a94e8dd630..0ac65c20a86c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -101,7 +101,7 @@ use parachains_common::{ use snowbridge_core::{ outbound::{ v1::{Command, Fee}, - v2::InboundMessage, + v2::abi::InboundMessage, DryRunError, }, AgentId, PricingParameters, From f08e36ece1160f2a1b459b4a72f49f68c7fcc8ed Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 15 Nov 2024 01:44:24 +0800 Subject: [PATCH 14/68] Seperate XcmConverter --- .../pallets/outbound-queue-v2/src/api.rs | 2 +- .../src/outbound/{v2.rs => v2/convert.rs} | 712 +---------------- .../primitives/router/src/outbound/v2/mod.rs | 718 ++++++++++++++++++ 3 files changed, 727 insertions(+), 705 deletions(-) rename bridges/snowbridge/primitives/router/src/outbound/{v2.rs => v2/convert.rs} (59%) create mode 100644 bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs diff --git 
a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs index 336979b47a36..754cc59b022e 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs @@ -15,7 +15,7 @@ use snowbridge_core::{ AgentIdOf, }; use snowbridge_merkle_tree::{merkle_proof, MerkleProof}; -use snowbridge_router_primitives::outbound::v2::XcmConverter; +use snowbridge_router_primitives::outbound::v2::convert::XcmConverter; use sp_core::Get; use sp_std::{default::Default, vec::Vec}; use xcm::{ diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs similarity index 59% rename from bridges/snowbridge/primitives/router/src/outbound/v2.rs rename to bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs index 362ab6922979..a6a0de1bd75e 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs @@ -1,162 +1,18 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork -//! Converts XCM messages into simpler commands that can be processed by the Gateway contract +//! Converts XCM messages into InboundMessage that can be processed by the Gateway contract -use codec::{Decode, Encode}; use core::slice::Iter; -use sp_std::ops::ControlFlow; - -use frame_support::{ - ensure, - traits::{Contains, Get, ProcessMessageError}, - BoundedVec, -}; +use frame_support::{ensure, BoundedVec}; use snowbridge_core::{ - outbound::v2::{Command, Message, SendMessage}, + outbound::v2::{Command, Message}, AgentId, TokenId, TokenIdOf, TokenIdOf as LocationIdOf, }; -use sp_core::{H160, H256}; +use sp_core::H160; use sp_runtime::traits::MaybeEquivalence; use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; use xcm::prelude::*; -use xcm_builder::{CreateMatcher, ExporterFor, MatchXcm}; -use xcm_executor::traits::{ConvertLocation, ExportXcm}; - -const TARGET: &'static str = "xcm::ethereum_blob_exporter::v2"; - -pub struct EthereumBlobExporter< - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, ->( - PhantomData<( - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, - )>, -); - -impl - ExportXcm - for EthereumBlobExporter< - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, - > -where - UniversalLocation: Get, - EthereumNetwork: Get, - OutboundQueue: SendMessage, - AgentHashedDescription: ConvertLocation, - ConvertAssetId: MaybeEquivalence, -{ - type Ticket = (Vec, XcmHash); - - fn validate( - network: NetworkId, - _channel: u32, - universal_source: &mut Option, - destination: &mut Option, - message: &mut Option>, - ) -> SendResult { - let expected_network = EthereumNetwork::get(); - let universal_location = UniversalLocation::get(); - - if network != expected_network { - log::trace!(target: TARGET, "skipped due to unmatched bridge network {network:?}."); - return Err(SendError::NotApplicable) - } - - // Cloning destination to avoid modifying the value so subsequent exporters can use it. 
- let dest = destination.clone().ok_or(SendError::MissingArgument)?; - if dest != Here { - log::trace!(target: TARGET, "skipped due to unmatched remote destination {dest:?}."); - return Err(SendError::NotApplicable) - } - - // Cloning universal_source to avoid modifying the value so subsequent exporters can use it. - let (local_net, local_sub) = universal_source.clone() - .ok_or_else(|| { - log::error!(target: TARGET, "universal source not provided."); - SendError::MissingArgument - })? - .split_global() - .map_err(|()| { - log::error!(target: TARGET, "could not get global consensus from universal source '{universal_source:?}'."); - SendError::NotApplicable - })?; - - if Ok(local_net) != universal_location.global_consensus() { - log::trace!(target: TARGET, "skipped due to unmatched relay network {local_net:?}."); - return Err(SendError::NotApplicable) - } - - let source_location = Location::new(1, local_sub.clone()); - - let agent_id = match AgentHashedDescription::convert_location(&source_location) { - Some(id) => id, - None => { - log::error!(target: TARGET, "unroutable due to not being able to create agent id. '{source_location:?}'"); - return Err(SendError::NotApplicable) - }, - }; - - let message = message.clone().ok_or_else(|| { - log::error!(target: TARGET, "xcm message not provided."); - SendError::MissingArgument - })?; - - // Inspect AliasOrigin as V2 message - let mut instructions = message.clone().0; - let result = instructions.matcher().match_next_inst_while( - |_| true, - |inst| { - return match inst { - AliasOrigin(..) => Err(ProcessMessageError::Yield), - _ => Ok(ControlFlow::Continue(())), - } - }, - ); - ensure!(result.is_err(), SendError::NotApplicable); - - let mut converter = - XcmConverter::::new(&message, expected_network, agent_id); - let message = converter.convert().map_err(|err| { - log::error!(target: TARGET, "unroutable due to pattern matching error '{err:?}'."); - SendError::Unroutable - })?; - - // validate the message - let (ticket, _) = OutboundQueue::validate(&message).map_err(|err| { - log::error!(target: TARGET, "OutboundQueue validation of message failed. {err:?}"); - SendError::Unroutable - })?; - - Ok(((ticket.encode(), XcmHash::from(message.id)), Assets::default())) - } - - fn deliver(blob: (Vec, XcmHash)) -> Result { - let ticket: OutboundQueue::Ticket = OutboundQueue::Ticket::decode(&mut blob.0.as_ref()) - .map_err(|_| { - log::trace!(target: TARGET, "undeliverable due to decoding error"); - SendError::NotApplicable - })?; - - let message_id = OutboundQueue::deliver(ticket).map_err(|_| { - log::error!(target: TARGET, "OutboundQueue submit of message failed"); - SendError::Transport("other transport error") - })?; - - log::info!(target: TARGET, "message delivered {message_id:#?}."); - Ok(message_id.into()) - } -} +use xcm_executor::traits::ConvertLocation; /// Errors that can be thrown to the pattern matching step. #[derive(PartialEq, Debug)] @@ -440,455 +296,15 @@ where } } -/// An adapter for the implementation of `ExporterFor`, which attempts to find the -/// `(bridge_location, payment)` for the requested `network` and `remote_location` and `xcm` -/// in the provided `T` table containing various exporters. 
-pub struct XcmFilterExporter(core::marker::PhantomData<(T, M)>); -impl>> ExporterFor for XcmFilterExporter { - fn exporter_for( - network: &NetworkId, - remote_location: &InteriorLocation, - xcm: &Xcm<()>, - ) -> Option<(Location, Option)> { - // check the XCM - if !M::contains(xcm) { - return None - } - // check `network` and `remote_location` - T::exporter_for(network, remote_location, xcm) - } -} - -/// Xcm for SnowbridgeV2 which requires XCMV5 -pub struct XcmForSnowbridgeV2; -impl Contains> for XcmForSnowbridgeV2 { - fn contains(xcm: &Xcm<()>) -> bool { - let mut instructions = xcm.clone().0; - let result = instructions.matcher().match_next_inst_while( - |_| true, - |inst| { - return match inst { - AliasOrigin(..) => Err(ProcessMessageError::Yield), - _ => Ok(ControlFlow::Continue(())), - } - }, - ); - result.is_err() - } -} - #[cfg(test)] mod tests { use super::*; + use crate::outbound::v2::tests::{BridgedNetwork, MockTokenIdConvert, NonBridgedNetwork}; use frame_support::parameter_types; use hex_literal::hex; - use snowbridge_core::{ - outbound::{SendError, SendMessageFeeProvider}, - AgentIdOf, - }; + use snowbridge_core::AgentIdOf; use sp_std::default::Default; - use xcm::{ - latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, - prelude::SendError as XcmSendError, - }; - - parameter_types! { - const MaxMessageSize: u32 = u32::MAX; - const RelayNetwork: NetworkId = Polkadot; - UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(1013)].into(); - const BridgedNetwork: NetworkId = Ethereum{ chain_id: 1 }; - const NonBridgedNetwork: NetworkId = Ethereum{ chain_id: 2 }; - } - - struct MockOkOutboundQueue; - impl SendMessage for MockOkOutboundQueue { - type Ticket = (); - - type Balance = u128; - - fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { - Ok(((), 1_u128)) - } - - fn deliver(_: Self::Ticket) -> Result { - Ok(H256::zero()) - } - } - - impl SendMessageFeeProvider for MockOkOutboundQueue { - type Balance = u128; - - fn local_fee() -> Self::Balance { - 1 - } - } - struct MockErrOutboundQueue; - impl SendMessage for MockErrOutboundQueue { - type Ticket = (); - - type Balance = u128; - - fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { - Err(SendError::MessageTooLarge) - } - - fn deliver(_: Self::Ticket) -> Result { - Err(SendError::MessageTooLarge) - } - } - - impl SendMessageFeeProvider for MockErrOutboundQueue { - type Balance = u128; - - fn local_fee() -> Self::Balance { - 1 - } - } - - pub struct MockTokenIdConvert; - impl MaybeEquivalence for MockTokenIdConvert { - fn convert(_id: &TokenId) -> Option { - Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) - } - fn convert_back(_loc: &Location) -> Option { - None - } - } - - #[test] - fn exporter_validate_with_unknown_network_yields_not_applicable() { - let network = Ethereum { chain_id: 1337 }; - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = None; - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_with_invalid_destination_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = None; - let 
mut destination: Option = None; - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_with_x8_destination_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = Some( - [ - OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, - OnlyChild, - ] - .into(), - ); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_without_universal_source_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_without_global_universal_location_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = Here.into(); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_without_global_bridge_location_yields_not_applicable() { - let network = NonBridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = Here.into(); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_with_remote_universal_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Kusama), Parachain(1000)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_without_para_id_in_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = 
Some(GlobalConsensus(Polkadot).into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_complex_para_id_in_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000), PalletInstance(12)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_without_xcm_message_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_with_max_target_fee_yields_unroutable() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; - let fees: Assets = vec![fee.clone()].into(); - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let mut message: Option> = Some( - vec![ - WithdrawAsset(fees), - BuyExecution { fees: fee.clone(), weight_limit: Unlimited }, - ExpectAsset(fee.into()), - WithdrawAsset(assets), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: Some(network), key: beneficiary_address } - .into(), - }, - SetTopic([0; 32]), - ] - .into(), - ); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_with_unparsable_xcm_yields_unroutable() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let channel: u32 = 0; - let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; - let fees: Assets = vec![fee.clone()].into(); - - let mut message: Option> = Some( - vec![WithdrawAsset(fees), 
BuyExecution { fees: fee, weight_limit: Unlimited }].into(), - ); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_xcm_success_case_1() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let fee = assets.clone().get(0).unwrap().clone(); - let filter: AssetFilter = assets.clone().into(); - - let mut message: Option> = Some( - vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee.clone() }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(), - ); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - - assert!(result.is_ok()); - } - - #[test] - fn exporter_deliver_with_submit_failure_yields_unroutable() { - let result = EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockErrOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::deliver((hex!("deadbeef").to_vec(), XcmHash::default())); - assert_eq!(result, Err(XcmSendError::Transport("other transport error"))) - } + use xcm::latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; #[test] fn xcm_converter_convert_success() { @@ -1593,116 +1009,4 @@ mod tests { let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::InvalidAsset)); } - - #[test] - fn exporter_validate_with_invalid_dest_does_not_alter_destination() { - let network = BridgedNetwork::get(); - let destination: InteriorLocation = Parachain(1000).into(); - - let universal_source: InteriorLocation = - [GlobalConsensus(Polkadot), Parachain(1000)].into(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let fee = assets.clone().get(0).unwrap().clone(); - let filter: AssetFilter = assets.clone().into(); - let msg: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut msg_wrapper: Option> = Some(msg.clone()); - let mut dest_wrapper = Some(destination.clone()); - let mut universal_source_wrapper = Some(universal_source.clone()); - - let result = EthereumBlobExporter::< - 
UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate( - network, - channel, - &mut universal_source_wrapper, - &mut dest_wrapper, - &mut msg_wrapper, - ); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - - // ensure mutable variables are not changed - assert_eq!(Some(destination), dest_wrapper); - assert_eq!(Some(msg), msg_wrapper); - assert_eq!(Some(universal_source), universal_source_wrapper); - } - - #[test] - fn exporter_validate_with_invalid_universal_source_does_not_alter_universal_source() { - let network = BridgedNetwork::get(); - let destination: InteriorLocation = Here.into(); - - let universal_source: InteriorLocation = - [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)].into(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let fee = assets.clone().get(0).unwrap().clone(); - let filter: AssetFilter = assets.clone().into(); - let msg: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut msg_wrapper: Option> = Some(msg.clone()); - let mut dest_wrapper = Some(destination.clone()); - let mut universal_source_wrapper = Some(universal_source.clone()); - - let result = EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate( - network, - channel, - &mut universal_source_wrapper, - &mut dest_wrapper, - &mut msg_wrapper, - ); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - - // ensure mutable variables are not changed - assert_eq!(Some(destination), dest_wrapper); - assert_eq!(Some(msg), msg_wrapper); - assert_eq!(Some(universal_source), universal_source_wrapper); - } } diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs new file mode 100644 index 000000000000..292fede50fe0 --- /dev/null +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs @@ -0,0 +1,718 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! 
Converts XCM messages into simpler commands that can be processed by the Gateway contract + +pub mod convert; +use convert::XcmConverter; + +use codec::{Decode, Encode}; +use frame_support::{ + ensure, + traits::{Contains, Get, ProcessMessageError}, +}; +use snowbridge_core::{outbound::v2::SendMessage, TokenId}; +use sp_core::H256; +use sp_runtime::traits::MaybeEquivalence; +use sp_std::{marker::PhantomData, ops::ControlFlow, prelude::*}; +use xcm::prelude::*; +use xcm_builder::{CreateMatcher, ExporterFor, MatchXcm}; +use xcm_executor::traits::{ConvertLocation, ExportXcm}; + +const TARGET: &'static str = "xcm::ethereum_blob_exporter::v2"; + +pub struct EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, +>( + PhantomData<( + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + )>, +); + +impl + ExportXcm + for EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + > +where + UniversalLocation: Get, + EthereumNetwork: Get, + OutboundQueue: SendMessage, + AgentHashedDescription: ConvertLocation, + ConvertAssetId: MaybeEquivalence, +{ + type Ticket = (Vec, XcmHash); + + fn validate( + network: NetworkId, + _channel: u32, + universal_source: &mut Option, + destination: &mut Option, + message: &mut Option>, + ) -> SendResult { + let expected_network = EthereumNetwork::get(); + let universal_location = UniversalLocation::get(); + + if network != expected_network { + log::trace!(target: TARGET, "skipped due to unmatched bridge network {network:?}."); + return Err(SendError::NotApplicable) + } + + // Cloning destination to avoid modifying the value so subsequent exporters can use it. + let dest = destination.clone().ok_or(SendError::MissingArgument)?; + if dest != Here { + log::trace!(target: TARGET, "skipped due to unmatched remote destination {dest:?}."); + return Err(SendError::NotApplicable) + } + + // Cloning universal_source to avoid modifying the value so subsequent exporters can use it. + let (local_net, local_sub) = universal_source.clone() + .ok_or_else(|| { + log::error!(target: TARGET, "universal source not provided."); + SendError::MissingArgument + })? + .split_global() + .map_err(|()| { + log::error!(target: TARGET, "could not get global consensus from universal source '{universal_source:?}'."); + SendError::NotApplicable + })?; + + if Ok(local_net) != universal_location.global_consensus() { + log::trace!(target: TARGET, "skipped due to unmatched relay network {local_net:?}."); + return Err(SendError::NotApplicable) + } + + let source_location = Location::new(1, local_sub.clone()); + + let agent_id = match AgentHashedDescription::convert_location(&source_location) { + Some(id) => id, + None => { + log::error!(target: TARGET, "unroutable due to not being able to create agent id. '{source_location:?}'"); + return Err(SendError::NotApplicable) + }, + }; + + let message = message.clone().ok_or_else(|| { + log::error!(target: TARGET, "xcm message not provided."); + SendError::MissingArgument + })?; + + // Inspect AliasOrigin as V2 message + let mut instructions = message.clone().0; + let result = instructions.matcher().match_next_inst_while( + |_| true, + |inst| { + return match inst { + AliasOrigin(..) 
=> Err(ProcessMessageError::Yield), + _ => Ok(ControlFlow::Continue(())), + } + }, + ); + ensure!(result.is_err(), SendError::NotApplicable); + + let mut converter = + XcmConverter::::new(&message, expected_network, agent_id); + let message = converter.convert().map_err(|err| { + log::error!(target: TARGET, "unroutable due to pattern matching error '{err:?}'."); + SendError::Unroutable + })?; + + // validate the message + let (ticket, _) = OutboundQueue::validate(&message).map_err(|err| { + log::error!(target: TARGET, "OutboundQueue validation of message failed. {err:?}"); + SendError::Unroutable + })?; + + Ok(((ticket.encode(), XcmHash::from(message.id)), Assets::default())) + } + + fn deliver(blob: (Vec, XcmHash)) -> Result { + let ticket: OutboundQueue::Ticket = OutboundQueue::Ticket::decode(&mut blob.0.as_ref()) + .map_err(|_| { + log::trace!(target: TARGET, "undeliverable due to decoding error"); + SendError::NotApplicable + })?; + + let message_id = OutboundQueue::deliver(ticket).map_err(|_| { + log::error!(target: TARGET, "OutboundQueue submit of message failed"); + SendError::Transport("other transport error") + })?; + + log::info!(target: TARGET, "message delivered {message_id:#?}."); + Ok(message_id.into()) + } +} + +/// An adapter for the implementation of `ExporterFor`, which attempts to find the +/// `(bridge_location, payment)` for the requested `network` and `remote_location` and `xcm` +/// in the provided `T` table containing various exporters. +pub struct XcmFilterExporter(core::marker::PhantomData<(T, M)>); +impl>> ExporterFor for XcmFilterExporter { + fn exporter_for( + network: &NetworkId, + remote_location: &InteriorLocation, + xcm: &Xcm<()>, + ) -> Option<(Location, Option)> { + // check the XCM + if !M::contains(xcm) { + return None + } + // check `network` and `remote_location` + T::exporter_for(network, remote_location, xcm) + } +} + +/// Xcm for SnowbridgeV2 which requires XCMV5 +pub struct XcmForSnowbridgeV2; +impl Contains> for XcmForSnowbridgeV2 { + fn contains(xcm: &Xcm<()>) -> bool { + let mut instructions = xcm.clone().0; + let result = instructions.matcher().match_next_inst_while( + |_| true, + |inst| { + return match inst { + AliasOrigin(..) => Err(ProcessMessageError::Yield), + _ => Ok(ControlFlow::Continue(())), + } + }, + ); + result.is_err() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::parameter_types; + use hex_literal::hex; + use snowbridge_core::{ + outbound::{v2::Message, SendError, SendMessageFeeProvider}, + AgentIdOf, + }; + use sp_std::default::Default; + use xcm::{ + latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, + prelude::SendError as XcmSendError, + }; + + parameter_types! 
{ + const MaxMessageSize: u32 = u32::MAX; + const RelayNetwork: NetworkId = Polkadot; + UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(1013)].into(); + pub const BridgedNetwork: NetworkId = Ethereum{ chain_id: 1 }; + pub const NonBridgedNetwork: NetworkId = Ethereum{ chain_id: 2 }; + } + + struct MockOkOutboundQueue; + impl SendMessage for MockOkOutboundQueue { + type Ticket = (); + + type Balance = u128; + + fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { + Ok(((), 1_u128)) + } + + fn deliver(_: Self::Ticket) -> Result { + Ok(H256::zero()) + } + } + + impl SendMessageFeeProvider for MockOkOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } + } + struct MockErrOutboundQueue; + impl SendMessage for MockErrOutboundQueue { + type Ticket = (); + + type Balance = u128; + + fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { + Err(SendError::MessageTooLarge) + } + + fn deliver(_: Self::Ticket) -> Result { + Err(SendError::MessageTooLarge) + } + } + + impl SendMessageFeeProvider for MockErrOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } + } + + pub struct MockTokenIdConvert; + impl MaybeEquivalence for MockTokenIdConvert { + fn convert(_id: &TokenId) -> Option { + Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) + } + fn convert_back(_loc: &Location) -> Option { + None + } + } + + #[test] + fn exporter_validate_with_unknown_network_yields_not_applicable() { + let network = Ethereum { chain_id: 1337 }; + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_with_invalid_destination_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_with_x8_destination_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = Some( + [ + OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, + OnlyChild, + ] + .into(), + ); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_without_universal_source_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + 
EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_without_global_universal_location_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_without_global_bridge_location_yields_not_applicable() { + let network = NonBridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_with_remote_universal_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Kusama), Parachain(1000)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_without_para_id_in_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Some(GlobalConsensus(Polkadot).into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_complex_para_id_in_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000), PalletInstance(12)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_without_xcm_message_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + let mut destination: Option = 
Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); + } + + #[test] + fn exporter_validate_with_max_target_fee_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; + let fees: Assets = vec![fee.clone()].into(); + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(fees), + BuyExecution { fees: fee.clone(), weight_limit: Unlimited }, + ExpectAsset(fee.into()), + WithdrawAsset(assets), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: Some(network), key: beneficiary_address } + .into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_with_unparsable_xcm_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let channel: u32 = 0; + let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; + let fees: Assets = vec![fee.clone()].into(); + + let mut message: Option> = Some( + vec![WithdrawAsset(fees), BuyExecution { fees: fee, weight_limit: Unlimited }].into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + } + + #[test] + fn exporter_validate_xcm_success_case_1() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee.clone() }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: 
AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert!(result.is_ok()); + } + + #[test] + fn exporter_deliver_with_submit_failure_yields_unroutable() { + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockErrOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::deliver((hex!("deadbeef").to_vec(), XcmHash::default())); + assert_eq!(result, Err(XcmSendError::Transport("other transport error"))) + } + + #[test] + fn exporter_validate_with_invalid_dest_does_not_alter_destination() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Parachain(1000).into(); + + let universal_source: InteriorLocation = + [GlobalConsensus(Polkadot), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut dest_wrapper = Some(destination.clone()); + let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate( + network, + channel, + &mut universal_source_wrapper, + &mut dest_wrapper, + &mut msg_wrapper, + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); + } + + #[test] + fn exporter_validate_with_invalid_universal_source_does_not_alter_universal_source() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Here.into(); + + let universal_source: InteriorLocation = + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut 
dest_wrapper = Some(destination.clone()); + let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate( + network, + channel, + &mut universal_source_wrapper, + &mut dest_wrapper, + &mut msg_wrapper, + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); + } +} From af928badbe5916376de1896ab64edff6ff3a7e92 Mon Sep 17 00:00:00 2001 From: ron Date: Tue, 19 Nov 2024 21:15:08 +0800 Subject: [PATCH 15/68] Support multiple commands in one message --- .../router/src/outbound/v2/convert.rs | 255 +++++++----------- .../primitives/router/src/outbound/v2/mod.rs | 4 +- .../src/tests/snowbridge_v2.rs | 156 +++++++++++ 3 files changed, 252 insertions(+), 163 deletions(-) diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs index a6a0de1bd75e..9e4766e67c8d 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs @@ -48,7 +48,6 @@ macro_rules! match_expression { pub struct XcmConverter<'a, ConvertAssetId, Call> { iter: Peekable>>, - message: Vec>, ethereum_network: NetworkId, agent_id: AgentId, _marker: PhantomData, @@ -59,7 +58,6 @@ where { pub fn new(message: &'a Xcm, ethereum_network: NetworkId, agent_id: AgentId) -> Self { Self { - message: message.clone().inner().into(), iter: message.inner().iter().peekable(), ethereum_network, agent_id, @@ -68,115 +66,18 @@ where } pub fn convert(&mut self) -> Result { - let result = match self.jump_to() { - // PNA - Ok(ReserveAssetDeposited { .. }) => self.send_native_tokens_message(), - // ENA - Ok(WithdrawAsset { .. }) => self.send_tokens_message(), - Err(e) => Err(e), - _ => return Err(XcmConverterError::UnexpectedInstruction), - }?; - - // All xcm instructions must be consumed before exit. - if self.next().is_ok() { - return Err(XcmConverterError::EndOfXcmMessageExpected) - } - + let result = self.to_ethereum_message()?; Ok(result) } - /// Convert the xcm for Ethereum-native token from AH into the Message which will be executed - /// on Ethereum Gateway contract, we expect an input of the form: - /// # WithdrawAsset(WETH_FEE) - /// # PayFees(WETH_FEE) - /// # WithdrawAsset(ENA) - /// # AliasOrigin(Origin) - /// # DepositAsset(ENA) - /// # SetTopic - fn send_tokens_message(&mut self) -> Result { - use XcmConverterError::*; - - // Get fee amount - let fee_amount = self.extract_remote_fee()?; - - // Get the reserve assets from WithdrawAsset. - let reserve_assets = - match_expression!(self.next()?, WithdrawAsset(reserve_assets), reserve_assets) - .ok_or(WithdrawAssetExpected)?; - - // Check AliasOrigin. - let origin_loc = match_expression!(self.next()?, AliasOrigin(origin), origin) - .ok_or(AliasOriginExpected)?; - let origin = LocationIdOf::convert_location(&origin_loc).ok_or(InvalidOrigin)?; - - let (deposit_assets, beneficiary) = match_expression!( - self.next()?, - DepositAsset { assets, beneficiary }, - (assets, beneficiary) - ) - .ok_or(DepositAssetExpected)?; - - // assert that the beneficiary is AccountKey20. 
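// Illustrative sketch, not part of this patch: the beneficiary check referenced in the
// comment above reduces an `AccountKey20` beneficiary `Location` to the `H160` recipient
// carried in the outbound command. Hypothetical helper for illustration only; the real
// converter also accepts `Some(network)` when it matches the bridged Ethereum network.
// Assumes `sp_core::H160` and the XCM prelude are in scope.
use sp_core::H160;
use xcm::prelude::*;

fn beneficiary_to_recipient(beneficiary: &Location) -> Option<H160> {
    // Only a bare, relative `AccountKey20` junction is treated as a valid recipient.
    match beneficiary.unpack() {
        (0, [AccountKey20 { network: None, key }]) => Some(H160(*key)),
        _ => None,
    }
}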
- let recipient = match_expression!( - beneficiary.unpack(), - (0, [AccountKey20 { network, key }]) - if self.network_matches(network), - H160(*key) - ) - .ok_or(BeneficiaryResolutionFailed)?; - - // Make sure there are reserved assets. - if reserve_assets.len() == 0 { - return Err(NoReserveAssets) - } - - // Check the the deposit asset filter matches what was reserved. - if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) { - return Err(FilterDoesNotConsumeAllAssets) - } - - // We only support a single asset at a time. - ensure!(reserve_assets.len() == 1, TooManyAssets); - let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; - - // only fungible asset is allowed - let (token, amount) = match reserve_asset { - Asset { id: AssetId(inner_location), fun: Fungible(amount) } => - match inner_location.unpack() { - (0, [AccountKey20 { network, key }]) if self.network_matches(network) => - Some((H160(*key), *amount)), - _ => None, - }, - _ => None, - } - .ok_or(AssetResolutionFailed)?; - - // transfer amount must be greater than 0. - ensure!(amount > 0, ZeroAssetTransfer); - - // ensure SetTopic exists - let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; - - let message = Message { - id: (*topic_id).into(), - origin, - fee: fee_amount, - commands: BoundedVec::try_from(vec![Command::UnlockNativeToken { - agent_id: self.agent_id, - token, - recipient, - amount, - }]) - .map_err(|_| TooManyCommands)?, - }; - - Ok(message) - } - fn next(&mut self) -> Result<&'a Instruction, XcmConverterError> { self.iter.next().ok_or(XcmConverterError::UnexpectedEndOfXcm) } + fn peek(&mut self) -> Result<&&'a Instruction, XcmConverterError> { + self.iter.peek().ok_or(XcmConverterError::UnexpectedEndOfXcm) + } + fn network_matches(&self, network: &Option) -> bool { if let Some(network) = network { *network == self.ethereum_network @@ -185,31 +86,58 @@ where } } - /// Convert the xcm for Polkadot-native token from AH into the Message which will be executed + /// Extract the fee asset item from PayFees(V5) + fn extract_remote_fee(&mut self) -> Result { + use XcmConverterError::*; + let _ = match_expression!(self.next()?, WithdrawAsset(fee), fee) + .ok_or(WithdrawAssetExpected)?; + let fee_asset = + match_expression!(self.next()?, PayFees { asset: fee }, fee).ok_or(InvalidFeeAsset)?; + // Todo: Validate fee asset is WETH + let fee_amount = match fee_asset { + Asset { id: _, fun: Fungible(amount) } => Some(*amount), + _ => None, + } + .ok_or(AssetResolutionFailed)?; + Ok(fee_amount) + } + + /// Convert the xcm for into the Message which will be executed /// on Ethereum Gateway contract, we expect an input of the form: /// # WithdrawAsset(WETH) /// # PayFees(WETH) - /// # ReserveAssetDeposited(PNA) + /// # ReserveAssetDeposited(PNA) | WithdrawAsset(ENA) /// # AliasOrigin(Origin) - /// # DepositAsset(PNA) + /// # DepositAsset(PNA|ENA) /// # SetTopic - fn send_native_tokens_message(&mut self) -> Result { + fn to_ethereum_message(&mut self) -> Result { use XcmConverterError::*; // Get fee amount let fee_amount = self.extract_remote_fee()?; - // Get the reserve assets. - let reserve_assets = - match_expression!(self.next()?, ReserveAssetDeposited(reserve_assets), reserve_assets) - .ok_or(ReserveAssetDepositedExpected)?; + // Get ENA reserve asset from WithdrawAsset. 
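// Illustrative sketch, not part of this patch: an XCM of the shape the converter's doc
// comment above describes, for an Ethereum-native asset (ENA) transfer. `fee`, `weth`,
// `origin` and `beneficiary` are placeholders; assumes the XCM prelude is in scope.
use xcm::prelude::*;

fn example_input_xcm(fee: Asset, weth: Asset, origin: Location, beneficiary: Location) -> Xcm<()> {
    Xcm(vec![
        WithdrawAsset(fee.clone().into()),           // remote fee, expected in WETH
        PayFees { asset: fee },                      // consumed by extract_remote_fee
        WithdrawAsset(weth.into()),                  // the ENA to unlock on Ethereum
        AliasOrigin(origin),                         // preserved origin, becomes the message origin
        DepositAsset { assets: Wild(All), beneficiary },
        SetTopic([0u8; 32]),                         // becomes the message id
    ])
}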
+ let enas = + match_expression!(self.peek(), Ok(WithdrawAsset(reserve_assets)), reserve_assets); + if enas.is_some() { + let _ = self.next(); + } + // Get PNA reserve asset from ReserveAssetDeposited + let pnas = match_expression!( + self.peek(), + Ok(ReserveAssetDeposited(reserve_assets)), + reserve_assets + ); + if pnas.is_some() { + let _ = self.next(); + } // Check AliasOrigin. let origin_loc = match_expression!(self.next()?, AliasOrigin(origin), origin) .ok_or(AliasOriginExpected)?; let origin = LocationIdOf::convert_location(&origin_loc).ok_or(InvalidOrigin)?; - let (deposit_assets, beneficiary) = match_expression!( + let (_, beneficiary) = match_expression!( self.next()?, DepositAsset { assets, beneficiary }, (assets, beneficiary) @@ -226,73 +154,76 @@ where .ok_or(BeneficiaryResolutionFailed)?; // Make sure there are reserved assets. - if reserve_assets.len() == 0 { + if enas.is_none() && pnas.is_none() { return Err(NoReserveAssets) } - // Check the the deposit asset filter matches what was reserved. - if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) { - return Err(FilterDoesNotConsumeAllAssets) + let mut commands: Vec = Vec::new(); + + if let Some(enas) = enas { + for ena in enas.clone().inner().iter() { + // only fungible asset is allowed + let (token, amount) = match ena { + Asset { id: AssetId(inner_location), fun: Fungible(amount) } => + match inner_location.unpack() { + (0, [AccountKey20 { network, key }]) + if self.network_matches(network) => + Some((H160(*key), *amount)), + _ => None, + }, + _ => None, + } + .ok_or(AssetResolutionFailed)?; + + // transfer amount must be greater than 0. + ensure!(amount > 0, ZeroAssetTransfer); + + commands.push(Command::UnlockNativeToken { + agent_id: self.agent_id, + token, + recipient, + amount, + }); + } } - // We only support a single asset at a time. - ensure!(reserve_assets.len() == 1, TooManyAssets); - let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; + if let Some(pnas) = pnas { + for pna in pnas.clone().inner().iter() { + let (asset_id, amount) = match pna { + Asset { id: AssetId(inner_location), fun: Fungible(amount) } => + Some((inner_location.clone(), *amount)), + _ => None, + } + .ok_or(AssetResolutionFailed)?; - // only fungible asset is allowed - let (asset_id, amount) = match reserve_asset { - Asset { id: AssetId(inner_location), fun: Fungible(amount) } => - Some((inner_location.clone(), *amount)), - _ => None, - } - .ok_or(AssetResolutionFailed)?; + // transfer amount must be greater than 0. + ensure!(amount > 0, ZeroAssetTransfer); - // transfer amount must be greater than 0. 
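// Illustrative sketch, not part of this patch: the registration check applied to each
// Polkadot-native asset (PNA) below. The asset location must round-trip through the
// token-id mapping, which only holds once the asset has been registered (for example via
// `EthereumSystem::register_token`). Assumes `snowbridge_core::{TokenId, TokenIdOf}`,
// the `ConvertLocation` trait and a `MaybeEquivalence<TokenId, Location>` implementation
// are in scope, mirroring the imports this file uses.
use snowbridge_core::{TokenId, TokenIdOf};
use sp_runtime::traits::MaybeEquivalence;
use xcm::prelude::Location;
use xcm_executor::traits::ConvertLocation;

fn is_registered_pna<C: MaybeEquivalence<TokenId, Location>>(asset_id: &Location) -> bool {
    match TokenIdOf::convert_location(asset_id) {
        // Registered only if converting the derived token id back yields the same location.
        Some(token_id) => C::convert(&token_id).as_ref() == Some(asset_id),
        None => false,
    }
}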
- ensure!(amount > 0, ZeroAssetTransfer); + // Ensure PNA already registered + let token_id = TokenIdOf::convert_location(&asset_id).ok_or(InvalidAsset)?; + let expected_asset_id = ConvertAssetId::convert(&token_id).ok_or(InvalidAsset)?; + ensure!(asset_id == expected_asset_id, InvalidAsset); - // Ensure PNA already registered - let token_id = TokenIdOf::convert_location(&asset_id).ok_or(InvalidAsset)?; - let expected_asset_id = ConvertAssetId::convert(&token_id).ok_or(InvalidAsset)?; - ensure!(asset_id == expected_asset_id, InvalidAsset); + commands.push(Command::MintForeignToken { token_id, recipient, amount }); + } + } // ensure SetTopic exists let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; let message = Message { + id: (*topic_id).into(), origin, fee: fee_amount, - id: (*topic_id).into(), - commands: BoundedVec::try_from(vec![Command::MintForeignToken { - token_id, - recipient, - amount, - }]) - .map_err(|_| TooManyCommands)?, + commands: BoundedVec::try_from(commands).map_err(|_| TooManyCommands)?, }; - Ok(message) - } - - /// Skip fee instructions and jump to the primary asset instruction - fn jump_to(&mut self) -> Result<&Instruction, XcmConverterError> { - ensure!(self.message.len() > 3, XcmConverterError::UnexpectedEndOfXcm); - self.message.get(2).ok_or(XcmConverterError::UnexpectedEndOfXcm) - } - - /// Extract the fee asset item from PayFees(V5) - fn extract_remote_fee(&mut self) -> Result { - use XcmConverterError::*; - let _ = match_expression!(self.next()?, WithdrawAsset(fee), fee) - .ok_or(WithdrawAssetExpected)?; - let fee_asset = - match_expression!(self.next()?, PayFees { asset: fee }, fee).ok_or(InvalidFeeAsset)?; - // Todo: Validate fee asset is WETH - let fee_amount = match fee_asset { - Asset { id: _, fun: Fungible(amount) } => Some(*amount), - _ => None, + // All xcm instructions must be consumed before exit. 
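// Illustrative sketch, not part of this patch: with one ENA (e.g. WETH) and one PNA
// (e.g. WND) in the same transfer, the `commands` vector built above holds two entries,
// one per asset. Field values are placeholders; the `Command` type is the one from
// `snowbridge_core::outbound::v2` used in this file, and `AgentId`/`TokenId` are assumed
// to be the `H256` aliases from `snowbridge_core`.
use snowbridge_core::outbound::v2::Command;
use sp_core::{H160, H256};

fn example_commands(
    agent_id: H256,
    recipient: H160,
    weth_address: H160,
    wnd_token_id: H256,
) -> Vec<Command> {
    vec![
        // Ethereum-native asset: unlocked from the agent's holdings on Ethereum.
        Command::UnlockNativeToken { agent_id, token: weth_address, recipient, amount: 1_000 },
        // Polkadot-native asset: minted as its wrapped representation on Ethereum.
        Command::MintForeignToken { token_id: wnd_token_id, recipient, amount: 2_000 },
    ]
}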
+ if self.next().is_ok() { + return Err(EndOfXcmMessageExpected) } - .ok_or(AssetResolutionFailed)?; - Ok(fee_amount) + + Ok(message) } } diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs index 292fede50fe0..939a3090564e 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs @@ -18,7 +18,7 @@ use xcm::prelude::*; use xcm_builder::{CreateMatcher, ExporterFor, MatchXcm}; use xcm_executor::traits::{ConvertLocation, ExportXcm}; -const TARGET: &'static str = "xcm::ethereum_blob_exporter::v2"; +pub const TARGET: &'static str = "xcm::ethereum_blob_exporter::v2"; pub struct EthereumBlobExporter< UniversalLocation, @@ -61,6 +61,8 @@ where destination: &mut Option, message: &mut Option>, ) -> SendResult { + log::debug!(target: TARGET, "message route through bridge {message:?}."); + let expected_network = EthereumNetwork::get(); let universal_location = UniversalLocation::get(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs index 8ded64c512ec..65b887ebcdc0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs @@ -312,3 +312,159 @@ fn transfer_relay_token() { ); }); } + +#[test] +fn send_weth_and_dot_from_asset_hub_to_ethereum() { + let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); + let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); + let weth_asset_location: Location = + (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + + BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); + + AssetHubWestend::execute_with(|| { + type RuntimeOrigin = ::RuntimeOrigin; + + // Register WETH on AH + assert_ok!(::ForeignAssets::force_create( + RuntimeOrigin::root(), + weth_asset_location.clone().try_into().unwrap(), + assethub_sovereign.clone().into(), + false, + 1, + )); + + assert!(::ForeignAssets::asset_exists( + weth_asset_location.clone().try_into().unwrap(), + )); + }); + + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + // Register WND on BH + assert_ok!(::Balances::force_set_balance( + RuntimeOrigin::root(), + MultiAddress::Id(BridgeHubWestendSender::get()), + INITIAL_FUND * 10, + )); + assert_ok!(::EthereumSystem::register_token( + RuntimeOrigin::root(), + Box::new(VersionedLocation::from(Location::parent())), + AssetMetadata { + name: "wnd".as_bytes().to_vec().try_into().unwrap(), + symbol: "wnd".as_bytes().to_vec().try_into().unwrap(), + decimals: 12, + }, + )); + assert_expected_events!( + BridgeHubWestend, + vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::RegisterToken { .. 
}) => {},] + ); + + // Transfer some WETH to AH + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::SendToken { + token: WETH.into(), + destination: Destination::AccountId32 { id: AssetHubWestendReceiver::get().into() }, + amount: TOKEN_AMOUNT, + fee: XCM_FEE, + }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert([0; 32].into(), message).unwrap(); + let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()).unwrap(); + assert_expected_events!( + BridgeHubWestend, + vec![RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) =>{},] + ); + }); + + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + // Check that AssetHub has issued the foreign asset + assert_expected_events!( + AssetHubWestend, + vec![RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},] + ); + + // Local fee amount(in DOT) should cover + // 1. execution cost on AH + // 2. delivery cost to BH + // 3. execution cost on BH + let local_fee_amount = 200_000_000_000; + // Remote fee amount(in WETH) should cover execution cost on Ethereum + let remote_fee_amount = 4_000_000_000; + + let local_fee_asset = + Asset { id: AssetId(Location::parent()), fun: Fungible(local_fee_amount) }; + let remote_fee_asset = + Asset { id: AssetId(weth_asset_location.clone()), fun: Fungible(remote_fee_amount) }; + let reserve_asset = Asset { + id: AssetId(weth_asset_location.clone()), + fun: Fungible(TOKEN_AMOUNT - remote_fee_amount), + }; + + let weth_asset = + Asset { id: weth_asset_location.clone().into(), fun: Fungible(TOKEN_AMOUNT) }; + let dot_asset = Asset { id: AssetId(Location::parent()), fun: Fungible(TOKEN_AMOUNT) }; + + let assets = vec![weth_asset, dot_asset.clone(), local_fee_asset.clone()]; + let destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]); + + let beneficiary = Location::new( + 0, + [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], + ); + + let xcm_on_bh = Xcm(vec![DepositAsset { assets: Wild(All), beneficiary }]); + + let xcms = VersionedXcm::from(Xcm(vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: local_fee_asset.clone() }, + InitiateTransfer { + destination, + remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( + remote_fee_asset.clone().into(), + ))), + preserve_origin: true, + assets: vec![ + AssetTransferFilter::ReserveWithdraw(Definite(reserve_asset.clone().into())), + AssetTransferFilter::ReserveDeposit(Definite(dot_asset.into())), + ], + remote_xcm: xcm_on_bh, + }, + ])); + + // Send the Weth back to Ethereum + ::PolkadotXcm::execute( + RuntimeOrigin::signed(AssetHubWestendReceiver::get()), + bx!(xcms), + Weight::from(8_000_000_000), + ) + .unwrap(); + }); + + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + // Check that the transfer token back to Ethereum message was queue in the Ethereum + // Outbound Queue + assert_expected_events!( + BridgeHubWestend, + vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageQueued{ .. }) => {},] + ); + let events = BridgeHubWestend::events(); + // Check that the remote fee was credited to the AssetHub sovereign account + assert!( + events.iter().any(|event| matches!( + event, + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) + if *who == assethub_sovereign + )), + "AssetHub sovereign takes remote fee." 
+ ); + }); +} From d5ab77ba2f72798a8e1072f96e361a745edd0c8d Mon Sep 17 00:00:00 2001 From: ron Date: Tue, 19 Nov 2024 21:23:55 +0800 Subject: [PATCH 16/68] Rename to InvalidPendingNonce & Cleanup --- bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index 85745101e2f5..a400193f3d27 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -185,7 +185,7 @@ pub mod pallet { /// Invalid Gateway InvalidGateway, /// No pending nonce - PendingNonceNotExist, + InvalidPendingNonce, } /// Messages to be committed in the current block. This storage value is killed in @@ -274,9 +274,8 @@ pub mod pallet { ensure!(T::GatewayAddress::get() == envelope.gateway, Error::::InvalidGateway); let nonce = envelope.nonce; - ensure!(>::contains_key(nonce), Error::::PendingNonceNotExist); - let order = >::get(nonce).ok_or(Error::::PendingNonceNotExist)?; + let order = >::get(nonce).ok_or(Error::::InvalidPendingNonce)?; // No fee for governance order if !order.fee.is_zero() { From f96a6fc26b144e4f640d53c8c9413321c8d929e1 Mon Sep 17 00:00:00 2001 From: ron Date: Tue, 19 Nov 2024 21:25:29 +0800 Subject: [PATCH 17/68] Improve comment --- bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index a400193f3d27..ceeb13bfe41f 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -184,7 +184,7 @@ pub mod pallet { Verification(VerificationError), /// Invalid Gateway InvalidGateway, - /// No pending nonce + /// Pending nonce does not exist InvalidPendingNonce, } From d4910ea98410a5950778b9920fb77b9d619e20e9 Mon Sep 17 00:00:00 2001 From: ron Date: Wed, 20 Nov 2024 09:58:59 +0800 Subject: [PATCH 18/68] Fix breaking tests --- .../router/src/outbound/v2/convert.rs | 22 ++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs index 9e4766e67c8d..65f8063686a0 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs @@ -137,7 +137,7 @@ where .ok_or(AliasOriginExpected)?; let origin = LocationIdOf::convert_location(&origin_loc).ok_or(InvalidOrigin)?; - let (_, beneficiary) = match_expression!( + let (deposit_assets, beneficiary) = match_expression!( self.next()?, DepositAsset { assets, beneficiary }, (assets, beneficiary) @@ -161,7 +161,13 @@ where let mut commands: Vec = Vec::new(); if let Some(enas) = enas { + ensure!(enas.len() > 0, NoReserveAssets); for ena in enas.clone().inner().iter() { + // Check the the deposit asset filter matches what was reserved. 
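// Illustrative sketch, not part of this patch: the per-asset check below, expressed over a
// whole asset set. A `Wild(All)` deposit filter trivially passes; a `Definite` filter must
// list every reserved asset. Assumes the XCM prelude is in scope.
use xcm::prelude::*;

fn filter_consumes_all(deposit_assets: &AssetFilter, reserved: &Assets) -> bool {
    reserved.inner().iter().all(|asset| deposit_assets.matches(asset))
}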
+ if !deposit_assets.matches(ena) { + return Err(FilterDoesNotConsumeAllAssets) + } + // only fungible asset is allowed let (token, amount) = match ena { Asset { id: AssetId(inner_location), fun: Fungible(amount) } => @@ -188,7 +194,14 @@ where } if let Some(pnas) = pnas { + ensure!(pnas.len() > 0, NoReserveAssets); for pna in pnas.clone().inner().iter() { + // Check the the deposit asset filter matches what was reserved. + if !deposit_assets.matches(pna) { + return Err(FilterDoesNotConsumeAllAssets) + } + + // Only fungible is allowed let (asset_id, amount) = match pna { Asset { id: AssetId(inner_location), fun: Fungible(amount) } => Some((inner_location.clone(), *amount)), @@ -231,7 +244,6 @@ where mod tests { use super::*; use crate::outbound::v2::tests::{BridgedNetwork, MockTokenIdConvert, NonBridgedNetwork}; - use frame_support::parameter_types; use hex_literal::hex; use snowbridge_core::AgentIdOf; use sp_std::default::Default; @@ -493,7 +505,7 @@ mod tests { XcmConverter::::new(&message, network, Default::default()); let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::UnexpectedInstruction)); + assert_eq!(result.err(), Some(XcmConverterError::WithdrawAssetExpected)); } #[test] @@ -559,7 +571,7 @@ mod tests { } #[test] - fn xcm_converter_convert_with_two_assets_yields_too_many_assets() { + fn xcm_converter_convert_with_two_assets_yields() { let network = BridgedNetwork::get(); let token_address_1: [u8; 20] = hex!("1000000000000000000000000000000000000000"); @@ -595,7 +607,7 @@ mod tests { XcmConverter::::new(&message, network, Default::default()); let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::TooManyAssets)); + assert_eq!(result.is_ok(), true); } #[test] From 8e803155c65b256fd5de0c87ff99c674e7b08b2a Mon Sep 17 00:00:00 2001 From: ron Date: Wed, 20 Nov 2024 21:43:31 +0800 Subject: [PATCH 19/68] Remove Inbound-queue V2 completely --- Cargo.lock | 41 -- Cargo.toml | 3 - .../pallets/inbound-queue-v2/Cargo.toml | 93 ----- .../pallets/inbound-queue-v2/README.md | 3 - .../inbound-queue-v2/fixtures/Cargo.toml | 34 -- .../inbound-queue-v2/fixtures/src/lib.rs | 7 - .../fixtures/src/register_token.rs | 97 ----- .../fixtures/src/send_token.rs | 95 ----- .../fixtures/src/send_token_to_penpal.rs | 95 ----- .../inbound-queue-v2/src/benchmarking/mod.rs | 53 --- .../pallets/inbound-queue-v2/src/envelope.rs | 50 --- .../pallets/inbound-queue-v2/src/lib.rs | 378 ------------------ .../pallets/inbound-queue-v2/src/mock.rs | 362 ----------------- .../pallets/inbound-queue-v2/src/test.rs | 245 ------------ .../pallets/inbound-queue-v2/src/weights.rs | 31 -- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 4 - .../src/bridge_to_ethereum_config.rs | 39 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 - .../bridge-hub-westend/src/weights/mod.rs | 1 - .../snowbridge_pallet_inbound_queue_v2.rs | 69 ---- 20 files changed, 1 insertion(+), 1701 deletions(-) delete mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/Cargo.toml delete mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/README.md delete mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/fixtures/Cargo.toml delete mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/lib.rs delete mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/register_token.rs delete mode 100755 bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token.rs delete mode 100755 
bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token_to_penpal.rs delete mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/benchmarking/mod.rs delete mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/envelope.rs delete mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/lib.rs delete mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs delete mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/test.rs delete mode 100644 bridges/snowbridge/pallets/inbound-queue-v2/src/weights.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_inbound_queue_v2.rs diff --git a/Cargo.lock b/Cargo.lock index 8881b6e66d0a..9256c98a3763 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2830,7 +2830,6 @@ dependencies = [ "snowbridge-outbound-queue-runtime-api-v2", "snowbridge-pallet-ethereum-client 0.2.0", "snowbridge-pallet-inbound-queue 0.2.0", - "snowbridge-pallet-inbound-queue-v2", "snowbridge-pallet-outbound-queue 0.2.0", "snowbridge-pallet-outbound-queue-v2", "snowbridge-pallet-system 0.2.0", @@ -25031,46 +25030,6 @@ dependencies = [ "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "snowbridge-pallet-inbound-queue-fixtures-v2" -version = "0.10.0" -dependencies = [ - "hex-literal", - "snowbridge-beacon-primitives 0.2.0", - "snowbridge-core 0.2.0", - "sp-core 28.0.0", - "sp-std 14.0.0", -] - -[[package]] -name = "snowbridge-pallet-inbound-queue-v2" -version = "0.2.0" -dependencies = [ - "alloy-primitives", - "alloy-sol-types", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "hex-literal", - "log", - "pallet-balances 28.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "snowbridge-beacon-primitives 0.2.0", - "snowbridge-core 0.2.0", - "snowbridge-pallet-ethereum-client 0.2.0", - "snowbridge-pallet-inbound-queue-fixtures-v2", - "snowbridge-router-primitives 0.9.0", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-runtime 31.0.1", - "sp-std 14.0.0", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", -] - [[package]] name = "snowbridge-pallet-outbound-queue" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 6ca014833691..bce62ba0c72d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,8 +49,6 @@ members = [ "bridges/snowbridge/pallets/ethereum-client", "bridges/snowbridge/pallets/ethereum-client/fixtures", "bridges/snowbridge/pallets/inbound-queue", - "bridges/snowbridge/pallets/inbound-queue-v2", - "bridges/snowbridge/pallets/inbound-queue-v2/fixtures", "bridges/snowbridge/pallets/inbound-queue/fixtures", "bridges/snowbridge/pallets/outbound-queue", "bridges/snowbridge/pallets/outbound-queue-v2", @@ -1234,7 +1232,6 @@ snowbridge-pallet-ethereum-client-fixtures = { path = "bridges/snowbridge/pallet snowbridge-pallet-inbound-queue = { path = "bridges/snowbridge/pallets/inbound-queue", default-features = false } snowbridge-pallet-inbound-queue-fixtures = { path = "bridges/snowbridge/pallets/inbound-queue/fixtures", default-features = false } snowbridge-pallet-inbound-queue-fixtures-v2 = { path = "bridges/snowbridge/pallets/inbound-queue-v2/fixtures", default-features = false } -snowbridge-pallet-inbound-queue-v2 = { path = "bridges/snowbridge/pallets/inbound-queue-v2", default-features = false } snowbridge-pallet-outbound-queue = { path = "bridges/snowbridge/pallets/outbound-queue", default-features = false } snowbridge-pallet-outbound-queue-v2 = { path = 
"bridges/snowbridge/pallets/outbound-queue-v2", default-features = false } snowbridge-pallet-system = { path = "bridges/snowbridge/pallets/system", default-features = false } diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue-v2/Cargo.toml deleted file mode 100644 index d212b18d2d54..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/Cargo.toml +++ /dev/null @@ -1,93 +0,0 @@ -[package] -name = "snowbridge-pallet-inbound-queue-v2" -description = "Snowbridge Inbound Queue Pallet V2" -version = "0.2.0" -authors = ["Snowfork "] -edition.workspace = true -repository.workspace = true -license = "Apache-2.0" -categories = ["cryptography::cryptocurrencies"] - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -serde = { optional = true, workspace = true, default-features = true } -codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } -hex-literal = { optional = true, workspace = true, default-features = true } -log = { workspace = true } -alloy-primitives = { features = ["rlp"], workspace = true } -alloy-sol-types = { workspace = true } - -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } -pallet-balances = { workspace = true } -sp-core = { workspace = true } -sp-std = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } - -xcm = { workspace = true } -xcm-executor = { workspace = true } - -snowbridge-core = { workspace = true } -snowbridge-router-primitives = { workspace = true } -snowbridge-beacon-primitives = { workspace = true } -snowbridge-pallet-inbound-queue-fixtures-v2 = { optional = true, workspace = true } - -[dev-dependencies] -frame-benchmarking = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -snowbridge-pallet-ethereum-client = { workspace = true, default-features = true } -hex-literal = { workspace = true, default-features = true } - -[features] -default = ["std"] -std = [ - "alloy-primitives/std", - "alloy-sol-types/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "pallet-balances/std", - "scale-info/std", - "serde", - "snowbridge-beacon-primitives/std", - "snowbridge-core/std", - "snowbridge-pallet-inbound-queue-fixtures-v2?/std", - "snowbridge-router-primitives/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - "xcm-executor/std", - "xcm/std", -] -runtime-benchmarks = [ - "frame-benchmarking", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "hex-literal", - "pallet-balances/runtime-benchmarks", - "snowbridge-core/runtime-benchmarks", - "snowbridge-pallet-ethereum-client/runtime-benchmarks", - "snowbridge-pallet-inbound-queue-fixtures-v2/runtime-benchmarks", - "snowbridge-router-primitives/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] -try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "pallet-balances/try-runtime", - "snowbridge-pallet-ethereum-client/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/README.md b/bridges/snowbridge/pallets/inbound-queue-v2/README.md deleted file mode 100644 index cc2f7c636e68..000000000000 --- 
a/bridges/snowbridge/pallets/inbound-queue-v2/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Ethereum Inbound Queue - -Reads messages from Ethereum and sends it to intended destination on Polkadot, using XCM. diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/Cargo.toml deleted file mode 100644 index ea30fdddb553..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "snowbridge-pallet-inbound-queue-fixtures-v2" -description = "Snowbridge Inbound Queue Test Fixtures V2" -version = "0.10.0" -authors = ["Snowfork "] -edition.workspace = true -repository.workspace = true -license = "Apache-2.0" -categories = ["cryptography::cryptocurrencies"] - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -hex-literal = { workspace = true, default-features = true } -sp-core = { workspace = true } -sp-std = { workspace = true } -snowbridge-core = { workspace = true } -snowbridge-beacon-primitives = { workspace = true } - -[features] -default = ["std"] -std = [ - "snowbridge-beacon-primitives/std", - "snowbridge-core/std", - "sp-core/std", - "sp-std/std", -] -runtime-benchmarks = [ - "snowbridge-core/runtime-benchmarks", -] diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/lib.rs deleted file mode 100644 index 00adcdfa186a..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/lib.rs +++ /dev/null @@ -1,7 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -#![cfg_attr(not(feature = "std"), no_std)] - -pub mod register_token; -pub mod send_token; -pub mod send_token_to_penpal; diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/register_token.rs b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/register_token.rs deleted file mode 100644 index 340b2fadfacf..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/register_token.rs +++ /dev/null @@ -1,97 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -// Generated, do not edit! 
-// See ethereum client README.md for instructions to generate - -use hex_literal::hex; -use snowbridge_beacon_primitives::{ - types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, -}; -use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; -use sp_core::U256; -use sp_std::vec; - -pub fn make_register_token_message() -> InboundQueueFixture { - InboundQueueFixture { - message: Message { - event_log: Log { - address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), - topics: vec![ - hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), - hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), - hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), - ], - data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), - }, - proof: Proof { - receipt_proof: (vec![ - hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").to_vec(), - hex!("4a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f").to_vec(), - ], vec![ - hex!("f851a09c01dd6d2d8de951c45af23d3ad00829ce021c04d6c8acbe1612d456ee320d4980808080808080a04a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f8080808080808080").to_vec(), - hex!("f9028c30b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), - ]), - execution_proof: ExecutionProof { - header: BeaconHeader { - slot: 393, - proposer_index: 4, - parent_root: hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), - state_root: hex!("b62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434").into(), - body_root: hex!("04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db").into(), - }, - ancestry_proof: Some(AncestryProof { - header_branch: vec![ - hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), - hex!("fa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3").into(), - 
hex!("cadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d").into(), - hex!("33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c").into(), - hex!("2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf").into(), - hex!("e1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1").into(), - hex!("aa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97").into(), - hex!("160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f").into(), - hex!("f68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535").into(), - hex!("1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc").into(), - hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), - hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), - hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), - ], - finalized_block_root: hex!("751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46").into(), - }), - execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { - parent_hash: hex!("8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2").into(), - fee_recipient: hex!("0000000000000000000000000000000000000000").into(), - state_root: hex!("96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b").into(), - receipts_root: hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").into(), - logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010").into(), - prev_randao: hex!("62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67").into(), - block_number: 393, - gas_limit: 54492273, - gas_used: 199644, - timestamp: 1710552813, - extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), - base_fee_per_gas: U256::from(7u64), - block_hash: hex!("6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131").into(), - transactions_root: hex!("2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d").into(), - withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), - blob_gas_used: 0, - excess_blob_gas: 0, - }), - execution_branch: vec![ - hex!("a6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d").into(), - hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), - hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), - hex!("d3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da").into(), - ], - } - }, - }, - finalized_header: BeaconHeader { - slot: 864, - proposer_index: 4, - parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), - state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), - body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), - }, - block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), - } -} diff --git 
a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token.rs b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token.rs deleted file mode 100755 index 4075febab59d..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token.rs +++ /dev/null @@ -1,95 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -// Generated, do not edit! -// See ethereum client README.md for instructions to generate - -use hex_literal::hex; -use snowbridge_beacon_primitives::{ - types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, -}; -use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; -use sp_core::U256; -use sp_std::vec; - -pub fn make_send_token_message() -> InboundQueueFixture { - InboundQueueFixture { - message: Message { - event_log: Log { - address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), - topics: vec![ - hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), - hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), - hex!("c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26f").into(), - ], - data: hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").into(), - }, - proof: Proof { - receipt_proof: (vec![ - hex!("f9d844c5b79638609ba385b910fec3b5d891c9d7b189f135f0432f33473de915").to_vec(), - ], vec![ - hex!("f90451822080b9044b02f90447018301bcb6b9010000800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000f9033cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000003e8b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000208eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48f9013c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0c8eaf22f2cb
07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26fb8c000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").to_vec(), - ]), - execution_proof: ExecutionProof { - header: BeaconHeader { - slot: 2321, - proposer_index: 5, - parent_root: hex!("2add14727840d3a5ea061e14baa47030bb81380a65999200d119e73b86411d20").into(), - state_root: hex!("d962981467920bb2b7efa4a7a1baf64745582c3250857f49a957c5dae9a0da39").into(), - body_root: hex!("18e3f7f51a350f371ad35d166f2683b42af51d1836b295e4093be08acb0dcb7a").into(), - }, - ancestry_proof: Some(AncestryProof { - header_branch: vec![ - hex!("2add14727840d3a5ea061e14baa47030bb81380a65999200d119e73b86411d20").into(), - hex!("48b2e2f5256906a564e5058698f70e3406765fefd6a2edc064bb5fb88aa2ed0a").into(), - hex!("e5ed7c704e845418219b2fda42cd2f3438ffbe4c4b320935ae49439c6189f7a7").into(), - hex!("4a7ce24526b3f571548ad69679e4e260653a1b3b911a344e7f988f25a5c917a7").into(), - hex!("46fc859727ab0d0e8c344011f7d7a4426ccb537bb51363397e56cc7153f56391").into(), - hex!("f496b6f85a7c6c28a9048f2153550a7c5bcb4b23844ed3b87f6baa646124d8a3").into(), - hex!("7318644e474beb46e595a1875acc7444b937f5208065241911d2a71ac50c2de3").into(), - hex!("5cf48519e518ac64286aef5391319782dd38831d5dcc960578a6b9746d5f8cee").into(), - hex!("efb3e50fa39ca9fe7f76adbfa36fa8451ec2fd5d07b22aaf822137c04cf95a76").into(), - hex!("2206cd50750355ffaef4a67634c21168f2b564c58ffd04f33b0dc7af7dab3291").into(), - hex!("1a4014f6c4fcce9949fba74cb0f9e88df086706f9e05560cc9f0926f8c90e373").into(), - hex!("2df7cc0bcf3060be4132c63da7599c2600d9bbadf37ab001f15629bc2255698e").into(), - hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), - ], - finalized_block_root: hex!("f869dd1c9598043008a3ac2a5d91b3d6c7b0bb3295b3843bc84c083d70b0e604").into(), - }), - execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { - parent_hash: hex!("5d7859883dde1eba6c98b20eac18426134b25da2a89e5e360f3343b15e0e0a31").into(), - fee_recipient: hex!("0000000000000000000000000000000000000000").into(), - state_root: hex!("f8fbebed4c84d46231bd293bb9fbc9340d5c28c284d99fdaddb77238b8960ae2").into(), - receipts_root: hex!("f9d844c5b79638609ba385b910fec3b5d891c9d7b189f135f0432f33473de915").into(), - logs_bloom: hex!("00800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000").into(), - prev_randao: hex!("15533eeb366c6386bea5aeb8f425871928348c092209e4377f2418a6dedd7fd0").into(), - block_number: 2321, - gas_limit: 30000000, - gas_used: 113846, - timestamp: 1710554741, - extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), - base_fee_per_gas: U256::from(7u64), - block_hash: hex!("585a07122a30339b03b6481eae67c2d3de2b6b64f9f426230986519bf0f1bdfe").into(), - transactions_root: 
hex!("09cd60ee2207d804397c81f7b7e1e5d3307712b136e5376623a80317a4bdcd7a").into(), - withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), - blob_gas_used: 0, - excess_blob_gas: 0, - }), - execution_branch: vec![ - hex!("9d419471a9a4719b40e7607781fbe32d9a7766b79805505c78c0c58133496ba2").into(), - hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), - hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), - hex!("bee375b8f1bbe4cd0e783c78026c1829ae72741c2dead5cab05d6834c5e5df65").into(), - ], - } - }, - }, - finalized_header: BeaconHeader { - slot: 4032, - proposer_index: 5, - parent_root: hex!("180aaaec59d38c3860e8af203f01f41c9bc41665f4d17916567c80f6cd23e8a2").into(), - state_root: hex!("3341790429ed3bf894cafa3004351d0b99e08baf6c38eb2a54d58e69fd2d19c6").into(), - body_root: hex!("a221e0c695ac7b7d04ce39b28b954d8a682ecd57961d81b44783527c6295f455").into(), - }, - block_roots_root: hex!("5744385ef06f82e67606f49aa29cd162f2e837a68fb7bd82f1fc6155d9f8640f").into(), - } -} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token_to_penpal.rs b/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token_to_penpal.rs deleted file mode 100755 index 6a951b568ae5..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/fixtures/src/send_token_to_penpal.rs +++ /dev/null @@ -1,95 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -// Generated, do not edit! -// See ethereum client README.md for instructions to generate - -use hex_literal::hex; -use snowbridge_beacon_primitives::{ - types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, -}; -use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; -use sp_core::U256; -use sp_std::vec; - -pub fn make_send_token_to_penpal_message() -> InboundQueueFixture { - InboundQueueFixture { - message: Message { - event_log: Log { - address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), - topics: vec![ - hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), - hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), - hex!("be323bced46a1a49c8da2ab62ad5e974fd50f1dabaeed70b23ca5bcf14bfe4aa").into(), - ], - data: hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").into(), - }, - proof: Proof { - receipt_proof: (vec![ - hex!("106f1eaeac04e469da0020ad5c8a72af66323638bd3f561a3c8236063202c120").to_vec(), - ], vec![ - 
hex!("f90471822080b9046b02f904670183017d9cb9010000800000000000008000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000001008000000000000000000000001000008000040000000000000000000000000008000080000000000200000000000000000000000000100000000000000000010000000000000020000000000000000000000000000003000000000080018000000000000000000040004000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000f9035cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000007d0b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000201cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07cf9015c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0be323bced46a1a49c8da2ab62ad5e974fd50f1dabaeed70b23ca5bcf14bfe4aab8e000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").to_vec(), - ]), - execution_proof: ExecutionProof { - header: BeaconHeader { - slot: 4235, - proposer_index: 4, - parent_root: hex!("1b31e6264c19bcad120e434e0aede892e7d7c8ed80ab505cb593d9a4a16bc566").into(), - state_root: hex!("725f51771a0ecf72c647a283ab814ca088f998eb8c203181496b0b8e01f624fa").into(), - body_root: hex!("6f1c326d192e7e97e21e27b16fd7f000b8fa09b435ff028849927e382302b0ce").into(), - }, - ancestry_proof: Some(AncestryProof { - header_branch: vec![ - hex!("1b31e6264c19bcad120e434e0aede892e7d7c8ed80ab505cb593d9a4a16bc566").into(), - hex!("335eb186c077fa7053ec96dcc5d34502c997713d2d5bc4eb74842118d8cd5a64").into(), - hex!("326607faf2a7dfc9cfc4b6895f8f3d92a659552deb2c8fd1e892ec00c86c734c").into(), - hex!("4e20002125d7b6504df7c774f3f48e018e1e6762d03489149670a8335bba1425").into(), - hex!("e76af5cd61aade5aec8282b6f1df9046efa756b0466bba5e49032410f7739a1b").into(), - hex!("ee4dcd9527712116380cddafd120484a3bedf867225bbb86850b84decf6da730").into(), - hex!("e4687a07421d3150439a2cd2f09f3b468145d75b359a2e5fa88dfbec51725b15").into(), - hex!("38eaa78978e95759aa9b6f8504a8dbe36151f20ae41907e6a1ea165700ceefcd").into(), - hex!("1c1b071ec6f13e15c47d07d1bfbcc9135d6a6c819e68e7e6078a2007418c1a23").into(), - 
hex!("0b3ad7ad193c691c8c4ba1606ad2a90482cd1d033c7db58cfe739d0e20431e9e").into(), - hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), - hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), - hex!("b2ffec5f2c14640305dd941330f09216c53b99d198e93735a400a6d3a4de191f").into(), - ], - finalized_block_root: hex!("08be7a59e947f08cd95c4ef470758730bf9e3b0db0824cb663ea541c39b0e65c").into(), - }), - execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { - parent_hash: hex!("5d1186ae041f58785edb2f01248e95832f2e5e5d6c4eb8f7ff2f58980bfc2de9").into(), - fee_recipient: hex!("0000000000000000000000000000000000000000").into(), - state_root: hex!("2a66114d20e93082c8e9b47c8d401a937013487d757c9c2f3123cf43dc1f656d").into(), - receipts_root: hex!("106f1eaeac04e469da0020ad5c8a72af66323638bd3f561a3c8236063202c120").into(), - logs_bloom: hex!("00800000000000008000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000001008000000000000000000000001000008000040000000000000000000000000008000080000000000200000000000000000000000000100000000000000000010000000000000020000000000000000000000000000003000000000080018000000000000000000040004000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000").into(), - prev_randao: hex!("92e063c7e369b74149fdd1d7132ed2f635a19b9d8bff57637b8ee4736576426e").into(), - block_number: 4235, - gas_limit: 30000000, - gas_used: 97692, - timestamp: 1710556655, - extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), - base_fee_per_gas: U256::from(7u64), - block_hash: hex!("ce24fe3047aa20a8f222cd1d04567c12b39455400d681141962c2130e690953f").into(), - transactions_root: hex!("0c8388731de94771777c60d452077065354d90d6e5088db61fc6a134684195cc").into(), - withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), - blob_gas_used: 0, - excess_blob_gas: 0, - }), - execution_branch: vec![ - hex!("99d397fa180078e66cd3a3b77bcb07553052f4e21d447167f3a406f663b14e6a").into(), - hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), - hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), - hex!("53ddf17147819c1abb918178b0230d965d1bc2c0d389f45e91e54cb1d2d468aa").into(), - ], - } - }, - }, - finalized_header: BeaconHeader { - slot: 4672, - proposer_index: 4, - parent_root: hex!("951233bf9f4bddfb2fa8f54e3bd0c7883779ef850e13e076baae3130dd7732db").into(), - state_root: hex!("4d303003b8cb097cbcc14b0f551ee70dac42de2c1cc2f4acfca7058ca9713291").into(), - body_root: hex!("664d13952b6f369bf4cf3af74d067ec33616eb57ed3a8a403fd5bae4fbf737dd").into(), - }, - block_roots_root: hex!("af71048297c070e6539cf3b9b90ae07d86d363454606bc239734629e6b49b983").into(), - } -} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/benchmarking/mod.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/benchmarking/mod.rs deleted file mode 100644 index 52461a8a7fbe..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/src/benchmarking/mod.rs +++ /dev/null @@ -1,53 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -use super::*; - -use crate::Pallet as InboundQueue; -use frame_benchmarking::v2::*; -use frame_support::assert_ok; -use frame_system::RawOrigin; -use 
snowbridge_pallet_inbound_queue_fixtures_v2::register_token::make_register_token_message; - -#[benchmarks] -mod benchmarks { - use super::*; - - #[benchmark] - fn submit() -> Result<(), BenchmarkError> { - let caller: T::AccountId = whitelisted_caller(); - - let create_message = make_register_token_message(); - - T::Helper::initialize_storage( - create_message.finalized_header, - create_message.block_roots_root, - ); - - let sovereign_account = sibling_sovereign_account::(1000u32.into()); - - let minimum_balance = T::Token::minimum_balance(); - - // So that the receiving account exists - assert_ok!(T::Token::mint_into(&caller, minimum_balance)); - // Fund the sovereign account (parachain sovereign account) so it can transfer a reward - // fee to the caller account - assert_ok!(T::Token::mint_into( - &sovereign_account, - 3_000_000_000_000u128 - .try_into() - .unwrap_or_else(|_| panic!("unable to cast sovereign account balance")), - )); - - #[block] - { - assert_ok!(InboundQueue::::submit( - RawOrigin::Signed(caller.clone()).into(), - create_message.message, - )); - } - - Ok(()) - } - - impl_benchmark_test_suite!(InboundQueue, crate::mock::new_tester(), crate::mock::Test); -} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/envelope.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/envelope.rs deleted file mode 100644 index 31a8992442d8..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/src/envelope.rs +++ /dev/null @@ -1,50 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -use snowbridge_core::{inbound::Log, ChannelId}; - -use sp_core::{RuntimeDebug, H160, H256}; -use sp_std::prelude::*; - -use alloy_primitives::B256; -use alloy_sol_types::{sol, SolEvent}; - -sol! { - event OutboundMessageAccepted(bytes32 indexed channel_id, uint64 nonce, bytes32 indexed message_id, bytes payload); -} - -/// An inbound message that has had its outer envelope decoded. -#[derive(Clone, RuntimeDebug)] -pub struct Envelope { - /// The address of the outbound queue on Ethereum that emitted this message as an event log - pub gateway: H160, - /// The message Channel - pub channel_id: ChannelId, - /// A nonce for enforcing replay protection and ordering. - pub nonce: u64, - /// An id for tracing the message on its route (has no role in bridge consensus) - pub message_id: H256, - /// The inner payload generated from the source application. - pub payload: Vec, -} - -#[derive(Copy, Clone, RuntimeDebug)] -pub struct EnvelopeDecodeError; - -impl TryFrom<&Log> for Envelope { - type Error = EnvelopeDecodeError; - - fn try_from(log: &Log) -> Result { - let topics: Vec = log.topics.iter().map(|x| B256::from_slice(x.as_ref())).collect(); - - let event = OutboundMessageAccepted::decode_log(topics, &log.data, true) - .map_err(|_| EnvelopeDecodeError)?; - - Ok(Self { - gateway: log.address, - channel_id: ChannelId::from(event.channel_id.as_ref()), - nonce: event.nonce, - message_id: H256::from(event.message_id.as_ref()), - payload: event.payload, - }) - } -} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/lib.rs deleted file mode 100644 index c26859dcf5d7..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/src/lib.rs +++ /dev/null @@ -1,378 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -//! Inbound Queue -//! -//! # Overview -//! -//! Receives messages emitted by the Gateway contract on Ethereum, whereupon they are verified, -//! 
translated to XCM, and finally sent to their final destination parachain. -//! -//! The message relayers are rewarded using native currency from the sovereign account of the -//! destination parachain. -//! -//! # Extrinsics -//! -//! ## Governance -//! -//! * [`Call::set_operating_mode`]: Set the operating mode of the pallet. Can be used to disable -//! processing of inbound messages. -//! -//! ## Message Submission -//! -//! * [`Call::submit`]: Submit a message for verification and dispatch the final destination -//! parachain. -#![cfg_attr(not(feature = "std"), no_std)] - -mod envelope; - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; - -pub mod weights; - -#[cfg(test)] -mod mock; - -#[cfg(test)] -mod test; - -use codec::{Decode, DecodeAll, Encode}; -use envelope::Envelope; -use frame_support::{ - traits::{ - fungible::{Inspect, Mutate}, - tokens::{Fortitude, Preservation}, - }, - weights::WeightToFee, - PalletError, -}; -use frame_system::ensure_signed; -use scale_info::TypeInfo; -use sp_core::H160; -use sp_runtime::traits::Zero; -use sp_std::vec; -use xcm::prelude::{ - send_xcm, Junction::*, Location, SendError as XcmpSendError, SendXcm, Xcm, XcmContext, XcmHash, -}; -use xcm_executor::traits::TransactAsset; - -use snowbridge_core::{ - inbound::{Message, VerificationError, Verifier}, - sibling_sovereign_account, BasicOperatingMode, Channel, ChannelId, ParaId, PricingParameters, - StaticLookup, -}; -use snowbridge_router_primitives::inbound::v2::{ - ConvertMessage, ConvertMessageError, VersionedMessage, -}; -use sp_runtime::{traits::Saturating, SaturatedConversion, TokenError}; - -pub use weights::WeightInfo; - -#[cfg(feature = "runtime-benchmarks")] -use snowbridge_beacon_primitives::BeaconHeader; - -type BalanceOf = - <::Token as Inspect<::AccountId>>::Balance; - -pub use pallet::*; - -pub const LOG_TARGET: &str = "snowbridge-inbound-queue"; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - use sp_core::H256; - - #[pallet::pallet] - pub struct Pallet(_); - - #[cfg(feature = "runtime-benchmarks")] - pub trait BenchmarkHelper { - fn initialize_storage(beacon_header: BeaconHeader, block_roots_root: H256); - } - - #[pallet::config] - pub trait Config: frame_system::Config { - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - - /// The verifier for inbound messages from Ethereum - type Verifier: Verifier; - - /// Message relayers are rewarded with this asset - type Token: Mutate + Inspect; - - /// XCM message sender - type XcmSender: SendXcm; - - // Address of the Gateway contract - #[pallet::constant] - type GatewayAddress: Get; - - /// Convert inbound message to XCM - type MessageConverter: ConvertMessage< - AccountId = Self::AccountId, - Balance = BalanceOf, - >; - - /// Lookup a channel descriptor - type ChannelLookup: StaticLookup; - - /// Lookup pricing parameters - type PricingParameters: Get>>; - - type WeightInfo: WeightInfo; - - #[cfg(feature = "runtime-benchmarks")] - type Helper: BenchmarkHelper; - - /// Convert a weight value into deductible balance type. - type WeightToFee: WeightToFee>; - - /// Convert a length value into deductible balance type - type LengthToFee: WeightToFee>; - - /// The upper limit here only used to estimate delivery cost - type MaxMessageSize: Get; - - /// To withdraw and deposit an asset. 
- type AssetTransactor: TransactAsset; - } - - #[pallet::hooks] - impl Hooks> for Pallet {} - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// A message was received from Ethereum - MessageReceived { - /// The message channel - channel_id: ChannelId, - /// The message nonce - nonce: u64, - /// ID of the XCM message which was forwarded to the final destination parachain - message_id: [u8; 32], - /// Fee burned for the teleport - fee_burned: BalanceOf, - }, - /// Set OperatingMode - OperatingModeChanged { mode: BasicOperatingMode }, - } - - #[pallet::error] - pub enum Error { - /// Message came from an invalid outbound channel on the Ethereum side. - InvalidGateway, - /// Message has an invalid envelope. - InvalidEnvelope, - /// Message has an unexpected nonce. - InvalidNonce, - /// Message has an invalid payload. - InvalidPayload, - /// Message channel is invalid - InvalidChannel, - /// The max nonce for the type has been reached - MaxNonceReached, - /// Cannot convert location - InvalidAccountConversion, - /// Pallet is halted - Halted, - /// Message verification error, - Verification(VerificationError), - /// XCMP send failure - Send(SendError), - /// Message conversion error - ConvertMessage(ConvertMessageError), - } - - #[derive(Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo, PalletError)] - pub enum SendError { - NotApplicable, - NotRoutable, - Transport, - DestinationUnsupported, - ExceedsMaxMessageSize, - MissingArgument, - Fees, - } - - impl From for Error { - fn from(e: XcmpSendError) -> Self { - match e { - XcmpSendError::NotApplicable => Error::::Send(SendError::NotApplicable), - XcmpSendError::Unroutable => Error::::Send(SendError::NotRoutable), - XcmpSendError::Transport(_) => Error::::Send(SendError::Transport), - XcmpSendError::DestinationUnsupported => - Error::::Send(SendError::DestinationUnsupported), - XcmpSendError::ExceedsMaxMessageSize => - Error::::Send(SendError::ExceedsMaxMessageSize), - XcmpSendError::MissingArgument => Error::::Send(SendError::MissingArgument), - XcmpSendError::Fees => Error::::Send(SendError::Fees), - } - } - } - - /// The current nonce for each channel - #[pallet::storage] - pub type Nonce = StorageMap<_, Twox64Concat, ChannelId, u64, ValueQuery>; - - /// The current operating mode of the pallet. 
- #[pallet::storage] - #[pallet::getter(fn operating_mode)] - pub type OperatingMode = StorageValue<_, BasicOperatingMode, ValueQuery>; - - #[pallet::call] - impl Pallet { - /// Submit an inbound message originating from the Gateway contract on Ethereum - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::submit())] - pub fn submit(origin: OriginFor, message: Message) -> DispatchResult { - let who = ensure_signed(origin)?; - ensure!(!Self::operating_mode().is_halted(), Error::::Halted); - - // submit message to verifier for verification - T::Verifier::verify(&message.event_log, &message.proof) - .map_err(|e| Error::::Verification(e))?; - - // Decode event log into an Envelope - let envelope = - Envelope::try_from(&message.event_log).map_err(|_| Error::::InvalidEnvelope)?; - - // Verify that the message was submitted from the known Gateway contract - ensure!(T::GatewayAddress::get() == envelope.gateway, Error::::InvalidGateway); - - // Retrieve the registered channel for this message - let channel = - T::ChannelLookup::lookup(envelope.channel_id).ok_or(Error::::InvalidChannel)?; - - // Verify message nonce - >::try_mutate(envelope.channel_id, |nonce| -> DispatchResult { - if *nonce == u64::MAX { - return Err(Error::::MaxNonceReached.into()) - } - if envelope.nonce != nonce.saturating_add(1) { - Err(Error::::InvalidNonce.into()) - } else { - *nonce = nonce.saturating_add(1); - Ok(()) - } - })?; - - // Reward relayer from the sovereign account of the destination parachain, only if funds - // are available - let sovereign_account = sibling_sovereign_account::(channel.para_id); - let delivery_cost = Self::calculate_delivery_cost(message.encode().len() as u32); - let amount = T::Token::reducible_balance( - &sovereign_account, - Preservation::Preserve, - Fortitude::Polite, - ) - .min(delivery_cost); - if !amount.is_zero() { - T::Token::transfer(&sovereign_account, &who, amount, Preservation::Preserve)?; - } - - // Decode payload into `VersionedMessage` - let message = VersionedMessage::decode_all(&mut envelope.payload.as_ref()) - .map_err(|_| Error::::InvalidPayload)?; - - // Decode message into XCM - let (xcm, fee) = Self::do_convert(envelope.message_id, message.clone())?; - - log::info!( - target: LOG_TARGET, - "💫 xcm decoded as {:?} with fee {:?}", - xcm, - fee - ); - - // Burning fees for teleport - Self::burn_fees(channel.para_id, fee)?; - - // Attempt to send XCM to a dest parachain - let message_id = Self::send_xcm(xcm, channel.para_id)?; - - Self::deposit_event(Event::MessageReceived { - channel_id: envelope.channel_id, - nonce: envelope.nonce, - message_id, - fee_burned: fee, - }); - - Ok(()) - } - - /// Halt or resume all pallet operations. May only be called by root. 
- #[pallet::call_index(1)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_operating_mode( - origin: OriginFor, - mode: BasicOperatingMode, - ) -> DispatchResult { - ensure_root(origin)?; - OperatingMode::::set(mode); - Self::deposit_event(Event::OperatingModeChanged { mode }); - Ok(()) - } - } - - impl Pallet { - pub fn do_convert( - message_id: H256, - message: VersionedMessage, - ) -> Result<(Xcm<()>, BalanceOf), Error> { - let (xcm, fee) = T::MessageConverter::convert(message_id, message) - .map_err(|e| Error::::ConvertMessage(e))?; - Ok((xcm, fee)) - } - - pub fn send_xcm(xcm: Xcm<()>, dest: ParaId) -> Result> { - let dest = Location::new(1, [Parachain(dest.into())]); - let (xcm_hash, _) = send_xcm::(dest, xcm).map_err(Error::::from)?; - Ok(xcm_hash) - } - - pub fn calculate_delivery_cost(length: u32) -> BalanceOf { - let weight_fee = T::WeightToFee::weight_to_fee(&T::WeightInfo::submit()); - let len_fee = T::LengthToFee::weight_to_fee(&Weight::from_parts(length as u64, 0)); - weight_fee - .saturating_add(len_fee) - .saturating_add(T::PricingParameters::get().rewards.local) - } - - /// Burn the amount of the fee embedded into the XCM for teleports - pub fn burn_fees(para_id: ParaId, fee: BalanceOf) -> DispatchResult { - let dummy_context = - XcmContext { origin: None, message_id: Default::default(), topic: None }; - let dest = Location::new(1, [Parachain(para_id.into())]); - let fees = (Location::parent(), fee.saturated_into::()).into(); - T::AssetTransactor::can_check_out(&dest, &fees, &dummy_context).map_err(|error| { - log::error!( - target: LOG_TARGET, - "XCM asset check out failed with error {:?}", error - ); - TokenError::FundsUnavailable - })?; - T::AssetTransactor::check_out(&dest, &fees, &dummy_context); - T::AssetTransactor::withdraw_asset(&fees, &dest, None).map_err(|error| { - log::error!( - target: LOG_TARGET, - "XCM asset withdraw failed with error {:?}", error - ); - TokenError::FundsUnavailable - })?; - Ok(()) - } - } - - /// API for accessing the delivery cost of a message - impl Get> for Pallet { - fn get() -> BalanceOf { - // Cost here based on MaxMessagePayloadSize(the worst case) - Self::calculate_delivery_cost(T::MaxMessageSize::get()) - } - } -} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs deleted file mode 100644 index 07e0a5564e09..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs +++ /dev/null @@ -1,362 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -use super::*; - -use frame_support::{derive_impl, parameter_types, traits::ConstU32, weights::IdentityFee}; -use hex_literal::hex; -use snowbridge_beacon_primitives::{ - types::deneb, BeaconHeader, ExecutionProof, Fork, ForkVersions, VersionedExecutionPayloadHeader, -}; -use snowbridge_core::{ - gwei, - inbound::{Log, Proof, VerificationError}, - meth, Channel, ChannelId, PricingParameters, Rewards, StaticLookup, TokenId, -}; -use snowbridge_router_primitives::inbound::v2::MessageToXcm; -use sp_core::{H160, H256}; -use sp_runtime::{ - traits::{IdentifyAccount, IdentityLookup, MaybeEquivalence, Verify}, - BuildStorage, FixedU128, MultiSignature, -}; -use sp_std::{convert::From, default::Default}; -use xcm::prelude::*; -use xcm_executor::AssetsInHolding; - -use crate::{self as inbound_queue}; - -use xcm::latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; - -type Block = frame_system::mocking::MockBlock; - 
-frame_support::construct_runtime!( - pub enum Test - { - System: frame_system::{Pallet, Call, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - EthereumBeaconClient: snowbridge_pallet_ethereum_client::{Pallet, Call, Storage, Event}, - InboundQueue: inbound_queue::{Pallet, Call, Storage, Event}, - } -); - -pub type Signature = MultiSignature; -pub type AccountId = <::Signer as IdentifyAccount>::AccountId; - -type Balance = u128; - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Test { - type AccountId = AccountId; - type Lookup = IdentityLookup; - type AccountData = pallet_balances::AccountData; - type Block = Block; -} - -parameter_types! { - pub const ExistentialDeposit: u128 = 1; -} - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] -impl pallet_balances::Config for Test { - type Balance = Balance; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; -} - -parameter_types! { - pub const ChainForkVersions: ForkVersions = ForkVersions{ - genesis: Fork { - version: [0, 0, 0, 1], // 0x00000001 - epoch: 0, - }, - altair: Fork { - version: [1, 0, 0, 1], // 0x01000001 - epoch: 0, - }, - bellatrix: Fork { - version: [2, 0, 0, 1], // 0x02000001 - epoch: 0, - }, - capella: Fork { - version: [3, 0, 0, 1], // 0x03000001 - epoch: 0, - }, - deneb: Fork { - version: [4, 0, 0, 1], // 0x04000001 - epoch: 4294967295, - } - }; -} - -impl snowbridge_pallet_ethereum_client::Config for Test { - type RuntimeEvent = RuntimeEvent; - type ForkVersions = ChainForkVersions; - type FreeHeadersInterval = ConstU32<32>; - type WeightInfo = (); -} - -// Mock verifier -pub struct MockVerifier; - -impl Verifier for MockVerifier { - fn verify(_: &Log, _: &Proof) -> Result<(), VerificationError> { - Ok(()) - } -} - -const GATEWAY_ADDRESS: [u8; 20] = hex!["eda338e4dc46038493b885327842fd3e301cab39"]; - -parameter_types! { - pub const EthereumNetwork: xcm::v3::NetworkId = xcm::v3::NetworkId::Ethereum { chain_id: 11155111 }; - pub const GatewayAddress: H160 = H160(GATEWAY_ADDRESS); - pub const CreateAssetCall: [u8;2] = [53, 0]; - pub const CreateAssetExecutionFee: u128 = 2_000_000_000; - pub const CreateAssetDeposit: u128 = 100_000_000_000; - pub const SendTokenExecutionFee: u128 = 1_000_000_000; - pub const InitialFund: u128 = 1_000_000_000_000; - pub const InboundQueuePalletInstance: u8 = 80; - pub UniversalLocation: InteriorLocation = - [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1002)].into(); - pub AssetHubFromEthereum: Location = Location::new(1,[GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)),Parachain(1000)]); -} - -#[cfg(feature = "runtime-benchmarks")] -impl BenchmarkHelper for Test { - // not implemented since the MockVerifier is used for tests - fn initialize_storage(_: BeaconHeader, _: H256) {} -} - -// Mock XCM sender that always succeeds -pub struct MockXcmSender; - -impl SendXcm for MockXcmSender { - type Ticket = Xcm<()>; - - fn validate( - dest: &mut Option, - xcm: &mut Option>, - ) -> SendResult { - if let Some(location) = dest { - match location.unpack() { - (_, [Parachain(1001)]) => return Err(XcmpSendError::NotApplicable), - _ => Ok((xcm.clone().unwrap(), Assets::default())), - } - } else { - Ok((xcm.clone().unwrap(), Assets::default())) - } - } - - fn deliver(xcm: Self::Ticket) -> core::result::Result { - let hash = xcm.using_encoded(sp_io::hashing::blake2_256); - Ok(hash) - } -} - -parameter_types! 
{ - pub const OwnParaId: ParaId = ParaId::new(1013); - pub Parameters: PricingParameters = PricingParameters { - exchange_rate: FixedU128::from_rational(1, 400), - fee_per_gas: gwei(20), - rewards: Rewards { local: DOT, remote: meth(1) }, - multiplier: FixedU128::from_rational(1, 1), - }; -} - -pub const DOT: u128 = 10_000_000_000; - -pub struct MockChannelLookup; -impl StaticLookup for MockChannelLookup { - type Source = ChannelId; - type Target = Channel; - - fn lookup(channel_id: Self::Source) -> Option { - if channel_id != - hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into() - { - return None - } - Some(Channel { agent_id: H256::zero(), para_id: ASSET_HUB_PARAID.into() }) - } -} - -pub struct SuccessfulTransactor; -impl TransactAsset for SuccessfulTransactor { - fn can_check_in(_origin: &Location, _what: &Asset, _context: &XcmContext) -> XcmResult { - Ok(()) - } - - fn can_check_out(_dest: &Location, _what: &Asset, _context: &XcmContext) -> XcmResult { - Ok(()) - } - - fn deposit_asset(_what: &Asset, _who: &Location, _context: Option<&XcmContext>) -> XcmResult { - Ok(()) - } - - fn withdraw_asset( - _what: &Asset, - _who: &Location, - _context: Option<&XcmContext>, - ) -> Result { - Ok(AssetsInHolding::default()) - } - - fn internal_transfer_asset( - _what: &Asset, - _from: &Location, - _to: &Location, - _context: &XcmContext, - ) -> Result { - Ok(AssetsInHolding::default()) - } -} - -pub struct MockTokenIdConvert; -impl MaybeEquivalence for MockTokenIdConvert { - fn convert(_id: &TokenId) -> Option { - Some(Location::parent()) - } - fn convert_back(_loc: &Location) -> Option { - None - } -} - -impl inbound_queue::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Verifier = MockVerifier; - type Token = Balances; - type XcmSender = MockXcmSender; - type WeightInfo = (); - type GatewayAddress = GatewayAddress; - type MessageConverter = MessageToXcm< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - MockTokenIdConvert, - UniversalLocation, - AssetHubFromEthereum, - >; - type PricingParameters = Parameters; - type ChannelLookup = MockChannelLookup; - #[cfg(feature = "runtime-benchmarks")] - type Helper = Test; - type WeightToFee = IdentityFee; - type LengthToFee = IdentityFee; - type MaxMessageSize = ConstU32<1024>; - type AssetTransactor = SuccessfulTransactor; -} - -pub fn last_events(n: usize) -> Vec { - frame_system::Pallet::::events() - .into_iter() - .rev() - .take(n) - .rev() - .map(|e| e.event) - .collect() -} - -pub fn expect_events(e: Vec) { - assert_eq!(last_events(e.len()), e); -} - -pub fn setup() { - System::set_block_number(1); - Balances::mint_into( - &sibling_sovereign_account::(ASSET_HUB_PARAID.into()), - InitialFund::get(), - ) - .unwrap(); - Balances::mint_into( - &sibling_sovereign_account::(TEMPLATE_PARAID.into()), - InitialFund::get(), - ) - .unwrap(); -} - -pub fn new_tester() -> sp_io::TestExternalities { - let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext: sp_io::TestExternalities = storage.into(); - ext.execute_with(setup); - ext -} - -// Generated from smoketests: -// cd smoketests -// ./make-bindings -// cargo test --test register_token -- --nocapture -pub fn mock_event_log() -> Log { - Log { - // gateway address - address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), - topics: vec![ - hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), - // channel id - 
hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), - // message id - hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), - ], - // Nonce + Payload - data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e000f000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), - } -} - -pub fn mock_event_log_invalid_channel() -> Log { - Log { - address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), - topics: vec![ - hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), - // invalid channel id - hex!("0000000000000000000000000000000000000000000000000000000000000000").into(), - hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), - ], - data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000001e000f000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d0000").into(), - } -} - -pub fn mock_event_log_invalid_gateway() -> Log { - Log { - // gateway address - address: H160::zero(), - topics: vec![ - hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), - // channel id - hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), - // message id - hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), - ], - // Nonce + Payload - data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000001e000f000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d0000").into(), - } -} - -pub fn mock_execution_proof() -> ExecutionProof { - ExecutionProof { - header: BeaconHeader::default(), - ancestry_proof: None, - execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { - parent_hash: Default::default(), - fee_recipient: Default::default(), - state_root: Default::default(), - receipts_root: Default::default(), - logs_bloom: vec![], - prev_randao: Default::default(), - block_number: 0, - gas_limit: 0, - gas_used: 0, - timestamp: 0, - extra_data: vec![], - base_fee_per_gas: Default::default(), - block_hash: Default::default(), - transactions_root: Default::default(), - withdrawals_root: Default::default(), - blob_gas_used: 0, - excess_blob_gas: 0, - }), - execution_branch: vec![], - } -} - -pub const ASSET_HUB_PARAID: u32 = 1000u32; -pub const TEMPLATE_PARAID: u32 = 1001u32; diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/test.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/test.rs deleted file mode 100644 index 44f6c0ebc658..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/src/test.rs +++ /dev/null @@ -1,245 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -use super::*; - -use frame_support::{assert_noop, assert_ok}; -use hex_literal::hex; -use snowbridge_core::{inbound::Proof, ChannelId}; -use sp_keyring::AccountKeyring as Keyring; -use sp_runtime::DispatchError; -use sp_std::convert::From; - -use crate::{Error, Event as InboundQueueEvent}; - -use crate::mock::*; - -#[test] -fn test_submit_happy_path() { - 
new_tester().execute_with(|| { - let relayer: AccountId = Keyring::Bob.into(); - let channel_sovereign = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); - - let origin = RuntimeOrigin::signed(relayer.clone()); - - // Submit message - let message = Message { - event_log: mock_event_log(), - proof: Proof { - receipt_proof: Default::default(), - execution_proof: mock_execution_proof(), - }, - }; - - let initial_fund = InitialFund::get(); - assert_eq!(Balances::balance(&relayer), 0); - assert_eq!(Balances::balance(&channel_sovereign), initial_fund); - - assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); - expect_events(vec![InboundQueueEvent::MessageReceived { - channel_id: hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539") - .into(), - nonce: 1, - message_id: [ - 183, 243, 1, 130, 170, 254, 104, 45, 116, 181, 146, 237, 14, 139, 138, 89, 43, 166, - 182, 24, 163, 222, 112, 238, 215, 83, 21, 160, 24, 88, 112, 9, - ], - fee_burned: 110000000000, - } - .into()]); - - let delivery_cost = InboundQueue::calculate_delivery_cost(message.encode().len() as u32); - assert!( - Parameters::get().rewards.local < delivery_cost, - "delivery cost exceeds pure reward" - ); - - assert_eq!(Balances::balance(&relayer), delivery_cost, "relayer was rewarded"); - assert!( - Balances::balance(&channel_sovereign) <= initial_fund - delivery_cost, - "sovereign account paid reward" - ); - }); -} - -#[test] -fn test_submit_xcm_invalid_channel() { - new_tester().execute_with(|| { - let relayer: AccountId = Keyring::Bob.into(); - let origin = RuntimeOrigin::signed(relayer); - - // Deposit funds into sovereign account of parachain 1001 - let sovereign_account = sibling_sovereign_account::(TEMPLATE_PARAID.into()); - println!("account: {}", sovereign_account); - let _ = Balances::mint_into(&sovereign_account, 10000); - - // Submit message - let message = Message { - event_log: mock_event_log_invalid_channel(), - proof: Proof { - receipt_proof: Default::default(), - execution_proof: mock_execution_proof(), - }, - }; - assert_noop!( - InboundQueue::submit(origin.clone(), message.clone()), - Error::::InvalidChannel, - ); - }); -} - -#[test] -fn test_submit_with_invalid_gateway() { - new_tester().execute_with(|| { - let relayer: AccountId = Keyring::Bob.into(); - let origin = RuntimeOrigin::signed(relayer); - - // Deposit funds into sovereign account of Asset Hub (Statemint) - let sovereign_account = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); - let _ = Balances::mint_into(&sovereign_account, 10000); - - // Submit message - let message = Message { - event_log: mock_event_log_invalid_gateway(), - proof: Proof { - receipt_proof: Default::default(), - execution_proof: mock_execution_proof(), - }, - }; - assert_noop!( - InboundQueue::submit(origin.clone(), message.clone()), - Error::::InvalidGateway - ); - }); -} - -#[test] -fn test_submit_with_invalid_nonce() { - new_tester().execute_with(|| { - let relayer: AccountId = Keyring::Bob.into(); - let origin = RuntimeOrigin::signed(relayer); - - // Deposit funds into sovereign account of Asset Hub (Statemint) - let sovereign_account = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); - let _ = Balances::mint_into(&sovereign_account, 10000); - - // Submit message - let message = Message { - event_log: mock_event_log(), - proof: Proof { - receipt_proof: Default::default(), - execution_proof: mock_execution_proof(), - }, - }; - assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); - - let nonce: u64 = 
>::get(ChannelId::from(hex!( - "c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539" - ))); - assert_eq!(nonce, 1); - - // Submit the same again - assert_noop!( - InboundQueue::submit(origin.clone(), message.clone()), - Error::::InvalidNonce - ); - }); -} - -#[test] -fn test_submit_no_funds_to_reward_relayers_just_ignore() { - new_tester().execute_with(|| { - let relayer: AccountId = Keyring::Bob.into(); - let origin = RuntimeOrigin::signed(relayer); - - // Reset balance of sovereign_account to zero first - let sovereign_account = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); - Balances::set_balance(&sovereign_account, 0); - - // Submit message - let message = Message { - event_log: mock_event_log(), - proof: Proof { - receipt_proof: Default::default(), - execution_proof: mock_execution_proof(), - }, - }; - // Check submit successfully in case no funds available - assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); - }); -} - -#[test] -fn test_set_operating_mode() { - new_tester().execute_with(|| { - let relayer: AccountId = Keyring::Bob.into(); - let origin = RuntimeOrigin::signed(relayer); - let message = Message { - event_log: mock_event_log(), - proof: Proof { - receipt_proof: Default::default(), - execution_proof: mock_execution_proof(), - }, - }; - - assert_ok!(InboundQueue::set_operating_mode( - RuntimeOrigin::root(), - snowbridge_core::BasicOperatingMode::Halted - )); - - assert_noop!(InboundQueue::submit(origin, message), Error::::Halted); - }); -} - -#[test] -fn test_set_operating_mode_root_only() { - new_tester().execute_with(|| { - assert_noop!( - InboundQueue::set_operating_mode( - RuntimeOrigin::signed(Keyring::Bob.into()), - snowbridge_core::BasicOperatingMode::Halted - ), - DispatchError::BadOrigin - ); - }); -} - -#[test] -fn test_submit_no_funds_to_reward_relayers_and_ed_preserved() { - new_tester().execute_with(|| { - let relayer: AccountId = Keyring::Bob.into(); - let origin = RuntimeOrigin::signed(relayer); - - // Reset balance of sovereign account to (ED+1) first - let sovereign_account = sibling_sovereign_account::(ASSET_HUB_PARAID.into()); - Balances::set_balance(&sovereign_account, ExistentialDeposit::get() + 1); - - // Submit message successfully - let message = Message { - event_log: mock_event_log(), - proof: Proof { - receipt_proof: Default::default(), - execution_proof: mock_execution_proof(), - }, - }; - assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); - - // Check balance of sovereign account to ED - let amount = Balances::balance(&sovereign_account); - assert_eq!(amount, ExistentialDeposit::get()); - - // Submit another message with nonce set as 2 - let mut event_log = mock_event_log(); - event_log.data[31] = 2; - let message = Message { - event_log, - proof: Proof { - receipt_proof: Default::default(), - execution_proof: mock_execution_proof(), - }, - }; - assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); - // Check balance of sovereign account as ED does not change - let amount = Balances::balance(&sovereign_account); - assert_eq!(amount, ExistentialDeposit::get()); - }); -} diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/weights.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/weights.rs deleted file mode 100644 index c2c665f40d9e..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue-v2/src/weights.rs +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -//! 
Autogenerated weights for `snowbridge_inbound_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `macbook pro 14 m2`, CPU: `m2-arm64` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for ethereum_beacon_client. -pub trait WeightInfo { - fn submit() -> Weight; -} - -// For backwards compatibility and tests -impl WeightInfo for () { - fn submit() -> Weight { - Weight::from_parts(70_000_000, 0) - .saturating_add(Weight::from_parts(0, 3601)) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index ed94e442c2f7..4127ad68424b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -114,7 +114,6 @@ snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-router-primitives = { workspace = true } snowbridge-runtime-common = { workspace = true } -snowbridge-pallet-inbound-queue-v2 = { workspace = true } snowbridge-pallet-outbound-queue-v2 = { workspace = true } snowbridge-outbound-queue-runtime-api-v2 = { workspace = true } snowbridge-merkle-tree = { workspace = true } @@ -193,7 +192,6 @@ std = [ "snowbridge-outbound-queue-runtime-api-v2/std", "snowbridge-outbound-queue-runtime-api/std", "snowbridge-pallet-ethereum-client/std", - "snowbridge-pallet-inbound-queue-v2/std", "snowbridge-pallet-inbound-queue/std", "snowbridge-pallet-outbound-queue-v2/std", "snowbridge-pallet-outbound-queue/std", @@ -256,7 +254,6 @@ runtime-benchmarks = [ "polkadot-runtime-common/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", "snowbridge-pallet-ethereum-client/runtime-benchmarks", - "snowbridge-pallet-inbound-queue-v2/runtime-benchmarks", "snowbridge-pallet-inbound-queue/runtime-benchmarks", "snowbridge-pallet-outbound-queue-v2/runtime-benchmarks", "snowbridge-pallet-outbound-queue/runtime-benchmarks", @@ -298,7 +295,6 @@ try-runtime = [ "parachain-info/try-runtime", "polkadot-runtime-common/try-runtime", "snowbridge-pallet-ethereum-client/try-runtime", - "snowbridge-pallet-inbound-queue-v2/try-runtime", "snowbridge-pallet-inbound-queue/try-runtime", "snowbridge-pallet-outbound-queue-v2/try-runtime", "snowbridge-pallet-outbound-queue/try-runtime", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index e25caed95a02..633248ca6efb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -26,7 +26,7 @@ use parachains_common::{AccountId, Balance}; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{gwei, meth, AllowSiblingsOnly, PricingParameters, 
Rewards}; use snowbridge_router_primitives::{ - inbound::{v1::MessageToXcm, v2::MessageToXcm as MessageToXcmV2}, + inbound::v1::MessageToXcm, outbound::{v1::EthereumBlobExporter, v2::EthereumBlobExporter as EthereumBlobExporterV2}, }; use sp_core::H160; @@ -113,36 +113,6 @@ impl snowbridge_pallet_inbound_queue::Config for Runtime { type AssetTransactor = ::AssetTransactor; } -impl snowbridge_pallet_inbound_queue_v2::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Verifier = snowbridge_pallet_ethereum_client::Pallet; - type Token = Balances; - #[cfg(not(feature = "runtime-benchmarks"))] - type XcmSender = XcmRouter; - #[cfg(feature = "runtime-benchmarks")] - type XcmSender = DoNothingRouter; - type ChannelLookup = EthereumSystem; - type GatewayAddress = EthereumGatewayAddress; - #[cfg(feature = "runtime-benchmarks")] - type Helper = Runtime; - type MessageConverter = MessageToXcmV2< - CreateAssetCall, - CreateAssetDeposit, - ConstU8, - AccountId, - Balance, - EthereumSystem, - EthereumUniversalLocation, - AssetHubFromEthereum, - >; - type WeightToFee = WeightToFee; - type LengthToFee = ConstantMultiplier; - type MaxMessageSize = ConstU32<2048>; - type WeightInfo = crate::weights::snowbridge_pallet_inbound_queue_v2::WeightInfo; - type PricingParameters = EthereumSystem; - type AssetTransactor = ::AssetTransactor; -} - impl snowbridge_pallet_outbound_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Hashing = Keccak256; @@ -257,7 +227,6 @@ pub mod benchmark_helpers { use codec::Encode; use snowbridge_beacon_primitives::BeaconHeader; use snowbridge_pallet_inbound_queue::BenchmarkHelper; - use snowbridge_pallet_inbound_queue_v2::BenchmarkHelper as BenchmarkHelperV2; use sp_core::H256; use xcm::latest::{Assets, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}; @@ -267,12 +236,6 @@ pub mod benchmark_helpers { } } - impl BenchmarkHelperV2 for Runtime { - fn initialize_storage(beacon_header: BeaconHeader, block_roots_root: H256) { - EthereumBeaconClient::store_finalized_header(beacon_header, block_roots_root).unwrap(); - } - } - pub struct DoNothingRouter; impl SendXcm for DoNothingRouter { type Ticket = Xcm<()>; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 0ac65c20a86c..28e79ce2f5fe 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -567,7 +567,6 @@ construct_runtime!( EthereumOutboundQueue: snowbridge_pallet_outbound_queue = 81, EthereumBeaconClient: snowbridge_pallet_ethereum_client = 82, EthereumSystem: snowbridge_pallet_system = 83, - EthereumInboundQueueV2: snowbridge_pallet_inbound_queue_v2 = 84, EthereumOutboundQueueV2: snowbridge_pallet_outbound_queue_v2 = 85, // Message Queue. 
Importantly, is registered last so that messages are processed after @@ -627,7 +626,6 @@ mod benches { [snowbridge_pallet_outbound_queue, EthereumOutboundQueue] [snowbridge_pallet_system, EthereumSystem] [snowbridge_pallet_ethereum_client, EthereumBeaconClient] - [snowbridge_pallet_inbound_queue_v2, EthereumInboundQueueV2] [snowbridge_pallet_outbound_queue_v2, EthereumOutboundQueueV2] ); } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs index cba49ab186c5..27746c287933 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs @@ -47,7 +47,6 @@ pub mod xcm; pub mod snowbridge_pallet_ethereum_client; pub mod snowbridge_pallet_inbound_queue; -pub mod snowbridge_pallet_inbound_queue_v2; pub mod snowbridge_pallet_outbound_queue; pub mod snowbridge_pallet_outbound_queue_v2; pub mod snowbridge_pallet_system; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_inbound_queue_v2.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_inbound_queue_v2.rs deleted file mode 100644 index 7844816f903f..000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/snowbridge_pallet_inbound_queue_v2.rs +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `snowbridge_pallet_inbound_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `macbook pro 14 m2`, CPU: `m2-arm64` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// target/release/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-rococo-dev -// --pallet=snowbridge_inbound_queue -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --steps -// 50 -// --repeat -// 20 -// --output -// ./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_inbound_queue.rs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `snowbridge_pallet_inbound_queue`. 
-pub struct WeightInfo(PhantomData); -impl snowbridge_pallet_inbound_queue_v2::WeightInfo for WeightInfo { - /// Storage: EthereumInboundQueue PalletOperatingMode (r:1 w:0) - /// Proof: EthereumInboundQueue PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient ExecutionHeaders (r:1 w:0) - /// Proof: EthereumBeaconClient ExecutionHeaders (max_values: None, max_size: Some(136), added: 2611, mode: MaxEncodedLen) - /// Storage: EthereumInboundQueue Nonce (r:1 w:1) - /// Proof: EthereumInboundQueue Nonce (max_values: None, max_size: Some(20), added: 2495, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn submit() -> Weight { - // Proof Size summary in bytes: - // Measured: `800` - // Estimated: `7200` - // Minimum execution time: 200_000_000 picoseconds. - Weight::from_parts(200_000_000, 0) - .saturating_add(Weight::from_parts(0, 7200)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) - } -} From e8951e4798366d330fe2ed3a041af1f5b9d76897 Mon Sep 17 00:00:00 2001 From: ron Date: Wed, 20 Nov 2024 22:09:59 +0800 Subject: [PATCH 20/68] Reorganize code layout --- .../primitives/router/src/outbound/{v1.rs => v1/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename bridges/snowbridge/primitives/router/src/outbound/{v1.rs => v1/mod.rs} (100%) diff --git a/bridges/snowbridge/primitives/router/src/outbound/v1.rs b/bridges/snowbridge/primitives/router/src/outbound/v1/mod.rs similarity index 100% rename from bridges/snowbridge/primitives/router/src/outbound/v1.rs rename to bridges/snowbridge/primitives/router/src/outbound/v1/mod.rs From 2a413991fa35c8b5c337314f81e61671b8274162 Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 22 Nov 2024 15:50:48 +0800 Subject: [PATCH 21/68] For Transact --- .../pallets/system/src/benchmarking.rs | 65 +---- bridges/snowbridge/pallets/system/src/lib.rs | 246 +++--------------- bridges/snowbridge/primitives/core/src/lib.rs | 12 + .../primitives/core/src/registry.rs | 13 + .../primitives/core/src/transact.rs | 36 +++ .../router/src/outbound/v2/convert.rs | 34 +++ .../src/tests/snowbridge_v2.rs | 125 ++++++++- .../src/bridge_to_ethereum_config.rs | 4 +- 8 files changed, 254 insertions(+), 281 deletions(-) create mode 100644 bridges/snowbridge/primitives/core/src/registry.rs create mode 100644 bridges/snowbridge/primitives/core/src/transact.rs diff --git a/bridges/snowbridge/pallets/system/src/benchmarking.rs b/bridges/snowbridge/pallets/system/src/benchmarking.rs index 939de9d40d13..e4cb079419d0 100644 --- a/bridges/snowbridge/pallets/system/src/benchmarking.rs +++ b/bridges/snowbridge/pallets/system/src/benchmarking.rs @@ -60,74 +60,12 @@ mod benchmarks { Ok(()) } - #[benchmark] - fn create_agent() -> Result<(), BenchmarkError> { - let origin_para_id = 2000; - let origin_location = Location::new(1, [Parachain(origin_para_id)]); - let origin = T::Helper::make_xcm_origin(origin_location); - fund_sovereign_account::(origin_para_id.into())?; - - #[extrinsic_call] - _(origin as T::RuntimeOrigin); - - Ok(()) - } - - #[benchmark] - fn create_channel() -> Result<(), BenchmarkError> { - let origin_para_id = 2000; - let origin_location = Location::new(1, [Parachain(origin_para_id)]); - let origin = T::Helper::make_xcm_origin(origin_location); - fund_sovereign_account::(origin_para_id.into())?; - -
SnowbridgeControl::::create_agent(origin.clone())?; - - #[extrinsic_call] - _(origin as T::RuntimeOrigin, OperatingMode::Normal); - - Ok(()) - } - - #[benchmark] - fn update_channel() -> Result<(), BenchmarkError> { - let origin_para_id = 2000; - let origin_location = Location::new(1, [Parachain(origin_para_id)]); - let origin = T::Helper::make_xcm_origin(origin_location); - fund_sovereign_account::(origin_para_id.into())?; - SnowbridgeControl::::create_agent(origin.clone())?; - SnowbridgeControl::::create_channel(origin.clone(), OperatingMode::Normal)?; - - #[extrinsic_call] - _(origin as T::RuntimeOrigin, OperatingMode::RejectingOutboundMessages); - - Ok(()) - } - - #[benchmark] - fn force_update_channel() -> Result<(), BenchmarkError> { - let origin_para_id = 2000; - let origin_location = Location::new(1, [Parachain(origin_para_id)]); - let origin = T::Helper::make_xcm_origin(origin_location); - let channel_id: ChannelId = ParaId::from(origin_para_id).into(); - - fund_sovereign_account::(origin_para_id.into())?; - SnowbridgeControl::::create_agent(origin.clone())?; - SnowbridgeControl::::create_channel(origin.clone(), OperatingMode::Normal)?; - - #[extrinsic_call] - _(RawOrigin::Root, channel_id, OperatingMode::RejectingOutboundMessages); - - Ok(()) - } - #[benchmark] fn transfer_native_from_agent() -> Result<(), BenchmarkError> { let origin_para_id = 2000; let origin_location = Location::new(1, [Parachain(origin_para_id)]); let origin = T::Helper::make_xcm_origin(origin_location); fund_sovereign_account::(origin_para_id.into())?; - SnowbridgeControl::::create_agent(origin.clone())?; - SnowbridgeControl::::create_channel(origin.clone(), OperatingMode::Normal)?; #[extrinsic_call] _(origin as T::RuntimeOrigin, H160::default(), 1); @@ -139,9 +77,8 @@ mod benchmarks { fn force_transfer_native_from_agent() -> Result<(), BenchmarkError> { let origin_para_id = 2000; let origin_location = Location::new(1, [Parachain(origin_para_id)]); - let origin = T::Helper::make_xcm_origin(origin_location.clone()); + let _origin = T::Helper::make_xcm_origin(origin_location.clone()); fund_sovereign_account::(origin_para_id.into())?; - SnowbridgeControl::::create_agent(origin.clone())?; let versioned_location: VersionedLocation = origin_location.into(); diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 8a5b0a6edbf9..da927061da65 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -15,26 +15,16 @@ //! The `create_agent` extrinsic should be called via an XCM `Transact` instruction from the sibling //! parachain. //! -//! ## Channels -//! -//! Each sibling parachain has its own dedicated messaging channel for sending and receiving -//! messages. As a prerequisite to creating a channel, the sibling should have already created -//! an agent using the `create_agent` extrinsic. -//! -//! * [`Call::create_channel`]: Create channel for a sibling -//! * [`Call::update_channel`]: Update a channel for a sibling -//! //! ## Governance //! //! Only Polkadot governance itself can call these extrinsics. Delivery fees are waived. //! //! * [`Call::upgrade`]`: Upgrade the gateway contract //! * [`Call::set_operating_mode`]: Update the operating mode of the gateway contract -//! * [`Call::force_update_channel`]: Allow root to update a channel for a sibling //! * [`Call::force_transfer_native_from_agent`]: Allow root to withdraw ether from an agent //! //! 
Typically, Polkadot governance will use the `force_transfer_native_from_agent` and -//! `force_update_channel` and extrinsics to manage agents and channels for system parachains. +//! `force_update_channel` and extrinsics to manage agents for system parachains. //! //! ## Polkadot-native tokens on Ethereum //! @@ -73,6 +63,7 @@ use snowbridge_core::{ v2::{Command as CommandV2, Message as MessageV2, SendMessage as SendMessageV2}, OperatingMode, SendError, }, + registry::{AgentRegistry, TokenRegistry}, sibling_sovereign_account, AgentId, AssetMetadata, Channel, ChannelId, ParaId, PricingParameters as PricingParametersRecord, TokenId, TokenIdOf, PRIMARY_GOVERNANCE_CHANNEL, SECONDARY_GOVERNANCE_CHANNEL, @@ -102,8 +93,8 @@ fn ensure_sibling(location: &Location) -> Result<(ParaId, H256), DispatchErro where T: Config, { - match location.unpack() { - (1, [Parachain(para_id)]) => { + match (location.parents, location.first_interior()) { + (1, Some(Parachain(para_id))) => { let agent_id = agent_id_of::(location)?; Ok(((*para_id).into(), agent_id)) }, @@ -141,7 +132,7 @@ where #[frame_support::pallet] pub mod pallet { use frame_support::dispatch::PostDispatchInfo; - use snowbridge_core::{outbound::v2::second_governance_origin, StaticLookup}; + use snowbridge_core::StaticLookup; use sp_core::U256; use super::*; @@ -255,6 +246,7 @@ pub mod pallet { InvalidTokenTransferFees, InvalidPricingParameters, InvalidUpgradeParameters, + TokenAlreadyCreated, } /// The set of registered agents @@ -382,126 +374,6 @@ pub mod pallet { Ok(()) } - /// Sends a command to the Gateway contract to instantiate a new agent contract representing - /// `origin`. - /// - /// Fee required: Yes - /// - /// - `origin`: Must be `Location` of a sibling parachain - #[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::create_agent())] - pub fn create_agent(origin: OriginFor) -> DispatchResult { - let origin_location: Location = T::SiblingOrigin::ensure_origin(origin)?; - - // Ensure that origin location is some consensus system on a sibling parachain - let (para_id, agent_id) = ensure_sibling::(&origin_location)?; - - // Record the agent id or fail if it has already been created - ensure!(!Agents::::contains_key(agent_id), Error::::AgentAlreadyCreated); - Agents::::insert(agent_id, ()); - - let command = Command::CreateAgent { agent_id }; - let pays_fee = PaysFee::::Yes(sibling_sovereign_account::(para_id)); - Self::send(SECONDARY_GOVERNANCE_CHANNEL, command, pays_fee)?; - - Self::deposit_event(Event::::CreateAgent { - location: Box::new(origin_location), - agent_id, - }); - Ok(()) - } - - /// Sends a message to the Gateway contract to create a new channel representing `origin` - /// - /// Fee required: Yes - /// - /// This extrinsic is permissionless, so a fee is charged to prevent spamming and pay - /// for execution costs on the remote side. - /// - /// The message is sent over the bridge on BridgeHub's own channel to the Gateway. 
- /// - /// - `origin`: Must be `Location` - /// - `mode`: Initial operating mode of the channel - #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::create_channel())] - pub fn create_channel(origin: OriginFor, mode: OperatingMode) -> DispatchResult { - let origin_location: Location = T::SiblingOrigin::ensure_origin(origin)?; - - // Ensure that origin location is a sibling parachain - let (para_id, agent_id) = ensure_sibling::(&origin_location)?; - - let channel_id: ChannelId = para_id.into(); - - ensure!(Agents::::contains_key(agent_id), Error::::NoAgent); - ensure!(!Channels::::contains_key(channel_id), Error::::ChannelAlreadyCreated); - - let channel = Channel { agent_id, para_id }; - Channels::::insert(channel_id, channel); - - let command = Command::CreateChannel { channel_id, agent_id, mode }; - let pays_fee = PaysFee::::Yes(sibling_sovereign_account::(para_id)); - Self::send(SECONDARY_GOVERNANCE_CHANNEL, command, pays_fee)?; - - Self::deposit_event(Event::::CreateChannel { channel_id, agent_id }); - Ok(()) - } - - /// Sends a message to the Gateway contract to update a channel configuration - /// - /// The origin must already have a channel initialized, as this message is sent over it. - /// - /// A partial fee will be charged for local processing only. - /// - /// - `origin`: Must be `Location` - /// - `mode`: Initial operating mode of the channel - #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::update_channel())] - pub fn update_channel(origin: OriginFor, mode: OperatingMode) -> DispatchResult { - let origin_location: Location = T::SiblingOrigin::ensure_origin(origin)?; - - // Ensure that origin location is a sibling parachain - let (para_id, _) = ensure_sibling::(&origin_location)?; - - let channel_id: ChannelId = para_id.into(); - - ensure!(Channels::::contains_key(channel_id), Error::::NoChannel); - - let command = Command::UpdateChannel { channel_id, mode }; - let pays_fee = PaysFee::::Partial(sibling_sovereign_account::(para_id)); - - // Parachains send the update message on their own channel - Self::send(channel_id, command, pays_fee)?; - - Self::deposit_event(Event::::UpdateChannel { channel_id, mode }); - Ok(()) - } - - /// Sends a message to the Gateway contract to update an arbitrary channel - /// - /// Fee required: No - /// - /// - `origin`: Must be root - /// - `channel_id`: ID of channel - /// - `mode`: Initial operating mode of the channel - /// - `outbound_fee`: Fee charged to users for sending outbound messages to Polkadot - #[pallet::call_index(6)] - #[pallet::weight(T::WeightInfo::force_update_channel())] - pub fn force_update_channel( - origin: OriginFor, - channel_id: ChannelId, - mode: OperatingMode, - ) -> DispatchResult { - ensure_root(origin)?; - - ensure!(Channels::::contains_key(channel_id), Error::::NoChannel); - - let command = Command::UpdateChannel { channel_id, mode }; - Self::send(PRIMARY_GOVERNANCE_CHANNEL, command, PaysFee::::No)?; - - Self::deposit_event(Event::::UpdateChannel { channel_id, mode }); - Ok(()) - } - /// Sends a message to the Gateway contract to transfer ether from an agent to `recipient`. /// /// A partial fee will be charged for local processing only. @@ -641,34 +513,6 @@ pub mod pallet { pays_fee: Pays::No, }) } - - /// Registers a Polkadot-native token as a wrapped ERC20 token on Ethereum. - /// Privileged. Can only be called by root. 
- /// - /// Fee required: No - /// - /// - `origin`: Must be root - /// - `location`: Location of the asset (relative to this chain) - /// - `metadata`: Metadata to include in the instantiated ERC20 contract on Ethereum - #[pallet::call_index(11)] - #[pallet::weight(T::WeightInfo::register_token())] - pub fn register_token_v2( - origin: OriginFor, - location: Box, - metadata: AssetMetadata, - ) -> DispatchResultWithPostInfo { - ensure_root(origin)?; - - let location: Location = - (*location).try_into().map_err(|_| Error::::UnsupportedLocationVersion)?; - - Self::do_register_token_v2(&location, metadata, PaysFee::::No)?; - - Ok(PostDispatchInfo { - actual_weight: Some(T::WeightInfo::register_token()), - pays_fee: Pays::No, - }) - } } impl Pallet { @@ -795,44 +639,9 @@ pub mod pallet { Ok(()) } - pub(crate) fn do_register_token_v2( - location: &Location, - metadata: AssetMetadata, - pays_fee: PaysFee, - ) -> Result<(), DispatchError> { - let ethereum_location = T::EthereumLocation::get(); - // reanchor to Ethereum context - let location = location - .clone() - .reanchored(ðereum_location, &T::UniversalLocation::get()) - .map_err(|_| Error::::LocationConversionFailed)?; - - let token_id = TokenIdOf::convert_location(&location) - .ok_or(Error::::LocationConversionFailed)?; - - if !ForeignToNativeId::::contains_key(token_id) { - NativeToForeignId::::insert(location.clone(), token_id); - ForeignToNativeId::::insert(token_id, location.clone()); - } - - let command = CommandV2::RegisterForeignToken { - token_id, - name: metadata.name.into_inner(), - symbol: metadata.symbol.into_inner(), - decimals: metadata.decimals, - }; - Self::send_v2(second_governance_origin(), command, pays_fee)?; - - Self::deposit_event(Event::::RegisterToken { - location: location.clone().into(), - foreign_token_id: token_id, - }); - - Ok(()) - } - + #[allow(dead_code)] /// Send `command` to the Gateway on the Channel identified by `channel_id` - fn send_v2(origin: H256, command: CommandV2, pays_fee: PaysFee) -> DispatchResult { + fn send_governance_command(origin: H256, command: CommandV2) -> DispatchResult { let message = MessageV2 { origin, id: Default::default(), @@ -840,23 +649,9 @@ pub mod pallet { commands: BoundedVec::try_from(vec![command]).unwrap(), }; - let (ticket, fee) = + let (ticket, _) = T::OutboundQueueV2::validate(&message).map_err(|err| Error::::Send(err))?; - let payment = match pays_fee { - PaysFee::Yes(account) | PaysFee::Partial(account) => Some((account, fee)), - PaysFee::No => None, - }; - - if let Some((payer, fee)) = payment { - T::Token::transfer( - &payer, - &T::TreasuryAccount::get(), - fee, - Preservation::Preserve, - )?; - } - T::OutboundQueueV2::deliver(ticket).map_err(|err| Error::::Send(err))?; Ok(()) } @@ -890,4 +685,27 @@ pub mod pallet { NativeToForeignId::::get(location) } } + + impl TokenRegistry for Pallet { + fn register(location: Location) -> DispatchResult { + ensure!( + NativeToForeignId::::contains_key(location.clone()), + Error::::TokenAlreadyCreated + ); + let token_id = TokenIdOf::convert_location(&location) + .ok_or(Error::::LocationConversionFailed)?; + ForeignToNativeId::::insert(token_id, location.clone()); + NativeToForeignId::::insert(location.clone(), token_id); + Ok(()) + } + } + + impl AgentRegistry for Pallet { + fn register(location: Location) -> DispatchResult { + let agent_id = agent_id_of::(&location)?; + ensure!(!Agents::::contains_key(agent_id), Error::::AgentAlreadyCreated); + Agents::::insert(agent_id, ()); + Ok(()) + } + } } diff --git 
a/bridges/snowbridge/primitives/core/src/lib.rs b/bridges/snowbridge/primitives/core/src/lib.rs index 88ac8124a15b..7a9a8df544d7 100644 --- a/bridges/snowbridge/primitives/core/src/lib.rs +++ b/bridges/snowbridge/primitives/core/src/lib.rs @@ -13,8 +13,10 @@ pub mod location; pub mod operating_mode; pub mod outbound; pub mod pricing; +pub mod registry; pub mod reward; pub mod ringbuffer; +pub mod transact; pub use location::{AgentId, AgentIdOf, TokenId, TokenIdOf}; pub use polkadot_parachain_primitives::primitives::{ @@ -54,6 +56,16 @@ impl Contains for AllowSiblingsOnly { } } +pub struct AllowAnySovereignFromSiblings; +impl Contains for AllowAnySovereignFromSiblings { + fn contains(location: &Location) -> bool { + match (location.parent_count(), location.first_interior()) { + (1, Some(Parachain(..))) => true, + _ => false, + } + } +} + pub fn gwei(x: u128) -> U256 { U256::from(1_000_000_000u128).saturating_mul(x.into()) } diff --git a/bridges/snowbridge/primitives/core/src/registry.rs b/bridges/snowbridge/primitives/core/src/registry.rs new file mode 100644 index 000000000000..f3b87bdbbfac --- /dev/null +++ b/bridges/snowbridge/primitives/core/src/registry.rs @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork + +use frame_support::dispatch::DispatchResult; +use xcm::prelude::Location; + +pub trait TokenRegistry { + fn register(location: Location) -> DispatchResult; +} + +pub trait AgentRegistry { + fn register(location: Location) -> DispatchResult; +} diff --git a/bridges/snowbridge/primitives/core/src/transact.rs b/bridges/snowbridge/primitives/core/src/transact.rs new file mode 100644 index 000000000000..ab18a3d6202d --- /dev/null +++ b/bridges/snowbridge/primitives/core/src/transact.rs @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork + +use crate::{AssetMetadata, Vec}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_core::H160; +use sp_runtime::RuntimeDebug; +use xcm::prelude::Location; + +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub struct TransactInfo { + pub kind: TransactKind, + pub params: Vec, +} + +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub enum TransactKind { + RegisterToken, + RegisterAgent, + CallContract, +} + +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub struct RegisterTokenParams { + pub location: Location, + pub metadata: AssetMetadata, +} + +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub struct CallContractParams { + pub target: H160, + pub call: Vec, + pub gas_limit: u64, + pub value: u128, +} diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs index 65f8063686a0..3319ff6877f8 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs @@ -2,10 +2,12 @@ // SPDX-FileCopyrightText: 2023 Snowfork //! 
Converts XCM messages into InboundMessage that can be processed by the Gateway contract +use codec::DecodeAll; use core::slice::Iter; use frame_support::{ensure, BoundedVec}; use snowbridge_core::{ outbound::v2::{Command, Message}, + transact::{CallContractParams, RegisterTokenParams, TransactInfo, TransactKind::*}, AgentId, TokenId, TokenIdOf, TokenIdOf as LocationIdOf, }; use sp_core::H160; @@ -35,6 +37,7 @@ pub enum XcmConverterError { TooManyCommands, AliasOriginExpected, InvalidOrigin, + TransactDecodeFailed, } macro_rules! match_expression { @@ -160,6 +163,7 @@ where let mut commands: Vec = Vec::new(); + // ENA transfer commands if let Some(enas) = enas { ensure!(enas.len() > 0, NoReserveAssets); for ena in enas.clone().inner().iter() { @@ -193,6 +197,7 @@ where } } + // PNA transfer commands if let Some(pnas) = pnas { ensure!(pnas.len() > 0, NoReserveAssets); for pna in pnas.clone().inner().iter() { @@ -221,6 +226,35 @@ where } } + // Transact commands + let transact_call = match_expression!(self.peek(), Ok(Transact { call, .. }), call); + if let Some(transact_call) = transact_call { + let _ = self.next(); + let message = + TransactInfo::decode_all(&mut transact_call.clone().into_encoded().as_slice()) + .map_err(|_| TransactDecodeFailed)?; + match message.kind { + RegisterAgent => commands.push(Command::CreateAgent { agent_id: origin }), + RegisterToken => { + let params = RegisterTokenParams::decode_all(&mut message.params.as_slice()) + .map_err(|_| TransactDecodeFailed)?; + let token_id = + TokenIdOf::convert_location(¶ms.location).ok_or(InvalidAsset)?; + commands.push(Command::RegisterForeignToken { + token_id, + name: params.metadata.name.into_inner(), + symbol: params.metadata.symbol.into_inner(), + decimals: params.metadata.decimals, + }); + }, + // Todo: For Transact + CallContract => { + let _ = CallContractParams::decode_all(&mut message.params.as_slice()) + .map_err(|_| TransactDecodeFailed)?; + }, + } + } + // ensure SetTopic exists let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs index 65b887ebcdc0..2c297c171173 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs @@ -14,8 +14,12 @@ // limitations under the License. 
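The converter above expects the call data of the `Transact` instruction to be a SCALE-encoded `TransactInfo`. Below is a minimal sketch, not part of the patch, of how a sibling parachain could build such a payload, assuming the `TransactInfo`, `TransactKind` and `RegisterTokenParams` types introduced in bridges/snowbridge/primitives/core/src/transact.rs above; the token metadata values are purely illustrative.

use codec::Encode;
use snowbridge_core::{
    transact::{RegisterTokenParams, TransactInfo, TransactKind},
    AssetMetadata,
};
use xcm::prelude::Location;

// RegisterAgent carries no parameters; the converter derives the agent from the
// preserved XCM origin, so only the kind needs to be encoded.
fn register_agent_payload() -> Vec<u8> {
    TransactInfo { kind: TransactKind::RegisterAgent, params: vec![] }.encode()
}

// RegisterToken nests its own SCALE-encoded parameters inside `params`, matching
// the two-step decode performed by the converter. Metadata values are placeholders.
fn register_token_payload() -> Vec<u8> {
    let params = RegisterTokenParams {
        location: Location::parent(),
        metadata: AssetMetadata {
            name: b"wnd".to_vec().try_into().unwrap(),
            symbol: b"wnd".to_vec().try_into().unwrap(),
            decimals: 12,
        },
    }
    .encode();
    TransactInfo { kind: TransactKind::RegisterToken, params }.encode()
}

Either payload is then passed as the `call` of a `Transact { origin_kind: OriginKind::SovereignAccount, .. }` instruction, which is what the `create_agent` integration test in the following hunk does for the RegisterAgent case.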
use crate::imports::*; use bridge_hub_westend_runtime::EthereumInboundQueue; +use frame_support::traits::fungibles::Mutate; use hex_literal::hex; -use snowbridge_core::AssetMetadata; +use snowbridge_core::{ + transact::{TransactInfo, TransactKind}, + AssetMetadata, +}; use snowbridge_router_primitives::inbound::{ v1::{Command, Destination, MessageV1, VersionedMessage}, EthereumLocationsConverterFor, @@ -32,6 +36,40 @@ const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EB const XCM_FEE: u128 = 100_000_000_000; const TOKEN_AMOUNT: u128 = 100_000_000_000; +pub fn fund_sovereign() { + let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); + let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); + BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); +} + +pub fn register_weth() { + let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); + let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); + let weth_asset_location: Location = + (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + AssetHubWestend::execute_with(|| { + type RuntimeOrigin = ::RuntimeOrigin; + + assert_ok!(::ForeignAssets::force_create( + RuntimeOrigin::root(), + weth_asset_location.clone().try_into().unwrap(), + assethub_sovereign.clone().into(), + false, + 1, + )); + + assert!(::ForeignAssets::asset_exists( + weth_asset_location.clone().try_into().unwrap(), + )); + + assert_ok!(::ForeignAssets::mint_into( + weth_asset_location.clone().try_into().unwrap(), + &AssetHubWestendReceiver::get(), + TOKEN_AMOUNT, + )); + }); +} + #[test] fn send_weth_from_asset_hub_to_ethereum() { let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); @@ -468,3 +506,88 @@ fn send_weth_and_dot_from_asset_hub_to_ethereum() { ); }); } + +#[test] +fn create_agent() { + let weth_asset_location: Location = + (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + + fund_sovereign(); + + register_weth(); + + BridgeHubWestend::execute_with(|| {}); + + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + let local_fee_amount = 200_000_000_000; + + let remote_fee_amount = 4_000_000_000; + + let local_fee_asset = + Asset { id: AssetId(Location::parent()), fun: Fungible(local_fee_amount) }; + let remote_fee_asset = + Asset { id: AssetId(weth_asset_location.clone()), fun: Fungible(remote_fee_amount) }; + let reserve_asset = Asset { + id: AssetId(weth_asset_location.clone()), + fun: Fungible(TOKEN_AMOUNT - remote_fee_amount), + }; + let assets = vec![ + Asset { id: weth_asset_location.clone().into(), fun: Fungible(TOKEN_AMOUNT) }, + local_fee_asset.clone(), + ]; + let destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]); + + let beneficiary = Location::new( + 0, + [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], + ); + + let transact_info = TransactInfo { kind: TransactKind::RegisterAgent, params: vec![] }; + + let xcms = VersionedXcm::from(Xcm(vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: local_fee_asset.clone() }, + InitiateTransfer { + destination, + remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( + remote_fee_asset.clone().into(), + ))), + preserve_origin: true, + assets: 
vec![AssetTransferFilter::ReserveWithdraw(Definite( + reserve_asset.clone().into(), + ))], + remote_xcm: Xcm(vec![ + DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, + Transact { + origin_kind: OriginKind::SovereignAccount, + call: transact_info.encode().into(), + }, + ]), + }, + ])); + + // Send the Weth back to Ethereum + ::PolkadotXcm::execute( + RuntimeOrigin::signed(AssetHubWestendReceiver::get()), + bx!(xcms), + Weight::from(8_000_000_000), + ) + .unwrap(); + }); + + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + // Check that the transfer token back to Ethereum message was queue in the Ethereum + // Outbound Queue + assert_expected_events!( + BridgeHubWestend, + vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageQueued{ .. }) => {},] + ); + }); +} + +#[test] +fn transact_with_agent() {} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index 633248ca6efb..ffb75a510cb5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -24,7 +24,7 @@ use crate::{ }; use parachains_common::{AccountId, Balance}; use snowbridge_beacon_primitives::{Fork, ForkVersions}; -use snowbridge_core::{gwei, meth, AllowSiblingsOnly, PricingParameters, Rewards}; +use snowbridge_core::{gwei, meth, AllowAnySovereignFromSiblings, PricingParameters, Rewards}; use snowbridge_router_primitives::{ inbound::v1::MessageToXcm, outbound::{v1::EthereumBlobExporter, v2::EthereumBlobExporter as EthereumBlobExporterV2}, @@ -207,7 +207,7 @@ impl snowbridge_pallet_ethereum_client::Config for Runtime { impl snowbridge_pallet_system::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OutboundQueue = EthereumOutboundQueue; - type SiblingOrigin = EnsureXcm; + type SiblingOrigin = EnsureXcm; type AgentIdOf = snowbridge_core::AgentIdOf; type TreasuryAccount = TreasuryAccount; type Token = Balances; From 66afb0760b0f8877e2ca2481b659dc13cef6f372 Mon Sep 17 00:00:00 2001 From: ron Date: Sat, 23 Nov 2024 09:22:35 +0800 Subject: [PATCH 22/68] Transact support --- .../primitives/core/src/outbound/v2.rs | 49 +- .../primitives/core/src/transact.rs | 2 +- .../router/src/outbound/v2/convert.rs | 16 +- .../src/tests/snowbridge_v2.rs | 503 +++++++----------- 4 files changed, 240 insertions(+), 330 deletions(-) diff --git a/bridges/snowbridge/primitives/core/src/outbound/v2.rs b/bridges/snowbridge/primitives/core/src/outbound/v2.rs index 59be8767c2f4..e9ff47786363 100644 --- a/bridges/snowbridge/primitives/core/src/outbound/v2.rs +++ b/bridges/snowbridge/primitives/core/src/outbound/v2.rs @@ -12,10 +12,10 @@ use sp_core::{RuntimeDebug, H160, H256}; use sp_std::{vec, vec::Vec}; use crate::outbound::v2::abi::{ - CreateAgentParams, MintForeignTokenParams, RegisterForeignTokenParams, SetOperatingModeParams, + CallContractParams, MintForeignTokenParams, RegisterForeignTokenParams, SetOperatingModeParams, UnlockNativeTokenParams, UpgradeParams, }; -use alloy_primitives::{Address, FixedBytes}; +use alloy_primitives::{Address, FixedBytes, U256}; use alloy_sol_types::SolValue; pub mod abi { @@ -54,12 +54,6 @@ pub mod abi { bytes initParams; } - // Payload for CreateAgent - struct CreateAgentParams { - /// @dev The agent ID of the consensus system - bytes32 agentID; - } - 
// Payload for SetOperatingMode instruction struct SetOperatingModeParams { /// The new operating mode @@ -97,6 +91,16 @@ pub mod abi { // Amount to mint uint128 amount; } + + // Payload for CallContract + struct CallContractParams { + // target contract + address target; + // Call data + bytes data; + // Ether value + uint256 value; + } } #[derive(Encode, Decode, TypeInfo, PartialEq, Clone, RuntimeDebug)] @@ -138,11 +142,7 @@ pub enum Command { initializer: Option, }, /// Create an agent representing a consensus system on Polkadot - CreateAgent { - /// The ID of the agent, derived from the `MultiLocation` of the consensus system on - /// Polkadot - agent_id: H256, - }, + CreateAgent {}, /// Set the global operating mode of the Gateway contract SetOperatingMode { /// The new operating mode @@ -179,6 +179,17 @@ pub enum Command { /// The amount of tokens to mint amount: u128, }, + /// Call Contract on Ethereum + CallContract { + /// Target contract address + target: H160, + /// The call data to the contract + data: Vec, + /// The dynamic gas component that needs to be specified when executing the contract + gas_limit: u64, + /// Ether Value(require to prefund the agent first) + value: u128, + }, } impl Command { @@ -191,6 +202,7 @@ impl Command { Command::RegisterForeignToken { .. } => 3, Command::MintForeignToken { .. } => 4, Command::CreateAgent { .. } => 5, + Command::CallContract { .. } => 6, } } @@ -203,9 +215,7 @@ impl Command { initParams: initializer.clone().map_or(vec![], |i| i.params), } .abi_encode(), - Command::CreateAgent { agent_id } => - CreateAgentParams { agentID: FixedBytes::from(agent_id.as_fixed_bytes()) } - .abi_encode(), + Command::CreateAgent {} => vec![], Command::SetOperatingMode { mode } => SetOperatingModeParams { mode: (*mode) as u8 }.abi_encode(), Command::UnlockNativeToken { token, recipient, amount, .. } => @@ -229,6 +239,12 @@ impl Command { amount: *amount, } .abi_encode(), + Command::CallContract { target, data, value, .. } => CallContractParams { + target: Address::from(target.as_fixed_bytes()), + data: data.to_vec(), + value: U256::try_from(*value).unwrap(), + } + .abi_encode(), } } } @@ -290,6 +306,7 @@ impl GasMeter for ConstantGasMeter { Command::UnlockNativeToken { .. } => 100_000, Command::RegisterForeignToken { .. } => 1_200_000, Command::MintForeignToken { .. } => 100_000, + Command::CallContract { gas_limit, .. } => *gas_limit, } } } diff --git a/bridges/snowbridge/primitives/core/src/transact.rs b/bridges/snowbridge/primitives/core/src/transact.rs index ab18a3d6202d..0dc77555ae45 100644 --- a/bridges/snowbridge/primitives/core/src/transact.rs +++ b/bridges/snowbridge/primitives/core/src/transact.rs @@ -30,7 +30,7 @@ pub struct RegisterTokenParams { #[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] pub struct CallContractParams { pub target: H160, - pub call: Vec, + pub data: Vec, pub gas_limit: u64, pub value: u128, } diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs index 3319ff6877f8..69ee69991e9f 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs @@ -165,7 +165,6 @@ where // ENA transfer commands if let Some(enas) = enas { - ensure!(enas.len() > 0, NoReserveAssets); for ena in enas.clone().inner().iter() { // Check the the deposit asset filter matches what was reserved. 
if !deposit_assets.matches(ena) { @@ -234,7 +233,7 @@ where TransactInfo::decode_all(&mut transact_call.clone().into_encoded().as_slice()) .map_err(|_| TransactDecodeFailed)?; match message.kind { - RegisterAgent => commands.push(Command::CreateAgent { agent_id: origin }), + RegisterAgent => commands.push(Command::CreateAgent {}), RegisterToken => { let params = RegisterTokenParams::decode_all(&mut message.params.as_slice()) .map_err(|_| TransactDecodeFailed)?; @@ -247,10 +246,19 @@ where decimals: params.metadata.decimals, }); }, - // Todo: For Transact CallContract => { - let _ = CallContractParams::decode_all(&mut message.params.as_slice()) + let params = CallContractParams::decode_all(&mut message.params.as_slice()) .map_err(|_| TransactDecodeFailed)?; + if params.value > 0 { + //Todo: Ensure amount of WETH deposit to the agent in same message can + // cover the value here + } + commands.push(Command::CallContract { + target: params.target, + data: params.data, + gas_limit: params.gas_limit, + value: params.value, + }); }, } } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs index 2c297c171173..a610ddc70bf9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs @@ -13,28 +13,46 @@ // See the License for the specific language governing permissions and // limitations under the License. use crate::imports::*; -use bridge_hub_westend_runtime::EthereumInboundQueue; use frame_support::traits::fungibles::Mutate; use hex_literal::hex; use snowbridge_core::{ - transact::{TransactInfo, TransactKind}, + transact::{CallContractParams, TransactInfo, TransactKind}, AssetMetadata, }; -use snowbridge_router_primitives::inbound::{ - v1::{Command, Destination, MessageV1, VersionedMessage}, - EthereumLocationsConverterFor, -}; +use snowbridge_router_primitives::inbound::EthereumLocationsConverterFor; use sp_runtime::MultiAddress; use testnet_parachains_constants::westend::snowbridge::EthereumNetwork; use xcm::v5::AssetTransferFilter; use xcm_executor::traits::ConvertLocation; -const INITIAL_FUND: u128 = 5_000_000_000_000; +const INITIAL_FUND: u128 = 50_000_000_000_000; pub const CHAIN_ID: u64 = 11155111; pub const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); -const XCM_FEE: u128 = 100_000_000_000; +const AGENT_ADDRESS: [u8; 20] = hex!("90A987B944Cb1dCcE5564e5FDeCD7a54D3de27Fe"); const TOKEN_AMOUNT: u128 = 100_000_000_000; +const REMOTE_FEE_AMOUNT_IN_WETH: u128 = 4_000_000_000; +const LOCAL_FEE_AMOUNT_IN_DOT: u128 = 200_000_000_000; + +const EXECUTION_WEIGHT: u64 = 8_000_000_000; + +pub fn weth_location() -> Location { + Location::new( + 2, + [ + GlobalConsensus(Ethereum { chain_id: CHAIN_ID }), + AccountKey20 { network: None, key: WETH }, + ], + ) +} + +pub fn destination() -> Location { + Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]) +} + +pub fn beneficiary() -> Location { + Location::new(0, [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }]) +} pub fn fund_sovereign() { let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); @@ -45,121 +63,91 @@ pub fn 
fund_sovereign() { pub fn register_weth() { let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); - let weth_asset_location: Location = - (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); AssetHubWestend::execute_with(|| { type RuntimeOrigin = ::RuntimeOrigin; assert_ok!(::ForeignAssets::force_create( RuntimeOrigin::root(), - weth_asset_location.clone().try_into().unwrap(), + weth_location().try_into().unwrap(), assethub_sovereign.clone().into(), false, 1, )); assert!(::ForeignAssets::asset_exists( - weth_asset_location.clone().try_into().unwrap(), + weth_location().try_into().unwrap(), )); assert_ok!(::ForeignAssets::mint_into( - weth_asset_location.clone().try_into().unwrap(), + weth_location().try_into().unwrap(), &AssetHubWestendReceiver::get(), TOKEN_AMOUNT, )); - }); -} - -#[test] -fn send_weth_from_asset_hub_to_ethereum() { - let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); - let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); - let weth_asset_location: Location = - (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); - - BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); - - AssetHubWestend::execute_with(|| { - type RuntimeOrigin = ::RuntimeOrigin; - - assert_ok!(::ForeignAssets::force_create( - RuntimeOrigin::root(), - weth_asset_location.clone().try_into().unwrap(), - assethub_sovereign.clone().into(), - false, - 1, - )); - assert!(::ForeignAssets::asset_exists( - weth_asset_location.clone().try_into().unwrap(), + assert_ok!(::ForeignAssets::mint_into( + weth_location().try_into().unwrap(), + &AssetHubWestendSender::get(), + TOKEN_AMOUNT, )); }); - +} +pub fn register_relay_token() { BridgeHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; - let message = VersionedMessage::V1(MessageV1 { - chain_id: CHAIN_ID, - command: Command::SendToken { - token: WETH.into(), - destination: Destination::AccountId32 { id: AssetHubWestendReceiver::get().into() }, - amount: TOKEN_AMOUNT, - fee: XCM_FEE, + // Register WND on BH + assert_ok!(::Balances::force_set_balance( + RuntimeOrigin::root(), + MultiAddress::Id(BridgeHubWestendSender::get()), + INITIAL_FUND, + )); + assert_ok!(::EthereumSystem::register_token( + RuntimeOrigin::root(), + Box::new(VersionedLocation::from(Location::parent())), + AssetMetadata { + name: "wnd".as_bytes().to_vec().try_into().unwrap(), + symbol: "wnd".as_bytes().to_vec().try_into().unwrap(), + decimals: 12, }, - }); - let (xcm, _) = EthereumInboundQueue::do_convert([0; 32].into(), message).unwrap(); - let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()).unwrap(); - - // Check that the send token message was sent using xcm + )); assert_expected_events!( BridgeHubWestend, - vec![RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) =>{},] + vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::RegisterToken { .. 
}) => {},] ); }); +} + +#[test] +fn send_weth_from_asset_hub_to_ethereum() { + fund_sovereign(); + + register_weth(); AssetHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; type RuntimeOrigin = ::RuntimeOrigin; - // Check that AssetHub has issued the foreign asset - assert_expected_events!( - AssetHubWestend, - vec![RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},] - ); - - // Local fee amount(in DOT) should cover - // 1. execution cost on AH - // 2. delivery cost to BH - // 3. execution cost on BH - let local_fee_amount = 200_000_000_000; - // Remote fee amount(in WETH) should cover execution cost on Ethereum - let remote_fee_amount = 4_000_000_000; let local_fee_asset = - Asset { id: AssetId(Location::parent()), fun: Fungible(local_fee_amount) }; + Asset { id: AssetId(Location::parent()), fun: Fungible(LOCAL_FEE_AMOUNT_IN_DOT) }; + let remote_fee_asset = - Asset { id: AssetId(weth_asset_location.clone()), fun: Fungible(remote_fee_amount) }; + Asset { id: AssetId(weth_location()), fun: Fungible(REMOTE_FEE_AMOUNT_IN_WETH) }; + let reserve_asset = Asset { - id: AssetId(weth_asset_location.clone()), - fun: Fungible(TOKEN_AMOUNT - remote_fee_amount), + id: AssetId(weth_location()), + fun: Fungible(TOKEN_AMOUNT - REMOTE_FEE_AMOUNT_IN_WETH), }; + let assets = vec![ - Asset { id: weth_asset_location.clone().into(), fun: Fungible(TOKEN_AMOUNT) }, + Asset { id: weth_location().into(), fun: Fungible(TOKEN_AMOUNT) }, local_fee_asset.clone(), ]; - let destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]); - - let beneficiary = Location::new( - 0, - [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], - ); - - let xcm_on_bh = Xcm(vec![DepositAsset { assets: Wild(AllCounted(2)), beneficiary }]); - let xcms = VersionedXcm::from(Xcm(vec![ + let xcm = VersionedXcm::from(Xcm(vec![ WithdrawAsset(assets.clone().into()), PayFees { asset: local_fee_asset.clone() }, InitiateTransfer { - destination, + destination: destination(), remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( remote_fee_asset.clone().into(), ))), @@ -167,147 +155,68 @@ fn send_weth_from_asset_hub_to_ethereum() { assets: vec![AssetTransferFilter::ReserveWithdraw(Definite( reserve_asset.clone().into(), ))], - remote_xcm: xcm_on_bh, + remote_xcm: Xcm(vec![DepositAsset { + assets: Wild(AllCounted(2)), + beneficiary: beneficiary(), + }]), }, ])); // Send the Weth back to Ethereum ::PolkadotXcm::execute( RuntimeOrigin::signed(AssetHubWestendReceiver::get()), - bx!(xcms), - Weight::from(8_000_000_000), + bx!(xcm), + Weight::from(EXECUTION_WEIGHT), ) .unwrap(); }); BridgeHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - // Check that the transfer token back to Ethereum message was queue in the Ethereum - // Outbound Queue + // Check that the Ethereum message was queue in the Outbound Queue assert_expected_events!( BridgeHubWestend, vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageQueued{ .. }) => {},] ); - let events = BridgeHubWestend::events(); - // Check that the remote fee was credited to the AssetHub sovereign account - assert!( - events.iter().any(|event| matches!( - event, - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) - if *who == assethub_sovereign - )), - "AssetHub sovereign takes remote fee." 
- ); }); } #[test] fn transfer_relay_token() { - let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of( - BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()), - ); - BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); - - let asset_id: Location = Location { parents: 1, interior: [].into() }; - let _expected_asset_id: Location = Location { - parents: 1, - interior: [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))].into(), - }; - let ethereum_sovereign: AccountId = - EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&Location::new( - 2, - [GlobalConsensus(EthereumNetwork::get())], - )) - .unwrap() - .into(); - - // Register token - BridgeHubWestend::execute_with(|| { - type RuntimeOrigin = ::RuntimeOrigin; - type RuntimeEvent = ::RuntimeEvent; + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&destination()) + .unwrap() + .into(); - assert_ok!(::Balances::force_set_balance( - RuntimeOrigin::root(), - sp_runtime::MultiAddress::Id(BridgeHubWestendSender::get()), - INITIAL_FUND * 10, - )); + fund_sovereign(); - assert_ok!(::EthereumSystem::register_token( - RuntimeOrigin::root(), - Box::new(VersionedLocation::from(asset_id.clone())), - AssetMetadata { - name: "wnd".as_bytes().to_vec().try_into().unwrap(), - symbol: "wnd".as_bytes().to_vec().try_into().unwrap(), - decimals: 12, - }, - )); - // Check that a message was sent to Ethereum to create the agent - assert_expected_events!( - BridgeHubWestend, - vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::RegisterToken { .. }) => {},] - ); - }); + register_weth(); + + register_relay_token(); // Send token to Ethereum AssetHubWestend::execute_with(|| { type RuntimeOrigin = ::RuntimeOrigin; type RuntimeEvent = ::RuntimeEvent; - let weth_asset_location: Location = - (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }) - .into(); - - assert_ok!(::ForeignAssets::force_create( - RuntimeOrigin::root(), - weth_asset_location.clone().try_into().unwrap(), - assethub_sovereign.clone().into(), - false, - 1, - )); - - assert_ok!(::ForeignAssets::mint( - RuntimeOrigin::signed(assethub_sovereign.clone().into()), - weth_asset_location.clone().try_into().unwrap(), - MultiAddress::Id(AssetHubWestendSender::get()), - TOKEN_AMOUNT, - )); - - // Local fee amount(in DOT) should cover - // 1. execution cost on AH - // 2. delivery cost to BH - // 3. 
execution cost on BH - let local_fee_amount = 200_000_000_000; - // Remote fee amount(in WETH) should cover execution cost on Ethereum - let remote_fee_amount = 4_000_000_000; - let local_fee_asset = - Asset { id: AssetId(Location::parent()), fun: Fungible(local_fee_amount) }; + Asset { id: AssetId(Location::parent()), fun: Fungible(LOCAL_FEE_AMOUNT_IN_DOT) }; let remote_fee_asset = - Asset { id: AssetId(weth_asset_location.clone()), fun: Fungible(remote_fee_amount) }; + Asset { id: AssetId(weth_location()), fun: Fungible(REMOTE_FEE_AMOUNT_IN_WETH) }; let assets = vec![ Asset { id: AssetId(Location::parent()), - fun: Fungible(TOKEN_AMOUNT + local_fee_amount), + fun: Fungible(TOKEN_AMOUNT + LOCAL_FEE_AMOUNT_IN_DOT), }, remote_fee_asset.clone(), ]; - let destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]); - - let beneficiary = Location::new( - 0, - [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], - ); - - let xcm_on_bh = Xcm(vec![DepositAsset { assets: Wild(AllCounted(2)), beneficiary }]); - - let xcms = VersionedXcm::from(Xcm(vec![ + let xcm = VersionedXcm::from(Xcm(vec![ WithdrawAsset(assets.clone().into()), PayFees { asset: local_fee_asset.clone() }, InitiateTransfer { - destination, + destination: destination(), remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( remote_fee_asset.clone().into(), ))), @@ -315,15 +224,18 @@ fn transfer_relay_token() { assets: vec![AssetTransferFilter::ReserveDeposit(Definite( Asset { id: AssetId(Location::parent()), fun: Fungible(TOKEN_AMOUNT) }.into(), ))], - remote_xcm: xcm_on_bh, + remote_xcm: Xcm(vec![DepositAsset { + assets: Wild(AllCounted(2)), + beneficiary: beneficiary(), + }]), }, ])); // Send DOT to Ethereum ::PolkadotXcm::execute( RuntimeOrigin::signed(AssetHubWestendSender::get()), - bx!(xcms), - Weight::from(8_000_000_000), + bx!(xcm), + Weight::from(EXECUTION_WEIGHT), ) .unwrap(); @@ -342,8 +254,7 @@ fn transfer_relay_token() { BridgeHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - // Check that the transfer token back to Ethereum message was queue in the Ethereum - // Outbound Queue + // Check that the Ethereum message was queue in the Outbound Queue assert_expected_events!( BridgeHubWestend, vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageQueued{ .. 
}) => {},] @@ -353,127 +264,113 @@ fn transfer_relay_token() { #[test] fn send_weth_and_dot_from_asset_hub_to_ethereum() { - let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); - let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); - let weth_asset_location: Location = - (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + fund_sovereign(); - BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); + register_weth(); + + register_relay_token(); AssetHubWestend::execute_with(|| { type RuntimeOrigin = ::RuntimeOrigin; - // Register WETH on AH - assert_ok!(::ForeignAssets::force_create( - RuntimeOrigin::root(), - weth_asset_location.clone().try_into().unwrap(), - assethub_sovereign.clone().into(), - false, - 1, - )); + let local_fee_asset = + Asset { id: AssetId(Location::parent()), fun: Fungible(LOCAL_FEE_AMOUNT_IN_DOT) }; + let remote_fee_asset = + Asset { id: AssetId(weth_location()), fun: Fungible(REMOTE_FEE_AMOUNT_IN_WETH) }; - assert!(::ForeignAssets::asset_exists( - weth_asset_location.clone().try_into().unwrap(), - )); - }); + let reserve_asset = Asset { + id: AssetId(weth_location()), + fun: Fungible(TOKEN_AMOUNT - REMOTE_FEE_AMOUNT_IN_WETH), + }; - BridgeHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - type RuntimeOrigin = ::RuntimeOrigin; + let weth_asset = Asset { id: weth_location().into(), fun: Fungible(TOKEN_AMOUNT) }; - // Register WND on BH - assert_ok!(::Balances::force_set_balance( - RuntimeOrigin::root(), - MultiAddress::Id(BridgeHubWestendSender::get()), - INITIAL_FUND * 10, - )); - assert_ok!(::EthereumSystem::register_token( - RuntimeOrigin::root(), - Box::new(VersionedLocation::from(Location::parent())), - AssetMetadata { - name: "wnd".as_bytes().to_vec().try_into().unwrap(), - symbol: "wnd".as_bytes().to_vec().try_into().unwrap(), - decimals: 12, - }, - )); - assert_expected_events!( - BridgeHubWestend, - vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::RegisterToken { .. 
}) => {},] - ); + let dot_asset = Asset { id: AssetId(Location::parent()), fun: Fungible(TOKEN_AMOUNT) }; + + let assets = vec![weth_asset, dot_asset.clone(), local_fee_asset.clone()]; - // Transfer some WETH to AH - let message = VersionedMessage::V1(MessageV1 { - chain_id: CHAIN_ID, - command: Command::SendToken { - token: WETH.into(), - destination: Destination::AccountId32 { id: AssetHubWestendReceiver::get().into() }, - amount: TOKEN_AMOUNT, - fee: XCM_FEE, + let xcms = VersionedXcm::from(Xcm(vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: local_fee_asset.clone() }, + InitiateTransfer { + destination: destination(), + remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( + remote_fee_asset.clone().into(), + ))), + preserve_origin: true, + assets: vec![ + AssetTransferFilter::ReserveWithdraw(Definite(reserve_asset.clone().into())), + AssetTransferFilter::ReserveDeposit(Definite(dot_asset.into())), + ], + remote_xcm: Xcm(vec![DepositAsset { + assets: Wild(All), + beneficiary: beneficiary(), + }]), }, - }); - let (xcm, _) = EthereumInboundQueue::do_convert([0; 32].into(), message).unwrap(); - let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()).unwrap(); + ])); + + ::PolkadotXcm::execute( + RuntimeOrigin::signed(AssetHubWestendReceiver::get()), + bx!(xcms), + Weight::from(EXECUTION_WEIGHT), + ) + .unwrap(); + }); + + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + // Check that Ethereum message was queue in the Outbound Queue assert_expected_events!( BridgeHubWestend, - vec![RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) =>{},] + vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageQueued{ .. }) => {},] ); }); +} - AssetHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - type RuntimeOrigin = ::RuntimeOrigin; +#[test] +fn create_agent() { + fund_sovereign(); - // Check that AssetHub has issued the foreign asset - assert_expected_events!( - AssetHubWestend, - vec![RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},] - ); + register_weth(); - // Local fee amount(in DOT) should cover - // 1. execution cost on AH - // 2. delivery cost to BH - // 3. 
execution cost on BH - let local_fee_amount = 200_000_000_000; - // Remote fee amount(in WETH) should cover execution cost on Ethereum - let remote_fee_amount = 4_000_000_000; + BridgeHubWestend::execute_with(|| {}); - let local_fee_asset = - Asset { id: AssetId(Location::parent()), fun: Fungible(local_fee_amount) }; - let remote_fee_asset = - Asset { id: AssetId(weth_asset_location.clone()), fun: Fungible(remote_fee_amount) }; - let reserve_asset = Asset { - id: AssetId(weth_asset_location.clone()), - fun: Fungible(TOKEN_AMOUNT - remote_fee_amount), - }; + AssetHubWestend::execute_with(|| { + type RuntimeOrigin = ::RuntimeOrigin; - let weth_asset = - Asset { id: weth_asset_location.clone().into(), fun: Fungible(TOKEN_AMOUNT) }; - let dot_asset = Asset { id: AssetId(Location::parent()), fun: Fungible(TOKEN_AMOUNT) }; + let local_fee_asset = + Asset { id: AssetId(Location::parent()), fun: Fungible(LOCAL_FEE_AMOUNT_IN_DOT) }; - let assets = vec![weth_asset, dot_asset.clone(), local_fee_asset.clone()]; - let destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]); + // All WETH as fee and reserve_asset is zero, so there is no transfer in this case + let remote_fee_asset = Asset { id: AssetId(weth_location()), fun: Fungible(TOKEN_AMOUNT) }; + let reserve_asset = Asset { id: AssetId(weth_location()), fun: Fungible(0) }; - let beneficiary = Location::new( - 0, - [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], - ); + let assets = vec![ + Asset { id: weth_location().into(), fun: Fungible(TOKEN_AMOUNT) }, + local_fee_asset.clone(), + ]; - let xcm_on_bh = Xcm(vec![DepositAsset { assets: Wild(All), beneficiary }]); + let transact_info = TransactInfo { kind: TransactKind::RegisterAgent, params: vec![] }; let xcms = VersionedXcm::from(Xcm(vec![ WithdrawAsset(assets.clone().into()), PayFees { asset: local_fee_asset.clone() }, InitiateTransfer { - destination, + destination: destination(), remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( remote_fee_asset.clone().into(), ))), preserve_origin: true, - assets: vec![ - AssetTransferFilter::ReserveWithdraw(Definite(reserve_asset.clone().into())), - AssetTransferFilter::ReserveDeposit(Definite(dot_asset.into())), - ], - remote_xcm: xcm_on_bh, + assets: vec![AssetTransferFilter::ReserveWithdraw(Definite( + reserve_asset.clone().into(), + ))], + remote_xcm: Xcm(vec![ + DepositAsset { assets: Wild(AllCounted(2)), beneficiary: beneficiary() }, + Transact { + origin_kind: OriginKind::SovereignAccount, + call: transact_info.encode().into(), + }, + ]), }, ])); @@ -488,27 +385,16 @@ fn send_weth_and_dot_from_asset_hub_to_ethereum() { BridgeHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - // Check that the transfer token back to Ethereum message was queue in the Ethereum - // Outbound Queue + // Check that Ethereum message was queue in the Outbound Queue assert_expected_events!( BridgeHubWestend, vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageQueued{ .. }) => {},] ); - let events = BridgeHubWestend::events(); - // Check that the remote fee was credited to the AssetHub sovereign account - assert!( - events.iter().any(|event| matches!( - event, - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) - if *who == assethub_sovereign - )), - "AssetHub sovereign takes remote fee." 
- ); }); } #[test] -fn create_agent() { +fn transact_with_agent() { let weth_asset_location: Location = (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); @@ -519,39 +405,43 @@ fn create_agent() { BridgeHubWestend::execute_with(|| {}); AssetHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; type RuntimeOrigin = ::RuntimeOrigin; - let local_fee_amount = 200_000_000_000; - - let remote_fee_amount = 4_000_000_000; - let local_fee_asset = - Asset { id: AssetId(Location::parent()), fun: Fungible(local_fee_amount) }; - let remote_fee_asset = - Asset { id: AssetId(weth_asset_location.clone()), fun: Fungible(remote_fee_amount) }; + Asset { id: AssetId(Location::parent()), fun: Fungible(LOCAL_FEE_AMOUNT_IN_DOT) }; + + let remote_fee_asset = Asset { + id: AssetId(weth_asset_location.clone()), + fun: Fungible(REMOTE_FEE_AMOUNT_IN_WETH), + }; let reserve_asset = Asset { id: AssetId(weth_asset_location.clone()), - fun: Fungible(TOKEN_AMOUNT - remote_fee_amount), + fun: Fungible(TOKEN_AMOUNT - REMOTE_FEE_AMOUNT_IN_WETH), }; + let assets = vec![ Asset { id: weth_asset_location.clone().into(), fun: Fungible(TOKEN_AMOUNT) }, local_fee_asset.clone(), ]; - let destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]); - let beneficiary = Location::new( - 0, - [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], - ); + let beneficiary = + Location::new(0, [AccountKey20 { network: None, key: AGENT_ADDRESS.into() }]); - let transact_info = TransactInfo { kind: TransactKind::RegisterAgent, params: vec![] }; + let call_params = CallContractParams { + target: Default::default(), + data: vec![], + gas_limit: 40000, + // value should be less than the transfer amount, require validation on BH Exporter + value: 4 * (TOKEN_AMOUNT - REMOTE_FEE_AMOUNT_IN_WETH) / 5, + }; + let transact_info = + TransactInfo { kind: TransactKind::CallContract, params: call_params.encode() }; let xcms = VersionedXcm::from(Xcm(vec![ WithdrawAsset(assets.clone().into()), PayFees { asset: local_fee_asset.clone() }, InitiateTransfer { - destination, + destination: destination(), remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( remote_fee_asset.clone().into(), ))), @@ -569,25 +459,20 @@ fn create_agent() { }, ])); - // Send the Weth back to Ethereum ::PolkadotXcm::execute( RuntimeOrigin::signed(AssetHubWestendReceiver::get()), bx!(xcms), - Weight::from(8_000_000_000), + Weight::from(EXECUTION_WEIGHT), ) .unwrap(); }); BridgeHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - // Check that the transfer token back to Ethereum message was queue in the Ethereum - // Outbound Queue + // Check that Ethereum message was queue in the Outbound Queue assert_expected_events!( BridgeHubWestend, vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageQueued{ .. 
}) => {},] ); }); } - -#[test] -fn transact_with_agent() {} From 78da35b9388654dd47ae3c743e06f0ab5da14d59 Mon Sep 17 00:00:00 2001 From: ron Date: Sun, 24 Nov 2024 10:31:23 +0800 Subject: [PATCH 23/68] Remove irrelevant changes --- .../pallets/system/src/benchmarking.rs | 65 ++++- bridges/snowbridge/pallets/system/src/lib.rs | 246 +++++++++++++++--- bridges/snowbridge/primitives/core/src/lib.rs | 10 - .../src/bridge_to_ethereum_config.rs | 4 +- 4 files changed, 280 insertions(+), 45 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/benchmarking.rs b/bridges/snowbridge/pallets/system/src/benchmarking.rs index e4cb079419d0..939de9d40d13 100644 --- a/bridges/snowbridge/pallets/system/src/benchmarking.rs +++ b/bridges/snowbridge/pallets/system/src/benchmarking.rs @@ -60,12 +60,74 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn create_agent() -> Result<(), BenchmarkError> { + let origin_para_id = 2000; + let origin_location = Location::new(1, [Parachain(origin_para_id)]); + let origin = T::Helper::make_xcm_origin(origin_location); + fund_sovereign_account::(origin_para_id.into())?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin); + + Ok(()) + } + + #[benchmark] + fn create_channel() -> Result<(), BenchmarkError> { + let origin_para_id = 2000; + let origin_location = Location::new(1, [Parachain(origin_para_id)]); + let origin = T::Helper::make_xcm_origin(origin_location); + fund_sovereign_account::(origin_para_id.into())?; + + SnowbridgeControl::::create_agent(origin.clone())?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, OperatingMode::Normal); + + Ok(()) + } + + #[benchmark] + fn update_channel() -> Result<(), BenchmarkError> { + let origin_para_id = 2000; + let origin_location = Location::new(1, [Parachain(origin_para_id)]); + let origin = T::Helper::make_xcm_origin(origin_location); + fund_sovereign_account::(origin_para_id.into())?; + SnowbridgeControl::::create_agent(origin.clone())?; + SnowbridgeControl::::create_channel(origin.clone(), OperatingMode::Normal)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, OperatingMode::RejectingOutboundMessages); + + Ok(()) + } + + #[benchmark] + fn force_update_channel() -> Result<(), BenchmarkError> { + let origin_para_id = 2000; + let origin_location = Location::new(1, [Parachain(origin_para_id)]); + let origin = T::Helper::make_xcm_origin(origin_location); + let channel_id: ChannelId = ParaId::from(origin_para_id).into(); + + fund_sovereign_account::(origin_para_id.into())?; + SnowbridgeControl::::create_agent(origin.clone())?; + SnowbridgeControl::::create_channel(origin.clone(), OperatingMode::Normal)?; + + #[extrinsic_call] + _(RawOrigin::Root, channel_id, OperatingMode::RejectingOutboundMessages); + + Ok(()) + } + #[benchmark] fn transfer_native_from_agent() -> Result<(), BenchmarkError> { let origin_para_id = 2000; let origin_location = Location::new(1, [Parachain(origin_para_id)]); let origin = T::Helper::make_xcm_origin(origin_location); fund_sovereign_account::(origin_para_id.into())?; + SnowbridgeControl::::create_agent(origin.clone())?; + SnowbridgeControl::::create_channel(origin.clone(), OperatingMode::Normal)?; #[extrinsic_call] _(origin as T::RuntimeOrigin, H160::default(), 1); @@ -77,8 +139,9 @@ mod benchmarks { fn force_transfer_native_from_agent() -> Result<(), BenchmarkError> { let origin_para_id = 2000; let origin_location = Location::new(1, [Parachain(origin_para_id)]); - let _origin = T::Helper::make_xcm_origin(origin_location.clone()); + let origin = 
T::Helper::make_xcm_origin(origin_location.clone()); fund_sovereign_account::(origin_para_id.into())?; + SnowbridgeControl::::create_agent(origin.clone())?; let versioned_location: VersionedLocation = origin_location.into(); diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index da927061da65..8a5b0a6edbf9 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -15,16 +15,26 @@ //! The `create_agent` extrinsic should be called via an XCM `Transact` instruction from the sibling //! parachain. //! +//! ## Channels +//! +//! Each sibling parachain has its own dedicated messaging channel for sending and receiving +//! messages. As a prerequisite to creating a channel, the sibling should have already created +//! an agent using the `create_agent` extrinsic. +//! +//! * [`Call::create_channel`]: Create channel for a sibling +//! * [`Call::update_channel`]: Update a channel for a sibling +//! //! ## Governance //! //! Only Polkadot governance itself can call these extrinsics. Delivery fees are waived. //! //! * [`Call::upgrade`]`: Upgrade the gateway contract //! * [`Call::set_operating_mode`]: Update the operating mode of the gateway contract +//! * [`Call::force_update_channel`]: Allow root to update a channel for a sibling //! * [`Call::force_transfer_native_from_agent`]: Allow root to withdraw ether from an agent //! //! Typically, Polkadot governance will use the `force_transfer_native_from_agent` and -//! `force_update_channel` and extrinsics to manage agents for system parachains. +//! `force_update_channel` and extrinsics to manage agents and channels for system parachains. //! //! ## Polkadot-native tokens on Ethereum //! @@ -63,7 +73,6 @@ use snowbridge_core::{ v2::{Command as CommandV2, Message as MessageV2, SendMessage as SendMessageV2}, OperatingMode, SendError, }, - registry::{AgentRegistry, TokenRegistry}, sibling_sovereign_account, AgentId, AssetMetadata, Channel, ChannelId, ParaId, PricingParameters as PricingParametersRecord, TokenId, TokenIdOf, PRIMARY_GOVERNANCE_CHANNEL, SECONDARY_GOVERNANCE_CHANNEL, @@ -93,8 +102,8 @@ fn ensure_sibling(location: &Location) -> Result<(ParaId, H256), DispatchErro where T: Config, { - match (location.parents, location.first_interior()) { - (1, Some(Parachain(para_id))) => { + match location.unpack() { + (1, [Parachain(para_id)]) => { let agent_id = agent_id_of::(location)?; Ok(((*para_id).into(), agent_id)) }, @@ -132,7 +141,7 @@ where #[frame_support::pallet] pub mod pallet { use frame_support::dispatch::PostDispatchInfo; - use snowbridge_core::StaticLookup; + use snowbridge_core::{outbound::v2::second_governance_origin, StaticLookup}; use sp_core::U256; use super::*; @@ -246,7 +255,6 @@ pub mod pallet { InvalidTokenTransferFees, InvalidPricingParameters, InvalidUpgradeParameters, - TokenAlreadyCreated, } /// The set of registered agents @@ -374,6 +382,126 @@ pub mod pallet { Ok(()) } + /// Sends a command to the Gateway contract to instantiate a new agent contract representing + /// `origin`. 
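+ ///
+ /// Minimal sketch of the dispatch, mirroring the benchmark setup above (the sibling
+ /// location, para id and test helpers are illustrative assumptions, and the sibling
+ /// sovereign account is assumed to be funded since a fee is charged):
+ ///
+ /// ```ignore
+ /// let origin_location = Location::new(1, [Parachain(2000)]);
+ /// let origin = T::Helper::make_xcm_origin(origin_location);
+ /// fund_sovereign_account::<T>(2000.into())?;
+ /// SnowbridgeControl::<T>::create_agent(origin)?;
+ /// ```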
+ /// + /// Fee required: Yes + /// + /// - `origin`: Must be `Location` of a sibling parachain + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::create_agent())] + pub fn create_agent(origin: OriginFor) -> DispatchResult { + let origin_location: Location = T::SiblingOrigin::ensure_origin(origin)?; + + // Ensure that origin location is some consensus system on a sibling parachain + let (para_id, agent_id) = ensure_sibling::(&origin_location)?; + + // Record the agent id or fail if it has already been created + ensure!(!Agents::::contains_key(agent_id), Error::::AgentAlreadyCreated); + Agents::::insert(agent_id, ()); + + let command = Command::CreateAgent { agent_id }; + let pays_fee = PaysFee::::Yes(sibling_sovereign_account::(para_id)); + Self::send(SECONDARY_GOVERNANCE_CHANNEL, command, pays_fee)?; + + Self::deposit_event(Event::::CreateAgent { + location: Box::new(origin_location), + agent_id, + }); + Ok(()) + } + + /// Sends a message to the Gateway contract to create a new channel representing `origin` + /// + /// Fee required: Yes + /// + /// This extrinsic is permissionless, so a fee is charged to prevent spamming and pay + /// for execution costs on the remote side. + /// + /// The message is sent over the bridge on BridgeHub's own channel to the Gateway. + /// + /// - `origin`: Must be `Location` + /// - `mode`: Initial operating mode of the channel + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::create_channel())] + pub fn create_channel(origin: OriginFor, mode: OperatingMode) -> DispatchResult { + let origin_location: Location = T::SiblingOrigin::ensure_origin(origin)?; + + // Ensure that origin location is a sibling parachain + let (para_id, agent_id) = ensure_sibling::(&origin_location)?; + + let channel_id: ChannelId = para_id.into(); + + ensure!(Agents::::contains_key(agent_id), Error::::NoAgent); + ensure!(!Channels::::contains_key(channel_id), Error::::ChannelAlreadyCreated); + + let channel = Channel { agent_id, para_id }; + Channels::::insert(channel_id, channel); + + let command = Command::CreateChannel { channel_id, agent_id, mode }; + let pays_fee = PaysFee::::Yes(sibling_sovereign_account::(para_id)); + Self::send(SECONDARY_GOVERNANCE_CHANNEL, command, pays_fee)?; + + Self::deposit_event(Event::::CreateChannel { channel_id, agent_id }); + Ok(()) + } + + /// Sends a message to the Gateway contract to update a channel configuration + /// + /// The origin must already have a channel initialized, as this message is sent over it. + /// + /// A partial fee will be charged for local processing only. 
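+ ///
+ /// Minimal sketch, mirroring the benchmark flow (an agent and channel are assumed
+ /// to already exist for the sibling before the update is dispatched):
+ ///
+ /// ```ignore
+ /// SnowbridgeControl::<T>::create_agent(origin.clone())?;
+ /// SnowbridgeControl::<T>::create_channel(origin.clone(), OperatingMode::Normal)?;
+ /// SnowbridgeControl::<T>::update_channel(origin, OperatingMode::RejectingOutboundMessages)?;
+ /// ```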
+ /// + /// - `origin`: Must be `Location` + /// - `mode`: Initial operating mode of the channel + #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::update_channel())] + pub fn update_channel(origin: OriginFor, mode: OperatingMode) -> DispatchResult { + let origin_location: Location = T::SiblingOrigin::ensure_origin(origin)?; + + // Ensure that origin location is a sibling parachain + let (para_id, _) = ensure_sibling::(&origin_location)?; + + let channel_id: ChannelId = para_id.into(); + + ensure!(Channels::::contains_key(channel_id), Error::::NoChannel); + + let command = Command::UpdateChannel { channel_id, mode }; + let pays_fee = PaysFee::::Partial(sibling_sovereign_account::(para_id)); + + // Parachains send the update message on their own channel + Self::send(channel_id, command, pays_fee)?; + + Self::deposit_event(Event::::UpdateChannel { channel_id, mode }); + Ok(()) + } + + /// Sends a message to the Gateway contract to update an arbitrary channel + /// + /// Fee required: No + /// + /// - `origin`: Must be root + /// - `channel_id`: ID of channel + /// - `mode`: Initial operating mode of the channel + /// - `outbound_fee`: Fee charged to users for sending outbound messages to Polkadot + #[pallet::call_index(6)] + #[pallet::weight(T::WeightInfo::force_update_channel())] + pub fn force_update_channel( + origin: OriginFor, + channel_id: ChannelId, + mode: OperatingMode, + ) -> DispatchResult { + ensure_root(origin)?; + + ensure!(Channels::::contains_key(channel_id), Error::::NoChannel); + + let command = Command::UpdateChannel { channel_id, mode }; + Self::send(PRIMARY_GOVERNANCE_CHANNEL, command, PaysFee::::No)?; + + Self::deposit_event(Event::::UpdateChannel { channel_id, mode }); + Ok(()) + } + /// Sends a message to the Gateway contract to transfer ether from an agent to `recipient`. /// /// A partial fee will be charged for local processing only. @@ -513,6 +641,34 @@ pub mod pallet { pays_fee: Pays::No, }) } + + /// Registers a Polkadot-native token as a wrapped ERC20 token on Ethereum. + /// Privileged. Can only be called by root. 
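+ ///
+ /// Minimal sketch of a root dispatch registering the relay chain token; the
+ /// metadata values mirror the existing `register_token` tests and are
+ /// illustrative only:
+ ///
+ /// ```ignore
+ /// SnowbridgeControl::<T>::register_token_v2(
+ ///     RawOrigin::Root.into(),
+ ///     Box::new(VersionedLocation::from(Location::parent())),
+ ///     AssetMetadata {
+ ///         name: "wnd".as_bytes().to_vec().try_into().unwrap(),
+ ///         symbol: "wnd".as_bytes().to_vec().try_into().unwrap(),
+ ///         decimals: 12,
+ ///     },
+ /// )?;
+ /// ```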
+ /// + /// Fee required: No + /// + /// - `origin`: Must be root + /// - `location`: Location of the asset (relative to this chain) + /// - `metadata`: Metadata to include in the instantiated ERC20 contract on Ethereum + #[pallet::call_index(11)] + #[pallet::weight(T::WeightInfo::register_token())] + pub fn register_token_v2( + origin: OriginFor, + location: Box, + metadata: AssetMetadata, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + + let location: Location = + (*location).try_into().map_err(|_| Error::::UnsupportedLocationVersion)?; + + Self::do_register_token_v2(&location, metadata, PaysFee::::No)?; + + Ok(PostDispatchInfo { + actual_weight: Some(T::WeightInfo::register_token()), + pays_fee: Pays::No, + }) + } } impl Pallet { @@ -639,9 +795,44 @@ pub mod pallet { Ok(()) } - #[allow(dead_code)] + pub(crate) fn do_register_token_v2( + location: &Location, + metadata: AssetMetadata, + pays_fee: PaysFee, + ) -> Result<(), DispatchError> { + let ethereum_location = T::EthereumLocation::get(); + // reanchor to Ethereum context + let location = location + .clone() + .reanchored(ðereum_location, &T::UniversalLocation::get()) + .map_err(|_| Error::::LocationConversionFailed)?; + + let token_id = TokenIdOf::convert_location(&location) + .ok_or(Error::::LocationConversionFailed)?; + + if !ForeignToNativeId::::contains_key(token_id) { + NativeToForeignId::::insert(location.clone(), token_id); + ForeignToNativeId::::insert(token_id, location.clone()); + } + + let command = CommandV2::RegisterForeignToken { + token_id, + name: metadata.name.into_inner(), + symbol: metadata.symbol.into_inner(), + decimals: metadata.decimals, + }; + Self::send_v2(second_governance_origin(), command, pays_fee)?; + + Self::deposit_event(Event::::RegisterToken { + location: location.clone().into(), + foreign_token_id: token_id, + }); + + Ok(()) + } + /// Send `command` to the Gateway on the Channel identified by `channel_id` - fn send_governance_command(origin: H256, command: CommandV2) -> DispatchResult { + fn send_v2(origin: H256, command: CommandV2, pays_fee: PaysFee) -> DispatchResult { let message = MessageV2 { origin, id: Default::default(), @@ -649,9 +840,23 @@ pub mod pallet { commands: BoundedVec::try_from(vec![command]).unwrap(), }; - let (ticket, _) = + let (ticket, fee) = T::OutboundQueueV2::validate(&message).map_err(|err| Error::::Send(err))?; + let payment = match pays_fee { + PaysFee::Yes(account) | PaysFee::Partial(account) => Some((account, fee)), + PaysFee::No => None, + }; + + if let Some((payer, fee)) = payment { + T::Token::transfer( + &payer, + &T::TreasuryAccount::get(), + fee, + Preservation::Preserve, + )?; + } + T::OutboundQueueV2::deliver(ticket).map_err(|err| Error::::Send(err))?; Ok(()) } @@ -685,27 +890,4 @@ pub mod pallet { NativeToForeignId::::get(location) } } - - impl TokenRegistry for Pallet { - fn register(location: Location) -> DispatchResult { - ensure!( - NativeToForeignId::::contains_key(location.clone()), - Error::::TokenAlreadyCreated - ); - let token_id = TokenIdOf::convert_location(&location) - .ok_or(Error::::LocationConversionFailed)?; - ForeignToNativeId::::insert(token_id, location.clone()); - NativeToForeignId::::insert(location.clone(), token_id); - Ok(()) - } - } - - impl AgentRegistry for Pallet { - fn register(location: Location) -> DispatchResult { - let agent_id = agent_id_of::(&location)?; - ensure!(!Agents::::contains_key(agent_id), Error::::AgentAlreadyCreated); - Agents::::insert(agent_id, ()); - Ok(()) - } - } } diff --git 
a/bridges/snowbridge/primitives/core/src/lib.rs b/bridges/snowbridge/primitives/core/src/lib.rs index 7a9a8df544d7..a4432227beef 100644 --- a/bridges/snowbridge/primitives/core/src/lib.rs +++ b/bridges/snowbridge/primitives/core/src/lib.rs @@ -56,16 +56,6 @@ impl Contains for AllowSiblingsOnly { } } -pub struct AllowAnySovereignFromSiblings; -impl Contains for AllowAnySovereignFromSiblings { - fn contains(location: &Location) -> bool { - match (location.parent_count(), location.first_interior()) { - (1, Some(Parachain(..))) => true, - _ => false, - } - } -} - pub fn gwei(x: u128) -> U256 { U256::from(1_000_000_000u128).saturating_mul(x.into()) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index ffb75a510cb5..633248ca6efb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -24,7 +24,7 @@ use crate::{ }; use parachains_common::{AccountId, Balance}; use snowbridge_beacon_primitives::{Fork, ForkVersions}; -use snowbridge_core::{gwei, meth, AllowAnySovereignFromSiblings, PricingParameters, Rewards}; +use snowbridge_core::{gwei, meth, AllowSiblingsOnly, PricingParameters, Rewards}; use snowbridge_router_primitives::{ inbound::v1::MessageToXcm, outbound::{v1::EthereumBlobExporter, v2::EthereumBlobExporter as EthereumBlobExporterV2}, @@ -207,7 +207,7 @@ impl snowbridge_pallet_ethereum_client::Config for Runtime { impl snowbridge_pallet_system::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OutboundQueue = EthereumOutboundQueue; - type SiblingOrigin = EnsureXcm; + type SiblingOrigin = EnsureXcm; type AgentIdOf = snowbridge_core::AgentIdOf; type TreasuryAccount = TreasuryAccount; type Token = Balances; From 4808c36de7bbf5d99384327ed2c472f5846ddadc Mon Sep 17 00:00:00 2001 From: ron Date: Sun, 24 Nov 2024 12:31:25 +0800 Subject: [PATCH 24/68] Validate fee asset is always in WETH --- .../pallets/outbound-queue-v2/src/api.rs | 2 +- .../pallets/outbound-queue-v2/src/lib.rs | 2 ++ .../router/src/outbound/v2/convert.rs | 29 +++++++++++++------ .../primitives/router/src/outbound/v2/mod.rs | 23 +++++++++++---- .../src/tests/snowbridge_v2.rs | 2 +- .../src/bridge_to_ethereum_config.rs | 6 ++++ 6 files changed, 48 insertions(+), 16 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs index 754cc59b022e..eca5312bd807 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs @@ -40,7 +40,7 @@ pub fn dry_run(xcm: Xcm<()>) -> Result<(InboundMessage, T::Balance), DryRunEr where T: Config, { - let mut converter = XcmConverter::::new( + let mut converter = XcmConverter::::new( &xcm, T::EthereumNetwork::get(), AgentIdOf::convert_location(&Location::new(1, Parachain(1000))) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index ceeb13bfe41f..c632d18fcd64 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -139,6 +139,8 @@ pub mod pallet { type ConvertAssetId: MaybeEquivalence; type EthereumNetwork: Get; + + type WETHAddress: Get; } #[pallet::event] diff --git 
a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs index 69ee69991e9f..cfa6a6da2aa9 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs @@ -4,7 +4,7 @@ use codec::DecodeAll; use core::slice::Iter; -use frame_support::{ensure, BoundedVec}; +use frame_support::{ensure, traits::Get, BoundedVec}; use snowbridge_core::{ outbound::v2::{Command, Message}, transact::{CallContractParams, RegisterTokenParams, TransactInfo, TransactKind::*}, @@ -38,6 +38,8 @@ pub enum XcmConverterError { AliasOriginExpected, InvalidOrigin, TransactDecodeFailed, + TransactParamsDecodeFailed, + FeeAssetResolutionFailed, } macro_rules! match_expression { @@ -49,15 +51,16 @@ macro_rules! match_expression { }; } -pub struct XcmConverter<'a, ConvertAssetId, Call> { +pub struct XcmConverter<'a, ConvertAssetId, WETHAddress, Call> { iter: Peekable>>, ethereum_network: NetworkId, agent_id: AgentId, - _marker: PhantomData, + _marker: PhantomData<(ConvertAssetId, WETHAddress)>, } -impl<'a, ConvertAssetId, Call> XcmConverter<'a, ConvertAssetId, Call> +impl<'a, ConvertAssetId, WETHAddress, Call> XcmConverter<'a, ConvertAssetId, WETHAddress, Call> where ConvertAssetId: MaybeEquivalence, + WETHAddress: Get, { pub fn new(message: &'a Xcm, ethereum_network: NetworkId, agent_id: AgentId) -> Self { Self { @@ -96,12 +99,19 @@ where .ok_or(WithdrawAssetExpected)?; let fee_asset = match_expression!(self.next()?, PayFees { asset: fee }, fee).ok_or(InvalidFeeAsset)?; - // Todo: Validate fee asset is WETH - let fee_amount = match fee_asset { - Asset { id: _, fun: Fungible(amount) } => Some(*amount), + let (fee_asset_id, fee_amount) = match fee_asset { + Asset { id: asset_id, fun: Fungible(amount) } => Some((asset_id, *amount)), _ => None, } .ok_or(AssetResolutionFailed)?; + let weth_address = match_expression!( + fee_asset_id.0.unpack(), + (0, [AccountKey20 { network, key }]) + if self.network_matches(network), + H160(*key) + ) + .ok_or(FeeAssetResolutionFailed)?; + ensure!(weth_address == WETHAddress::get(), InvalidFeeAsset); Ok(fee_amount) } @@ -112,6 +122,7 @@ where /// # ReserveAssetDeposited(PNA) | WithdrawAsset(ENA) /// # AliasOrigin(Origin) /// # DepositAsset(PNA|ENA) + /// # Transact() ---Optional /// # SetTopic fn to_ethereum_message(&mut self) -> Result { use XcmConverterError::*; @@ -236,7 +247,7 @@ where RegisterAgent => commands.push(Command::CreateAgent {}), RegisterToken => { let params = RegisterTokenParams::decode_all(&mut message.params.as_slice()) - .map_err(|_| TransactDecodeFailed)?; + .map_err(|_| TransactParamsDecodeFailed)?; let token_id = TokenIdOf::convert_location(¶ms.location).ok_or(InvalidAsset)?; commands.push(Command::RegisterForeignToken { @@ -248,7 +259,7 @@ where }, CallContract => { let params = CallContractParams::decode_all(&mut message.params.as_slice()) - .map_err(|_| TransactDecodeFailed)?; + .map_err(|_| TransactParamsDecodeFailed)?; if params.value > 0 { //Todo: Ensure amount of WETH deposit to the agent in same message can // cover the value here diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs index 939a3090564e..0a29818acbf3 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs @@ -11,7 +11,7 @@ use frame_support::{ traits::{Contains, Get, 
ProcessMessageError}, }; use snowbridge_core::{outbound::v2::SendMessage, TokenId}; -use sp_core::H256; +use sp_core::{H160, H256}; use sp_runtime::traits::MaybeEquivalence; use sp_std::{marker::PhantomData, ops::ControlFlow, prelude::*}; use xcm::prelude::*; @@ -26,6 +26,7 @@ pub struct EthereumBlobExporter< OutboundQueue, AgentHashedDescription, ConvertAssetId, + WETHAddress, >( PhantomData<( UniversalLocation, @@ -33,17 +34,25 @@ pub struct EthereumBlobExporter< OutboundQueue, AgentHashedDescription, ConvertAssetId, + WETHAddress, )>, ); -impl - ExportXcm +impl< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + WETHAddress, + > ExportXcm for EthereumBlobExporter< UniversalLocation, EthereumNetwork, OutboundQueue, AgentHashedDescription, ConvertAssetId, + WETHAddress, > where UniversalLocation: Get, @@ -51,6 +60,7 @@ where OutboundQueue: SendMessage, AgentHashedDescription: ConvertLocation, ConvertAssetId: MaybeEquivalence, + WETHAddress: Get, { type Ticket = (Vec, XcmHash); @@ -123,8 +133,11 @@ where ); ensure!(result.is_err(), SendError::NotApplicable); - let mut converter = - XcmConverter::::new(&message, expected_network, agent_id); + let mut converter = XcmConverter::::new( + &message, + expected_network, + agent_id, + ); let message = converter.convert().map_err(|err| { log::error!(target: TARGET, "unroutable due to pattern matching error '{err:?}'."); SendError::Unroutable diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs index a610ddc70bf9..c75698c2e476 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs @@ -27,7 +27,7 @@ use xcm_executor::traits::ConvertLocation; const INITIAL_FUND: u128 = 50_000_000_000_000; pub const CHAIN_ID: u64 = 11155111; -pub const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); +pub const WETH: [u8; 20] = hex!("c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); const AGENT_ADDRESS: [u8; 20] = hex!("90A987B944Cb1dCcE5564e5FDeCD7a54D3de27Fe"); const TOKEN_AMOUNT: u128 = 100_000_000_000; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index 633248ca6efb..d9e1ff1a3d3c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -58,12 +58,17 @@ pub type SnowbridgeExporter = EthereumBlobExporter< EthereumSystem, >; +parameter_types! 
{ + pub storage WETHAddress: H160 = H160(hex_literal::hex!("c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2")); +} + pub type SnowbridgeExporterV2 = EthereumBlobExporterV2< UniversalLocation, EthereumNetwork, snowbridge_pallet_outbound_queue_v2::Pallet, snowbridge_core::AgentIdOf, EthereumSystem, + WETHAddress, >; // Ethereum Bridge @@ -143,6 +148,7 @@ impl snowbridge_pallet_outbound_queue_v2::Config for Runtime { type RewardLedger = (); type ConvertAssetId = EthereumSystem; type EthereumNetwork = EthereumNetwork; + type WETHAddress = WETHAddress; } #[cfg(any(feature = "std", feature = "fast-runtime", feature = "runtime-benchmarks", test))] From 84cbb930d1effdb5cf17163916e045b611c92d8c Mon Sep 17 00:00:00 2001 From: ron Date: Sun, 24 Nov 2024 13:54:50 +0800 Subject: [PATCH 25/68] Check ether value is sufficient --- .../primitives/router/src/outbound/v2/convert.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs index cfa6a6da2aa9..14f814d0db5c 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs @@ -40,6 +40,7 @@ pub enum XcmConverterError { TransactDecodeFailed, TransactParamsDecodeFailed, FeeAssetResolutionFailed, + CallContractValueInsufficient, } macro_rules! match_expression { @@ -173,6 +174,7 @@ where } let mut commands: Vec = Vec::new(); + let mut weth_amount = 0; // ENA transfer commands if let Some(enas) = enas { @@ -198,6 +200,10 @@ where // transfer amount must be greater than 0. ensure!(amount > 0, ZeroAssetTransfer); + if token == WETHAddress::get() { + weth_amount = amount; + } + commands.push(Command::UnlockNativeToken { agent_id: self.agent_id, token, @@ -261,8 +267,7 @@ where let params = CallContractParams::decode_all(&mut message.params.as_slice()) .map_err(|_| TransactParamsDecodeFailed)?; if params.value > 0 { - //Todo: Ensure amount of WETH deposit to the agent in same message can - // cover the value here + ensure!(weth_amount > params.value, CallContractValueInsufficient); } commands.push(Command::CallContract { target: params.target, From 4190bf04f94be28de7edac579bc91434c906c0a5 Mon Sep 17 00:00:00 2001 From: ron Date: Mon, 25 Nov 2024 18:29:49 +0800 Subject: [PATCH 26/68] Limited system pallet to only send_governance_call --- bridges/snowbridge/pallets/system/src/lib.rs | 24 ++++---------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 8a5b0a6edbf9..77bac014b633 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -662,7 +662,7 @@ pub mod pallet { let location: Location = (*location).try_into().map_err(|_| Error::::UnsupportedLocationVersion)?; - Self::do_register_token_v2(&location, metadata, PaysFee::::No)?; + Self::do_register_token_v2(&location, metadata)?; Ok(PostDispatchInfo { actual_weight: Some(T::WeightInfo::register_token()), @@ -798,7 +798,6 @@ pub mod pallet { pub(crate) fn do_register_token_v2( location: &Location, metadata: AssetMetadata, - pays_fee: PaysFee, ) -> Result<(), DispatchError> { let ethereum_location = T::EthereumLocation::get(); // reanchor to Ethereum context @@ -821,7 +820,7 @@ pub mod pallet { symbol: metadata.symbol.into_inner(), decimals: metadata.decimals, }; - Self::send_v2(second_governance_origin(), command, 
pays_fee)?; + Self::send_governance_call(second_governance_origin(), command)?; Self::deposit_event(Event::::RegisterToken { location: location.clone().into(), @@ -831,8 +830,7 @@ pub mod pallet { Ok(()) } - /// Send `command` to the Gateway on the Channel identified by `channel_id` - fn send_v2(origin: H256, command: CommandV2, pays_fee: PaysFee) -> DispatchResult { + fn send_governance_call(origin: H256, command: CommandV2) -> DispatchResult { let message = MessageV2 { origin, id: Default::default(), @@ -840,23 +838,9 @@ pub mod pallet { commands: BoundedVec::try_from(vec![command]).unwrap(), }; - let (ticket, fee) = + let (ticket, _) = T::OutboundQueueV2::validate(&message).map_err(|err| Error::::Send(err))?; - let payment = match pays_fee { - PaysFee::Yes(account) | PaysFee::Partial(account) => Some((account, fee)), - PaysFee::No => None, - }; - - if let Some((payer, fee)) = payment { - T::Token::transfer( - &payer, - &T::TreasuryAccount::get(), - fee, - Preservation::Preserve, - )?; - } - T::OutboundQueueV2::deliver(ticket).map_err(|err| Error::::Send(err))?; Ok(()) } From 178f50a3125d382c78dc41fbf84a462faf56c6d6 Mon Sep 17 00:00:00 2001 From: ron Date: Tue, 26 Nov 2024 11:40:09 +0800 Subject: [PATCH 27/68] Remove agent_id from converter --- .../pallets/outbound-queue-v2/src/api.rs | 27 ++++++------------- .../pallets/outbound-queue-v2/src/lib.rs | 16 ++++++++++- bridges/snowbridge/pallets/system/src/lib.rs | 24 +++++++++++++++++ .../primitives/core/src/location.rs | 10 +++++-- .../primitives/core/src/outbound/v2.rs | 5 ++-- .../primitives/core/src/registry.rs | 8 +++--- .../router/src/outbound/v2/convert.rs | 18 +++++-------- .../primitives/router/src/outbound/v2/mod.rs | 19 +++---------- .../src/tests/snowbridge_v2.rs | 2 +- .../src/bridge_to_ethereum_config.rs | 1 + 10 files changed, 72 insertions(+), 58 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs index eca5312bd807..2912705dd151 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs @@ -4,25 +4,18 @@ use crate::{Config, MessageLeaves}; use frame_support::storage::StorageStreamIter; -use snowbridge_core::{ - outbound::{ - v2::{ - abi::{CommandWrapper, InboundMessage}, - GasMeter, Message, - }, - DryRunError, +use snowbridge_core::outbound::{ + v2::{ + abi::{CommandWrapper, InboundMessage}, + GasMeter, Message, }, - AgentIdOf, + DryRunError, }; use snowbridge_merkle_tree::{merkle_proof, MerkleProof}; use snowbridge_router_primitives::outbound::v2::convert::XcmConverter; use sp_core::Get; use sp_std::{default::Default, vec::Vec}; -use xcm::{ - latest::Location, - prelude::{Parachain, Xcm}, -}; -use xcm_executor::traits::ConvertLocation; +use xcm::prelude::Xcm; pub fn prove_message(leaf_index: u64) -> Option where @@ -40,12 +33,8 @@ pub fn dry_run(xcm: Xcm<()>) -> Result<(InboundMessage, T::Balance), DryRunEr where T: Config, { - let mut converter = XcmConverter::::new( - &xcm, - T::EthereumNetwork::get(), - AgentIdOf::convert_location(&Location::new(1, Parachain(1000))) - .ok_or(DryRunError::ConvertLocationFailed)?, - ); + let mut converter = + XcmConverter::::new(&xcm, T::EthereumNetwork::get()); let message: Message = converter.convert().map_err(|_| DryRunError::ConvertXcmFailed)?; diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index c632d18fcd64..88772d2e3aa0 100644 --- 
a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -75,8 +75,9 @@ use snowbridge_core::{ inbound::{Message as DeliveryMessage, VerificationError, Verifier}, outbound::v2::{ abi::{CommandWrapper, InboundMessage, InboundMessageWrapper}, - GasMeter, Message, + Command, GasMeter, Message, }, + registry::Registry, BasicOperatingMode, RewardLedger, TokenId, }; use snowbridge_merkle_tree::merkle_root; @@ -141,6 +142,8 @@ pub mod pallet { type EthereumNetwork: Get; type WETHAddress: Get; + + type Registry: Registry; } #[pallet::event] @@ -331,8 +334,11 @@ pub mod pallet { let nonce = Nonce::::get(); + let original_location = message.origin_location; + let commands: Vec = message .commands + .clone() .into_iter() .map(|command| CommandWrapper { kind: command.index(), @@ -370,6 +376,14 @@ pub mod pallet { Nonce::::set(nonce.checked_add(1).ok_or(Unsupported)?); + for command in message.commands.into_iter() { + match command { + Command::CreateAgent {} => + T::Registry::register_agent(&original_location).map_err(|_| Corrupt)?, + _ => (), + } + } + Self::deposit_event(Event::MessageAccepted { id: message.id, nonce }); Ok(true) diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 77bac014b633..5ce1f00cd468 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -73,6 +73,7 @@ use snowbridge_core::{ v2::{Command as CommandV2, Message as MessageV2, SendMessage as SendMessageV2}, OperatingMode, SendError, }, + registry::Registry, sibling_sovereign_account, AgentId, AssetMetadata, Channel, ChannelId, ParaId, PricingParameters as PricingParametersRecord, TokenId, TokenIdOf, PRIMARY_GOVERNANCE_CHANNEL, SECONDARY_GOVERNANCE_CHANNEL, @@ -255,6 +256,7 @@ pub mod pallet { InvalidTokenTransferFees, InvalidPricingParameters, InvalidUpgradeParameters, + TokenAlreadyCreated, } /// The set of registered agents @@ -833,6 +835,7 @@ pub mod pallet { fn send_governance_call(origin: H256, command: CommandV2) -> DispatchResult { let message = MessageV2 { origin, + origin_location: Default::default(), id: Default::default(), fee: Default::default(), commands: BoundedVec::try_from(vec![command]).unwrap(), @@ -874,4 +877,25 @@ pub mod pallet { NativeToForeignId::::get(location) } } + + impl Registry for pallet::Pallet { + fn register_agent(location: &Location) -> DispatchResult { + let agent_id = agent_id_of::(&location)?; + ensure!(!Agents::::contains_key(agent_id), Error::::AgentAlreadyCreated); + Agents::::insert(agent_id, ()); + Ok(()) + } + + fn register_token(location: &Location) -> DispatchResult { + ensure!( + NativeToForeignId::::contains_key(location.clone()), + Error::::TokenAlreadyCreated + ); + let token_id = TokenIdOf::convert_location(&location) + .ok_or(Error::::LocationConversionFailed)?; + ForeignToNativeId::::insert(token_id, location.clone()); + NativeToForeignId::::insert(location.clone(), token_id); + Ok(()) + } + } } diff --git a/bridges/snowbridge/primitives/core/src/location.rs b/bridges/snowbridge/primitives/core/src/location.rs index f49a245c4126..4940fb229c60 100644 --- a/bridges/snowbridge/primitives/core/src/location.rs +++ b/bridges/snowbridge/primitives/core/src/location.rs @@ -24,8 +24,14 @@ pub type AgentId = H256; /// Creates an AgentId from a Location. An AgentId is a unique mapping to a Agent contract on /// Ethereum which acts as the sovereign account for the Location. 
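///
/// For example, the agent id of sibling parachain 1000 can be derived with
/// `AgentIdOf::convert_location(&Location::new(1, [Parachain(1000)]))`; the same
/// input location always hashes to the same 32-byte id.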
#[allow(deprecated)] -pub type AgentIdOf = - HashedDescription)>; +pub type AgentIdOf = HashedDescription< + AgentId, + ( + DescribeHere, + DescribeFamily, + DescribeGlobalPrefix<(DescribeTerminus, DescribeFamily)>, + ), +>; pub type TokenId = H256; diff --git a/bridges/snowbridge/primitives/core/src/outbound/v2.rs b/bridges/snowbridge/primitives/core/src/outbound/v2.rs index e9ff47786363..a45fcc9eb261 100644 --- a/bridges/snowbridge/primitives/core/src/outbound/v2.rs +++ b/bridges/snowbridge/primitives/core/src/outbound/v2.rs @@ -17,6 +17,7 @@ use crate::outbound::v2::abi::{ }; use alloy_primitives::{Address, FixedBytes, U256}; use alloy_sol_types::SolValue; +use xcm::prelude::Location; pub mod abi { use super::MAX_COMMANDS; @@ -119,6 +120,8 @@ pub const MAX_COMMANDS: u32 = 8; /// A message which can be accepted by implementations of `/[`SendMessage`\]` #[derive(Encode, Decode, TypeInfo, PartialEq, Clone, RuntimeDebug)] pub struct Message { + /// Origin Location + pub origin_location: Location, /// Origin pub origin: H256, /// ID @@ -150,8 +153,6 @@ pub enum Command { }, /// Unlock ERC20 tokens UnlockNativeToken { - /// ID of the agent - agent_id: H256, /// Address of the ERC20 token token: H160, /// The recipient of the tokens diff --git a/bridges/snowbridge/primitives/core/src/registry.rs b/bridges/snowbridge/primitives/core/src/registry.rs index f3b87bdbbfac..da7e2738905e 100644 --- a/bridges/snowbridge/primitives/core/src/registry.rs +++ b/bridges/snowbridge/primitives/core/src/registry.rs @@ -4,10 +4,8 @@ use frame_support::dispatch::DispatchResult; use xcm::prelude::Location; -pub trait TokenRegistry { - fn register(location: Location) -> DispatchResult; -} +pub trait Registry { + fn register_agent(location: &Location) -> DispatchResult; -pub trait AgentRegistry { - fn register(location: Location) -> DispatchResult; + fn register_token(location: &Location) -> DispatchResult; } diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs index 14f814d0db5c..00382ba7c349 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs @@ -8,7 +8,7 @@ use frame_support::{ensure, traits::Get, BoundedVec}; use snowbridge_core::{ outbound::v2::{Command, Message}, transact::{CallContractParams, RegisterTokenParams, TransactInfo, TransactKind::*}, - AgentId, TokenId, TokenIdOf, TokenIdOf as LocationIdOf, + TokenId, TokenIdOf, TokenIdOf as LocationIdOf, }; use sp_core::H160; use sp_runtime::traits::MaybeEquivalence; @@ -55,7 +55,6 @@ macro_rules! match_expression { pub struct XcmConverter<'a, ConvertAssetId, WETHAddress, Call> { iter: Peekable>>, ethereum_network: NetworkId, - agent_id: AgentId, _marker: PhantomData<(ConvertAssetId, WETHAddress)>, } impl<'a, ConvertAssetId, WETHAddress, Call> XcmConverter<'a, ConvertAssetId, WETHAddress, Call> @@ -63,11 +62,10 @@ where ConvertAssetId: MaybeEquivalence, WETHAddress: Get, { - pub fn new(message: &'a Xcm, ethereum_network: NetworkId, agent_id: AgentId) -> Self { + pub fn new(message: &'a Xcm, ethereum_network: NetworkId) -> Self { Self { iter: message.inner().iter().peekable(), ethereum_network, - agent_id, _marker: Default::default(), } } @@ -148,9 +146,9 @@ where let _ = self.next(); } // Check AliasOrigin. 
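// The converter expects AssetHub to have appended `AliasOrigin` carrying the original
// sender's location; that location is kept as `origin_location` on the outbound message
// and hashed via `LocationIdOf` into the 32-byte `origin` identifier.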
- let origin_loc = match_expression!(self.next()?, AliasOrigin(origin), origin) + let origin_location = match_expression!(self.next()?, AliasOrigin(origin), origin) .ok_or(AliasOriginExpected)?; - let origin = LocationIdOf::convert_location(&origin_loc).ok_or(InvalidOrigin)?; + let origin = LocationIdOf::convert_location(origin_location).ok_or(InvalidOrigin)?; let (deposit_assets, beneficiary) = match_expression!( self.next()?, @@ -204,12 +202,7 @@ where weth_amount = amount; } - commands.push(Command::UnlockNativeToken { - agent_id: self.agent_id, - token, - recipient, - amount, - }); + commands.push(Command::UnlockNativeToken { token, recipient, amount }); } } @@ -284,6 +277,7 @@ where let message = Message { id: (*topic_id).into(), + origin_location: origin_location.clone(), origin, fee: fee_amount, commands: BoundedVec::try_from(commands).map_err(|_| TooManyCommands)?, diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs index 0a29818acbf3..b52315f0add1 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs @@ -89,7 +89,7 @@ where } // Cloning universal_source to avoid modifying the value so subsequent exporters can use it. - let (local_net, local_sub) = universal_source.clone() + let (local_net, _) = universal_source.clone() .ok_or_else(|| { log::error!(target: TARGET, "universal source not provided."); SendError::MissingArgument @@ -105,16 +105,6 @@ where return Err(SendError::NotApplicable) } - let source_location = Location::new(1, local_sub.clone()); - - let agent_id = match AgentHashedDescription::convert_location(&source_location) { - Some(id) => id, - None => { - log::error!(target: TARGET, "unroutable due to not being able to create agent id. '{source_location:?}'"); - return Err(SendError::NotApplicable) - }, - }; - let message = message.clone().ok_or_else(|| { log::error!(target: TARGET, "xcm message not provided."); SendError::MissingArgument @@ -133,11 +123,8 @@ where ); ensure!(result.is_err(), SendError::NotApplicable); - let mut converter = XcmConverter::::new( - &message, - expected_network, - agent_id, - ); + let mut converter = + XcmConverter::::new(&message, expected_network); let message = converter.convert().map_err(|err| { log::error!(target: TARGET, "unroutable due to pattern matching error '{err:?}'."); SendError::Unroutable diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs index c75698c2e476..f639fb6c3b22 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs @@ -388,7 +388,7 @@ fn create_agent() { // Check that Ethereum message was queue in the Outbound Queue assert_expected_events!( BridgeHubWestend, - vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageQueued{ .. }) => {},] + vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageAccepted{ .. 
}) => {},] ); }); } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index d9e1ff1a3d3c..b25e659a1968 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -149,6 +149,7 @@ impl snowbridge_pallet_outbound_queue_v2::Config for Runtime { type ConvertAssetId = EthereumSystem; type EthereumNetwork = EthereumNetwork; type WETHAddress = WETHAddress; + type Registry = EthereumSystem; } #[cfg(any(feature = "std", feature = "fast-runtime", feature = "runtime-benchmarks", test))] From aba918d8fd6ceaa23fd92b1fb64173a33de4cb31 Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 29 Nov 2024 00:08:54 +0800 Subject: [PATCH 28/68] Fix breaking tests --- .../pallets/outbound-queue-v2/src/lib.rs | 15 +- .../pallets/outbound-queue-v2/src/mock.rs | 9 +- .../pallets/outbound-queue-v2/src/test.rs | 1 + .../router/src/outbound/v2/convert.rs | 184 +++++++++++++----- .../primitives/router/src/outbound/v2/mod.rs | 30 ++- .../src/bridge_to_ethereum_config.rs | 1 - 6 files changed, 162 insertions(+), 78 deletions(-) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index 88772d2e3aa0..80309d530baf 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -75,9 +75,8 @@ use snowbridge_core::{ inbound::{Message as DeliveryMessage, VerificationError, Verifier}, outbound::v2::{ abi::{CommandWrapper, InboundMessage, InboundMessageWrapper}, - Command, GasMeter, Message, + GasMeter, Message, }, - registry::Registry, BasicOperatingMode, RewardLedger, TokenId, }; use snowbridge_merkle_tree::merkle_root; @@ -142,8 +141,6 @@ pub mod pallet { type EthereumNetwork: Get; type WETHAddress: Get; - - type Registry: Registry; } #[pallet::event] @@ -334,8 +331,6 @@ pub mod pallet { let nonce = Nonce::::get(); - let original_location = message.origin_location; - let commands: Vec = message .commands .clone() @@ -376,14 +371,6 @@ pub mod pallet { Nonce::::set(nonce.checked_add(1).ok_or(Unsupported)?); - for command in message.commands.into_iter() { - match command { - Command::CreateAgent {} => - T::Registry::register_agent(&original_location).map_err(|_| Corrupt)?, - _ => (), - } - } - Self::deposit_event(Event::MessageAccepted { id: message.id, nonce }); Ok(true) diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs index 353747b23a5f..2215f388b70d 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs @@ -83,8 +83,6 @@ impl Verifier for MockVerifier { const GATEWAY_ADDRESS: [u8; 20] = hex!["eda338e4dc46038493b885327842fd3e301cab39"]; const WETH: [u8; 20] = hex!["C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"]; -const ASSET_HUB_AGENT: [u8; 32] = - hex!["81c5ab2571199e3188135178f3c2c8e2d268be1313d029b30f534fa579b69b79"]; parameter_types! { pub const OwnParaId: ParaId = ParaId::new(1013); @@ -96,7 +94,7 @@ parameter_types! 
{ }; pub const GatewayAddress: H160 = H160(GATEWAY_ADDRESS); pub EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; - + pub storage WETHAddress: H160 = H160(hex_literal::hex!("c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2")); } pub const DOT: u128 = 10_000_000_000; @@ -115,6 +113,7 @@ impl crate::Config for Test { type RewardLedger = (); type ConvertAssetId = (); type EthereumNetwork = EthereumNetwork; + type WETHAddress = WETHAddress; } fn setup() { @@ -151,6 +150,7 @@ where let _marker = PhantomData::; // for clippy Message { + origin_location: Default::default(), origin: primary_governance_origin(), id: Default::default(), fee: 0, @@ -171,6 +171,7 @@ where let _marker = PhantomData::; // for clippy Message { + origin_location: Default::default(), origin: Default::default(), id: Default::default(), fee: 0, @@ -188,11 +189,11 @@ where pub fn mock_message(sibling_para_id: u32) -> Message { Message { + origin_location: Default::default(), origin: H256::from_low_u64_be(sibling_para_id as u64), id: H256::from_low_u64_be(1), fee: 1_000, commands: BoundedVec::try_from(vec![Command::UnlockNativeToken { - agent_id: H256(ASSET_HUB_AGENT), token: H160(WETH), recipient: H160(GATEWAY_ADDRESS), amount: 1_000_000, diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs index 0c1c2868cb90..abbbfd64f54a 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs @@ -71,6 +71,7 @@ fn process_message_yields_on_max_messages_per_block() { let _channel_id: ChannelId = ParaId::from(1000).into(); let origin = AggregateMessageOrigin::SnowbridgeV2(H256::zero()); let message = Message { + origin_location: Default::default(), origin: Default::default(), id: Default::default(), fee: 0, diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs index 00382ba7c349..d98bf1686bce 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs @@ -295,11 +295,12 @@ where #[cfg(test)] mod tests { use super::*; - use crate::outbound::v2::tests::{BridgedNetwork, MockTokenIdConvert, NonBridgedNetwork}; + use crate::outbound::v2::tests::{ + BridgedNetwork, MockTokenIdConvert, NonBridgedNetwork, WETHAddress, + }; use hex_literal::hex; use snowbridge_core::AgentIdOf; - use sp_std::default::Default; - use xcm::latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; + use xcm::latest::WESTEND_GENESIS_HASH; #[test] fn xcm_converter_convert_success() { @@ -315,9 +316,15 @@ mod tests { .into(); let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -328,7 +335,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert!(result.is_ok()); } @@ -346,10 +353,15 @@ mod tests { }] .into(); let filter: AssetFilter = Wild(All); + let fee_asset: Asset = Asset { + id: 
AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -360,7 +372,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.is_ok(), true); } @@ -378,10 +390,15 @@ mod tests { }] .into(); let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -392,13 +409,13 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::SetTopicExpected)); } #[test] - fn xcm_converter_convert_with_partial_message_yields_unexpected_end_of_xcm() { + fn xcm_converter_convert_with_partial_message_yields_invalid_fee_asset() { let network = BridgedNetwork::get(); let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); @@ -410,7 +427,7 @@ mod tests { let message: Xcm<()> = vec![WithdrawAsset(assets)].into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); } @@ -423,13 +440,15 @@ mod tests { let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); let asset_location = [AccountKey20 { network: None, key: token_address }].into(); - let fee_asset = - Asset { id: AssetId(Location { parents: 0, interior: Here }), fun: Fungible(1000) }; - let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), @@ -444,7 +463,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.is_ok(), true); } @@ -457,7 +476,11 @@ mod tests { let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); - let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(1001) }; + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); @@ -477,7 +500,7 @@ mod tests { ] .into(); let mut converter = - 
XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.is_ok(), true); } @@ -489,7 +512,7 @@ mod tests { let message: Xcm<()> = vec![].into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); @@ -508,10 +531,15 @@ mod tests { }] .into(); let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -523,7 +551,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::EndOfXcmMessageExpected)); @@ -554,7 +582,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::WithdrawAssetExpected)); @@ -572,16 +600,22 @@ mod tests { }] .into(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), SetTopic([0; 32]), ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::DepositAssetExpected)); @@ -591,22 +625,20 @@ mod tests { fn xcm_converter_convert_without_assets_yields_no_reserve_assets() { let network = BridgedNetwork::get(); - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); let assets: Assets = vec![].into(); let filter: AssetFilter = assets.clone().into(); - let fee = Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), fun: Fungible(1000), - }; + } + .into(); let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: fee.clone() }, - WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { assets: filter, @@ -616,7 +648,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::NoReserveAssets)); @@ -642,10 +674,15 @@ mod tests { ] .into(); let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset 
= Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -656,7 +693,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.is_ok(), true); @@ -675,10 +712,15 @@ mod tests { }] .into(); let filter: AssetFilter = Wild(WildAsset::AllCounted(0)); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -689,7 +731,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::FilterDoesNotConsumeAllAssets)); @@ -708,10 +750,15 @@ mod tests { }] .into(); let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -722,7 +769,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::ZeroAssetTransfer)); @@ -740,10 +787,14 @@ mod tests { }] .into(); let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; let message: Xcm<()> = vec![ WithdrawAsset(assets.clone().into()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -754,7 +805,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); @@ -775,10 +826,14 @@ mod tests { }] .into(); let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; let message: Xcm<()> = vec![ WithdrawAsset(assets.clone().into()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset 
{ @@ -789,7 +844,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); @@ -811,10 +866,14 @@ mod tests { }] .into(); let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; let message: Xcm<()> = vec![ WithdrawAsset(assets.clone().into()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -825,7 +884,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); @@ -846,9 +905,14 @@ mod tests { }] .into(); let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; + let message: Xcm<()> = vec![ WithdrawAsset(assets.clone().into()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -860,7 +924,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); @@ -880,10 +944,14 @@ mod tests { }] .into(); let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -898,7 +966,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); @@ -943,10 +1011,14 @@ mod tests { let assets: Assets = vec![Asset { id: AssetId(asset_location.clone()), fun: Fungible(amount) }].into(); let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, ReserveAssetDeposited(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -957,13 +1029,14 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let expected_payload = Command::MintForeignToken { recipient: beneficiary_address.into(), amount, token_id 
}; let expected_message = Message { + origin_location: Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)]), id: [0; 32].into(), origin: hex!("aa16eddac8725928eaeda4aae518bf10d02bee80382517d21464a5cdf8d1d8e1").into(), - fee: 1000000, + fee: 1000, commands: BoundedVec::try_from(vec![expected_payload]).unwrap(), }; let result = converter.convert(); @@ -980,16 +1053,21 @@ mod tests { // Invalid asset location from a different consensus let asset_location = Location { parents: 2, - interior: [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))].into(), + interior: [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))].into(), }; let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; + let message: Xcm<()> = vec![ WithdrawAsset(assets.clone()), - PayFees { asset: assets.get(0).unwrap().clone() }, + PayFees { asset: fee_asset }, ReserveAssetDeposited(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -1000,7 +1078,7 @@ mod tests { ] .into(); let mut converter = - XcmConverter::::new(&message, network, Default::default()); + XcmConverter::::new(&message, network); let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::InvalidAsset)); } diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs index b52315f0add1..0fbfc2784efa 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs @@ -203,10 +203,7 @@ mod tests { AgentIdOf, }; use sp_std::default::Default; - use xcm::{ - latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, - prelude::SendError as XcmSendError, - }; + use xcm::{latest::WESTEND_GENESIS_HASH, prelude::SendError as XcmSendError}; parameter_types! 
{ const MaxMessageSize: u32 = u32::MAX; @@ -214,6 +211,7 @@ mod tests { UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(1013)].into(); pub const BridgedNetwork: NetworkId = Ethereum{ chain_id: 1 }; pub const NonBridgedNetwork: NetworkId = Ethereum{ chain_id: 2 }; + pub WETHAddress: H160 = H160(hex_literal::hex!("c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2")); } struct MockOkOutboundQueue; @@ -286,6 +284,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::NotApplicable)); } @@ -305,6 +304,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::MissingArgument)); } @@ -330,6 +330,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::NotApplicable)); } @@ -349,6 +350,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::MissingArgument)); } @@ -368,6 +370,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::NotApplicable)); } @@ -387,6 +390,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::NotApplicable)); } @@ -407,6 +411,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::NotApplicable)); } @@ -426,6 +431,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::MissingArgument)); } @@ -446,6 +452,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::MissingArgument)); } @@ -466,6 +473,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::MissingArgument)); } @@ -514,6 +522,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::NotApplicable)); @@ -542,6 +551,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert_eq!(result, Err(XcmSendError::NotApplicable)); @@ -564,13 +574,17 @@ mod tests { fun: Fungible(1000), }] .into(); - let fee = assets.clone().get(0).unwrap().clone(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 
}].into()), + fun: Fungible(1000), + } + .into(); let filter: AssetFilter = assets.clone().into(); let mut message: Option> = Some( vec![ WithdrawAsset(assets.clone()), - PayFees { asset: fee.clone() }, + PayFees { asset: fee_asset }, WithdrawAsset(assets.clone()), AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), DepositAsset { @@ -589,6 +603,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); assert!(result.is_ok()); @@ -602,6 +617,7 @@ mod tests { MockErrOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::deliver((hex!("deadbeef").to_vec(), XcmHash::default())); assert_eq!(result, Err(XcmSendError::Transport("other transport error"))) } @@ -646,6 +662,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate( network, channel, @@ -702,6 +719,7 @@ mod tests { MockOkOutboundQueue, AgentIdOf, MockTokenIdConvert, + WETHAddress, >::validate( network, channel, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index b25e659a1968..d9e1ff1a3d3c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -149,7 +149,6 @@ impl snowbridge_pallet_outbound_queue_v2::Config for Runtime { type ConvertAssetId = EthereumSystem; type EthereumNetwork = EthereumNetwork; type WETHAddress = WETHAddress; - type Registry = EthereumSystem; } #[cfg(any(feature = "std", feature = "fast-runtime", feature = "runtime-benchmarks", test))] From a7ced85f713cf438c6916a045883648663a1a9a3 Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 29 Nov 2024 00:45:49 +0800 Subject: [PATCH 29/68] Clean up --- bridges/snowbridge/pallets/system/src/lib.rs | 22 --- bridges/snowbridge/primitives/core/src/lib.rs | 2 - .../primitives/core/src/outbound/mod.rs | 11 +- .../primitives/core/src/registry.rs | 11 -- .../primitives/core/src/transact.rs | 36 ----- .../router/src/outbound/v2/convert.rs | 43 ++---- .../src/tests/snowbridge_v2.rs | 139 +++++++++--------- 7 files changed, 90 insertions(+), 174 deletions(-) delete mode 100644 bridges/snowbridge/primitives/core/src/registry.rs delete mode 100644 bridges/snowbridge/primitives/core/src/transact.rs diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 5ce1f00cd468..e603e562201f 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -73,7 +73,6 @@ use snowbridge_core::{ v2::{Command as CommandV2, Message as MessageV2, SendMessage as SendMessageV2}, OperatingMode, SendError, }, - registry::Registry, sibling_sovereign_account, AgentId, AssetMetadata, Channel, ChannelId, ParaId, PricingParameters as PricingParametersRecord, TokenId, TokenIdOf, PRIMARY_GOVERNANCE_CHANNEL, SECONDARY_GOVERNANCE_CHANNEL, @@ -877,25 +876,4 @@ pub mod pallet { NativeToForeignId::::get(location) } } - - impl Registry for pallet::Pallet { - fn register_agent(location: &Location) -> DispatchResult { - let agent_id = agent_id_of::(&location)?; - ensure!(!Agents::::contains_key(agent_id), Error::::AgentAlreadyCreated); - Agents::::insert(agent_id, ()); - Ok(()) - } - - fn register_token(location: &Location) -> DispatchResult { - 
ensure!( - NativeToForeignId::::contains_key(location.clone()), - Error::::TokenAlreadyCreated - ); - let token_id = TokenIdOf::convert_location(&location) - .ok_or(Error::::LocationConversionFailed)?; - ForeignToNativeId::::insert(token_id, location.clone()); - NativeToForeignId::::insert(location.clone(), token_id); - Ok(()) - } - } } diff --git a/bridges/snowbridge/primitives/core/src/lib.rs b/bridges/snowbridge/primitives/core/src/lib.rs index a4432227beef..88ac8124a15b 100644 --- a/bridges/snowbridge/primitives/core/src/lib.rs +++ b/bridges/snowbridge/primitives/core/src/lib.rs @@ -13,10 +13,8 @@ pub mod location; pub mod operating_mode; pub mod outbound; pub mod pricing; -pub mod registry; pub mod reward; pub mod ringbuffer; -pub mod transact; pub use location::{AgentId, AgentIdOf, TokenId, TokenIdOf}; pub use polkadot_parachain_primitives::primitives::{ diff --git a/bridges/snowbridge/primitives/core/src/outbound/mod.rs b/bridges/snowbridge/primitives/core/src/outbound/mod.rs index 0aa60f479195..972f16fb2139 100644 --- a/bridges/snowbridge/primitives/core/src/outbound/mod.rs +++ b/bridges/snowbridge/primitives/core/src/outbound/mod.rs @@ -3,11 +3,12 @@ //! # Outbound //! //! Common traits and types +use crate::Vec; use codec::{Decode, Encode}; use frame_support::PalletError; use scale_info::TypeInfo; use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; -use sp_core::RuntimeDebug; +use sp_core::{RuntimeDebug, H160}; pub mod v1; pub mod v2; @@ -47,3 +48,11 @@ pub enum DryRunError { ConvertLocationFailed, ConvertXcmFailed, } + +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +pub struct TransactInfo { + pub target: H160, + pub data: Vec, + pub gas_limit: u64, + pub value: u128, +} diff --git a/bridges/snowbridge/primitives/core/src/registry.rs b/bridges/snowbridge/primitives/core/src/registry.rs deleted file mode 100644 index da7e2738905e..000000000000 --- a/bridges/snowbridge/primitives/core/src/registry.rs +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork - -use frame_support::dispatch::DispatchResult; -use xcm::prelude::Location; - -pub trait Registry { - fn register_agent(location: &Location) -> DispatchResult; - - fn register_token(location: &Location) -> DispatchResult; -} diff --git a/bridges/snowbridge/primitives/core/src/transact.rs b/bridges/snowbridge/primitives/core/src/transact.rs deleted file mode 100644 index 0dc77555ae45..000000000000 --- a/bridges/snowbridge/primitives/core/src/transact.rs +++ /dev/null @@ -1,36 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork - -use crate::{AssetMetadata, Vec}; -use codec::{Decode, Encode}; -use scale_info::TypeInfo; -use sp_core::H160; -use sp_runtime::RuntimeDebug; -use xcm::prelude::Location; - -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -pub struct TransactInfo { - pub kind: TransactKind, - pub params: Vec, -} - -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -pub enum TransactKind { - RegisterToken, - RegisterAgent, - CallContract, -} - -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -pub struct RegisterTokenParams { - pub location: Location, - pub metadata: AssetMetadata, -} - -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -pub struct CallContractParams { - pub target: H160, - pub data: Vec, - pub gas_limit: u64, - pub value: u128, -} diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs 
b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs index d98bf1686bce..77616bde2796 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs @@ -6,8 +6,10 @@ use codec::DecodeAll; use core::slice::Iter; use frame_support::{ensure, traits::Get, BoundedVec}; use snowbridge_core::{ - outbound::v2::{Command, Message}, - transact::{CallContractParams, RegisterTokenParams, TransactInfo, TransactKind::*}, + outbound::{ + v2::{Command, Message}, + TransactInfo, + }, TokenId, TokenIdOf, TokenIdOf as LocationIdOf, }; use sp_core::H160; @@ -239,37 +241,18 @@ where let transact_call = match_expression!(self.peek(), Ok(Transact { call, .. }), call); if let Some(transact_call) = transact_call { let _ = self.next(); - let message = + let transact = TransactInfo::decode_all(&mut transact_call.clone().into_encoded().as_slice()) .map_err(|_| TransactDecodeFailed)?; - match message.kind { - RegisterAgent => commands.push(Command::CreateAgent {}), - RegisterToken => { - let params = RegisterTokenParams::decode_all(&mut message.params.as_slice()) - .map_err(|_| TransactParamsDecodeFailed)?; - let token_id = - TokenIdOf::convert_location(¶ms.location).ok_or(InvalidAsset)?; - commands.push(Command::RegisterForeignToken { - token_id, - name: params.metadata.name.into_inner(), - symbol: params.metadata.symbol.into_inner(), - decimals: params.metadata.decimals, - }); - }, - CallContract => { - let params = CallContractParams::decode_all(&mut message.params.as_slice()) - .map_err(|_| TransactParamsDecodeFailed)?; - if params.value > 0 { - ensure!(weth_amount > params.value, CallContractValueInsufficient); - } - commands.push(Command::CallContract { - target: params.target, - data: params.data, - gas_limit: params.gas_limit, - value: params.value, - }); - }, + if transact.value > 0 { + ensure!(weth_amount > transact.value, CallContractValueInsufficient); } + commands.push(Command::CallContract { + target: transact.target, + data: transact.data, + gas_limit: transact.gas_limit, + value: transact.value, + }); } // ensure SetTopic exists diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs index f639fb6c3b22..b07f7faf554c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs @@ -15,10 +15,7 @@ use crate::imports::*; use frame_support::traits::fungibles::Mutate; use hex_literal::hex; -use snowbridge_core::{ - transact::{CallContractParams, TransactInfo, TransactKind}, - AssetMetadata, -}; +use snowbridge_core::{outbound::TransactInfo, AssetMetadata}; use snowbridge_router_primitives::inbound::EthereumLocationsConverterFor; use sp_runtime::MultiAddress; use testnet_parachains_constants::westend::snowbridge::EthereumNetwork; @@ -327,71 +324,71 @@ fn send_weth_and_dot_from_asset_hub_to_ethereum() { }); } -#[test] -fn create_agent() { - fund_sovereign(); - - register_weth(); - - BridgeHubWestend::execute_with(|| {}); - - AssetHubWestend::execute_with(|| { - type RuntimeOrigin = ::RuntimeOrigin; - - let local_fee_asset = - Asset { id: AssetId(Location::parent()), fun: Fungible(LOCAL_FEE_AMOUNT_IN_DOT) }; - - // All WETH as fee and reserve_asset is zero, so there is no transfer 
in this case - let remote_fee_asset = Asset { id: AssetId(weth_location()), fun: Fungible(TOKEN_AMOUNT) }; - let reserve_asset = Asset { id: AssetId(weth_location()), fun: Fungible(0) }; - - let assets = vec![ - Asset { id: weth_location().into(), fun: Fungible(TOKEN_AMOUNT) }, - local_fee_asset.clone(), - ]; - - let transact_info = TransactInfo { kind: TransactKind::RegisterAgent, params: vec![] }; - - let xcms = VersionedXcm::from(Xcm(vec![ - WithdrawAsset(assets.clone().into()), - PayFees { asset: local_fee_asset.clone() }, - InitiateTransfer { - destination: destination(), - remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( - remote_fee_asset.clone().into(), - ))), - preserve_origin: true, - assets: vec![AssetTransferFilter::ReserveWithdraw(Definite( - reserve_asset.clone().into(), - ))], - remote_xcm: Xcm(vec![ - DepositAsset { assets: Wild(AllCounted(2)), beneficiary: beneficiary() }, - Transact { - origin_kind: OriginKind::SovereignAccount, - call: transact_info.encode().into(), - }, - ]), - }, - ])); - - // Send the Weth back to Ethereum - ::PolkadotXcm::execute( - RuntimeOrigin::signed(AssetHubWestendReceiver::get()), - bx!(xcms), - Weight::from(8_000_000_000), - ) - .unwrap(); - }); - - BridgeHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - // Check that Ethereum message was queue in the Outbound Queue - assert_expected_events!( - BridgeHubWestend, - vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageAccepted{ .. }) => {},] - ); - }); -} +// #[test] +// fn create_agent() { +// fund_sovereign(); +// +// register_weth(); +// +// BridgeHubWestend::execute_with(|| {}); +// +// AssetHubWestend::execute_with(|| { +// type RuntimeOrigin = ::RuntimeOrigin; +// +// let local_fee_asset = +// Asset { id: AssetId(Location::parent()), fun: Fungible(LOCAL_FEE_AMOUNT_IN_DOT) }; +// +// // All WETH as fee and reserve_asset is zero, so there is no transfer in this case +// let remote_fee_asset = Asset { id: AssetId(weth_location()), fun: Fungible(TOKEN_AMOUNT) }; +// let reserve_asset = Asset { id: AssetId(weth_location()), fun: Fungible(0) }; +// +// let assets = vec![ +// Asset { id: weth_location().into(), fun: Fungible(TOKEN_AMOUNT) }, +// local_fee_asset.clone(), +// ]; +// +// let transact_info = TransactInfo { kind: TransactKind::RegisterAgent, params: vec![] }; +// +// let xcms = VersionedXcm::from(Xcm(vec![ +// WithdrawAsset(assets.clone().into()), +// PayFees { asset: local_fee_asset.clone() }, +// InitiateTransfer { +// destination: destination(), +// remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( +// remote_fee_asset.clone().into(), +// ))), +// preserve_origin: true, +// assets: vec![AssetTransferFilter::ReserveWithdraw(Definite( +// reserve_asset.clone().into(), +// ))], +// remote_xcm: Xcm(vec![ +// DepositAsset { assets: Wild(AllCounted(2)), beneficiary: beneficiary() }, +// Transact { +// origin_kind: OriginKind::SovereignAccount, +// call: transact_info.encode().into(), +// }, +// ]), +// }, +// ])); +// +// // Send the Weth back to Ethereum +// ::PolkadotXcm::execute( +// RuntimeOrigin::signed(AssetHubWestendReceiver::get()), +// bx!(xcms), +// Weight::from(8_000_000_000), +// ) +// .unwrap(); +// }); +// +// BridgeHubWestend::execute_with(|| { +// type RuntimeEvent = ::RuntimeEvent; +// // Check that Ethereum message was queue in the Outbound Queue +// assert_expected_events!( +// BridgeHubWestend, +// 
vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageAccepted{ .. }) => {},] +// ); +// }); +// } #[test] fn transact_with_agent() { @@ -427,15 +424,13 @@ fn transact_with_agent() { let beneficiary = Location::new(0, [AccountKey20 { network: None, key: AGENT_ADDRESS.into() }]); - let call_params = CallContractParams { + let transact_info = TransactInfo { target: Default::default(), data: vec![], gas_limit: 40000, // value should be less than the transfer amount, require validation on BH Exporter value: 4 * (TOKEN_AMOUNT - REMOTE_FEE_AMOUNT_IN_WETH) / 5, }; - let transact_info = - TransactInfo { kind: TransactKind::CallContract, params: call_params.encode() }; let xcms = VersionedXcm::from(Xcm(vec![ WithdrawAsset(assets.clone().into()), From 5696fdfb7f24b2b069c838225c3e9f9247ab4c7c Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 29 Nov 2024 02:39:19 +0800 Subject: [PATCH 30/68] Seperate outbound router crates --- Cargo.lock | 25 +- Cargo.toml | 2 + .../pallets/outbound-queue-v2/Cargo.toml | 6 +- .../pallets/outbound-queue-v2/src/api.rs | 2 +- .../pallets/outbound-queue-v2/src/lib.rs | 2 +- .../pallets/outbound-queue/src/lib.rs | 2 +- .../primitives/outbound-router/Cargo.toml | 57 + .../primitives/outbound-router/README.md | 4 + .../mod.rs => outbound-router/src/lib.rs} | 3 +- .../primitives/outbound-router/src/v1/mod.rs | 423 ++++ .../outbound-router/src/v1/tests.rs | 1274 ++++++++++++ .../outbound-router/src/v2/convert.rs | 276 +++ .../primitives/outbound-router/src/v2/mod.rs | 197 ++ .../outbound-router/src/v2/tests.rs | 1288 +++++++++++++ .../snowbridge/primitives/router/src/lib.rs | 1 - .../primitives/router/src/outbound/v1/mod.rs | 1703 ----------------- .../router/src/outbound/v2/convert.rs | 1068 ----------- .../primitives/router/src/outbound/v2/mod.rs | 738 ------- .../bridges/bridge-hub-westend/Cargo.toml | 1 + .../assets/asset-hub-westend/Cargo.toml | 3 + .../asset-hub-westend/src/xcm_config.rs | 4 +- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 3 + .../src/bridge_to_ethereum_config.rs | 3 +- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 3 + .../src/bridge_to_ethereum_config.rs | 6 +- 25 files changed, 3570 insertions(+), 3524 deletions(-) create mode 100644 bridges/snowbridge/primitives/outbound-router/Cargo.toml create mode 100644 bridges/snowbridge/primitives/outbound-router/README.md rename bridges/snowbridge/primitives/{router/src/outbound/mod.rs => outbound-router/src/lib.rs} (65%) create mode 100644 bridges/snowbridge/primitives/outbound-router/src/v1/mod.rs create mode 100644 bridges/snowbridge/primitives/outbound-router/src/v1/tests.rs create mode 100644 bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs create mode 100644 bridges/snowbridge/primitives/outbound-router/src/v2/mod.rs create mode 100644 bridges/snowbridge/primitives/outbound-router/src/v2/tests.rs delete mode 100644 bridges/snowbridge/primitives/router/src/outbound/v1/mod.rs delete mode 100644 bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs delete mode 100644 bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 09576d599ca1..25877bbd36bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1044,6 +1044,7 @@ dependencies = [ "primitive-types 0.13.1", "scale-info", "serde_json", + "snowbridge-outbound-router-primitives", "snowbridge-router-primitives 0.9.0", "sp-api 26.0.0", "sp-block-builder 26.0.0", @@ -2594,6 +2595,7 @@ dependencies = [ "snowbridge-core 0.2.0", "snowbridge-merkle-tree", 
"snowbridge-outbound-queue-runtime-api 0.2.0", + "snowbridge-outbound-router-primitives", "snowbridge-pallet-ethereum-client 0.2.0", "snowbridge-pallet-inbound-queue 0.2.0", "snowbridge-pallet-outbound-queue 0.2.0", @@ -2751,6 +2753,7 @@ dependencies = [ "rococo-westend-system-emulated-network", "scale-info", "snowbridge-core 0.2.0", + "snowbridge-outbound-router-primitives", "snowbridge-pallet-inbound-queue 0.2.0", "snowbridge-pallet-inbound-queue-fixtures 0.10.0", "snowbridge-pallet-outbound-queue 0.2.0", @@ -2834,6 +2837,7 @@ dependencies = [ "snowbridge-merkle-tree", "snowbridge-outbound-queue-runtime-api 0.2.0", "snowbridge-outbound-queue-runtime-api-v2", + "snowbridge-outbound-router-primitives", "snowbridge-pallet-ethereum-client 0.2.0", "snowbridge-pallet-inbound-queue 0.2.0", "snowbridge-pallet-outbound-queue 0.2.0", @@ -24900,6 +24904,25 @@ dependencies = [ "staging-xcm 7.0.0", ] +[[package]] +name = "snowbridge-outbound-router-primitives" +version = "0.9.0" +dependencies = [ + "frame-support 28.0.0", + "hex-literal", + "log", + "parity-scale-codec", + "scale-info", + "snowbridge-core 0.2.0", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", + "sp-std 14.0.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", +] + [[package]] name = "snowbridge-pallet-ethereum-client" version = "0.2.0" @@ -25121,7 +25144,7 @@ dependencies = [ "serde", "snowbridge-core 0.2.0", "snowbridge-merkle-tree", - "snowbridge-router-primitives 0.9.0", + "snowbridge-outbound-router-primitives", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", diff --git a/Cargo.toml b/Cargo.toml index 86aa6c5c31f2..b753c867b51e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,6 +60,7 @@ members = [ "bridges/snowbridge/primitives/core", "bridges/snowbridge/primitives/ethereum", "bridges/snowbridge/primitives/merkle-tree", + "bridges/snowbridge/primitives/outbound-router", "bridges/snowbridge/primitives/router", "bridges/snowbridge/runtime/runtime-common", "bridges/snowbridge/runtime/test-common", @@ -1227,6 +1228,7 @@ snowbridge-ethereum = { path = "bridges/snowbridge/primitives/ethereum", default snowbridge-merkle-tree = { path = "bridges/snowbridge/primitives/merkle-tree", default-features = false } snowbridge-outbound-queue-runtime-api = { path = "bridges/snowbridge/pallets/outbound-queue/runtime-api", default-features = false } snowbridge-outbound-queue-runtime-api-v2 = { path = "bridges/snowbridge/pallets/outbound-queue-v2/runtime-api", default-features = false } +snowbridge-outbound-router-primitives = { path = "bridges/snowbridge/primitives/outbound-router", default-features = false } snowbridge-pallet-ethereum-client = { path = "bridges/snowbridge/pallets/ethereum-client", default-features = false } snowbridge-pallet-ethereum-client-fixtures = { path = "bridges/snowbridge/pallets/ethereum-client/fixtures", default-features = false } snowbridge-pallet-inbound-queue = { path = "bridges/snowbridge/pallets/inbound-queue", default-features = false } diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml index 560192c759f8..ac8dee02f116 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml @@ -36,7 +36,7 @@ snowbridge-core = { features = ["serde"], workspace = true } ethabi = { workspace = true } hex-literal = { workspace = true, default-features = true } snowbridge-merkle-tree = { workspace = true } 
-snowbridge-router-primitives = { workspace = true } +snowbridge-outbound-router-primitives = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } xcm-builder = { workspace = true } @@ -61,7 +61,7 @@ std = [ "serde/std", "snowbridge-core/std", "snowbridge-merkle-tree/std", - "snowbridge-router-primitives/std", + "snowbridge-outbound-router-primitives/std", "sp-arithmetic/std", "sp-core/std", "sp-io/std", @@ -79,7 +79,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", - "snowbridge-router-primitives/runtime-benchmarks", + "snowbridge-outbound-router-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs index 2912705dd151..75e51be90112 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs @@ -12,7 +12,7 @@ use snowbridge_core::outbound::{ DryRunError, }; use snowbridge_merkle_tree::{merkle_proof, MerkleProof}; -use snowbridge_router_primitives::outbound::v2::convert::XcmConverter; +use snowbridge_outbound_router_primitives::v2::convert::XcmConverter; use sp_core::Get; use sp_std::{default::Default, vec::Vec}; use xcm::prelude::Xcm; diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index 80309d530baf..6b669a75e5c9 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -7,7 +7,7 @@ //! Messages come either from sibling parachains via XCM, or BridgeHub itself //! via the `snowbridge-pallet-system`: //! -//! 1. `snowbridge_router_primitives::outbound::v2::EthereumBlobExporter::deliver` +//! 1. `snowbridge_outbound_router_primitives::v2::EthereumBlobExporter::deliver` //! 2. `snowbridge_pallet_system::Pallet::send_v2` //! //! The message submission pipeline works like this: diff --git a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs index 0d43519167af..feb86bce5dd8 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs @@ -7,7 +7,7 @@ //! Messages come either from sibling parachains via XCM, or BridgeHub itself //! via the `snowbridge-pallet-system`: //! -//! 1. `snowbridge_router_primitives::outbound::EthereumBlobExporter::deliver` +//! 1. `snowbridge_outbound_router_primitives::EthereumBlobExporter::deliver` //! 2. `snowbridge_pallet_system::Pallet::send` //! //! 
The message submission pipeline works like this: diff --git a/bridges/snowbridge/primitives/outbound-router/Cargo.toml b/bridges/snowbridge/primitives/outbound-router/Cargo.toml new file mode 100644 index 000000000000..17601d440973 --- /dev/null +++ b/bridges/snowbridge/primitives/outbound-router/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "snowbridge-outbound-router-primitives" +description = "Snowbridge Router Primitives" +version = "0.9.0" +authors = ["Snowfork "] +edition.workspace = true +repository.workspace = true +license = "Apache-2.0" +categories = ["cryptography::cryptocurrencies"] + +[lints] +workspace = true + +[dependencies] +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +log = { workspace = true } + +frame-support = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { workspace = true } + +snowbridge-core = { workspace = true } + +hex-literal = { workspace = true, default-features = true } + +[dev-dependencies] + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "log/std", + "scale-info/std", + "snowbridge-core/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "snowbridge-core/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] diff --git a/bridges/snowbridge/primitives/outbound-router/README.md b/bridges/snowbridge/primitives/outbound-router/README.md new file mode 100644 index 000000000000..0544d08e43c7 --- /dev/null +++ b/bridges/snowbridge/primitives/outbound-router/README.md @@ -0,0 +1,4 @@ +# Outbound Router Primitives + +Outbound router logic. Does XCM conversion to a lowered, simpler format the Ethereum contracts can +understand. diff --git a/bridges/snowbridge/primitives/router/src/outbound/mod.rs b/bridges/snowbridge/primitives/outbound-router/src/lib.rs similarity index 65% rename from bridges/snowbridge/primitives/router/src/outbound/mod.rs rename to bridges/snowbridge/primitives/outbound-router/src/lib.rs index 22756b222812..7ab04608543d 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/mod.rs +++ b/bridges/snowbridge/primitives/outbound-router/src/lib.rs @@ -1,5 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork -// SPDX-FileCopyrightText: 2021-2022 Parity Technologies (UK) Ltd. +#![cfg_attr(not(feature = "std"), no_std)] + pub mod v1; pub mod v2; diff --git a/bridges/snowbridge/primitives/outbound-router/src/v1/mod.rs b/bridges/snowbridge/primitives/outbound-router/src/v1/mod.rs new file mode 100644 index 000000000000..6394ba927d8a --- /dev/null +++ b/bridges/snowbridge/primitives/outbound-router/src/v1/mod.rs @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! 
Converts XCM messages into simpler commands that can be processed by the Gateway contract + +#[cfg(test)] +mod tests; + +use core::slice::Iter; + +use codec::{Decode, Encode}; + +use frame_support::{ensure, traits::Get}; +use snowbridge_core::{ + outbound::v1::{AgentExecuteCommand, Command, Message, SendMessage}, + AgentId, ChannelId, ParaId, TokenId, TokenIdOf, +}; +use sp_core::{H160, H256}; +use sp_runtime::traits::MaybeEquivalence; +use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; +use xcm::prelude::*; +use xcm_executor::traits::{ConvertLocation, ExportXcm}; + +pub struct EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, +>( + PhantomData<( + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + )>, +); + +impl + ExportXcm + for EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + > +where + UniversalLocation: Get, + EthereumNetwork: Get, + OutboundQueue: SendMessage, + AgentHashedDescription: ConvertLocation, + ConvertAssetId: MaybeEquivalence, +{ + type Ticket = (Vec, XcmHash); + + fn validate( + network: NetworkId, + _channel: u32, + universal_source: &mut Option, + destination: &mut Option, + message: &mut Option>, + ) -> SendResult { + let expected_network = EthereumNetwork::get(); + let universal_location = UniversalLocation::get(); + + if network != expected_network { + log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched bridge network {network:?}."); + return Err(SendError::NotApplicable) + } + + // Cloning destination to avoid modifying the value so subsequent exporters can use it. + let dest = destination.clone().take().ok_or(SendError::MissingArgument)?; + if dest != Here { + log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched remote destination {dest:?}."); + return Err(SendError::NotApplicable) + } + + // Cloning universal_source to avoid modifying the value so subsequent exporters can use it. + let (local_net, local_sub) = universal_source.clone() + .take() + .ok_or_else(|| { + log::error!(target: "xcm::ethereum_blob_exporter", "universal source not provided."); + SendError::MissingArgument + })? + .split_global() + .map_err(|()| { + log::error!(target: "xcm::ethereum_blob_exporter", "could not get global consensus from universal source '{universal_source:?}'."); + SendError::NotApplicable + })?; + + if Ok(local_net) != universal_location.global_consensus() { + log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched relay network {local_net:?}."); + return Err(SendError::NotApplicable) + } + + let para_id = match local_sub.as_slice() { + [Parachain(para_id)] => *para_id, + _ => { + log::error!(target: "xcm::ethereum_blob_exporter", "could not get parachain id from universal source '{local_sub:?}'."); + return Err(SendError::NotApplicable) + }, + }; + + let source_location = Location::new(1, local_sub.clone()); + + let agent_id = match AgentHashedDescription::convert_location(&source_location) { + Some(id) => id, + None => { + log::error!(target: "xcm::ethereum_blob_exporter", "unroutable due to not being able to create agent id. 
'{source_location:?}'"); + return Err(SendError::NotApplicable) + }, + }; + + let message = message.take().ok_or_else(|| { + log::error!(target: "xcm::ethereum_blob_exporter", "xcm message not provided."); + SendError::MissingArgument + })?; + + let mut converter = + XcmConverter::::new(&message, expected_network, agent_id); + let (command, message_id) = converter.convert().map_err(|err|{ + log::error!(target: "xcm::ethereum_blob_exporter", "unroutable due to pattern matching error '{err:?}'."); + SendError::Unroutable + })?; + + let channel_id: ChannelId = ParaId::from(para_id).into(); + + let outbound_message = Message { id: Some(message_id.into()), channel_id, command }; + + // validate the message + let (ticket, fee) = OutboundQueue::validate(&outbound_message).map_err(|err| { + log::error!(target: "xcm::ethereum_blob_exporter", "OutboundQueue validation of message failed. {err:?}"); + SendError::Unroutable + })?; + + // convert fee to Asset + let fee = Asset::from((Location::parent(), fee.total())).into(); + + Ok(((ticket.encode(), message_id), fee)) + } + + fn deliver(blob: (Vec, XcmHash)) -> Result { + let ticket: OutboundQueue::Ticket = OutboundQueue::Ticket::decode(&mut blob.0.as_ref()) + .map_err(|_| { + log::trace!(target: "xcm::ethereum_blob_exporter", "undeliverable due to decoding error"); + SendError::NotApplicable + })?; + + let message_id = OutboundQueue::deliver(ticket).map_err(|_| { + log::error!(target: "xcm::ethereum_blob_exporter", "OutboundQueue submit of message failed"); + SendError::Transport("other transport error") + })?; + + log::info!(target: "xcm::ethereum_blob_exporter", "message delivered {message_id:#?}."); + Ok(message_id.into()) + } +} + +/// Errors that can be thrown to the pattern matching step. +#[derive(PartialEq, Debug)] +enum XcmConverterError { + UnexpectedEndOfXcm, + EndOfXcmMessageExpected, + WithdrawAssetExpected, + DepositAssetExpected, + NoReserveAssets, + FilterDoesNotConsumeAllAssets, + TooManyAssets, + ZeroAssetTransfer, + BeneficiaryResolutionFailed, + AssetResolutionFailed, + InvalidFeeAsset, + SetTopicExpected, + ReserveAssetDepositedExpected, + InvalidAsset, + UnexpectedInstruction, +} + +macro_rules! match_expression { + ($expression:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $value:expr $(,)?) => { + match $expression { + $( $pattern )|+ $( if $guard )? => Some($value), + _ => None, + } + }; +} + +struct XcmConverter<'a, ConvertAssetId, Call> { + iter: Peekable>>, + ethereum_network: NetworkId, + agent_id: AgentId, + _marker: PhantomData, +} +impl<'a, ConvertAssetId, Call> XcmConverter<'a, ConvertAssetId, Call> +where + ConvertAssetId: MaybeEquivalence, +{ + fn new(message: &'a Xcm, ethereum_network: NetworkId, agent_id: AgentId) -> Self { + Self { + iter: message.inner().iter().peekable(), + ethereum_network, + agent_id, + _marker: Default::default(), + } + } + + fn convert(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> { + let result = match self.peek() { + Ok(ReserveAssetDeposited { .. }) => self.make_mint_foreign_token_command(), + // Get withdraw/deposit and make native tokens create message. + Ok(WithdrawAsset { .. }) => self.make_unlock_native_token_command(), + Err(e) => Err(e), + _ => return Err(XcmConverterError::UnexpectedInstruction), + }?; + + // All xcm instructions must be consumed before exit. 
+		if self.next().is_ok() {
+			return Err(XcmConverterError::EndOfXcmMessageExpected)
+		}
+
+		Ok(result)
+	}
+
+	fn make_unlock_native_token_command(
+		&mut self,
+	) -> Result<(Command, [u8; 32]), XcmConverterError> {
+		use XcmConverterError::*;
+
+		// Get the reserve assets from WithdrawAsset.
+		let reserve_assets =
+			match_expression!(self.next()?, WithdrawAsset(reserve_assets), reserve_assets)
+				.ok_or(WithdrawAssetExpected)?;
+
+		// Check if clear origin exists and skip over it.
+		if match_expression!(self.peek(), Ok(ClearOrigin), ()).is_some() {
+			let _ = self.next();
+		}
+
+		// Get the fee asset item from BuyExecution or continue parsing.
+		let fee_asset = match_expression!(self.peek(), Ok(BuyExecution { fees, .. }), fees);
+		if fee_asset.is_some() {
+			let _ = self.next();
+		}
+
+		let (deposit_assets, beneficiary) = match_expression!(
+			self.next()?,
+			DepositAsset { assets, beneficiary },
+			(assets, beneficiary)
+		)
+		.ok_or(DepositAssetExpected)?;
+
+		// Assert that the beneficiary is AccountKey20.
+		let recipient = match_expression!(
+			beneficiary.unpack(),
+			(0, [AccountKey20 { network, key }])
+				if self.network_matches(network),
+			H160(*key)
+		)
+		.ok_or(BeneficiaryResolutionFailed)?;
+
+		// Make sure there are reserved assets.
+		if reserve_assets.len() == 0 {
+			return Err(NoReserveAssets)
+		}
+
+		// Check that the deposit asset filter matches what was reserved.
+		if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) {
+			return Err(FilterDoesNotConsumeAllAssets)
+		}
+
+		// We only support a single asset at a time.
+		ensure!(reserve_assets.len() == 1, TooManyAssets);
+		let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?;
+
+		// Fees are collected on AH, up front and directly from the user, to cover the
+		// complete cost of the transfer. Any additional fees provided in the XCM program are
+		// refunded to the beneficiary. We only validate the fee here, if it is provided, to make
+		// sure the XCM program is well formed. Another way to think about this from an XCM
+		// perspective would be that the user offered to pay X amount in fees, but we charge 0 of
+		// that X amount (no fee) and refund X to the user.
+		if let Some(fee_asset) = fee_asset {
+			// The fee asset must be the same as the reserve asset.
+			if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun {
+				return Err(InvalidFeeAsset)
+			}
+		}
+
+		let (token, amount) = match reserve_asset {
+			Asset { id: AssetId(inner_location), fun: Fungible(amount) } =>
+				match inner_location.unpack() {
+					(0, [AccountKey20 { network, key }]) if self.network_matches(network) =>
+						Some((H160(*key), *amount)),
+					_ => None,
+				},
+			_ => None,
+		}
+		.ok_or(AssetResolutionFailed)?;
+
+		// The transfer amount must be greater than 0.
+		ensure!(amount > 0, ZeroAssetTransfer);
+
+		// The message must end with a SetTopic instruction; its id becomes the message id.
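+		// The command built below instructs the agent on Ethereum to transfer `amount` of
+		// `token` to `recipient`.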
+		let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?;
+
+		Ok((
+			Command::AgentExecute {
+				agent_id: self.agent_id,
+				command: AgentExecuteCommand::TransferToken { token, recipient, amount },
+			},
+			*topic_id,
+		))
+	}
+
+	fn next(&mut self) -> Result<&'a Instruction<Call>, XcmConverterError> {
+		self.iter.next().ok_or(XcmConverterError::UnexpectedEndOfXcm)
+	}
+
+	fn peek(&mut self) -> Result<&&'a Instruction<Call>, XcmConverterError> {
+		self.iter.peek().ok_or(XcmConverterError::UnexpectedEndOfXcm)
+	}
+
+	fn network_matches(&self, network: &Option<NetworkId>) -> bool {
+		if let Some(network) = network {
+			*network == self.ethereum_network
+		} else {
+			true
+		}
+	}
+
+	/// Convert the XCM for a Polkadot-native token transfer from AH into a Command.
+	/// To match transfers of Polkadot-native tokens, we expect an input of the form:
+	/// # ReserveAssetDeposited
+	/// # ClearOrigin
+	/// # BuyExecution
+	/// # DepositAsset
+	/// # SetTopic
+	fn make_mint_foreign_token_command(
+		&mut self,
+	) -> Result<(Command, [u8; 32]), XcmConverterError> {
+		use XcmConverterError::*;
+
+		// Get the reserve assets.
+		let reserve_assets =
+			match_expression!(self.next()?, ReserveAssetDeposited(reserve_assets), reserve_assets)
+				.ok_or(ReserveAssetDepositedExpected)?;
+
+		// Check if clear origin exists and skip over it.
+		if match_expression!(self.peek(), Ok(ClearOrigin), ()).is_some() {
+			let _ = self.next();
+		}
+
+		// Get the fee asset item from BuyExecution or continue parsing.
+		let fee_asset = match_expression!(self.peek(), Ok(BuyExecution { fees, .. }), fees);
+		if fee_asset.is_some() {
+			let _ = self.next();
+		}
+
+		let (deposit_assets, beneficiary) = match_expression!(
+			self.next()?,
+			DepositAsset { assets, beneficiary },
+			(assets, beneficiary)
+		)
+		.ok_or(DepositAssetExpected)?;
+
+		// Assert that the beneficiary is AccountKey20.
+		let recipient = match_expression!(
+			beneficiary.unpack(),
+			(0, [AccountKey20 { network, key }])
+				if self.network_matches(network),
+			H160(*key)
+		)
+		.ok_or(BeneficiaryResolutionFailed)?;
+
+		// Make sure there are reserved assets.
+		if reserve_assets.len() == 0 {
+			return Err(NoReserveAssets)
+		}
+
+		// Check that the deposit asset filter matches what was reserved.
+		if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) {
+			return Err(FilterDoesNotConsumeAllAssets)
+		}
+
+		// We only support a single asset at a time.
+		ensure!(reserve_assets.len() == 1, TooManyAssets);
+		let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?;
+
+		// Fees are collected on AH, up front and directly from the user, to cover the
+		// complete cost of the transfer. Any additional fees provided in the XCM program are
+		// refunded to the beneficiary. We only validate the fee here, if it is provided, to make
+		// sure the XCM program is well formed. Another way to think about this from an XCM
+		// perspective would be that the user offered to pay X amount in fees, but we charge 0 of
+		// that X amount (no fee) and refund X to the user.
+		if let Some(fee_asset) = fee_asset {
+			// The fee asset must be the same as the reserve asset.
+			if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun {
+				return Err(InvalidFeeAsset)
+			}
+		}
+
+		let (asset_id, amount) = match reserve_asset {
+			Asset { id: AssetId(inner_location), fun: Fungible(amount) } =>
+				Some((inner_location.clone(), *amount)),
+			_ => None,
+		}
+		.ok_or(AssetResolutionFailed)?;
+
+		// The transfer amount must be greater than 0.
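+		// As in the unlock path above, a `Fungible(0)` amount is rejected below with
+		// `ZeroAssetTransfer`.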
+ ensure!(amount > 0, ZeroAssetTransfer); + + let token_id = TokenIdOf::convert_location(&asset_id).ok_or(InvalidAsset)?; + + let expected_asset_id = ConvertAssetId::convert(&token_id).ok_or(InvalidAsset)?; + + ensure!(asset_id == expected_asset_id, InvalidAsset); + + // Check if there is a SetTopic and skip over it if found. + let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; + + Ok((Command::MintForeignToken { token_id, recipient, amount }, *topic_id)) + } +} diff --git a/bridges/snowbridge/primitives/outbound-router/src/v1/tests.rs b/bridges/snowbridge/primitives/outbound-router/src/v1/tests.rs new file mode 100644 index 000000000000..607e2ea611a4 --- /dev/null +++ b/bridges/snowbridge/primitives/outbound-router/src/v1/tests.rs @@ -0,0 +1,1274 @@ +use frame_support::parameter_types; +use hex_literal::hex; +use snowbridge_core::{ + outbound::{v1::Fee, SendError, SendMessageFeeProvider}, + AgentIdOf, +}; +use sp_std::default::Default; +use xcm::{ + latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, + prelude::SendError as XcmSendError, +}; + +use super::*; + +parameter_types! { + const MaxMessageSize: u32 = u32::MAX; + const RelayNetwork: NetworkId = Polkadot; + UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(1013)].into(); + const BridgedNetwork: NetworkId = Ethereum{ chain_id: 1 }; + const NonBridgedNetwork: NetworkId = Ethereum{ chain_id: 2 }; +} + +struct MockOkOutboundQueue; +impl SendMessage for MockOkOutboundQueue { + type Ticket = (); + + fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { + Ok(((), Fee { local: 1, remote: 1 })) + } + + fn deliver(_: Self::Ticket) -> Result { + Ok(H256::zero()) + } +} + +impl SendMessageFeeProvider for MockOkOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } +} +struct MockErrOutboundQueue; +impl SendMessage for MockErrOutboundQueue { + type Ticket = (); + + fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { + Err(SendError::MessageTooLarge) + } + + fn deliver(_: Self::Ticket) -> Result { + Err(SendError::MessageTooLarge) + } +} + +impl SendMessageFeeProvider for MockErrOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } +} + +pub struct MockTokenIdConvert; +impl MaybeEquivalence for MockTokenIdConvert { + fn convert(_id: &TokenId) -> Option { + Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) + } + fn convert_back(_loc: &Location) -> Option { + None + } +} + +#[test] +fn exporter_validate_with_unknown_network_yields_not_applicable() { + let network = Ethereum { chain_id: 1337 }; + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_with_invalid_destination_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut 
universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_with_x8_destination_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = Some( + [OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild] + .into(), + ); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_without_universal_source_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_without_global_universal_location_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_without_global_bridge_location_yields_not_applicable() { + let network = NonBridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_with_remote_universal_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Kusama), Parachain(1000)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_without_para_id_in_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Some(GlobalConsensus(Polkadot).into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, 
channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_complex_para_id_in_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000), PalletInstance(12)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_without_xcm_message_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_with_max_target_fee_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; + let fees: Assets = vec![fee.clone()].into(); + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(fees), + BuyExecution { fees: fee, weight_limit: Unlimited }, + WithdrawAsset(assets), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: Some(network), key: beneficiary_address } + .into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert_eq!(result, Err(XcmSendError::Unroutable)); +} + +#[test] +fn exporter_validate_with_unparsable_xcm_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let channel: u32 = 0; + let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; + let fees: Assets = vec![fee.clone()].into(); + + let mut message: Option> = + Some(vec![WithdrawAsset(fees), BuyExecution { fees: fee, weight_limit: Unlimited }].into()); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert_eq!(result, 
Err(XcmSendError::Unroutable)); +} + +#[test] +fn exporter_validate_xcm_success_case_1() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert!(result.is_ok()); +} + +#[test] +fn exporter_deliver_with_submit_failure_yields_unroutable() { + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockErrOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::deliver((hex!("deadbeef").to_vec(), XcmHash::default())); + assert_eq!(result, Err(XcmSendError::Transport("other transport error"))) +} + +#[test] +fn xcm_converter_convert_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = Command::AgentExecute { + agent_id: Default::default(), + command: AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); +} + +#[test] +fn xcm_converter_convert_without_buy_execution_yields_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + 
let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = Command::AgentExecute { + agent_id: Default::default(), + command: AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); +} + +#[test] +fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(All); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = Command::AgentExecute { + agent_id: Default::default(), + command: AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); +} + +#[test] +fn xcm_converter_convert_with_fees_less_than_reserve_yields_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); + let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(500) }; + + let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); + + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = Command::AgentExecute { + agent_id: Default::default(), + command: AgentExecuteCommand::TransferToken { + token: token_address.into(), + recipient: beneficiary_address.into(), + amount: 1000, + }, + }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); +} + +#[test] +fn xcm_converter_convert_without_set_topic_yields_set_topic_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + BuyExecution { fees: 
assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + ClearTopic, + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::SetTopicExpected)); +} + +#[test] +fn xcm_converter_convert_with_partial_message_yields_unexpected_end_of_xcm() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let message: Xcm<()> = vec![WithdrawAsset(assets)].into(); + + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); +} + +#[test] +fn xcm_converter_with_different_fee_asset_fails() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location = [AccountKey20 { network: None, key: token_address }].into(); + let fee_asset = + Asset { id: AssetId(Location { parents: 0, interior: Here }), fun: Fungible(1000) }; + + let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); + + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); +} + +#[test] +fn xcm_converter_with_fees_greater_than_reserve_fails() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); + let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(1001) }; + + let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); + + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee_asset, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); +} + +#[test] +fn xcm_converter_convert_with_empty_xcm_yields_unexpected_end_of_xcm() { + let network = BridgedNetwork::get(); + + let message: Xcm<()> = vec![].into(); + + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + 
assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); +} + +#[test] +fn xcm_converter_convert_with_extra_instructions_yields_end_of_xcm_message_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ClearError, + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::EndOfXcmMessageExpected)); +} + +#[test] +fn xcm_converter_convert_without_withdraw_asset_yields_withdraw_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedInstruction)); +} + +#[test] +fn xcm_converter_convert_without_withdraw_asset_yields_deposit_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::DepositAssetExpected)); +} + +#[test] +fn xcm_converter_convert_without_assets_yields_no_reserve_assets() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![].into(); + let filter: AssetFilter = assets.clone().into(); + + let fee = Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }; + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: 
filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::NoReserveAssets)); +} + +#[test] +fn xcm_converter_convert_with_two_assets_yields_too_many_assets() { + let network = BridgedNetwork::get(); + + let token_address_1: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let token_address_2: [u8; 20] = hex!("1100000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![ + Asset { + id: AssetId(AccountKey20 { network: None, key: token_address_1 }.into()), + fun: Fungible(1000), + }, + Asset { + id: AssetId(AccountKey20 { network: None, key: token_address_2 }.into()), + fun: Fungible(500), + }, + ] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::TooManyAssets)); +} + +#[test] +fn xcm_converter_convert_without_consuming_filter_yields_filter_does_not_consume_all_assets() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(0)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::FilterDoesNotConsumeAllAssets)); +} + +#[test] +fn xcm_converter_convert_with_zero_amount_asset_yields_zero_asset_transfer() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(0), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + 
assert_eq!(result.err(), Some(XcmConverterError::ZeroAssetTransfer)); +} + +#[test] +fn xcm_converter_convert_non_ethereum_asset_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([GlobalConsensus(Polkadot), Parachain(1000), GeneralIndex(0)].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_non_ethereum_chain_asset_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId( + AccountKey20 { network: Some(Ethereum { chain_id: 2 }), key: token_address }.into(), + ), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_non_ethereum_chain_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId( + [AccountKey20 { network: Some(NonBridgedNetwork::get()), key: token_address }].into(), + ), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_with_non_ethereum_beneficiary_yields_beneficiary_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let beneficiary_address: [u8; 32] = + hex!("2000000000000000000000000000000000000000000000000000000000000000"); + + let assets: Assets = 
vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: [ + GlobalConsensus(Polkadot), + Parachain(1000), + AccountId32 { network: Some(Polkadot), id: beneficiary_address }, + ] + .into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_with_non_ethereum_chain_beneficiary_yields_beneficiary_resolution_failed() +{ + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { + network: Some(Ethereum { chain_id: 2 }), + key: beneficiary_address, + } + .into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); +} + +#[test] +fn test_describe_asset_hub() { + let legacy_location: Location = Location::new(0, [Parachain(1000)]); + let legacy_agent_id = AgentIdOf::convert_location(&legacy_location).unwrap(); + assert_eq!( + legacy_agent_id, + hex!("72456f48efed08af20e5b317abf8648ac66e86bb90a411d9b0b713f7364b75b4").into() + ); + let location: Location = Location::new(1, [Parachain(1000)]); + let agent_id = AgentIdOf::convert_location(&location).unwrap(); + assert_eq!( + agent_id, + hex!("81c5ab2571199e3188135178f3c2c8e2d268be1313d029b30f534fa579b69b79").into() + ) +} + +#[test] +fn test_describe_here() { + let location: Location = Location::new(0, []); + let agent_id = AgentIdOf::convert_location(&location).unwrap(); + assert_eq!( + agent_id, + hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into() + ) +} + +#[test] +fn xcm_converter_transfer_native_token_success() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let amount = 1000000; + let asset_location = Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]); + let token_id = TokenIdOf::convert_location(&asset_location).unwrap(); + + let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + ReserveAssetDeposited(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] 
+ .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let expected_payload = + Command::MintForeignToken { recipient: beneficiary_address.into(), amount, token_id }; + let result = converter.convert(); + assert_eq!(result, Ok((expected_payload, [0; 32]))); +} + +#[test] +fn xcm_converter_transfer_native_token_with_invalid_location_will_fail() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let amount = 1000000; + // Invalid asset location from a different consensus + let asset_location = + Location { parents: 2, interior: [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))].into() }; + + let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + ReserveAssetDeposited(assets.clone()), + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = + XcmConverter::::new(&message, network, Default::default()); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::InvalidAsset)); +} + +#[test] +fn exporter_validate_with_invalid_dest_does_not_alter_destination() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Parachain(1000).into(); + + let universal_source: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut dest_wrapper = Some(destination.clone()); + let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate( + network, channel, &mut universal_source_wrapper, &mut dest_wrapper, &mut msg_wrapper + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); +} + +#[test] +fn exporter_validate_with_invalid_universal_source_does_not_alter_universal_source() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Here.into(); + + let universal_source: InteriorLocation = + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = 
hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut dest_wrapper = Some(destination.clone()); + let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate( + network, channel, &mut universal_source_wrapper, &mut dest_wrapper, &mut msg_wrapper + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); +} diff --git a/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs b/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs new file mode 100644 index 000000000000..8253322c34d5 --- /dev/null +++ b/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +//! Converts XCM messages into InboundMessage that can be processed by the Gateway contract + +use codec::DecodeAll; +use core::slice::Iter; +use frame_support::{ensure, traits::Get, BoundedVec}; +use snowbridge_core::{ + outbound::{ + v2::{Command, Message}, + TransactInfo, + }, + TokenId, TokenIdOf, TokenIdOf as LocationIdOf, +}; +use sp_core::H160; +use sp_runtime::traits::MaybeEquivalence; +use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; +use xcm::prelude::*; +use xcm_executor::traits::ConvertLocation; + +/// Errors that can be thrown to the pattern matching step. +#[derive(PartialEq, Debug)] +pub enum XcmConverterError { + UnexpectedEndOfXcm, + EndOfXcmMessageExpected, + WithdrawAssetExpected, + DepositAssetExpected, + NoReserveAssets, + FilterDoesNotConsumeAllAssets, + TooManyAssets, + ZeroAssetTransfer, + BeneficiaryResolutionFailed, + AssetResolutionFailed, + InvalidFeeAsset, + SetTopicExpected, + ReserveAssetDepositedExpected, + InvalidAsset, + UnexpectedInstruction, + TooManyCommands, + AliasOriginExpected, + InvalidOrigin, + TransactDecodeFailed, + TransactParamsDecodeFailed, + FeeAssetResolutionFailed, + CallContractValueInsufficient, +} + +macro_rules! match_expression { + ($expression:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $value:expr $(,)?) => { + match $expression { + $( $pattern )|+ $( if $guard )? 
=> Some($value),
+			_ => None,
+		}
+	};
+}
+
+pub struct XcmConverter<'a, ConvertAssetId, WETHAddress, Call> {
+	iter: Peekable<Iter<'a, Instruction<Call>>>,
+	ethereum_network: NetworkId,
+	_marker: PhantomData<(ConvertAssetId, WETHAddress)>,
+}
+impl<'a, ConvertAssetId, WETHAddress, Call> XcmConverter<'a, ConvertAssetId, WETHAddress, Call>
+where
+	ConvertAssetId: MaybeEquivalence<TokenId, Location>,
+	WETHAddress: Get<H160>,
+{
+	pub fn new(message: &'a Xcm<Call>, ethereum_network: NetworkId) -> Self {
+		Self {
+			iter: message.inner().iter().peekable(),
+			ethereum_network,
+			_marker: Default::default(),
+		}
+	}
+
+	pub fn convert(&mut self) -> Result<Message, XcmConverterError> {
+		let result = self.to_ethereum_message()?;
+		Ok(result)
+	}
+
+	fn next(&mut self) -> Result<&'a Instruction<Call>, XcmConverterError> {
+		self.iter.next().ok_or(XcmConverterError::UnexpectedEndOfXcm)
+	}
+
+	fn peek(&mut self) -> Result<&&'a Instruction<Call>, XcmConverterError> {
+		self.iter.peek().ok_or(XcmConverterError::UnexpectedEndOfXcm)
+	}
+
+	fn network_matches(&self, network: &Option<NetworkId>) -> bool {
+		if let Some(network) = network {
+			*network == self.ethereum_network
+		} else {
+			true
+		}
+	}
+
+	/// Extract the fee asset item from PayFees (XCM v5).
+	fn extract_remote_fee(&mut self) -> Result<u128, XcmConverterError> {
+		use XcmConverterError::*;
+		let _ = match_expression!(self.next()?, WithdrawAsset(fee), fee)
+			.ok_or(WithdrawAssetExpected)?;
+		let fee_asset =
+			match_expression!(self.next()?, PayFees { asset: fee }, fee).ok_or(InvalidFeeAsset)?;
+		let (fee_asset_id, fee_amount) = match fee_asset {
+			Asset { id: asset_id, fun: Fungible(amount) } => Some((asset_id, *amount)),
+			_ => None,
+		}
+		.ok_or(AssetResolutionFailed)?;
+		let weth_address = match_expression!(
+			fee_asset_id.0.unpack(),
+			(0, [AccountKey20 { network, key }])
+				if self.network_matches(network),
+			H160(*key)
+		)
+		.ok_or(FeeAssetResolutionFailed)?;
+		ensure!(weth_address == WETHAddress::get(), InvalidFeeAsset);
+		Ok(fee_amount)
+	}
+
+	/// Convert the XCM into the Message which will be executed on the Ethereum Gateway
+	/// contract. We expect an input of the form:
+	/// # WithdrawAsset(WETH)
+	/// # PayFees(WETH)
+	/// # ReserveAssetDeposited(PNA) | WithdrawAsset(ENA)
+	/// # AliasOrigin(Origin)
+	/// # DepositAsset(PNA|ENA)
+	/// # Transact() --- optional
+	/// # SetTopic
+	fn to_ethereum_message(&mut self) -> Result<Message, XcmConverterError> {
+		use XcmConverterError::*;
+
+		// Get the fee amount.
+		let fee_amount = self.extract_remote_fee()?;
+
+		// Get the ENA reserve assets from WithdrawAsset.
+		let enas =
+			match_expression!(self.peek(), Ok(WithdrawAsset(reserve_assets)), reserve_assets);
+		if enas.is_some() {
+			let _ = self.next();
+		}
+
+		// Get the PNA reserve assets from ReserveAssetDeposited.
+		let pnas = match_expression!(
+			self.peek(),
+			Ok(ReserveAssetDeposited(reserve_assets)),
+			reserve_assets
+		);
+		if pnas.is_some() {
+			let _ = self.next();
+		}
+		// Check AliasOrigin.
+		let origin_location = match_expression!(self.next()?, AliasOrigin(origin), origin)
+			.ok_or(AliasOriginExpected)?;
+		let origin = LocationIdOf::convert_location(origin_location).ok_or(InvalidOrigin)?;
+
+		let (deposit_assets, beneficiary) = match_expression!(
+			self.next()?,
+			DepositAsset { assets, beneficiary },
+			(assets, beneficiary)
+		)
+		.ok_or(DepositAssetExpected)?;
+
+		// Assert that the beneficiary is AccountKey20.
+		let recipient = match_expression!(
+			beneficiary.unpack(),
+			(0, [AccountKey20 { network, key }])
+				if self.network_matches(network),
+			H160(*key)
+		)
+		.ok_or(BeneficiaryResolutionFailed)?;
+
+		// Make sure there are reserved assets.
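+		// Here ENA refers to an Ethereum-native asset (unlocked on Ethereum via
+		// `UnlockNativeToken`) and PNA to a Polkadot-native asset (minted on Ethereum via
+		// `MintForeignToken`), as handled below.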
+		if enas.is_none() && pnas.is_none() {
+			return Err(NoReserveAssets)
+		}
+
+		let mut commands: Vec<Command> = Vec::new();
+		let mut weth_amount = 0;
+
+		// ENA transfer commands
+		if let Some(enas) = enas {
+			for ena in enas.clone().inner().iter() {
+				// Check that the deposit asset filter matches what was reserved.
+				if !deposit_assets.matches(ena) {
+					return Err(FilterDoesNotConsumeAllAssets)
+				}
+
+				// Only fungible assets are allowed.
+				let (token, amount) = match ena {
+					Asset { id: AssetId(inner_location), fun: Fungible(amount) } =>
+						match inner_location.unpack() {
+							(0, [AccountKey20 { network, key }])
+								if self.network_matches(network) =>
+								Some((H160(*key), *amount)),
+							_ => None,
+						},
+					_ => None,
+				}
+				.ok_or(AssetResolutionFailed)?;
+
+				// The transfer amount must be greater than 0.
+				ensure!(amount > 0, ZeroAssetTransfer);
+
+				if token == WETHAddress::get() {
+					weth_amount = amount;
+				}
+
+				commands.push(Command::UnlockNativeToken { token, recipient, amount });
+			}
+		}
+
+		// PNA transfer commands
+		if let Some(pnas) = pnas {
+			ensure!(pnas.len() > 0, NoReserveAssets);
+			for pna in pnas.clone().inner().iter() {
+				// Check that the deposit asset filter matches what was reserved.
+				if !deposit_assets.matches(pna) {
+					return Err(FilterDoesNotConsumeAllAssets)
+				}
+
+				// Only fungible assets are allowed.
+				let (asset_id, amount) = match pna {
+					Asset { id: AssetId(inner_location), fun: Fungible(amount) } =>
+						Some((inner_location.clone(), *amount)),
+					_ => None,
+				}
+				.ok_or(AssetResolutionFailed)?;
+
+				// The transfer amount must be greater than 0.
+				ensure!(amount > 0, ZeroAssetTransfer);
+
+				// Ensure the PNA is already registered.
+				let token_id = TokenIdOf::convert_location(&asset_id).ok_or(InvalidAsset)?;
+				let expected_asset_id = ConvertAssetId::convert(&token_id).ok_or(InvalidAsset)?;
+				ensure!(asset_id == expected_asset_id, InvalidAsset);
+
+				commands.push(Command::MintForeignToken { token_id, recipient, amount });
+			}
+		}
+
+		// Transact commands
+		let transact_call = match_expression!(self.peek(), Ok(Transact { call, .. }), call);
+		if let Some(transact_call) = transact_call {
+			let _ = self.next();
+			let transact =
+				TransactInfo::decode_all(&mut transact_call.clone().into_encoded().as_slice())
+					.map_err(|_| TransactDecodeFailed)?;
+			if transact.value > 0 {
+				ensure!(weth_amount > transact.value, CallContractValueInsufficient);
+			}
+			commands.push(Command::CallContract {
+				target: transact.target,
+				data: transact.data,
+				gas_limit: transact.gas_limit,
+				value: transact.value,
+			});
+		}
+
+		// Ensure SetTopic exists.
+		let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?;
+
+		let message = Message {
+			id: (*topic_id).into(),
+			origin_location: origin_location.clone(),
+			origin,
+			fee: fee_amount,
+			commands: BoundedVec::try_from(commands).map_err(|_| TooManyCommands)?,
+		};
+
+		// All xcm instructions must be consumed before exit.
+		if self.next().is_ok() {
+			return Err(EndOfXcmMessageExpected)
+		}
+
+		Ok(message)
+	}
+}
diff --git a/bridges/snowbridge/primitives/outbound-router/src/v2/mod.rs b/bridges/snowbridge/primitives/outbound-router/src/v2/mod.rs
new file mode 100644
index 000000000000..fe719e68ea04
--- /dev/null
+++ b/bridges/snowbridge/primitives/outbound-router/src/v2/mod.rs
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 Snowfork
+//!
Converts XCM messages into simpler commands that can be processed by the Gateway contract + +#[cfg(test)] +mod tests; + +pub mod convert; +use convert::XcmConverter; + +use codec::{Decode, Encode}; +use frame_support::{ + ensure, + traits::{Contains, Get, ProcessMessageError}, +}; +use snowbridge_core::{outbound::v2::SendMessage, TokenId}; +use sp_core::{H160, H256}; +use sp_runtime::traits::MaybeEquivalence; +use sp_std::{marker::PhantomData, ops::ControlFlow, prelude::*}; +use xcm::prelude::*; +use xcm_builder::{CreateMatcher, ExporterFor, MatchXcm}; +use xcm_executor::traits::{ConvertLocation, ExportXcm}; + +pub const TARGET: &'static str = "xcm::ethereum_blob_exporter::v2"; + +pub struct EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + WETHAddress, +>( + PhantomData<( + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + WETHAddress, + )>, +); + +impl< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + WETHAddress, + > ExportXcm + for EthereumBlobExporter< + UniversalLocation, + EthereumNetwork, + OutboundQueue, + AgentHashedDescription, + ConvertAssetId, + WETHAddress, + > +where + UniversalLocation: Get, + EthereumNetwork: Get, + OutboundQueue: SendMessage, + AgentHashedDescription: ConvertLocation, + ConvertAssetId: MaybeEquivalence, + WETHAddress: Get, +{ + type Ticket = (Vec, XcmHash); + + fn validate( + network: NetworkId, + _channel: u32, + universal_source: &mut Option, + destination: &mut Option, + message: &mut Option>, + ) -> SendResult { + log::debug!(target: TARGET, "message route through bridge {message:?}."); + + let expected_network = EthereumNetwork::get(); + let universal_location = UniversalLocation::get(); + + if network != expected_network { + log::trace!(target: TARGET, "skipped due to unmatched bridge network {network:?}."); + return Err(SendError::NotApplicable) + } + + // Cloning destination to avoid modifying the value so subsequent exporters can use it. + let dest = destination.clone().ok_or(SendError::MissingArgument)?; + if dest != Here { + log::trace!(target: TARGET, "skipped due to unmatched remote destination {dest:?}."); + return Err(SendError::NotApplicable) + } + + // Cloning universal_source to avoid modifying the value so subsequent exporters can use it. + let (local_net, _) = universal_source.clone() + .ok_or_else(|| { + log::error!(target: TARGET, "universal source not provided."); + SendError::MissingArgument + })? + .split_global() + .map_err(|()| { + log::error!(target: TARGET, "could not get global consensus from universal source '{universal_source:?}'."); + SendError::NotApplicable + })?; + + if Ok(local_net) != universal_location.global_consensus() { + log::trace!(target: TARGET, "skipped due to unmatched relay network {local_net:?}."); + return Err(SendError::NotApplicable) + } + + let message = message.clone().ok_or_else(|| { + log::error!(target: TARGET, "xcm message not provided."); + SendError::MissingArgument + })?; + + // Inspect AliasOrigin as V2 message + let mut instructions = message.clone().0; + let result = instructions.matcher().match_next_inst_while( + |_| true, + |inst| { + return match inst { + AliasOrigin(..) 
=> Err(ProcessMessageError::Yield), + _ => Ok(ControlFlow::Continue(())), + } + }, + ); + ensure!(result.is_err(), SendError::NotApplicable); + + let mut converter = + XcmConverter::::new(&message, expected_network); + let message = converter.convert().map_err(|err| { + log::error!(target: TARGET, "unroutable due to pattern matching error '{err:?}'."); + SendError::Unroutable + })?; + + // Validate the message with the outbound queue. + let (ticket, _) = OutboundQueue::validate(&message).map_err(|err| { + log::error!(target: TARGET, "OutboundQueue validation of message failed. {err:?}"); + SendError::Unroutable + })?; + + Ok(((ticket.encode(), XcmHash::from(message.id)), Assets::default())) + } + + fn deliver(blob: (Vec<u8>, XcmHash)) -> Result<XcmHash, SendError> { + let ticket: OutboundQueue::Ticket = OutboundQueue::Ticket::decode(&mut blob.0.as_ref()) + .map_err(|_| { + log::trace!(target: TARGET, "undeliverable due to decoding error"); + SendError::NotApplicable + })?; + + let message_id = OutboundQueue::deliver(ticket).map_err(|_| { + log::error!(target: TARGET, "OutboundQueue submit of message failed"); + SendError::Transport("other transport error") + })?; + + log::info!(target: TARGET, "message delivered {message_id:#?}."); + Ok(message_id.into()) + } +} + +/// An adapter for the implementation of `ExporterFor`, which attempts to find the +/// `(bridge_location, payment)` for the requested `network`, `remote_location`, and `xcm` +/// in the provided `T` table containing various exporters. +pub struct XcmFilterExporter<T, M>(core::marker::PhantomData<(T, M)>); +impl<T: ExporterFor, M: Contains<Xcm<()>>> ExporterFor for XcmFilterExporter<T, M> { + fn exporter_for( + network: &NetworkId, + remote_location: &InteriorLocation, + xcm: &Xcm<()>, + ) -> Option<(Location, Option<Asset>)> { + // Check the XCM against the filter `M`. + if !M::contains(xcm) { + return None + } + // Check `network` and `remote_location` against the exporter table `T`. + T::exporter_for(network, remote_location, xcm) + } +} + +/// XCM filter for Snowbridge V2, which requires XCM v5. Matches messages that contain an `AliasOrigin` instruction. +pub struct XcmForSnowbridgeV2; +impl Contains<Xcm<()>> for XcmForSnowbridgeV2 { + fn contains(xcm: &Xcm<()>) -> bool { + let mut instructions = xcm.clone().0; + let result = instructions.matcher().match_next_inst_while( + |_| true, + |inst| { + return match inst { + AliasOrigin(..) => Err(ProcessMessageError::Yield), + _ => Ok(ControlFlow::Continue(())), + } + }, + ); + result.is_err() + } +} diff --git a/bridges/snowbridge/primitives/outbound-router/src/v2/tests.rs b/bridges/snowbridge/primitives/outbound-router/src/v2/tests.rs new file mode 100644 index 000000000000..835c7abc59aa --- /dev/null +++ b/bridges/snowbridge/primitives/outbound-router/src/v2/tests.rs @@ -0,0 +1,1288 @@ +use super::*; +use crate::v2::convert::XcmConverterError; +use frame_support::{parameter_types, BoundedVec}; +use hex_literal::hex; +use snowbridge_core::{ + outbound::{ + v2::{Command, Message}, + SendError, SendMessageFeeProvider, + }, + AgentIdOf, TokenIdOf, +}; +use sp_std::default::Default; +use xcm::{latest::WESTEND_GENESIS_HASH, prelude::SendError as XcmSendError}; + +parameter_types! 
{ + const MaxMessageSize: u32 = u32::MAX; + const RelayNetwork: NetworkId = Polkadot; + UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(1013)].into(); + pub const BridgedNetwork: NetworkId = Ethereum{ chain_id: 1 }; + pub const NonBridgedNetwork: NetworkId = Ethereum{ chain_id: 2 }; + pub WETHAddress: H160 = H160(hex_literal::hex!("c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2")); +} + +struct MockOkOutboundQueue; +impl SendMessage for MockOkOutboundQueue { + type Ticket = (); + + type Balance = u128; + + fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { + Ok(((), 1_u128)) + } + + fn deliver(_: Self::Ticket) -> Result { + Ok(H256::zero()) + } +} + +impl SendMessageFeeProvider for MockOkOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } +} +struct MockErrOutboundQueue; +impl SendMessage for MockErrOutboundQueue { + type Ticket = (); + + type Balance = u128; + + fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { + Err(SendError::MessageTooLarge) + } + + fn deliver(_: Self::Ticket) -> Result { + Err(SendError::MessageTooLarge) + } +} + +impl SendMessageFeeProvider for MockErrOutboundQueue { + type Balance = u128; + + fn local_fee() -> Self::Balance { + 1 + } +} + +pub struct MockTokenIdConvert; +impl MaybeEquivalence for MockTokenIdConvert { + fn convert(_id: &TokenId) -> Option { + Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) + } + fn convert_back(_loc: &Location) -> Option { + None + } +} + +#[test] +fn exporter_validate_with_unknown_network_yields_not_applicable() { + let network = Ethereum { chain_id: 1337 }; + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_with_invalid_destination_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = None; + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_with_x8_destination_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = None; + let mut destination: Option = Some( + [OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild] + .into(), + ); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_without_universal_source_yields_missing_argument() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = 
None; + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_without_global_universal_location_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_without_global_bridge_location_yields_not_applicable() { + let network = NonBridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Here.into(); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_with_remote_universal_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Kusama), Parachain(1000)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_without_para_id_in_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = Some(GlobalConsensus(Polkadot).into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_complex_para_id_in_source_yields_not_applicable() { + let network = BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000), PalletInstance(12)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_without_xcm_message_yields_missing_argument() { + let network = 
BridgedNetwork::get(); + let channel: u32 = 0; + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + let mut destination: Option = Here.into(); + let mut message: Option> = None; + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + assert_eq!(result, Err(XcmSendError::MissingArgument)); +} + +#[test] +fn exporter_validate_with_max_target_fee_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; + let fees: Assets = vec![fee.clone()].into(); + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(fees), + BuyExecution { fees: fee.clone(), weight_limit: Unlimited }, + ExpectAsset(fee.into()), + WithdrawAsset(assets), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: Some(network), key: beneficiary_address } + .into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_with_unparsable_xcm_yields_unroutable() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let channel: u32 = 0; + let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; + let fees: Assets = vec![fee.clone()].into(); + + let mut message: Option> = + Some(vec![WithdrawAsset(fees), BuyExecution { fees: fee, weight_limit: Unlimited }].into()); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); +} + +#[test] +fn exporter_validate_xcm_success_case_1() { + let network = BridgedNetwork::get(); + let mut destination: Option = Here.into(); + + let mut universal_source: Option = + Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + let filter: 
AssetFilter = assets.clone().into(); + + let mut message: Option> = Some( + vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(), + ); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate(network, channel, &mut universal_source, &mut destination, &mut message); + + assert!(result.is_ok()); +} + +#[test] +fn exporter_deliver_with_submit_failure_yields_unroutable() { + let result = EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockErrOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::deliver((hex!("deadbeef").to_vec(), XcmHash::default())); + assert_eq!(result, Err(XcmSendError::Transport("other transport error"))) +} + +#[test] +fn exporter_validate_with_invalid_dest_does_not_alter_destination() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Parachain(1000).into(); + + let universal_source: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut dest_wrapper = Some(destination.clone()); + let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate( + network, channel, &mut universal_source_wrapper, &mut dest_wrapper, &mut msg_wrapper + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); +} + +#[test] +fn exporter_validate_with_invalid_universal_source_does_not_alter_universal_source() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Here.into(); + + let universal_source: InteriorLocation = + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: 
Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut dest_wrapper = Some(destination.clone()); + let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + WETHAddress, + >::validate( + network, channel, &mut universal_source_wrapper, &mut dest_wrapper, &mut msg_wrapper + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); +} + +#[test] +fn xcm_converter_convert_success() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + let result = converter.convert(); + assert!(result.is_ok()); +} + +#[test] +fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(All); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + let result = converter.convert(); + assert_eq!(result.is_ok(), true); +} + +#[test] +fn xcm_converter_convert_without_set_topic_yields_set_topic_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset 
{ + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + ClearTopic, + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::SetTopicExpected)); +} + +#[test] +fn xcm_converter_convert_with_partial_message_yields_invalid_fee_asset() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let message: Xcm<()> = vec![WithdrawAsset(assets)].into(); + + let mut converter = XcmConverter::::new(&message, network); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); +} + +#[test] +fn xcm_converter_with_different_fee_asset_succeed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location = [AccountKey20 { network: None, key: token_address }].into(); + let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); + + let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + let result = converter.convert(); + assert_eq!(result.is_ok(), true); +} + +#[test] +fn xcm_converter_with_fees_greater_than_reserve_succeed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + + let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); + + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + 
beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + let result = converter.convert(); + assert_eq!(result.is_ok(), true); +} + +#[test] +fn xcm_converter_convert_with_empty_xcm_yields_unexpected_end_of_xcm() { + let network = BridgedNetwork::get(); + + let message: Xcm<()> = vec![].into(); + + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); +} + +#[test] +fn xcm_converter_convert_with_extra_instructions_yields_end_of_xcm_message_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ClearError, + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::EndOfXcmMessageExpected)); +} + +#[test] +fn xcm_converter_convert_without_withdraw_asset_yields_withdraw_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = assets.clone().into(); + + let message: Xcm<()> = vec![ + ClearOrigin, + BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::WithdrawAssetExpected)); +} + +#[test] +fn xcm_converter_convert_without_withdraw_asset_yields_deposit_expected() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + SetTopic([0; 32]), + ] + 
.into(); + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::DepositAssetExpected)); +} + +#[test] +fn xcm_converter_convert_without_assets_yields_no_reserve_assets() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![].into(); + let filter: AssetFilter = assets.clone().into(); + + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::NoReserveAssets)); +} + +#[test] +fn xcm_converter_convert_with_two_assets_yields() { + let network = BridgedNetwork::get(); + + let token_address_1: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let token_address_2: [u8; 20] = hex!("1100000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![ + Asset { + id: AssetId(AccountKey20 { network: None, key: token_address_1 }.into()), + fun: Fungible(1000), + }, + Asset { + id: AssetId(AccountKey20 { network: None, key: token_address_2 }.into()), + fun: Fungible(500), + }, + ] + .into(); + let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.is_ok(), true); +} + +#[test] +fn xcm_converter_convert_without_consuming_filter_yields_filter_does_not_consume_all_assets() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(0)); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut 
converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::FilterDoesNotConsumeAllAssets)); +} + +#[test] +fn xcm_converter_convert_with_zero_amount_asset_yields_zero_asset_transfer() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(0), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), + fun: Fungible(1000), + } + .into(); + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::ZeroAssetTransfer)); +} + +#[test] +fn xcm_converter_convert_non_ethereum_asset_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId([GlobalConsensus(Polkadot), Parachain(1000), GeneralIndex(0)].into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_non_ethereum_chain_asset_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId( + AccountKey20 { network: Some(Ethereum { chain_id: 2 }), key: token_address }.into(), + ), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + 
SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_non_ethereum_chain_yields_asset_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId( + [AccountKey20 { network: Some(NonBridgedNetwork::get()), key: token_address }].into(), + ), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_with_non_ethereum_beneficiary_yields_beneficiary_resolution_failed() { + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + + let beneficiary_address: [u8; 32] = + hex!("2000000000000000000000000000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountId32 { network: Some(Polkadot), id: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); +} + +#[test] +fn xcm_converter_convert_with_non_ethereum_chain_beneficiary_yields_beneficiary_resolution_failed() +{ + let network = BridgedNetwork::get(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let assets: Assets = vec![Asset { + id: AssetId(AccountKey20 { network: None, key: token_address }.into()), + fun: Fungible(1000), + }] + .into(); + let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + WithdrawAsset(assets.clone()), + 
AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { + network: Some(Ethereum { chain_id: 2 }), + key: beneficiary_address, + } + .into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); +} + +#[test] +fn test_describe_asset_hub() { + let legacy_location: Location = Location::new(0, [Parachain(1000)]); + let legacy_agent_id = AgentIdOf::convert_location(&legacy_location).unwrap(); + assert_eq!( + legacy_agent_id, + hex!("72456f48efed08af20e5b317abf8648ac66e86bb90a411d9b0b713f7364b75b4").into() + ); + let location: Location = Location::new(1, [Parachain(1000)]); + let agent_id = AgentIdOf::convert_location(&location).unwrap(); + assert_eq!( + agent_id, + hex!("81c5ab2571199e3188135178f3c2c8e2d268be1313d029b30f534fa579b69b79").into() + ) +} + +#[test] +fn test_describe_here() { + let location: Location = Location::new(0, []); + let agent_id = AgentIdOf::convert_location(&location).unwrap(); + assert_eq!( + agent_id, + hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into() + ) +} + +#[test] +fn xcm_converter_transfer_native_token_success() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let amount = 1000000; + let asset_location = Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]); + let token_id = TokenIdOf::convert_location(&asset_location).unwrap(); + + let assets: Assets = + vec![Asset { id: AssetId(asset_location.clone()), fun: Fungible(amount) }].into(); + let filter: AssetFilter = assets.clone().into(); + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + ReserveAssetDeposited(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + let expected_payload = + Command::MintForeignToken { recipient: beneficiary_address.into(), amount, token_id }; + let expected_message = Message { + origin_location: Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)]), + id: [0; 32].into(), + origin: hex!("aa16eddac8725928eaeda4aae518bf10d02bee80382517d21464a5cdf8d1d8e1").into(), + fee: 1000, + commands: BoundedVec::try_from(vec![expected_payload]).unwrap(), + }; + let result = converter.convert(); + assert_eq!(result, Ok(expected_message)); +} + +#[test] +fn xcm_converter_transfer_native_token_with_invalid_location_will_fail() { + let network = BridgedNetwork::get(); + + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let amount = 1000000; + // Invalid asset location from a different consensus + let asset_location = Location { + parents: 2, + interior: [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))].into(), + }; + + let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); + let filter: AssetFilter = assets.clone().into(); + + let fee_asset: Asset = Asset { + id: AssetId(AccountKey20 { 
network: None, key: WETHAddress::get().0 }.into()), + fun: Fungible(1000), + }; + + let message: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + PayFees { asset: fee_asset }, + ReserveAssetDeposited(assets.clone()), + AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut converter = XcmConverter::::new(&message, network); + let result = converter.convert(); + assert_eq!(result.err(), Some(XcmConverterError::InvalidAsset)); +} diff --git a/bridges/snowbridge/primitives/router/src/lib.rs b/bridges/snowbridge/primitives/router/src/lib.rs index d9031c69b22b..d745687c496b 100644 --- a/bridges/snowbridge/primitives/router/src/lib.rs +++ b/bridges/snowbridge/primitives/router/src/lib.rs @@ -3,4 +3,3 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod inbound; -pub mod outbound; diff --git a/bridges/snowbridge/primitives/router/src/outbound/v1/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/v1/mod.rs deleted file mode 100644 index f952d5c613f9..000000000000 --- a/bridges/snowbridge/primitives/router/src/outbound/v1/mod.rs +++ /dev/null @@ -1,1703 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -//! Converts XCM messages into simpler commands that can be processed by the Gateway contract - -use core::slice::Iter; - -use codec::{Decode, Encode}; - -use frame_support::{ensure, traits::Get}; -use snowbridge_core::{ - outbound::v1::{AgentExecuteCommand, Command, Message, SendMessage}, - AgentId, ChannelId, ParaId, TokenId, TokenIdOf, -}; -use sp_core::{H160, H256}; -use sp_runtime::traits::MaybeEquivalence; -use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; -use xcm::prelude::*; -use xcm_executor::traits::{ConvertLocation, ExportXcm}; - -pub struct EthereumBlobExporter< - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, ->( - PhantomData<( - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, - )>, -); - -impl - ExportXcm - for EthereumBlobExporter< - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, - > -where - UniversalLocation: Get, - EthereumNetwork: Get, - OutboundQueue: SendMessage, - AgentHashedDescription: ConvertLocation, - ConvertAssetId: MaybeEquivalence, -{ - type Ticket = (Vec, XcmHash); - - fn validate( - network: NetworkId, - _channel: u32, - universal_source: &mut Option, - destination: &mut Option, - message: &mut Option>, - ) -> SendResult { - let expected_network = EthereumNetwork::get(); - let universal_location = UniversalLocation::get(); - - if network != expected_network { - log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched bridge network {network:?}."); - return Err(SendError::NotApplicable) - } - - // Cloning destination to avoid modifying the value so subsequent exporters can use it. - let dest = destination.clone().take().ok_or(SendError::MissingArgument)?; - if dest != Here { - log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched remote destination {dest:?}."); - return Err(SendError::NotApplicable) - } - - // Cloning universal_source to avoid modifying the value so subsequent exporters can use it. 
- let (local_net, local_sub) = universal_source.clone() - .take() - .ok_or_else(|| { - log::error!(target: "xcm::ethereum_blob_exporter", "universal source not provided."); - SendError::MissingArgument - })? - .split_global() - .map_err(|()| { - log::error!(target: "xcm::ethereum_blob_exporter", "could not get global consensus from universal source '{universal_source:?}'."); - SendError::NotApplicable - })?; - - if Ok(local_net) != universal_location.global_consensus() { - log::trace!(target: "xcm::ethereum_blob_exporter", "skipped due to unmatched relay network {local_net:?}."); - return Err(SendError::NotApplicable) - } - - let para_id = match local_sub.as_slice() { - [Parachain(para_id)] => *para_id, - _ => { - log::error!(target: "xcm::ethereum_blob_exporter", "could not get parachain id from universal source '{local_sub:?}'."); - return Err(SendError::NotApplicable) - }, - }; - - let source_location = Location::new(1, local_sub.clone()); - - let agent_id = match AgentHashedDescription::convert_location(&source_location) { - Some(id) => id, - None => { - log::error!(target: "xcm::ethereum_blob_exporter", "unroutable due to not being able to create agent id. '{source_location:?}'"); - return Err(SendError::NotApplicable) - }, - }; - - let message = message.clone().ok_or_else(|| { - log::error!(target: "xcm::ethereum_blob_exporter", "xcm message not provided."); - SendError::MissingArgument - })?; - - let mut converter = - XcmConverter::::new(&message, expected_network, agent_id); - let (command, message_id) = converter.convert().map_err(|err|{ - log::error!(target: "xcm::ethereum_blob_exporter", "unroutable due to pattern matching error '{err:?}'."); - SendError::Unroutable - })?; - - let channel_id: ChannelId = ParaId::from(para_id).into(); - - let outbound_message = Message { id: Some(message_id.into()), channel_id, command }; - - // validate the message - let (ticket, fee) = OutboundQueue::validate(&outbound_message).map_err(|err| { - log::error!(target: "xcm::ethereum_blob_exporter", "OutboundQueue validation of message failed. {err:?}"); - SendError::Unroutable - })?; - - // convert fee to Asset - let fee = Asset::from((Location::parent(), fee.total())).into(); - - Ok(((ticket.encode(), message_id), fee)) - } - - fn deliver(blob: (Vec, XcmHash)) -> Result { - let ticket: OutboundQueue::Ticket = OutboundQueue::Ticket::decode(&mut blob.0.as_ref()) - .map_err(|_| { - log::trace!(target: "xcm::ethereum_blob_exporter", "undeliverable due to decoding error"); - SendError::NotApplicable - })?; - - let message_id = OutboundQueue::deliver(ticket).map_err(|_| { - log::error!(target: "xcm::ethereum_blob_exporter", "OutboundQueue submit of message failed"); - SendError::Transport("other transport error") - })?; - - log::info!(target: "xcm::ethereum_blob_exporter", "message delivered {message_id:#?}."); - Ok(message_id.into()) - } -} - -/// Errors that can be thrown to the pattern matching step. -#[derive(PartialEq, Debug)] -enum XcmConverterError { - UnexpectedEndOfXcm, - EndOfXcmMessageExpected, - WithdrawAssetExpected, - DepositAssetExpected, - NoReserveAssets, - FilterDoesNotConsumeAllAssets, - TooManyAssets, - ZeroAssetTransfer, - BeneficiaryResolutionFailed, - AssetResolutionFailed, - InvalidFeeAsset, - SetTopicExpected, - ReserveAssetDepositedExpected, - InvalidAsset, - UnexpectedInstruction, -} - -macro_rules! match_expression { - ($expression:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $value:expr $(,)?) => { - match $expression { - $( $pattern )|+ $( if $guard )? 
=> Some($value), - _ => None, - } - }; -} - -struct XcmConverter<'a, ConvertAssetId, Call> { - iter: Peekable>>, - ethereum_network: NetworkId, - agent_id: AgentId, - _marker: PhantomData, -} -impl<'a, ConvertAssetId, Call> XcmConverter<'a, ConvertAssetId, Call> -where - ConvertAssetId: MaybeEquivalence, -{ - fn new(message: &'a Xcm, ethereum_network: NetworkId, agent_id: AgentId) -> Self { - Self { - iter: message.inner().iter().peekable(), - ethereum_network, - agent_id, - _marker: Default::default(), - } - } - - fn convert(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> { - let result = match self.peek() { - Ok(ReserveAssetDeposited { .. }) => self.send_native_tokens_message(), - // Get withdraw/deposit and make native tokens create message. - Ok(WithdrawAsset { .. }) => self.send_tokens_message(), - Err(e) => Err(e), - _ => return Err(XcmConverterError::UnexpectedInstruction), - }?; - - // All xcm instructions must be consumed before exit. - if self.next().is_ok() { - return Err(XcmConverterError::EndOfXcmMessageExpected) - } - - Ok(result) - } - - fn send_tokens_message(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> { - use XcmConverterError::*; - - // Get the reserve assets from WithdrawAsset. - let reserve_assets = - match_expression!(self.next()?, WithdrawAsset(reserve_assets), reserve_assets) - .ok_or(WithdrawAssetExpected)?; - - // Check if clear origin exists and skip over it. - if match_expression!(self.peek(), Ok(ClearOrigin), ()).is_some() { - let _ = self.next(); - } - - // Get the fee asset item from BuyExecution or continue parsing. - let fee_asset = match_expression!(self.peek(), Ok(BuyExecution { fees, .. }), fees); - if fee_asset.is_some() { - let _ = self.next(); - } - - let (deposit_assets, beneficiary) = match_expression!( - self.next()?, - DepositAsset { assets, beneficiary }, - (assets, beneficiary) - ) - .ok_or(DepositAssetExpected)?; - - // assert that the beneficiary is AccountKey20. - let recipient = match_expression!( - beneficiary.unpack(), - (0, [AccountKey20 { network, key }]) - if self.network_matches(network), - H160(*key) - ) - .ok_or(BeneficiaryResolutionFailed)?; - - // Make sure there are reserved assets. - if reserve_assets.len() == 0 { - return Err(NoReserveAssets) - } - - // Check the the deposit asset filter matches what was reserved. - if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) { - return Err(FilterDoesNotConsumeAllAssets) - } - - // We only support a single asset at a time. - ensure!(reserve_assets.len() == 1, TooManyAssets); - let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; - - // If there was a fee specified verify it. - if let Some(fee_asset) = fee_asset { - // The fee asset must be the same as the reserve asset. - if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun { - return Err(InvalidFeeAsset) - } - } - - let (token, amount) = match reserve_asset { - Asset { id: AssetId(inner_location), fun: Fungible(amount) } => - match inner_location.unpack() { - (0, [AccountKey20 { network, key }]) if self.network_matches(network) => - Some((H160(*key), *amount)), - _ => None, - }, - _ => None, - } - .ok_or(AssetResolutionFailed)?; - - // transfer amount must be greater than 0. - ensure!(amount > 0, ZeroAssetTransfer); - - // Check if there is a SetTopic. 
- let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; - - Ok(( - Command::AgentExecute { - agent_id: self.agent_id, - command: AgentExecuteCommand::TransferToken { token, recipient, amount }, - }, - *topic_id, - )) - } - - fn next(&mut self) -> Result<&'a Instruction, XcmConverterError> { - self.iter.next().ok_or(XcmConverterError::UnexpectedEndOfXcm) - } - - fn peek(&mut self) -> Result<&&'a Instruction, XcmConverterError> { - self.iter.peek().ok_or(XcmConverterError::UnexpectedEndOfXcm) - } - - fn network_matches(&self, network: &Option) -> bool { - if let Some(network) = network { - *network == self.ethereum_network - } else { - true - } - } - - /// Convert the xcm for Polkadot-native token from AH into the Command - /// To match transfers of Polkadot-native tokens, we expect an input of the form: - /// # ReserveAssetDeposited - /// # ClearOrigin - /// # BuyExecution - /// # DepositAsset - /// # SetTopic - fn send_native_tokens_message(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> { - use XcmConverterError::*; - - // Get the reserve assets. - let reserve_assets = - match_expression!(self.next()?, ReserveAssetDeposited(reserve_assets), reserve_assets) - .ok_or(ReserveAssetDepositedExpected)?; - - // Check if clear origin exists and skip over it. - if match_expression!(self.peek(), Ok(ClearOrigin), ()).is_some() { - let _ = self.next(); - } - - // Get the fee asset item from BuyExecution or continue parsing. - let fee_asset = match_expression!(self.peek(), Ok(BuyExecution { fees, .. }), fees); - if fee_asset.is_some() { - let _ = self.next(); - } - - let (deposit_assets, beneficiary) = match_expression!( - self.next()?, - DepositAsset { assets, beneficiary }, - (assets, beneficiary) - ) - .ok_or(DepositAssetExpected)?; - - // assert that the beneficiary is AccountKey20. - let recipient = match_expression!( - beneficiary.unpack(), - (0, [AccountKey20 { network, key }]) - if self.network_matches(network), - H160(*key) - ) - .ok_or(BeneficiaryResolutionFailed)?; - - // Make sure there are reserved assets. - if reserve_assets.len() == 0 { - return Err(NoReserveAssets) - } - - // Check the the deposit asset filter matches what was reserved. - if reserve_assets.inner().iter().any(|asset| !deposit_assets.matches(asset)) { - return Err(FilterDoesNotConsumeAllAssets) - } - - // We only support a single asset at a time. - ensure!(reserve_assets.len() == 1, TooManyAssets); - let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; - - // If there was a fee specified verify it. - if let Some(fee_asset) = fee_asset { - // The fee asset must be the same as the reserve asset. - if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun { - return Err(InvalidFeeAsset) - } - } - - let (asset_id, amount) = match reserve_asset { - Asset { id: AssetId(inner_location), fun: Fungible(amount) } => - Some((inner_location.clone(), *amount)), - _ => None, - } - .ok_or(AssetResolutionFailed)?; - - // transfer amount must be greater than 0. - ensure!(amount > 0, ZeroAssetTransfer); - - let token_id = TokenIdOf::convert_location(&asset_id).ok_or(InvalidAsset)?; - - let expected_asset_id = ConvertAssetId::convert(&token_id).ok_or(InvalidAsset)?; - - ensure!(asset_id == expected_asset_id, InvalidAsset); - - // Check if there is a SetTopic. 
- let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; - - Ok((Command::MintForeignToken { token_id, recipient, amount }, *topic_id)) - } -} - -#[cfg(test)] -mod tests { - use frame_support::parameter_types; - use hex_literal::hex; - use snowbridge_core::{ - outbound::{v1::Fee, SendError, SendMessageFeeProvider}, - AgentIdOf, - }; - use sp_std::default::Default; - use xcm::{ - latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, - prelude::SendError as XcmSendError, - }; - - use super::*; - - parameter_types! { - const MaxMessageSize: u32 = u32::MAX; - const RelayNetwork: NetworkId = Polkadot; - UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(1013)].into(); - const BridgedNetwork: NetworkId = Ethereum{ chain_id: 1 }; - const NonBridgedNetwork: NetworkId = Ethereum{ chain_id: 2 }; - } - - struct MockOkOutboundQueue; - impl SendMessage for MockOkOutboundQueue { - type Ticket = (); - - fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { - Ok(((), Fee { local: 1, remote: 1 })) - } - - fn deliver(_: Self::Ticket) -> Result { - Ok(H256::zero()) - } - } - - impl SendMessageFeeProvider for MockOkOutboundQueue { - type Balance = u128; - - fn local_fee() -> Self::Balance { - 1 - } - } - struct MockErrOutboundQueue; - impl SendMessage for MockErrOutboundQueue { - type Ticket = (); - - fn validate(_: &Message) -> Result<(Self::Ticket, Fee), SendError> { - Err(SendError::MessageTooLarge) - } - - fn deliver(_: Self::Ticket) -> Result { - Err(SendError::MessageTooLarge) - } - } - - impl SendMessageFeeProvider for MockErrOutboundQueue { - type Balance = u128; - - fn local_fee() -> Self::Balance { - 1 - } - } - - pub struct MockTokenIdConvert; - impl MaybeEquivalence for MockTokenIdConvert { - fn convert(_id: &TokenId) -> Option { - Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) - } - fn convert_back(_loc: &Location) -> Option { - None - } - } - - #[test] - fn exporter_validate_with_unknown_network_yields_not_applicable() { - let network = Ethereum { chain_id: 1337 }; - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = None; - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_with_invalid_destination_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = None; - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_with_x8_destination_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = Some( - [ - OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, - OnlyChild, - ] - .into(), - ); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - 
MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_without_universal_source_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_without_global_universal_location_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = Here.into(); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_without_global_bridge_location_yields_not_applicable() { - let network = NonBridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = Here.into(); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_with_remote_universal_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Kusama), Parachain(1000)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_without_para_id_in_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = Some(GlobalConsensus(Polkadot).into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_complex_para_id_in_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000), PalletInstance(12)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - 
BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_without_xcm_message_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_with_max_target_fee_yields_unroutable() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; - let fees: Assets = vec![fee.clone()].into(); - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let mut message: Option> = Some( - vec![ - WithdrawAsset(fees), - BuyExecution { fees: fee, weight_limit: Unlimited }, - WithdrawAsset(assets), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: Some(network), key: beneficiary_address } - .into(), - }, - SetTopic([0; 32]), - ] - .into(), - ); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - - assert_eq!(result, Err(XcmSendError::Unroutable)); - } - - #[test] - fn exporter_validate_with_unparsable_xcm_yields_unroutable() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let channel: u32 = 0; - let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; - let fees: Assets = vec![fee.clone()].into(); - - let mut message: Option> = Some( - vec![WithdrawAsset(fees), BuyExecution { fees: fee, weight_limit: Unlimited }].into(), - ); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - - assert_eq!(result, Err(XcmSendError::Unroutable)); - } - - #[test] - fn exporter_validate_xcm_success_case_1() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: 
None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let fee = assets.clone().get(0).unwrap().clone(); - let filter: AssetFilter = assets.clone().into(); - - let mut message: Option> = Some( - vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(), - ); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - - assert!(result.is_ok()); - } - - #[test] - fn exporter_deliver_with_submit_failure_yields_unroutable() { - let result = EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockErrOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::deliver((hex!("deadbeef").to_vec(), XcmHash::default())); - assert_eq!(result, Err(XcmSendError::Transport("other transport error"))) - } - - #[test] - fn xcm_converter_convert_success() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let expected_payload = Command::AgentExecute { - agent_id: Default::default(), - command: AgentExecuteCommand::TransferToken { - token: token_address.into(), - recipient: beneficiary_address.into(), - amount: 1000, - }, - }; - let result = converter.convert(); - assert_eq!(result, Ok((expected_payload, [0; 32]))); - } - - #[test] - fn xcm_converter_convert_without_buy_execution_yields_success() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let expected_payload = Command::AgentExecute { - agent_id: Default::default(), - command: AgentExecuteCommand::TransferToken { - token: token_address.into(), - recipient: beneficiary_address.into(), - amount: 1000, - }, - }; - let result = converter.convert(); - assert_eq!(result, Ok((expected_payload, [0; 32]))); - } - - #[test] - fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() { - let network = BridgedNetwork::get(); - - let 
token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(All); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let expected_payload = Command::AgentExecute { - agent_id: Default::default(), - command: AgentExecuteCommand::TransferToken { - token: token_address.into(), - recipient: beneficiary_address.into(), - amount: 1000, - }, - }; - let result = converter.convert(); - assert_eq!(result, Ok((expected_payload, [0; 32]))); - } - - #[test] - fn xcm_converter_convert_with_fees_less_than_reserve_yields_success() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); - let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(500) }; - - let assets: Assets = - vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); - - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee_asset, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let expected_payload = Command::AgentExecute { - agent_id: Default::default(), - command: AgentExecuteCommand::TransferToken { - token: token_address.into(), - recipient: beneficiary_address.into(), - amount: 1000, - }, - }; - let result = converter.convert(); - assert_eq!(result, Ok((expected_payload, [0; 32]))); - } - - #[test] - fn xcm_converter_convert_without_set_topic_yields_set_topic_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - ClearTopic, - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::SetTopicExpected)); - } - - #[test] - fn xcm_converter_convert_with_partial_message_yields_unexpected_end_of_xcm() { - let network = BridgedNetwork::get(); - - let 
token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let message: Xcm<()> = vec![WithdrawAsset(assets)].into(); - - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); - } - - #[test] - fn xcm_converter_with_different_fee_asset_fails() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let asset_location = [AccountKey20 { network: None, key: token_address }].into(); - let fee_asset = - Asset { id: AssetId(Location { parents: 0, interior: Here }), fun: Fungible(1000) }; - - let assets: Assets = - vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); - - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee_asset, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); - } - - #[test] - fn xcm_converter_with_fees_greater_than_reserve_fails() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); - let fee_asset = Asset { id: AssetId(asset_location.clone()), fun: Fungible(1001) }; - - let assets: Assets = - vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); - - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee_asset, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::InvalidFeeAsset)); - } - - #[test] - fn xcm_converter_convert_with_empty_xcm_yields_unexpected_end_of_xcm() { - let network = BridgedNetwork::get(); - - let message: Xcm<()> = vec![].into(); - - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); - } - - #[test] - fn xcm_converter_convert_with_extra_instructions_yields_end_of_xcm_message_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let 
filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ClearError, - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::EndOfXcmMessageExpected)); - } - - #[test] - fn xcm_converter_convert_without_withdraw_asset_yields_withdraw_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::UnexpectedInstruction)); - } - - #[test] - fn xcm_converter_convert_without_withdraw_asset_yields_deposit_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::DepositAssetExpected)); - } - - #[test] - fn xcm_converter_convert_without_assets_yields_no_reserve_assets() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![].into(); - let filter: AssetFilter = assets.clone().into(); - - let fee = Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }; - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::NoReserveAssets)); - } - - #[test] - fn xcm_converter_convert_with_two_assets_yields_too_many_assets() { - let network = BridgedNetwork::get(); - - let token_address_1: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let 
token_address_2: [u8; 20] = hex!("1100000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![ - Asset { - id: AssetId(AccountKey20 { network: None, key: token_address_1 }.into()), - fun: Fungible(1000), - }, - Asset { - id: AssetId(AccountKey20 { network: None, key: token_address_2 }.into()), - fun: Fungible(500), - }, - ] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::TooManyAssets)); - } - - #[test] - fn xcm_converter_convert_without_consuming_filter_yields_filter_does_not_consume_all_assets() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(0)); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::FilterDoesNotConsumeAllAssets)); - } - - #[test] - fn xcm_converter_convert_with_zero_amount_asset_yields_zero_asset_transfer() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(0), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::ZeroAssetTransfer)); - } - - #[test] - fn xcm_converter_convert_non_ethereum_asset_yields_asset_resolution_failed() { - let network = BridgedNetwork::get(); - - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([GlobalConsensus(Polkadot), Parachain(1000), GeneralIndex(0)].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - - let message: 
Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); - } - - #[test] - fn xcm_converter_convert_non_ethereum_chain_asset_yields_asset_resolution_failed() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId( - AccountKey20 { network: Some(Ethereum { chain_id: 2 }), key: token_address }.into(), - ), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); - } - - #[test] - fn xcm_converter_convert_non_ethereum_chain_yields_asset_resolution_failed() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId( - [AccountKey20 { network: Some(NonBridgedNetwork::get()), key: token_address }] - .into(), - ), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); - } - - #[test] - fn xcm_converter_convert_with_non_ethereum_beneficiary_yields_beneficiary_resolution_failed() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - - let beneficiary_address: [u8; 32] = - hex!("2000000000000000000000000000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: [ - GlobalConsensus(Polkadot), - Parachain(1000), - AccountId32 { network: Some(Polkadot), id: beneficiary_address }, - ] - 
.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); - } - - #[test] - fn xcm_converter_convert_with_non_ethereum_chain_beneficiary_yields_beneficiary_resolution_failed( - ) { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { - network: Some(Ethereum { chain_id: 2 }), - key: beneficiary_address, - } - .into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); - } - - #[test] - fn test_describe_asset_hub() { - let legacy_location: Location = Location::new(0, [Parachain(1000)]); - let legacy_agent_id = AgentIdOf::convert_location(&legacy_location).unwrap(); - assert_eq!( - legacy_agent_id, - hex!("72456f48efed08af20e5b317abf8648ac66e86bb90a411d9b0b713f7364b75b4").into() - ); - let location: Location = Location::new(1, [Parachain(1000)]); - let agent_id = AgentIdOf::convert_location(&location).unwrap(); - assert_eq!( - agent_id, - hex!("81c5ab2571199e3188135178f3c2c8e2d268be1313d029b30f534fa579b69b79").into() - ) - } - - #[test] - fn test_describe_here() { - let location: Location = Location::new(0, []); - let agent_id = AgentIdOf::convert_location(&location).unwrap(); - assert_eq!( - agent_id, - hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into() - ) - } - - #[test] - fn xcm_converter_transfer_native_token_success() { - let network = BridgedNetwork::get(); - - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let amount = 1000000; - let asset_location = Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]); - let token_id = TokenIdOf::convert_location(&asset_location).unwrap(); - - let assets: Assets = - vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - ReserveAssetDeposited(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let expected_payload = - Command::MintForeignToken { recipient: beneficiary_address.into(), amount, token_id }; - let result = converter.convert(); - assert_eq!(result, Ok((expected_payload, [0; 32]))); - } - - #[test] - fn xcm_converter_transfer_native_token_with_invalid_location_will_fail() { - let network = BridgedNetwork::get(); - - let beneficiary_address: [u8; 20] = 
hex!("2000000000000000000000000000000000000000"); - - let amount = 1000000; - // Invalid asset location from a different consensus - let asset_location = Location { - parents: 2, - interior: [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))].into(), - }; - - let assets: Assets = - vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - ReserveAssetDeposited(assets.clone()), - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network, Default::default()); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::InvalidAsset)); - } - - #[test] - fn exporter_validate_with_invalid_dest_does_not_alter_destination() { - let network = BridgedNetwork::get(); - let destination: InteriorLocation = Parachain(1000).into(); - - let universal_source: InteriorLocation = - [GlobalConsensus(Polkadot), Parachain(1000)].into(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let fee = assets.clone().get(0).unwrap().clone(); - let filter: AssetFilter = assets.clone().into(); - let msg: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut msg_wrapper: Option> = Some(msg.clone()); - let mut dest_wrapper = Some(destination.clone()); - let mut universal_source_wrapper = Some(universal_source.clone()); - - let result = EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate( - network, - channel, - &mut universal_source_wrapper, - &mut dest_wrapper, - &mut msg_wrapper, - ); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - - // ensure mutable variables are not changed - assert_eq!(Some(destination), dest_wrapper); - assert_eq!(Some(msg), msg_wrapper); - assert_eq!(Some(universal_source), universal_source_wrapper); - } - - #[test] - fn exporter_validate_with_invalid_universal_source_does_not_alter_universal_source() { - let network = BridgedNetwork::get(); - let destination: InteriorLocation = Here.into(); - - let universal_source: InteriorLocation = - [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)].into(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let fee = assets.clone().get(0).unwrap().clone(); - let filter: AssetFilter = assets.clone().into(); - let msg: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, - DepositAsset { - 
assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut msg_wrapper: Option> = Some(msg.clone()); - let mut dest_wrapper = Some(destination.clone()); - let mut universal_source_wrapper = Some(universal_source.clone()); - - let result = EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - >::validate( - network, - channel, - &mut universal_source_wrapper, - &mut dest_wrapper, - &mut msg_wrapper, - ); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - - // ensure mutable variables are not changed - assert_eq!(Some(destination), dest_wrapper); - assert_eq!(Some(msg), msg_wrapper); - assert_eq!(Some(universal_source), universal_source_wrapper); - } -} diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs deleted file mode 100644 index 77616bde2796..000000000000 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/convert.rs +++ /dev/null @@ -1,1068 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -//! Converts XCM messages into InboundMessage that can be processed by the Gateway contract - -use codec::DecodeAll; -use core::slice::Iter; -use frame_support::{ensure, traits::Get, BoundedVec}; -use snowbridge_core::{ - outbound::{ - v2::{Command, Message}, - TransactInfo, - }, - TokenId, TokenIdOf, TokenIdOf as LocationIdOf, -}; -use sp_core::H160; -use sp_runtime::traits::MaybeEquivalence; -use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; -use xcm::prelude::*; -use xcm_executor::traits::ConvertLocation; - -/// Errors that can be thrown to the pattern matching step. -#[derive(PartialEq, Debug)] -pub enum XcmConverterError { - UnexpectedEndOfXcm, - EndOfXcmMessageExpected, - WithdrawAssetExpected, - DepositAssetExpected, - NoReserveAssets, - FilterDoesNotConsumeAllAssets, - TooManyAssets, - ZeroAssetTransfer, - BeneficiaryResolutionFailed, - AssetResolutionFailed, - InvalidFeeAsset, - SetTopicExpected, - ReserveAssetDepositedExpected, - InvalidAsset, - UnexpectedInstruction, - TooManyCommands, - AliasOriginExpected, - InvalidOrigin, - TransactDecodeFailed, - TransactParamsDecodeFailed, - FeeAssetResolutionFailed, - CallContractValueInsufficient, -} - -macro_rules! match_expression { - ($expression:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $value:expr $(,)?) => { - match $expression { - $( $pattern )|+ $( if $guard )? 
=> Some($value), - _ => None, - } - }; -} - -pub struct XcmConverter<'a, ConvertAssetId, WETHAddress, Call> { - iter: Peekable<Iter<'a, Instruction<Call>>>, - ethereum_network: NetworkId, - _marker: PhantomData<(ConvertAssetId, WETHAddress)>, -} -impl<'a, ConvertAssetId, WETHAddress, Call> XcmConverter<'a, ConvertAssetId, WETHAddress, Call> -where - ConvertAssetId: MaybeEquivalence<TokenId, Location>, - WETHAddress: Get<H160>, -{ - pub fn new(message: &'a Xcm<Call>, ethereum_network: NetworkId) -> Self { - Self { - iter: message.inner().iter().peekable(), - ethereum_network, - _marker: Default::default(), - } - } - - pub fn convert(&mut self) -> Result<Message, XcmConverterError> { - let result = self.to_ethereum_message()?; - Ok(result) - } - - fn next(&mut self) -> Result<&'a Instruction<Call>, XcmConverterError> { - self.iter.next().ok_or(XcmConverterError::UnexpectedEndOfXcm) - } - - fn peek(&mut self) -> Result<&&'a Instruction<Call>, XcmConverterError> { - self.iter.peek().ok_or(XcmConverterError::UnexpectedEndOfXcm) - } - - fn network_matches(&self, network: &Option<NetworkId>) -> bool { - if let Some(network) = network { - *network == self.ethereum_network - } else { - true - } - } - - /// Extract the fee asset item from PayFees(V5) - fn extract_remote_fee(&mut self) -> Result<u128, XcmConverterError> { - use XcmConverterError::*; - let _ = match_expression!(self.next()?, WithdrawAsset(fee), fee) - .ok_or(WithdrawAssetExpected)?; - let fee_asset = - match_expression!(self.next()?, PayFees { asset: fee }, fee).ok_or(InvalidFeeAsset)?; - let (fee_asset_id, fee_amount) = match fee_asset { - Asset { id: asset_id, fun: Fungible(amount) } => Some((asset_id, *amount)), - _ => None, - } - .ok_or(AssetResolutionFailed)?; - let weth_address = match_expression!( - fee_asset_id.0.unpack(), - (0, [AccountKey20 { network, key }]) - if self.network_matches(network), - H160(*key) - ) - .ok_or(FeeAssetResolutionFailed)?; - ensure!(weth_address == WETHAddress::get(), InvalidFeeAsset); - Ok(fee_amount) - } - - /// Convert the XCM into the Message which will be executed - /// on the Ethereum Gateway contract. We expect an input of the form: - /// # WithdrawAsset(WETH) - /// # PayFees(WETH) - /// # ReserveAssetDeposited(PNA) | WithdrawAsset(ENA) - /// # AliasOrigin(Origin) - /// # DepositAsset(PNA|ENA) - /// # Transact() ---Optional - /// # SetTopic - fn to_ethereum_message(&mut self) -> Result<Message, XcmConverterError> { - use XcmConverterError::*; - - // Get fee amount - let fee_amount = self.extract_remote_fee()?; - - // Get ENA reserve asset from WithdrawAsset. - let enas = - match_expression!(self.peek(), Ok(WithdrawAsset(reserve_assets)), reserve_assets); - if enas.is_some() { - let _ = self.next(); - } - - // Get PNA reserve asset from ReserveAssetDeposited - let pnas = match_expression!( - self.peek(), - Ok(ReserveAssetDeposited(reserve_assets)), - reserve_assets - ); - if pnas.is_some() { - let _ = self.next(); - } - // Check AliasOrigin. - let origin_location = match_expression!(self.next()?, AliasOrigin(origin), origin) - .ok_or(AliasOriginExpected)?; - let origin = LocationIdOf::convert_location(origin_location).ok_or(InvalidOrigin)?; - - let (deposit_assets, beneficiary) = match_expression!( - self.next()?, - DepositAsset { assets, beneficiary }, - (assets, beneficiary) - ) - .ok_or(DepositAssetExpected)?; - - // assert that the beneficiary is AccountKey20. - let recipient = match_expression!( - beneficiary.unpack(), - (0, [AccountKey20 { network, key }]) - if self.network_matches(network), - H160(*key) - ) - .ok_or(BeneficiaryResolutionFailed)?; - - // Make sure there are reserved assets.
- if enas.is_none() && pnas.is_none() { - return Err(NoReserveAssets) - } - - let mut commands: Vec<Command> = Vec::new(); - let mut weth_amount = 0; - - // ENA transfer commands - if let Some(enas) = enas { - for ena in enas.clone().inner().iter() { - // Check that the deposit asset filter matches what was reserved. - if !deposit_assets.matches(ena) { - return Err(FilterDoesNotConsumeAllAssets) - } - - // Only fungible assets are allowed - let (token, amount) = match ena { - Asset { id: AssetId(inner_location), fun: Fungible(amount) } => - match inner_location.unpack() { - (0, [AccountKey20 { network, key }]) - if self.network_matches(network) => - Some((H160(*key), *amount)), - _ => None, - }, - _ => None, - } - .ok_or(AssetResolutionFailed)?; - - // transfer amount must be greater than 0. - ensure!(amount > 0, ZeroAssetTransfer); - - if token == WETHAddress::get() { - weth_amount = amount; - } - - commands.push(Command::UnlockNativeToken { token, recipient, amount }); - } - } - - // PNA transfer commands - if let Some(pnas) = pnas { - ensure!(pnas.len() > 0, NoReserveAssets); - for pna in pnas.clone().inner().iter() { - // Check that the deposit asset filter matches what was reserved. - if !deposit_assets.matches(pna) { - return Err(FilterDoesNotConsumeAllAssets) - } - - // Only fungible assets are allowed - let (asset_id, amount) = match pna { - Asset { id: AssetId(inner_location), fun: Fungible(amount) } => - Some((inner_location.clone(), *amount)), - _ => None, - } - .ok_or(AssetResolutionFailed)?; - - // transfer amount must be greater than 0. - ensure!(amount > 0, ZeroAssetTransfer); - - // Ensure PNA already registered - let token_id = TokenIdOf::convert_location(&asset_id).ok_or(InvalidAsset)?; - let expected_asset_id = ConvertAssetId::convert(&token_id).ok_or(InvalidAsset)?; - ensure!(asset_id == expected_asset_id, InvalidAsset); - - commands.push(Command::MintForeignToken { token_id, recipient, amount }); - } - } - - // Transact commands - let transact_call = match_expression!(self.peek(), Ok(Transact { call, .. }), call); - if let Some(transact_call) = transact_call { - let _ = self.next(); - let transact = - TransactInfo::decode_all(&mut transact_call.clone().into_encoded().as_slice()) - .map_err(|_| TransactDecodeFailed)?; - if transact.value > 0 { - ensure!(weth_amount > transact.value, CallContractValueInsufficient); - } - commands.push(Command::CallContract { - target: transact.target, - data: transact.data, - gas_limit: transact.gas_limit, - value: transact.value, - }); - } - - // ensure SetTopic exists - let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; - - let message = Message { - id: (*topic_id).into(), - origin_location: origin_location.clone(), - origin, - fee: fee_amount, - commands: BoundedVec::try_from(commands).map_err(|_| TooManyCommands)?, - }; - - // All xcm instructions must be consumed before exit.
- if self.next().is_ok() { - return Err(EndOfXcmMessageExpected) - } - - Ok(message) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::outbound::v2::tests::{ - BridgedNetwork, MockTokenIdConvert, NonBridgedNetwork, WETHAddress, - }; - use hex_literal::hex; - use snowbridge_core::AgentIdOf; - use xcm::latest::WESTEND_GENESIS_HASH; - - #[test] - fn xcm_converter_convert_success() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let fee_asset: Asset = Asset { - id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - let result = converter.convert(); - assert!(result.is_ok()); - } - - #[test] - fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(All); - let fee_asset: Asset = Asset { - id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - let result = converter.convert(); - assert_eq!(result.is_ok(), true); - } - - #[test] - fn xcm_converter_convert_without_set_topic_yields_set_topic_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - let fee_asset: Asset = Asset { - id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: 
None, key: beneficiary_address }.into(), - }, - ClearTopic, - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::SetTopicExpected)); - } - - #[test] - fn xcm_converter_convert_with_partial_message_yields_invalid_fee_asset() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let message: Xcm<()> = vec![WithdrawAsset(assets)].into(); - - let mut converter = - XcmConverter::::new(&message, network); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::UnexpectedEndOfXcm)); - } - - #[test] - fn xcm_converter_with_different_fee_asset_succeed() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let asset_location = [AccountKey20 { network: None, key: token_address }].into(); - let assets: Assets = - vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); - - let filter: AssetFilter = assets.clone().into(); - let fee_asset: Asset = Asset { - id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - let result = converter.convert(); - assert_eq!(result.is_ok(), true); - } - - #[test] - fn xcm_converter_with_fees_greater_than_reserve_succeed() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let asset_location: Location = [AccountKey20 { network: None, key: token_address }].into(); - let fee_asset: Asset = Asset { - id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - - let assets: Assets = - vec![Asset { id: AssetId(asset_location), fun: Fungible(1000) }].into(); - - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - let result = converter.convert(); - assert_eq!(result.is_ok(), true); - } - - #[test] - fn xcm_converter_convert_with_empty_xcm_yields_unexpected_end_of_xcm() { - let network = BridgedNetwork::get(); - - let message: Xcm<()> = vec![].into(); - - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), 
Some(XcmConverterError::UnexpectedEndOfXcm)); - } - - #[test] - fn xcm_converter_convert_with_extra_instructions_yields_end_of_xcm_message_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - let fee_asset: Asset = Asset { - id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ClearError, - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::EndOfXcmMessageExpected)); - } - - #[test] - fn xcm_converter_convert_without_withdraw_asset_yields_withdraw_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let message: Xcm<()> = vec![ - ClearOrigin, - BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::WithdrawAssetExpected)); - } - - #[test] - fn xcm_converter_convert_without_withdraw_asset_yields_deposit_expected() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - - let fee_asset: Asset = Asset { - id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::DepositAssetExpected)); - } - - #[test] - fn xcm_converter_convert_without_assets_yields_no_reserve_assets() { - let network = BridgedNetwork::get(); - - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![].into(); - let filter: AssetFilter = assets.clone().into(); - - let fee_asset: Asset = Asset { - id: 
AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::NoReserveAssets)); - } - - #[test] - fn xcm_converter_convert_with_two_assets_yields() { - let network = BridgedNetwork::get(); - - let token_address_1: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let token_address_2: [u8; 20] = hex!("1100000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![ - Asset { - id: AssetId(AccountKey20 { network: None, key: token_address_1 }.into()), - fun: Fungible(1000), - }, - Asset { - id: AssetId(AccountKey20 { network: None, key: token_address_2 }.into()), - fun: Fungible(500), - }, - ] - .into(); - let filter: AssetFilter = assets.clone().into(); - let fee_asset: Asset = Asset { - id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.is_ok(), true); - } - - #[test] - fn xcm_converter_convert_without_consuming_filter_yields_filter_does_not_consume_all_assets() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(0)); - let fee_asset: Asset = Asset { - id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::FilterDoesNotConsumeAllAssets)); - } - - #[test] - fn xcm_converter_convert_with_zero_amount_asset_yields_zero_asset_transfer() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset 
{ - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(0), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - let fee_asset: Asset = Asset { - id: AssetId([AccountKey20 { network: None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::ZeroAssetTransfer)); - } - - #[test] - fn xcm_converter_convert_non_ethereum_asset_yields_asset_resolution_failed() { - let network = BridgedNetwork::get(); - - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId([GlobalConsensus(Polkadot), Parachain(1000), GeneralIndex(0)].into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - let fee_asset: Asset = Asset { - id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), - fun: Fungible(1000), - }; - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone().into()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); - } - - #[test] - fn xcm_converter_convert_non_ethereum_chain_asset_yields_asset_resolution_failed() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId( - AccountKey20 { network: Some(Ethereum { chain_id: 2 }), key: token_address }.into(), - ), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - let fee_asset: Asset = Asset { - id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), - fun: Fungible(1000), - }; - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone().into()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); - } - - #[test] - fn xcm_converter_convert_non_ethereum_chain_yields_asset_resolution_failed() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = 
hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId( - [AccountKey20 { network: Some(NonBridgedNetwork::get()), key: token_address }] - .into(), - ), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - let fee_asset: Asset = Asset { - id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), - fun: Fungible(1000), - }; - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone().into()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::AssetResolutionFailed)); - } - - #[test] - fn xcm_converter_convert_with_non_ethereum_beneficiary_yields_beneficiary_resolution_failed() { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - - let beneficiary_address: [u8; 32] = - hex!("2000000000000000000000000000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - let fee_asset: Asset = Asset { - id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), - fun: Fungible(1000), - }; - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone().into()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountId32 { network: Some(Polkadot), id: beneficiary_address } - .into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); - } - - #[test] - fn xcm_converter_convert_with_non_ethereum_chain_beneficiary_yields_beneficiary_resolution_failed( - ) { - let network = BridgedNetwork::get(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = Wild(WildAsset::AllCounted(1)); - let fee_asset: Asset = Asset { - id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), - fun: Fungible(1000), - }; - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { - network: Some(Ethereum { chain_id: 2 }), - key: beneficiary_address, - } - .into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::BeneficiaryResolutionFailed)); - } - - #[test] 
- fn test_describe_asset_hub() { - let legacy_location: Location = Location::new(0, [Parachain(1000)]); - let legacy_agent_id = AgentIdOf::convert_location(&legacy_location).unwrap(); - assert_eq!( - legacy_agent_id, - hex!("72456f48efed08af20e5b317abf8648ac66e86bb90a411d9b0b713f7364b75b4").into() - ); - let location: Location = Location::new(1, [Parachain(1000)]); - let agent_id = AgentIdOf::convert_location(&location).unwrap(); - assert_eq!( - agent_id, - hex!("81c5ab2571199e3188135178f3c2c8e2d268be1313d029b30f534fa579b69b79").into() - ) - } - - #[test] - fn test_describe_here() { - let location: Location = Location::new(0, []); - let agent_id = AgentIdOf::convert_location(&location).unwrap(); - assert_eq!( - agent_id, - hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into() - ) - } - - #[test] - fn xcm_converter_transfer_native_token_success() { - let network = BridgedNetwork::get(); - - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let amount = 1000000; - let asset_location = Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]); - let token_id = TokenIdOf::convert_location(&asset_location).unwrap(); - - let assets: Assets = - vec![Asset { id: AssetId(asset_location.clone()), fun: Fungible(amount) }].into(); - let filter: AssetFilter = assets.clone().into(); - let fee_asset: Asset = Asset { - id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), - fun: Fungible(1000), - }; - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - ReserveAssetDeposited(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut converter = - XcmConverter::::new(&message, network); - let expected_payload = - Command::MintForeignToken { recipient: beneficiary_address.into(), amount, token_id }; - let expected_message = Message { - origin_location: Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)]), - id: [0; 32].into(), - origin: hex!("aa16eddac8725928eaeda4aae518bf10d02bee80382517d21464a5cdf8d1d8e1").into(), - fee: 1000, - commands: BoundedVec::try_from(vec![expected_payload]).unwrap(), - }; - let result = converter.convert(); - assert_eq!(result, Ok(expected_message)); - } - - #[test] - fn xcm_converter_transfer_native_token_with_invalid_location_will_fail() { - let network = BridgedNetwork::get(); - - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let amount = 1000000; - // Invalid asset location from a different consensus - let asset_location = Location { - parents: 2, - interior: [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))].into(), - }; - - let assets: Assets = - vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); - let filter: AssetFilter = assets.clone().into(); - - let fee_asset: Asset = Asset { - id: AssetId(AccountKey20 { network: None, key: WETHAddress::get().0 }.into()), - fun: Fungible(1000), - }; - - let message: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - ReserveAssetDeposited(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); 
- let mut converter = - XcmConverter::::new(&message, network); - let result = converter.convert(); - assert_eq!(result.err(), Some(XcmConverterError::InvalidAsset)); - } -} diff --git a/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs deleted file mode 100644 index 0fbfc2784efa..000000000000 --- a/bridges/snowbridge/primitives/router/src/outbound/v2/mod.rs +++ /dev/null @@ -1,738 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -//! Converts XCM messages into simpler commands that can be processed by the Gateway contract - -pub mod convert; -use convert::XcmConverter; - -use codec::{Decode, Encode}; -use frame_support::{ - ensure, - traits::{Contains, Get, ProcessMessageError}, -}; -use snowbridge_core::{outbound::v2::SendMessage, TokenId}; -use sp_core::{H160, H256}; -use sp_runtime::traits::MaybeEquivalence; -use sp_std::{marker::PhantomData, ops::ControlFlow, prelude::*}; -use xcm::prelude::*; -use xcm_builder::{CreateMatcher, ExporterFor, MatchXcm}; -use xcm_executor::traits::{ConvertLocation, ExportXcm}; - -pub const TARGET: &'static str = "xcm::ethereum_blob_exporter::v2"; - -pub struct EthereumBlobExporter< - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, - WETHAddress, ->( - PhantomData<( - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, - WETHAddress, - )>, -); - -impl< - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, - WETHAddress, - > ExportXcm - for EthereumBlobExporter< - UniversalLocation, - EthereumNetwork, - OutboundQueue, - AgentHashedDescription, - ConvertAssetId, - WETHAddress, - > -where - UniversalLocation: Get, - EthereumNetwork: Get, - OutboundQueue: SendMessage, - AgentHashedDescription: ConvertLocation, - ConvertAssetId: MaybeEquivalence, - WETHAddress: Get, -{ - type Ticket = (Vec, XcmHash); - - fn validate( - network: NetworkId, - _channel: u32, - universal_source: &mut Option, - destination: &mut Option, - message: &mut Option>, - ) -> SendResult { - log::debug!(target: TARGET, "message route through bridge {message:?}."); - - let expected_network = EthereumNetwork::get(); - let universal_location = UniversalLocation::get(); - - if network != expected_network { - log::trace!(target: TARGET, "skipped due to unmatched bridge network {network:?}."); - return Err(SendError::NotApplicable) - } - - // Cloning destination to avoid modifying the value so subsequent exporters can use it. - let dest = destination.clone().ok_or(SendError::MissingArgument)?; - if dest != Here { - log::trace!(target: TARGET, "skipped due to unmatched remote destination {dest:?}."); - return Err(SendError::NotApplicable) - } - - // Cloning universal_source to avoid modifying the value so subsequent exporters can use it. - let (local_net, _) = universal_source.clone() - .ok_or_else(|| { - log::error!(target: TARGET, "universal source not provided."); - SendError::MissingArgument - })? 
- .split_global() - .map_err(|()| { - log::error!(target: TARGET, "could not get global consensus from universal source '{universal_source:?}'."); - SendError::NotApplicable - })?; - - if Ok(local_net) != universal_location.global_consensus() { - log::trace!(target: TARGET, "skipped due to unmatched relay network {local_net:?}."); - return Err(SendError::NotApplicable) - } - - let message = message.clone().ok_or_else(|| { - log::error!(target: TARGET, "xcm message not provided."); - SendError::MissingArgument - })?; - - // Inspect AliasOrigin as V2 message - let mut instructions = message.clone().0; - let result = instructions.matcher().match_next_inst_while( - |_| true, - |inst| { - return match inst { - AliasOrigin(..) => Err(ProcessMessageError::Yield), - _ => Ok(ControlFlow::Continue(())), - } - }, - ); - ensure!(result.is_err(), SendError::NotApplicable); - - let mut converter = - XcmConverter::::new(&message, expected_network); - let message = converter.convert().map_err(|err| { - log::error!(target: TARGET, "unroutable due to pattern matching error '{err:?}'."); - SendError::Unroutable - })?; - - // validate the message - let (ticket, _) = OutboundQueue::validate(&message).map_err(|err| { - log::error!(target: TARGET, "OutboundQueue validation of message failed. {err:?}"); - SendError::Unroutable - })?; - - Ok(((ticket.encode(), XcmHash::from(message.id)), Assets::default())) - } - - fn deliver(blob: (Vec, XcmHash)) -> Result { - let ticket: OutboundQueue::Ticket = OutboundQueue::Ticket::decode(&mut blob.0.as_ref()) - .map_err(|_| { - log::trace!(target: TARGET, "undeliverable due to decoding error"); - SendError::NotApplicable - })?; - - let message_id = OutboundQueue::deliver(ticket).map_err(|_| { - log::error!(target: TARGET, "OutboundQueue submit of message failed"); - SendError::Transport("other transport error") - })?; - - log::info!(target: TARGET, "message delivered {message_id:#?}."); - Ok(message_id.into()) - } -} - -/// An adapter for the implementation of `ExporterFor`, which attempts to find the -/// `(bridge_location, payment)` for the requested `network` and `remote_location` and `xcm` -/// in the provided `T` table containing various exporters. -pub struct XcmFilterExporter(core::marker::PhantomData<(T, M)>); -impl>> ExporterFor for XcmFilterExporter { - fn exporter_for( - network: &NetworkId, - remote_location: &InteriorLocation, - xcm: &Xcm<()>, - ) -> Option<(Location, Option)> { - // check the XCM - if !M::contains(xcm) { - return None - } - // check `network` and `remote_location` - T::exporter_for(network, remote_location, xcm) - } -} - -/// Xcm for SnowbridgeV2 which requires XCMV5 -pub struct XcmForSnowbridgeV2; -impl Contains> for XcmForSnowbridgeV2 { - fn contains(xcm: &Xcm<()>) -> bool { - let mut instructions = xcm.clone().0; - let result = instructions.matcher().match_next_inst_while( - |_| true, - |inst| { - return match inst { - AliasOrigin(..) => Err(ProcessMessageError::Yield), - _ => Ok(ControlFlow::Continue(())), - } - }, - ); - result.is_err() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use frame_support::parameter_types; - use hex_literal::hex; - use snowbridge_core::{ - outbound::{v2::Message, SendError, SendMessageFeeProvider}, - AgentIdOf, - }; - use sp_std::default::Default; - use xcm::{latest::WESTEND_GENESIS_HASH, prelude::SendError as XcmSendError}; - - parameter_types! 
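The `XcmForSnowbridgeV2` filter above detects an `AliasOrigin` instruction by making the matcher callback yield (`Err(ProcessMessageError::Yield)`) and then treating an error result as "found". A minimal sketch of the same check written directly against `Xcm<()>`, without the matcher helper (the function name is illustrative):

use xcm::prelude::*;

/// Returns true when the program contains an `AliasOrigin` instruction,
/// which is what marks an export as a Snowbridge V2 (XCMv5) message.
fn contains_alias_origin(xcm: &Xcm<()>) -> bool {
    xcm.0.iter().any(|inst| matches!(inst, AliasOrigin(_)))
}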
{ - const MaxMessageSize: u32 = u32::MAX; - const RelayNetwork: NetworkId = Polkadot; - UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(1013)].into(); - pub const BridgedNetwork: NetworkId = Ethereum{ chain_id: 1 }; - pub const NonBridgedNetwork: NetworkId = Ethereum{ chain_id: 2 }; - pub WETHAddress: H160 = H160(hex_literal::hex!("c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2")); - } - - struct MockOkOutboundQueue; - impl SendMessage for MockOkOutboundQueue { - type Ticket = (); - - type Balance = u128; - - fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { - Ok(((), 1_u128)) - } - - fn deliver(_: Self::Ticket) -> Result { - Ok(H256::zero()) - } - } - - impl SendMessageFeeProvider for MockOkOutboundQueue { - type Balance = u128; - - fn local_fee() -> Self::Balance { - 1 - } - } - struct MockErrOutboundQueue; - impl SendMessage for MockErrOutboundQueue { - type Ticket = (); - - type Balance = u128; - - fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { - Err(SendError::MessageTooLarge) - } - - fn deliver(_: Self::Ticket) -> Result { - Err(SendError::MessageTooLarge) - } - } - - impl SendMessageFeeProvider for MockErrOutboundQueue { - type Balance = u128; - - fn local_fee() -> Self::Balance { - 1 - } - } - - pub struct MockTokenIdConvert; - impl MaybeEquivalence for MockTokenIdConvert { - fn convert(_id: &TokenId) -> Option { - Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) - } - fn convert_back(_loc: &Location) -> Option { - None - } - } - - #[test] - fn exporter_validate_with_unknown_network_yields_not_applicable() { - let network = Ethereum { chain_id: 1337 }; - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = None; - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_with_invalid_destination_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = None; - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_with_x8_destination_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = None; - let mut destination: Option = Some( - [ - OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, OnlyChild, - OnlyChild, - ] - .into(), - ); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_without_universal_source_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - 
let mut universal_source: Option = None; - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_without_global_universal_location_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = Here.into(); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_without_global_bridge_location_yields_not_applicable() { - let network = NonBridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = Here.into(); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_with_remote_universal_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Kusama), Parachain(1000)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_without_para_id_in_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = Some(GlobalConsensus(Polkadot).into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_complex_para_id_in_source_yields_not_applicable() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000), PalletInstance(12)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn 
exporter_validate_without_xcm_message_yields_missing_argument() { - let network = BridgedNetwork::get(); - let channel: u32 = 0; - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - let mut destination: Option = Here.into(); - let mut message: Option> = None; - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); - } - - #[test] - fn exporter_validate_with_max_target_fee_yields_unroutable() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; - let fees: Assets = vec![fee.clone()].into(); - let assets: Assets = vec![Asset { - id: AssetId(AccountKey20 { network: None, key: token_address }.into()), - fun: Fungible(1000), - }] - .into(); - let filter: AssetFilter = assets.clone().into(); - - let mut message: Option> = Some( - vec![ - WithdrawAsset(fees), - BuyExecution { fees: fee.clone(), weight_limit: Unlimited }, - ExpectAsset(fee.into()), - WithdrawAsset(assets), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: Some(network), key: beneficiary_address } - .into(), - }, - SetTopic([0; 32]), - ] - .into(), - ); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_with_unparsable_xcm_yields_unroutable() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let channel: u32 = 0; - let fee = Asset { id: AssetId(Here.into()), fun: Fungible(1000) }; - let fees: Assets = vec![fee.clone()].into(); - - let mut message: Option> = Some( - vec![WithdrawAsset(fees), BuyExecution { fees: fee, weight_limit: Unlimited }].into(), - ); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - } - - #[test] - fn exporter_validate_xcm_success_case_1() { - let network = BridgedNetwork::get(); - let mut destination: Option = Here.into(); - - let mut universal_source: Option = - Some([GlobalConsensus(Polkadot), Parachain(1000)].into()); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let fee_asset: Asset = Asset { - id: AssetId([AccountKey20 { network: 
None, key: WETHAddress::get().0 }].into()), - fun: Fungible(1000), - } - .into(); - let filter: AssetFilter = assets.clone().into(); - - let mut message: Option> = Some( - vec![ - WithdrawAsset(assets.clone()), - PayFees { asset: fee_asset }, - WithdrawAsset(assets.clone()), - AliasOrigin(Location::new(1, [GlobalConsensus(Polkadot), Parachain(1000)])), - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(), - ); - - let result = - EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - - assert!(result.is_ok()); - } - - #[test] - fn exporter_deliver_with_submit_failure_yields_unroutable() { - let result = EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockErrOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::deliver((hex!("deadbeef").to_vec(), XcmHash::default())); - assert_eq!(result, Err(XcmSendError::Transport("other transport error"))) - } - - #[test] - fn exporter_validate_with_invalid_dest_does_not_alter_destination() { - let network = BridgedNetwork::get(); - let destination: InteriorLocation = Parachain(1000).into(); - - let universal_source: InteriorLocation = - [GlobalConsensus(Polkadot), Parachain(1000)].into(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - let fee = assets.clone().get(0).unwrap().clone(); - let filter: AssetFilter = assets.clone().into(); - let msg: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut msg_wrapper: Option> = Some(msg.clone()); - let mut dest_wrapper = Some(destination.clone()); - let mut universal_source_wrapper = Some(universal_source.clone()); - - let result = EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate( - network, - channel, - &mut universal_source_wrapper, - &mut dest_wrapper, - &mut msg_wrapper, - ); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - - // ensure mutable variables are not changed - assert_eq!(Some(destination), dest_wrapper); - assert_eq!(Some(msg), msg_wrapper); - assert_eq!(Some(universal_source), universal_source_wrapper); - } - - #[test] - fn exporter_validate_with_invalid_universal_source_does_not_alter_universal_source() { - let network = BridgedNetwork::get(); - let destination: InteriorLocation = Here.into(); - - let universal_source: InteriorLocation = - [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)].into(); - - let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); - let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); - - let channel: u32 = 0; - let assets: Assets = vec![Asset { - id: AssetId([AccountKey20 { network: None, key: token_address }].into()), - fun: Fungible(1000), - }] - .into(); - 
let fee = assets.clone().get(0).unwrap().clone(); - let filter: AssetFilter = assets.clone().into(); - let msg: Xcm<()> = vec![ - WithdrawAsset(assets.clone()), - ClearOrigin, - BuyExecution { fees: fee, weight_limit: Unlimited }, - DepositAsset { - assets: filter, - beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), - }, - SetTopic([0; 32]), - ] - .into(); - let mut msg_wrapper: Option> = Some(msg.clone()); - let mut dest_wrapper = Some(destination.clone()); - let mut universal_source_wrapper = Some(universal_source.clone()); - - let result = EthereumBlobExporter::< - UniversalLocation, - BridgedNetwork, - MockOkOutboundQueue, - AgentIdOf, - MockTokenIdConvert, - WETHAddress, - >::validate( - network, - channel, - &mut universal_source_wrapper, - &mut dest_wrapper, - &mut msg_wrapper, - ); - - assert_eq!(result, Err(XcmSendError::NotApplicable)); - - // ensure mutable variables are not changed - assert_eq!(Some(destination), dest_wrapper); - assert_eq!(Some(msg), msg_wrapper); - assert_eq!(Some(universal_source), universal_source_wrapper); - } -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 7f2f42792ec0..d375c4a3cc43 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -47,6 +47,7 @@ bridge-hub-westend-runtime = { workspace = true } # Snowbridge snowbridge-core = { workspace = true } snowbridge-router-primitives = { workspace = true } +snowbridge-outbound-router-primitives = { workspace = true } snowbridge-pallet-system = { workspace = true } snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-pallet-inbound-queue = { workspace = true } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index a3eaebb59153..8a8e62c5c1b9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -98,6 +98,7 @@ bp-asset-hub-westend = { workspace = true } bp-bridge-hub-rococo = { workspace = true } bp-bridge-hub-westend = { workspace = true } snowbridge-router-primitives = { workspace = true } +snowbridge-outbound-router-primitives = { workspace = true } [dev-dependencies] asset-test-utils = { workspace = true, default-features = true } @@ -143,6 +144,7 @@ runtime-benchmarks = [ "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", + "snowbridge-outbound-router-primitives/runtime-benchmarks", "snowbridge-router-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", @@ -243,6 +245,7 @@ std = [ "primitive-types/std", "scale-info/std", "serde_json/std", + "snowbridge-outbound-router-primitives/std", "snowbridge-router-primitives/std", "sp-api/std", "sp-block-builder/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index d3db7a18a12d..b474b70c1ddc 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -715,9 +715,9 @@ pub mod bridging { } pub type 
EthereumNetworkExportTableV2 = - snowbridge_router_primitives::outbound::v2::XcmFilterExporter< + snowbridge_outbound_router_primitives::v2::XcmFilterExporter< xcm_builder::NetworkExportTable, - snowbridge_router_primitives::outbound::v2::XcmForSnowbridgeV2, + snowbridge_outbound_router_primitives::v2::XcmForSnowbridgeV2, >; pub type EthereumNetworkExportTable = xcm_builder::NetworkExportTable; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index daffa32d1b6b..eb4a7d40de6f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -114,6 +114,7 @@ snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-merkle-tree = { workspace = true } snowbridge-router-primitives = { workspace = true } +snowbridge-outbound-router-primitives = { workspace = true } snowbridge-runtime-common = { workspace = true } bridge-hub-common = { workspace = true } @@ -193,6 +194,7 @@ std = [ "snowbridge-core/std", "snowbridge-merkle-tree/std", "snowbridge-outbound-queue-runtime-api/std", + "snowbridge-outbound-router-primitives/std", "snowbridge-pallet-ethereum-client/std", "snowbridge-pallet-inbound-queue/std", "snowbridge-pallet-outbound-queue/std", @@ -253,6 +255,7 @@ runtime-benchmarks = [ "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", + "snowbridge-outbound-router-primitives/runtime-benchmarks", "snowbridge-pallet-ethereum-client/runtime-benchmarks", "snowbridge-pallet-inbound-queue/runtime-benchmarks", "snowbridge-pallet-outbound-queue/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs index 3d208dc68208..801e6470512e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs @@ -24,7 +24,8 @@ use crate::{ use parachains_common::{AccountId, Balance}; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{gwei, meth, AllowSiblingsOnly, PricingParameters, Rewards}; -use snowbridge_router_primitives::{inbound::v1::MessageToXcm, outbound::v1::EthereumBlobExporter}; +use snowbridge_outbound_router_primitives::v1::EthereumBlobExporter; +use snowbridge_router_primitives::inbound::v1::MessageToXcm; use sp_core::{H160, H256}; use testnet_parachains_constants::rococo::{ currency::*, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 8b2b3b3cfde2..40506e99c6f6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -113,6 +113,7 @@ snowbridge-pallet-inbound-queue = { workspace = true } snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-router-primitives = { workspace = true } +snowbridge-outbound-router-primitives = { workspace = true } snowbridge-runtime-common = { workspace = true } snowbridge-pallet-outbound-queue-v2 = { workspace = true } 
snowbridge-outbound-queue-runtime-api-v2 = { workspace = true } @@ -192,6 +193,7 @@ std = [ "snowbridge-merkle-tree/std", "snowbridge-outbound-queue-runtime-api-v2/std", "snowbridge-outbound-queue-runtime-api/std", + "snowbridge-outbound-router-primitives/std", "snowbridge-pallet-ethereum-client/std", "snowbridge-pallet-inbound-queue/std", "snowbridge-pallet-outbound-queue-v2/std", @@ -254,6 +256,7 @@ runtime-benchmarks = [ "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", + "snowbridge-outbound-router-primitives/runtime-benchmarks", "snowbridge-pallet-ethereum-client/runtime-benchmarks", "snowbridge-pallet-inbound-queue/runtime-benchmarks", "snowbridge-pallet-outbound-queue-v2/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index d9e1ff1a3d3c..a3fed13fe384 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -25,10 +25,10 @@ use crate::{ use parachains_common::{AccountId, Balance}; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{gwei, meth, AllowSiblingsOnly, PricingParameters, Rewards}; -use snowbridge_router_primitives::{ - inbound::v1::MessageToXcm, - outbound::{v1::EthereumBlobExporter, v2::EthereumBlobExporter as EthereumBlobExporterV2}, +use snowbridge_outbound_router_primitives::{ + v1::EthereumBlobExporter, v2::EthereumBlobExporter as EthereumBlobExporterV2, }; +use snowbridge_router_primitives::inbound::v1::MessageToXcm; use sp_core::H160; use testnet_parachains_constants::westend::{ currency::*, From e97e23575db92ade68f96c2df1f74722def07a56 Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 29 Nov 2024 10:40:03 +0800 Subject: [PATCH 31/68] Clean up --- Cargo.lock | 1 - .../pallets/inbound-queue/src/lib.rs | 2 +- .../pallets/inbound-queue/src/mock.rs | 2 +- bridges/snowbridge/pallets/system/src/lib.rs | 85 +-- bridges/snowbridge/pallets/system/src/mock.rs | 33 +- .../snowbridge/primitives/router/Cargo.toml | 3 - .../primitives/router/src/inbound/mod.rs | 458 ++++++++++++++- .../primitives/router/src/inbound/tests.rs | 83 +++ .../primitives/router/src/inbound/v1.rs | 520 ------------------ .../primitives/router/src/inbound/v2.rs | 520 ------------------ .../src/tests/snowbridge.rs | 3 +- .../src/bridge_to_ethereum_config.rs | 32 +- .../src/bridge_to_ethereum_config.rs | 7 +- 13 files changed, 543 insertions(+), 1206 deletions(-) create mode 100644 bridges/snowbridge/primitives/router/src/inbound/tests.rs delete mode 100644 bridges/snowbridge/primitives/router/src/inbound/v1.rs delete mode 100644 bridges/snowbridge/primitives/router/src/inbound/v2.rs diff --git a/Cargo.lock b/Cargo.lock index 25877bbd36bd..0ec70bb40a92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25218,7 +25218,6 @@ dependencies = [ "sp-runtime 31.0.1", "sp-std 14.0.0", "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", "staging-xcm-executor 7.0.0", ] diff --git a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs index 5814886fe355..423b92b9fae0 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs @@ -61,7 +61,7 @@ use snowbridge_core::{ 
sibling_sovereign_account, BasicOperatingMode, Channel, ChannelId, ParaId, PricingParameters, StaticLookup, }; -use snowbridge_router_primitives::inbound::v1::{ +use snowbridge_router_primitives::inbound::{ ConvertMessage, ConvertMessageError, VersionedMessage, }; use sp_runtime::{traits::Saturating, SaturatedConversion, TokenError}; diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index 82862616466d..675d4b691593 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -12,7 +12,7 @@ use snowbridge_core::{ inbound::{Log, Proof, VerificationError}, meth, Channel, ChannelId, PricingParameters, Rewards, StaticLookup, TokenId, }; -use snowbridge_router_primitives::inbound::v1::MessageToXcm; +use snowbridge_router_primitives::inbound::MessageToXcm; use sp_core::{H160, H256}; use sp_runtime::{ traits::{IdentifyAccount, IdentityLookup, MaybeEquivalence, Verify}, diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index e603e562201f..64b093884622 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -70,7 +70,6 @@ use snowbridge_core::{ meth, outbound::{ v1::{Command, Initializer, Message, SendMessage}, - v2::{Command as CommandV2, Message as MessageV2, SendMessage as SendMessageV2}, OperatingMode, SendError, }, sibling_sovereign_account, AgentId, AssetMetadata, Channel, ChannelId, ParaId, @@ -141,7 +140,7 @@ where #[frame_support::pallet] pub mod pallet { use frame_support::dispatch::PostDispatchInfo; - use snowbridge_core::{outbound::v2::second_governance_origin, StaticLookup}; + use snowbridge_core::StaticLookup; use sp_core::U256; use super::*; @@ -156,8 +155,6 @@ pub mod pallet { /// Send messages to Ethereum type OutboundQueue: SendMessage>; - type OutboundQueueV2: SendMessageV2>; - /// Origin check for XCM locations that can create agents type SiblingOrigin: EnsureOrigin; @@ -255,7 +252,6 @@ pub mod pallet { InvalidTokenTransferFees, InvalidPricingParameters, InvalidUpgradeParameters, - TokenAlreadyCreated, } /// The set of registered agents @@ -642,34 +638,6 @@ pub mod pallet { pays_fee: Pays::No, }) } - - /// Registers a Polkadot-native token as a wrapped ERC20 token on Ethereum. - /// Privileged. Can only be called by root. 
- /// - /// Fee required: No - /// - /// - `origin`: Must be root - /// - `location`: Location of the asset (relative to this chain) - /// - `metadata`: Metadata to include in the instantiated ERC20 contract on Ethereum - #[pallet::call_index(11)] - #[pallet::weight(T::WeightInfo::register_token())] - pub fn register_token_v2( - origin: OriginFor, - location: Box, - metadata: AssetMetadata, - ) -> DispatchResultWithPostInfo { - ensure_root(origin)?; - - let location: Location = - (*location).try_into().map_err(|_| Error::::UnsupportedLocationVersion)?; - - Self::do_register_token_v2(&location, metadata)?; - - Ok(PostDispatchInfo { - actual_weight: Some(T::WeightInfo::register_token()), - pays_fee: Pays::No, - }) - } } impl Pallet { @@ -795,57 +763,6 @@ pub mod pallet { Ok(()) } - - pub(crate) fn do_register_token_v2( - location: &Location, - metadata: AssetMetadata, - ) -> Result<(), DispatchError> { - let ethereum_location = T::EthereumLocation::get(); - // reanchor to Ethereum context - let location = location - .clone() - .reanchored(ðereum_location, &T::UniversalLocation::get()) - .map_err(|_| Error::::LocationConversionFailed)?; - - let token_id = TokenIdOf::convert_location(&location) - .ok_or(Error::::LocationConversionFailed)?; - - if !ForeignToNativeId::::contains_key(token_id) { - NativeToForeignId::::insert(location.clone(), token_id); - ForeignToNativeId::::insert(token_id, location.clone()); - } - - let command = CommandV2::RegisterForeignToken { - token_id, - name: metadata.name.into_inner(), - symbol: metadata.symbol.into_inner(), - decimals: metadata.decimals, - }; - Self::send_governance_call(second_governance_origin(), command)?; - - Self::deposit_event(Event::::RegisterToken { - location: location.clone().into(), - foreign_token_id: token_id, - }); - - Ok(()) - } - - fn send_governance_call(origin: H256, command: CommandV2) -> DispatchResult { - let message = MessageV2 { - origin, - origin_location: Default::default(), - id: Default::default(), - fee: Default::default(), - commands: BoundedVec::try_from(vec![command]).unwrap(), - }; - - let (ticket, _) = - T::OutboundQueueV2::validate(&message).map_err(|err| Error::::Send(err))?; - - T::OutboundQueueV2::deliver(ticket).map_err(|err| Error::::Send(err))?; - Ok(()) - } } impl StaticLookup for Pallet { diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index 53ba8e87c140..5b83c0d856b6 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -11,13 +11,8 @@ use sp_core::H256; use xcm_executor::traits::ConvertLocation; use snowbridge_core::{ - gwei, meth, - outbound::{ - v1::ConstantGasMeter, - v2::{Message, SendMessage}, - SendError as OutboundSendError, SendMessageFeeProvider, - }, - sibling_sovereign_account, AgentId, AllowSiblingsOnly, ParaId, PricingParameters, Rewards, + gwei, meth, outbound::v1::ConstantGasMeter, sibling_sovereign_account, AgentId, + AllowSiblingsOnly, ParaId, PricingParameters, Rewards, }; use sp_runtime::{ traits::{AccountIdConversion, BlakeTwo256, IdentityLookup, Keccak256}, @@ -204,29 +199,6 @@ impl BenchmarkHelper for () { } } -pub struct MockOkOutboundQueue; -impl SendMessage for MockOkOutboundQueue { - type Ticket = (); - - type Balance = u128; - - fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), OutboundSendError> { - Ok(((), 1_u128)) - } - - fn deliver(_: Self::Ticket) -> Result { - Ok(H256::zero()) - } -} - -impl SendMessageFeeProvider for MockOkOutboundQueue { - 
type Balance = u128; - - fn local_fee() -> Self::Balance { - 1 - } -} - impl crate::Config for Test { type RuntimeEvent = RuntimeEvent; type OutboundQueue = OutboundQueue; @@ -241,7 +213,6 @@ impl crate::Config for Test { type EthereumLocation = EthereumDestination; #[cfg(feature = "runtime-benchmarks")] type Helper = (); - type OutboundQueueV2 = MockOkOutboundQueue; } // Build genesis storage according to the mock runtime. diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index 664f2dbf7930..ee8d481cec12 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -24,7 +24,6 @@ sp-std = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } -xcm-builder = { workspace = true } snowbridge-core = { workspace = true } @@ -44,7 +43,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", - "xcm-builder/std", "xcm-executor/std", "xcm/std", ] @@ -52,6 +50,5 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", ] diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index abd32aa3897f..e03560f66e24 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -1,16 +1,458 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork -// SPDX-FileCopyrightText: 2021-2022 Parity Technologies (UK) Ltd. +//! Converts messages from Ethereum to XCM messages -pub mod v1; -pub mod v2; +#[cfg(test)] +mod tests; -use codec::Encode; -use sp_core::blake2_256; -use sp_std::marker::PhantomData; -use xcm::prelude::{AccountKey20, Ethereum, GlobalConsensus, Location}; +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use frame_support::{traits::tokens::Balance as BalanceT, PalletError}; +use scale_info::TypeInfo; +use snowbridge_core::TokenId; +use sp_core::{Get, RuntimeDebug, H160, H256}; +use sp_io::hashing::blake2_256; +use sp_runtime::{traits::MaybeEquivalence, MultiAddress}; +use sp_std::prelude::*; +use xcm::prelude::{Junction::AccountKey20, *}; use xcm_executor::traits::ConvertLocation; +const MINIMUM_DEPOSIT: u128 = 1; + +/// Messages from Ethereum are versioned. This is because in future, +/// we may want to evolve the protocol so that the ethereum side sends XCM messages directly. +/// Instead having BridgeHub transcode the messages into XCM. +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub enum VersionedMessage { + V1(MessageV1), +} + +/// For V1, the ethereum side sends messages which are transcoded into XCM. These messages are +/// self-contained, in that they can be transcoded using only information in the message. 
+#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub struct MessageV1 { + /// EIP-155 chain id of the origin Ethereum network + pub chain_id: u64, + /// The command originating from the Gateway contract + pub command: Command, +} + +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub enum Command { + /// Register a wrapped token on the AssetHub `ForeignAssets` pallet + RegisterToken { + /// The address of the ERC20 token to be bridged over to AssetHub + token: H160, + /// XCM execution fee on AssetHub + fee: u128, + }, + /// Send Ethereum token to AssetHub or another parachain + SendToken { + /// The address of the ERC20 token to be bridged over to AssetHub + token: H160, + /// The destination for the transfer + destination: Destination, + /// Amount to transfer + amount: u128, + /// XCM execution fee on AssetHub + fee: u128, + }, + /// Send Polkadot token back to the original parachain + SendNativeToken { + /// The Id of the token + token_id: TokenId, + /// The destination for the transfer + destination: Destination, + /// Amount to transfer + amount: u128, + /// XCM execution fee on AssetHub + fee: u128, + }, +} + +/// Destination for bridged tokens +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub enum Destination { + /// The funds will be deposited into account `id` on AssetHub + AccountId32 { id: [u8; 32] }, + /// The funds will deposited into the sovereign account of destination parachain `para_id` on + /// AssetHub, Account `id` on the destination parachain will receive the funds via a + /// reserve-backed transfer. See + ForeignAccountId32 { + para_id: u32, + id: [u8; 32], + /// XCM execution fee on final destination + fee: u128, + }, + /// The funds will deposited into the sovereign account of destination parachain `para_id` on + /// AssetHub, Account `id` on the destination parachain will receive the funds via a + /// reserve-backed transfer. See + ForeignAccountId20 { + para_id: u32, + id: [u8; 20], + /// XCM execution fee on final destination + fee: u128, + }, +} + +pub struct MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, +> where + CreateAssetCall: Get, + CreateAssetDeposit: Get, + Balance: BalanceT, + ConvertAssetId: MaybeEquivalence, + EthereumUniversalLocation: Get, + GlobalAssetHubLocation: Get, +{ + _phantom: PhantomData<( + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + )>, +} + +/// Reason why a message conversion failed. +#[derive(Copy, Clone, TypeInfo, PalletError, Encode, Decode, RuntimeDebug)] +pub enum ConvertMessageError { + /// The message version is not supported for conversion. + UnsupportedVersion, + InvalidDestination, + InvalidToken, + /// The fee asset is not supported for conversion. 
+ UnsupportedFeeAsset, + CannotReanchor, +} + +/// convert the inbound message to xcm which will be forwarded to the destination chain +pub trait ConvertMessage { + type Balance: BalanceT + From; + type AccountId; + /// Converts a versioned message into an XCM message and an optional topicID + fn convert( + message_id: H256, + message: VersionedMessage, + ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError>; +} + +pub type CallIndex = [u8; 2]; + +impl< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > ConvertMessage + for MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > +where + CreateAssetCall: Get, + CreateAssetDeposit: Get, + InboundQueuePalletInstance: Get, + Balance: BalanceT + From, + AccountId: Into<[u8; 32]>, + ConvertAssetId: MaybeEquivalence, + EthereumUniversalLocation: Get, + GlobalAssetHubLocation: Get, +{ + type Balance = Balance; + type AccountId = AccountId; + + fn convert( + message_id: H256, + message: VersionedMessage, + ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError> { + use Command::*; + use VersionedMessage::*; + match message { + V1(MessageV1 { chain_id, command: RegisterToken { token, fee } }) => + Ok(Self::convert_register_token(message_id, chain_id, token, fee)), + V1(MessageV1 { chain_id, command: SendToken { token, destination, amount, fee } }) => + Ok(Self::convert_send_token(message_id, chain_id, token, destination, amount, fee)), + V1(MessageV1 { + chain_id, + command: SendNativeToken { token_id, destination, amount, fee }, + }) => Self::convert_send_native_token( + message_id, + chain_id, + token_id, + destination, + amount, + fee, + ), + } + } +} + +impl< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > + MessageToXcm< + CreateAssetCall, + CreateAssetDeposit, + InboundQueuePalletInstance, + AccountId, + Balance, + ConvertAssetId, + EthereumUniversalLocation, + GlobalAssetHubLocation, + > +where + CreateAssetCall: Get, + CreateAssetDeposit: Get, + InboundQueuePalletInstance: Get, + Balance: BalanceT + From, + AccountId: Into<[u8; 32]>, + ConvertAssetId: MaybeEquivalence, + EthereumUniversalLocation: Get, + GlobalAssetHubLocation: Get, +{ + fn convert_register_token( + message_id: H256, + chain_id: u64, + token: H160, + fee: u128, + ) -> (Xcm<()>, Balance) { + let network = Ethereum { chain_id }; + let xcm_fee: Asset = (Location::parent(), fee).into(); + let deposit: Asset = (Location::parent(), CreateAssetDeposit::get()).into(); + + let total_amount = fee + CreateAssetDeposit::get(); + let total: Asset = (Location::parent(), total_amount).into(); + + let bridge_location = Location::new(2, GlobalConsensus(network)); + + let owner = EthereumLocationsConverterFor::<[u8; 32]>::from_chain_id(&chain_id); + let asset_id = Self::convert_token_address(network, token); + let create_call_index: [u8; 2] = CreateAssetCall::get(); + let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); + + let xcm: Xcm<()> = vec![ + // Teleport required fees. + ReceiveTeleportedAsset(total.into()), + // Pay for execution. + BuyExecution { fees: xcm_fee, weight_limit: Unlimited }, + // Fund the snowbridge sovereign with the required deposit for creation. 
+ DepositAsset { assets: Definite(deposit.into()), beneficiary: bridge_location.clone() }, + // This `SetAppendix` ensures that `xcm_fee` not spent by `Transact` will be + // deposited to snowbridge sovereign, instead of being trapped, regardless of + // `Transact` success or not. + SetAppendix(Xcm(vec![ + RefundSurplus, + DepositAsset { assets: AllCounted(1).into(), beneficiary: bridge_location }, + ])), + // Only our inbound-queue pallet is allowed to invoke `UniversalOrigin`. + DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), + // Change origin to the bridge. + UniversalOrigin(GlobalConsensus(network)), + // Call create_asset on foreign assets pallet. + Transact { + origin_kind: OriginKind::Xcm, + call: ( + create_call_index, + asset_id, + MultiAddress::<[u8; 32], ()>::Id(owner), + MINIMUM_DEPOSIT, + ) + .encode() + .into(), + }, + // Forward message id to Asset Hub + SetTopic(message_id.into()), + // Once the program ends here, appendix program will run, which will deposit any + // leftover fee to snowbridge sovereign. + ] + .into(); + + (xcm, total_amount.into()) + } + + fn convert_send_token( + message_id: H256, + chain_id: u64, + token: H160, + destination: Destination, + amount: u128, + asset_hub_fee: u128, + ) -> (Xcm<()>, Balance) { + let network = Ethereum { chain_id }; + let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); + let asset: Asset = (Self::convert_token_address(network, token), amount).into(); + + let (dest_para_id, beneficiary, dest_para_fee) = match destination { + // Final destination is a 32-byte account on AssetHub + Destination::AccountId32 { id } => + (None, Location::new(0, [AccountId32 { network: None, id }]), 0), + // Final destination is a 32-byte account on a sibling of AssetHub + Destination::ForeignAccountId32 { para_id, id, fee } => ( + Some(para_id), + Location::new(0, [AccountId32 { network: None, id }]), + // Total fee needs to cover execution on AssetHub and Sibling + fee, + ), + // Final destination is a 20-byte account on a sibling of AssetHub + Destination::ForeignAccountId20 { para_id, id, fee } => ( + Some(para_id), + Location::new(0, [AccountKey20 { network: None, key: id }]), + // Total fee needs to cover execution on AssetHub and Sibling + fee, + ), + }; + + let total_fees = asset_hub_fee.saturating_add(dest_para_fee); + let total_fee_asset: Asset = (Location::parent(), total_fees).into(); + let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); + + let mut instructions = vec![ + ReceiveTeleportedAsset(total_fee_asset.into()), + BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, + DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), + UniversalOrigin(GlobalConsensus(network)), + ReserveAssetDeposited(asset.clone().into()), + ClearOrigin, + ]; + + match dest_para_id { + Some(dest_para_id) => { + let dest_para_fee_asset: Asset = (Location::parent(), dest_para_fee).into(); + let bridge_location = Location::new(2, GlobalConsensus(network)); + + instructions.extend(vec![ + // After program finishes deposit any leftover assets to the snowbridge + // sovereign. + SetAppendix(Xcm(vec![DepositAsset { + assets: Wild(AllCounted(2)), + beneficiary: bridge_location, + }])), + // Perform a deposit reserve to send to destination chain. + DepositReserveAsset { + assets: Definite(vec![dest_para_fee_asset.clone(), asset].into()), + dest: Location::new(1, [Parachain(dest_para_id)]), + xcm: vec![ + // Buy execution on target. 
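+ // Note: `dest_para_fee_asset` is included in the `Definite` assets reserve-deposited above, so it is available on the destination parachain to pay for this execution.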
+ BuyExecution { fees: dest_para_fee_asset, weight_limit: Unlimited }, + // Deposit assets to beneficiary. + DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, + // Forward message id to destination parachain. + SetTopic(message_id.into()), + ] + .into(), + }, + ]); + }, + None => { + instructions.extend(vec![ + // Deposit both asset and fees to beneficiary so the fees will not get + // trapped. Another benefit is when fees left more than ED on AssetHub could be + // used to create the beneficiary account in case it does not exist. + DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, + ]); + }, + } + + // Forward message id to Asset Hub. + instructions.push(SetTopic(message_id.into())); + + // The `instructions` to forward to AssetHub, and the `total_fees` to locally burn (since + // they are teleported within `instructions`). + (instructions.into(), total_fees.into()) + } + + // Convert ERC20 token address to a location that can be understood by Assets Hub. + fn convert_token_address(network: NetworkId, token: H160) -> Location { + Location::new( + 2, + [GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }], + ) + } + + /// Constructs an XCM message destined for AssetHub that withdraws assets from the sovereign + /// account of the Gateway contract and either deposits those assets into a recipient account or + /// forwards the assets to another parachain. + fn convert_send_native_token( + message_id: H256, + chain_id: u64, + token_id: TokenId, + destination: Destination, + amount: u128, + asset_hub_fee: u128, + ) -> Result<(Xcm<()>, Balance), ConvertMessageError> { + let network = Ethereum { chain_id }; + let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); + + let beneficiary = match destination { + // Final destination is a 32-byte account on AssetHub + Destination::AccountId32 { id } => + Ok(Location::new(0, [AccountId32 { network: None, id }])), + // Forwarding to a destination parachain is not allowed for PNA and is validated on the + // Ethereum side. https://github.com/Snowfork/snowbridge/blob/e87ddb2215b513455c844463a25323bb9c01ff36/contracts/src/Assets.sol#L216-L224 + _ => Err(ConvertMessageError::InvalidDestination), + }?; + + let total_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); + + let asset_loc = + ConvertAssetId::convert(&token_id).ok_or(ConvertMessageError::InvalidToken)?; + + let mut reanchored_asset_loc = asset_loc.clone(); + reanchored_asset_loc + .reanchor(&GlobalAssetHubLocation::get(), &EthereumUniversalLocation::get()) + .map_err(|_| ConvertMessageError::CannotReanchor)?; + + let asset: Asset = (reanchored_asset_loc, amount).into(); + + let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); + + let instructions = vec![ + ReceiveTeleportedAsset(total_fee_asset.clone().into()), + BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, + DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), + UniversalOrigin(GlobalConsensus(network)), + WithdrawAsset(asset.clone().into()), + // Deposit both asset and fees to beneficiary so the fees will not get + // trapped. Another benefit is when fees left more than ED on AssetHub could be + // used to create the beneficiary account in case it does not exist. 
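+ // Note: `AllCounted(2)` covers the two asset kinds expected in holding at this point: the withdrawn Polkadot-native asset and any remainder of the teleported fee.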
+ DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, + SetTopic(message_id.into()), + ]; + + // `total_fees` to burn on this chain when sending `instructions` to run on AH (which also + // teleport fees) + Ok((instructions.into(), asset_hub_fee.into())) + } +} + pub struct EthereumLocationsConverterFor(PhantomData); impl ConvertLocation for EthereumLocationsConverterFor where @@ -35,5 +477,3 @@ impl EthereumLocationsConverterFor { (b"ethereum-chain", chain_id, key).using_encoded(blake2_256) } } - -pub type CallIndex = [u8; 2]; diff --git a/bridges/snowbridge/primitives/router/src/inbound/tests.rs b/bridges/snowbridge/primitives/router/src/inbound/tests.rs new file mode 100644 index 000000000000..786aa594f653 --- /dev/null +++ b/bridges/snowbridge/primitives/router/src/inbound/tests.rs @@ -0,0 +1,83 @@ +use super::EthereumLocationsConverterFor; +use crate::inbound::CallIndex; +use frame_support::{assert_ok, parameter_types}; +use hex_literal::hex; +use xcm::prelude::*; +use xcm_executor::traits::ConvertLocation; + +const NETWORK: NetworkId = Ethereum { chain_id: 11155111 }; + +parameter_types! { + pub EthereumNetwork: NetworkId = NETWORK; + + pub const CreateAssetCall: CallIndex = [1, 1]; + pub const CreateAssetExecutionFee: u128 = 123; + pub const CreateAssetDeposit: u128 = 891; + pub const SendTokenExecutionFee: u128 = 592; +} + +#[test] +fn test_ethereum_network_converts_successfully() { + let expected_account: [u8; 32] = + hex!("ce796ae65569a670d0c1cc1ac12515a3ce21b5fbf729d63d7b289baad070139d"); + let contract_location = Location::new(2, [GlobalConsensus(NETWORK)]); + + let account = + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location).unwrap(); + + assert_eq!(account, expected_account); +} + +#[test] +fn test_contract_location_with_network_converts_successfully() { + let expected_account: [u8; 32] = + hex!("9038d35aba0e78e072d29b2d65be9df5bb4d7d94b4609c9cf98ea8e66e544052"); + let contract_location = Location::new( + 2, + [GlobalConsensus(NETWORK), AccountKey20 { network: None, key: [123u8; 20] }], + ); + + let account = + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location).unwrap(); + + assert_eq!(account, expected_account); +} + +#[test] +fn test_contract_location_with_incorrect_location_fails_convert() { + let contract_location = Location::new(2, [GlobalConsensus(Polkadot), Parachain(1000)]); + + assert_eq!( + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location), + None, + ); +} + +#[test] +fn test_reanchor_all_assets() { + let ethereum_context: InteriorLocation = [GlobalConsensus(Ethereum { chain_id: 1 })].into(); + let ethereum = Location::new(2, ethereum_context.clone()); + let ah_context: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into(); + let global_ah = Location::new(1, ah_context.clone()); + let assets = vec![ + // DOT + Location::new(1, []), + // GLMR (Some Polkadot parachain currency) + Location::new(1, [Parachain(2004)]), + // AH asset + Location::new(0, [PalletInstance(50), GeneralIndex(42)]), + // KSM + Location::new(2, [GlobalConsensus(Kusama)]), + // KAR (Some Kusama parachain currency) + Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]), + ]; + for asset in assets.iter() { + // reanchor logic in pallet_xcm on AH + let mut reanchored_asset = asset.clone(); + assert_ok!(reanchored_asset.reanchor(ðereum, &ah_context)); + // reanchor back to original location in context of Ethereum + let mut reanchored_asset_with_ethereum_context = 
reanchored_asset.clone(); + assert_ok!(reanchored_asset_with_ethereum_context.reanchor(&global_ah, ðereum_context)); + assert_eq!(reanchored_asset_with_ethereum_context, asset.clone()); + } +} diff --git a/bridges/snowbridge/primitives/router/src/inbound/v1.rs b/bridges/snowbridge/primitives/router/src/inbound/v1.rs deleted file mode 100644 index 73e5f5ada939..000000000000 --- a/bridges/snowbridge/primitives/router/src/inbound/v1.rs +++ /dev/null @@ -1,520 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -//! Converts messages from Ethereum to XCM messages - -use crate::inbound::{CallIndex, EthereumLocationsConverterFor}; -use codec::{Decode, Encode}; -use core::marker::PhantomData; -use frame_support::{traits::tokens::Balance as BalanceT, PalletError}; -use scale_info::TypeInfo; -use snowbridge_core::TokenId; -use sp_core::{Get, RuntimeDebug, H160, H256}; -use sp_runtime::{traits::MaybeEquivalence, MultiAddress}; -use sp_std::prelude::*; -use xcm::prelude::{Junction::AccountKey20, *}; - -const MINIMUM_DEPOSIT: u128 = 1; - -/// Messages from Ethereum are versioned. This is because in future, -/// we may want to evolve the protocol so that the ethereum side sends XCM messages directly. -/// Instead having BridgeHub transcode the messages into XCM. -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub enum VersionedMessage { - V1(MessageV1), -} - -/// For V1, the ethereum side sends messages which are transcoded into XCM. These messages are -/// self-contained, in that they can be transcoded using only information in the message. -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub struct MessageV1 { - /// EIP-155 chain id of the origin Ethereum network - pub chain_id: u64, - /// The command originating from the Gateway contract - pub command: Command, -} - -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub enum Command { - /// Register a wrapped token on the AssetHub `ForeignAssets` pallet - RegisterToken { - /// The address of the ERC20 token to be bridged over to AssetHub - token: H160, - /// XCM execution fee on AssetHub - fee: u128, - }, - /// Send Ethereum token to AssetHub or another parachain - SendToken { - /// The address of the ERC20 token to be bridged over to AssetHub - token: H160, - /// The destination for the transfer - destination: Destination, - /// Amount to transfer - amount: u128, - /// XCM execution fee on AssetHub - fee: u128, - }, - /// Send Polkadot token back to the original parachain - SendNativeToken { - /// The Id of the token - token_id: TokenId, - /// The destination for the transfer - destination: Destination, - /// Amount to transfer - amount: u128, - /// XCM execution fee on AssetHub - fee: u128, - }, -} - -/// Destination for bridged tokens -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub enum Destination { - /// The funds will be deposited into account `id` on AssetHub - AccountId32 { id: [u8; 32] }, - /// The funds will deposited into the sovereign account of destination parachain `para_id` on - /// AssetHub, Account `id` on the destination parachain will receive the funds via a - /// reserve-backed transfer. See - ForeignAccountId32 { - para_id: u32, - id: [u8; 32], - /// XCM execution fee on final destination - fee: u128, - }, - /// The funds will deposited into the sovereign account of destination parachain `para_id` on - /// AssetHub, Account `id` on the destination parachain will receive the funds via a - /// reserve-backed transfer. 
See - ForeignAccountId20 { - para_id: u32, - id: [u8; 20], - /// XCM execution fee on final destination - fee: u128, - }, -} - -pub struct MessageToXcm< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, -> where - CreateAssetCall: Get, - CreateAssetDeposit: Get, - Balance: BalanceT, - ConvertAssetId: MaybeEquivalence, - EthereumUniversalLocation: Get, - GlobalAssetHubLocation: Get, -{ - _phantom: PhantomData<( - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - )>, -} - -/// Reason why a message conversion failed. -#[derive(Copy, Clone, TypeInfo, PalletError, Encode, Decode, RuntimeDebug)] -pub enum ConvertMessageError { - /// The message version is not supported for conversion. - UnsupportedVersion, - InvalidDestination, - InvalidToken, - /// The fee asset is not supported for conversion. - UnsupportedFeeAsset, - CannotReanchor, -} - -/// convert the inbound message to xcm which will be forwarded to the destination chain -pub trait ConvertMessage { - type Balance: BalanceT + From; - type AccountId; - /// Converts a versioned message into an XCM message and an optional topicID - fn convert( - message_id: H256, - message: VersionedMessage, - ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError>; -} - -impl< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > ConvertMessage - for MessageToXcm< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > -where - CreateAssetCall: Get, - CreateAssetDeposit: Get, - InboundQueuePalletInstance: Get, - Balance: BalanceT + From, - AccountId: Into<[u8; 32]>, - ConvertAssetId: MaybeEquivalence, - EthereumUniversalLocation: Get, - GlobalAssetHubLocation: Get, -{ - type Balance = Balance; - type AccountId = AccountId; - - fn convert( - message_id: H256, - message: VersionedMessage, - ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError> { - use Command::*; - use VersionedMessage::*; - match message { - V1(MessageV1 { chain_id, command: RegisterToken { token, fee } }) => - Ok(Self::convert_register_token(message_id, chain_id, token, fee)), - V1(MessageV1 { chain_id, command: SendToken { token, destination, amount, fee } }) => - Ok(Self::convert_send_token(message_id, chain_id, token, destination, amount, fee)), - V1(MessageV1 { - chain_id, - command: SendNativeToken { token_id, destination, amount, fee }, - }) => Self::convert_send_native_token( - message_id, - chain_id, - token_id, - destination, - amount, - fee, - ), - } - } -} - -impl< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > - MessageToXcm< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > -where - CreateAssetCall: Get, - CreateAssetDeposit: Get, - InboundQueuePalletInstance: Get, - Balance: BalanceT + From, - AccountId: Into<[u8; 32]>, - ConvertAssetId: MaybeEquivalence, - EthereumUniversalLocation: Get, - GlobalAssetHubLocation: Get, -{ - fn convert_register_token( - message_id: 
H256, - chain_id: u64, - token: H160, - fee: u128, - ) -> (Xcm<()>, Balance) { - let network = Ethereum { chain_id }; - let xcm_fee: Asset = (Location::parent(), fee).into(); - let deposit: Asset = (Location::parent(), CreateAssetDeposit::get()).into(); - - let total_amount = fee + CreateAssetDeposit::get(); - let total: Asset = (Location::parent(), total_amount).into(); - - let bridge_location = Location::new(2, GlobalConsensus(network)); - - let owner = EthereumLocationsConverterFor::<[u8; 32]>::from_chain_id(&chain_id); - let asset_id = Self::convert_token_address(network, token); - let create_call_index: [u8; 2] = CreateAssetCall::get(); - let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); - - let xcm: Xcm<()> = vec![ - // Teleport required fees. - ReceiveTeleportedAsset(total.into()), - // Pay for execution. - BuyExecution { fees: xcm_fee, weight_limit: Unlimited }, - // Fund the snowbridge sovereign with the required deposit for creation. - DepositAsset { assets: Definite(deposit.into()), beneficiary: bridge_location.clone() }, - // This `SetAppendix` ensures that `xcm_fee` not spent by `Transact` will be - // deposited to snowbridge sovereign, instead of being trapped, regardless of - // `Transact` success or not. - SetAppendix(Xcm(vec![ - RefundSurplus, - DepositAsset { assets: AllCounted(1).into(), beneficiary: bridge_location }, - ])), - // Only our inbound-queue pallet is allowed to invoke `UniversalOrigin`. - DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), - // Change origin to the bridge. - UniversalOrigin(GlobalConsensus(network)), - // Call create_asset on foreign assets pallet. - Transact { - origin_kind: OriginKind::Xcm, - call: ( - create_call_index, - asset_id, - MultiAddress::<[u8; 32], ()>::Id(owner), - MINIMUM_DEPOSIT, - ) - .encode() - .into(), - }, - // Forward message id to Asset Hub - SetTopic(message_id.into()), - // Once the program ends here, appendix program will run, which will deposit any - // leftover fee to snowbridge sovereign. 
- ] - .into(); - - (xcm, total_amount.into()) - } - - fn convert_send_token( - message_id: H256, - chain_id: u64, - token: H160, - destination: Destination, - amount: u128, - asset_hub_fee: u128, - ) -> (Xcm<()>, Balance) { - let network = Ethereum { chain_id }; - let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); - let asset: Asset = (Self::convert_token_address(network, token), amount).into(); - - let (dest_para_id, beneficiary, dest_para_fee) = match destination { - // Final destination is a 32-byte account on AssetHub - Destination::AccountId32 { id } => - (None, Location::new(0, [AccountId32 { network: None, id }]), 0), - // Final destination is a 32-byte account on a sibling of AssetHub - Destination::ForeignAccountId32 { para_id, id, fee } => ( - Some(para_id), - Location::new(0, [AccountId32 { network: None, id }]), - // Total fee needs to cover execution on AssetHub and Sibling - fee, - ), - // Final destination is a 20-byte account on a sibling of AssetHub - Destination::ForeignAccountId20 { para_id, id, fee } => ( - Some(para_id), - Location::new(0, [AccountKey20 { network: None, key: id }]), - // Total fee needs to cover execution on AssetHub and Sibling - fee, - ), - }; - - let total_fees = asset_hub_fee.saturating_add(dest_para_fee); - let total_fee_asset: Asset = (Location::parent(), total_fees).into(); - let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); - - let mut instructions = vec![ - ReceiveTeleportedAsset(total_fee_asset.into()), - BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, - DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), - UniversalOrigin(GlobalConsensus(network)), - ReserveAssetDeposited(asset.clone().into()), - ClearOrigin, - ]; - - match dest_para_id { - Some(dest_para_id) => { - let dest_para_fee_asset: Asset = (Location::parent(), dest_para_fee).into(); - let bridge_location = Location::new(2, GlobalConsensus(network)); - - instructions.extend(vec![ - // After program finishes deposit any leftover assets to the snowbridge - // sovereign. - SetAppendix(Xcm(vec![DepositAsset { - assets: Wild(AllCounted(2)), - beneficiary: bridge_location, - }])), - // Perform a deposit reserve to send to destination chain. - DepositReserveAsset { - assets: Definite(vec![dest_para_fee_asset.clone(), asset].into()), - dest: Location::new(1, [Parachain(dest_para_id)]), - xcm: vec![ - // Buy execution on target. - BuyExecution { fees: dest_para_fee_asset, weight_limit: Unlimited }, - // Deposit assets to beneficiary. - DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, - // Forward message id to destination parachain. - SetTopic(message_id.into()), - ] - .into(), - }, - ]); - }, - None => { - instructions.extend(vec![ - // Deposit both asset and fees to beneficiary so the fees will not get - // trapped. Another benefit is when fees left more than ED on AssetHub could be - // used to create the beneficiary account in case it does not exist. - DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, - ]); - }, - } - - // Forward message id to Asset Hub. - instructions.push(SetTopic(message_id.into())); - - // The `instructions` to forward to AssetHub, and the `total_fees` to locally burn (since - // they are teleported within `instructions`). - (instructions.into(), total_fees.into()) - } - - // Convert ERC20 token address to a location that can be understood by Assets Hub. 
- fn convert_token_address(network: NetworkId, token: H160) -> Location { - Location::new( - 2, - [GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }], - ) - } - - /// Constructs an XCM message destined for AssetHub that withdraws assets from the sovereign - /// account of the Gateway contract and either deposits those assets into a recipient account or - /// forwards the assets to another parachain. - fn convert_send_native_token( - message_id: H256, - chain_id: u64, - token_id: TokenId, - destination: Destination, - amount: u128, - asset_hub_fee: u128, - ) -> Result<(Xcm<()>, Balance), ConvertMessageError> { - let network = Ethereum { chain_id }; - let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); - - let beneficiary = match destination { - // Final destination is a 32-byte account on AssetHub - Destination::AccountId32 { id } => - Ok(Location::new(0, [AccountId32 { network: None, id }])), - _ => Err(ConvertMessageError::InvalidDestination), - }?; - - let total_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); - - let asset_loc = - ConvertAssetId::convert(&token_id).ok_or(ConvertMessageError::InvalidToken)?; - - let mut reanchored_asset_loc = asset_loc.clone(); - reanchored_asset_loc - .reanchor(&GlobalAssetHubLocation::get(), &EthereumUniversalLocation::get()) - .map_err(|_| ConvertMessageError::CannotReanchor)?; - - let asset: Asset = (reanchored_asset_loc, amount).into(); - - let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); - - let instructions = vec![ - ReceiveTeleportedAsset(total_fee_asset.clone().into()), - BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, - DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), - UniversalOrigin(GlobalConsensus(network)), - WithdrawAsset(asset.clone().into()), - // Deposit both asset and fees to beneficiary so the fees will not get - // trapped. Another benefit is when fees left more than ED on AssetHub could be - // used to create the beneficiary account in case it does not exist. - DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, - SetTopic(message_id.into()), - ]; - - // `total_fees` to burn on this chain when sending `instructions` to run on AH (which also - // teleport fees) - Ok((instructions.into(), asset_hub_fee.into())) - } -} - -#[cfg(test)] -mod tests { - use crate::inbound::{CallIndex, EthereumLocationsConverterFor}; - use frame_support::{assert_ok, parameter_types}; - use hex_literal::hex; - use xcm::prelude::*; - use xcm_executor::traits::ConvertLocation; - - const NETWORK: NetworkId = Ethereum { chain_id: 11155111 }; - - parameter_types! 
{ - pub EthereumNetwork: NetworkId = NETWORK; - - pub const CreateAssetCall: CallIndex = [1, 1]; - pub const CreateAssetExecutionFee: u128 = 123; - pub const CreateAssetDeposit: u128 = 891; - pub const SendTokenExecutionFee: u128 = 592; - } - - #[test] - fn test_contract_location_with_network_converts_successfully() { - let expected_account: [u8; 32] = - hex!("ce796ae65569a670d0c1cc1ac12515a3ce21b5fbf729d63d7b289baad070139d"); - let contract_location = Location::new(2, [GlobalConsensus(NETWORK)]); - - let account = - EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location) - .unwrap(); - - assert_eq!(account, expected_account); - } - - #[test] - fn test_contract_location_with_incorrect_location_fails_convert() { - let contract_location = Location::new(2, [GlobalConsensus(Polkadot), Parachain(1000)]); - - assert_eq!( - EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location), - None, - ); - } - - #[test] - fn test_reanchor_all_assets() { - let ethereum_context: InteriorLocation = [GlobalConsensus(Ethereum { chain_id: 1 })].into(); - let ethereum = Location::new(2, ethereum_context.clone()); - let ah_context: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into(); - let global_ah = Location::new(1, ah_context.clone()); - let assets = vec![ - // DOT - Location::new(1, []), - // GLMR (Some Polkadot parachain currency) - Location::new(1, [Parachain(2004)]), - // AH asset - Location::new(0, [PalletInstance(50), GeneralIndex(42)]), - // KSM - Location::new(2, [GlobalConsensus(Kusama)]), - // KAR (Some Kusama parachain currency) - Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]), - ]; - for asset in assets.iter() { - // reanchor logic in pallet_xcm on AH - let mut reanchored_asset = asset.clone(); - assert_ok!(reanchored_asset.reanchor(ðereum, &ah_context)); - // reanchor back to original location in context of Ethereum - let mut reanchored_asset_with_ethereum_context = reanchored_asset.clone(); - assert_ok!( - reanchored_asset_with_ethereum_context.reanchor(&global_ah, ðereum_context) - ); - assert_eq!(reanchored_asset_with_ethereum_context, asset.clone()); - } - } -} diff --git a/bridges/snowbridge/primitives/router/src/inbound/v2.rs b/bridges/snowbridge/primitives/router/src/inbound/v2.rs deleted file mode 100644 index 73e5f5ada939..000000000000 --- a/bridges/snowbridge/primitives/router/src/inbound/v2.rs +++ /dev/null @@ -1,520 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -//! Converts messages from Ethereum to XCM messages - -use crate::inbound::{CallIndex, EthereumLocationsConverterFor}; -use codec::{Decode, Encode}; -use core::marker::PhantomData; -use frame_support::{traits::tokens::Balance as BalanceT, PalletError}; -use scale_info::TypeInfo; -use snowbridge_core::TokenId; -use sp_core::{Get, RuntimeDebug, H160, H256}; -use sp_runtime::{traits::MaybeEquivalence, MultiAddress}; -use sp_std::prelude::*; -use xcm::prelude::{Junction::AccountKey20, *}; - -const MINIMUM_DEPOSIT: u128 = 1; - -/// Messages from Ethereum are versioned. This is because in future, -/// we may want to evolve the protocol so that the ethereum side sends XCM messages directly. -/// Instead having BridgeHub transcode the messages into XCM. -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub enum VersionedMessage { - V1(MessageV1), -} - -/// For V1, the ethereum side sends messages which are transcoded into XCM. 
These messages are -/// self-contained, in that they can be transcoded using only information in the message. -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub struct MessageV1 { - /// EIP-155 chain id of the origin Ethereum network - pub chain_id: u64, - /// The command originating from the Gateway contract - pub command: Command, -} - -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub enum Command { - /// Register a wrapped token on the AssetHub `ForeignAssets` pallet - RegisterToken { - /// The address of the ERC20 token to be bridged over to AssetHub - token: H160, - /// XCM execution fee on AssetHub - fee: u128, - }, - /// Send Ethereum token to AssetHub or another parachain - SendToken { - /// The address of the ERC20 token to be bridged over to AssetHub - token: H160, - /// The destination for the transfer - destination: Destination, - /// Amount to transfer - amount: u128, - /// XCM execution fee on AssetHub - fee: u128, - }, - /// Send Polkadot token back to the original parachain - SendNativeToken { - /// The Id of the token - token_id: TokenId, - /// The destination for the transfer - destination: Destination, - /// Amount to transfer - amount: u128, - /// XCM execution fee on AssetHub - fee: u128, - }, -} - -/// Destination for bridged tokens -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub enum Destination { - /// The funds will be deposited into account `id` on AssetHub - AccountId32 { id: [u8; 32] }, - /// The funds will deposited into the sovereign account of destination parachain `para_id` on - /// AssetHub, Account `id` on the destination parachain will receive the funds via a - /// reserve-backed transfer. See - ForeignAccountId32 { - para_id: u32, - id: [u8; 32], - /// XCM execution fee on final destination - fee: u128, - }, - /// The funds will deposited into the sovereign account of destination parachain `para_id` on - /// AssetHub, Account `id` on the destination parachain will receive the funds via a - /// reserve-backed transfer. See - ForeignAccountId20 { - para_id: u32, - id: [u8; 20], - /// XCM execution fee on final destination - fee: u128, - }, -} - -pub struct MessageToXcm< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, -> where - CreateAssetCall: Get, - CreateAssetDeposit: Get, - Balance: BalanceT, - ConvertAssetId: MaybeEquivalence, - EthereumUniversalLocation: Get, - GlobalAssetHubLocation: Get, -{ - _phantom: PhantomData<( - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - )>, -} - -/// Reason why a message conversion failed. -#[derive(Copy, Clone, TypeInfo, PalletError, Encode, Decode, RuntimeDebug)] -pub enum ConvertMessageError { - /// The message version is not supported for conversion. - UnsupportedVersion, - InvalidDestination, - InvalidToken, - /// The fee asset is not supported for conversion. 
- UnsupportedFeeAsset, - CannotReanchor, -} - -/// convert the inbound message to xcm which will be forwarded to the destination chain -pub trait ConvertMessage { - type Balance: BalanceT + From; - type AccountId; - /// Converts a versioned message into an XCM message and an optional topicID - fn convert( - message_id: H256, - message: VersionedMessage, - ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError>; -} - -impl< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > ConvertMessage - for MessageToXcm< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > -where - CreateAssetCall: Get, - CreateAssetDeposit: Get, - InboundQueuePalletInstance: Get, - Balance: BalanceT + From, - AccountId: Into<[u8; 32]>, - ConvertAssetId: MaybeEquivalence, - EthereumUniversalLocation: Get, - GlobalAssetHubLocation: Get, -{ - type Balance = Balance; - type AccountId = AccountId; - - fn convert( - message_id: H256, - message: VersionedMessage, - ) -> Result<(Xcm<()>, Self::Balance), ConvertMessageError> { - use Command::*; - use VersionedMessage::*; - match message { - V1(MessageV1 { chain_id, command: RegisterToken { token, fee } }) => - Ok(Self::convert_register_token(message_id, chain_id, token, fee)), - V1(MessageV1 { chain_id, command: SendToken { token, destination, amount, fee } }) => - Ok(Self::convert_send_token(message_id, chain_id, token, destination, amount, fee)), - V1(MessageV1 { - chain_id, - command: SendNativeToken { token_id, destination, amount, fee }, - }) => Self::convert_send_native_token( - message_id, - chain_id, - token_id, - destination, - amount, - fee, - ), - } - } -} - -impl< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > - MessageToXcm< - CreateAssetCall, - CreateAssetDeposit, - InboundQueuePalletInstance, - AccountId, - Balance, - ConvertAssetId, - EthereumUniversalLocation, - GlobalAssetHubLocation, - > -where - CreateAssetCall: Get, - CreateAssetDeposit: Get, - InboundQueuePalletInstance: Get, - Balance: BalanceT + From, - AccountId: Into<[u8; 32]>, - ConvertAssetId: MaybeEquivalence, - EthereumUniversalLocation: Get, - GlobalAssetHubLocation: Get, -{ - fn convert_register_token( - message_id: H256, - chain_id: u64, - token: H160, - fee: u128, - ) -> (Xcm<()>, Balance) { - let network = Ethereum { chain_id }; - let xcm_fee: Asset = (Location::parent(), fee).into(); - let deposit: Asset = (Location::parent(), CreateAssetDeposit::get()).into(); - - let total_amount = fee + CreateAssetDeposit::get(); - let total: Asset = (Location::parent(), total_amount).into(); - - let bridge_location = Location::new(2, GlobalConsensus(network)); - - let owner = EthereumLocationsConverterFor::<[u8; 32]>::from_chain_id(&chain_id); - let asset_id = Self::convert_token_address(network, token); - let create_call_index: [u8; 2] = CreateAssetCall::get(); - let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); - - let xcm: Xcm<()> = vec![ - // Teleport required fees. - ReceiveTeleportedAsset(total.into()), - // Pay for execution. - BuyExecution { fees: xcm_fee, weight_limit: Unlimited }, - // Fund the snowbridge sovereign with the required deposit for creation. 
- DepositAsset { assets: Definite(deposit.into()), beneficiary: bridge_location.clone() }, - // This `SetAppendix` ensures that `xcm_fee` not spent by `Transact` will be - // deposited to snowbridge sovereign, instead of being trapped, regardless of - // `Transact` success or not. - SetAppendix(Xcm(vec![ - RefundSurplus, - DepositAsset { assets: AllCounted(1).into(), beneficiary: bridge_location }, - ])), - // Only our inbound-queue pallet is allowed to invoke `UniversalOrigin`. - DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), - // Change origin to the bridge. - UniversalOrigin(GlobalConsensus(network)), - // Call create_asset on foreign assets pallet. - Transact { - origin_kind: OriginKind::Xcm, - call: ( - create_call_index, - asset_id, - MultiAddress::<[u8; 32], ()>::Id(owner), - MINIMUM_DEPOSIT, - ) - .encode() - .into(), - }, - // Forward message id to Asset Hub - SetTopic(message_id.into()), - // Once the program ends here, appendix program will run, which will deposit any - // leftover fee to snowbridge sovereign. - ] - .into(); - - (xcm, total_amount.into()) - } - - fn convert_send_token( - message_id: H256, - chain_id: u64, - token: H160, - destination: Destination, - amount: u128, - asset_hub_fee: u128, - ) -> (Xcm<()>, Balance) { - let network = Ethereum { chain_id }; - let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); - let asset: Asset = (Self::convert_token_address(network, token), amount).into(); - - let (dest_para_id, beneficiary, dest_para_fee) = match destination { - // Final destination is a 32-byte account on AssetHub - Destination::AccountId32 { id } => - (None, Location::new(0, [AccountId32 { network: None, id }]), 0), - // Final destination is a 32-byte account on a sibling of AssetHub - Destination::ForeignAccountId32 { para_id, id, fee } => ( - Some(para_id), - Location::new(0, [AccountId32 { network: None, id }]), - // Total fee needs to cover execution on AssetHub and Sibling - fee, - ), - // Final destination is a 20-byte account on a sibling of AssetHub - Destination::ForeignAccountId20 { para_id, id, fee } => ( - Some(para_id), - Location::new(0, [AccountKey20 { network: None, key: id }]), - // Total fee needs to cover execution on AssetHub and Sibling - fee, - ), - }; - - let total_fees = asset_hub_fee.saturating_add(dest_para_fee); - let total_fee_asset: Asset = (Location::parent(), total_fees).into(); - let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); - - let mut instructions = vec![ - ReceiveTeleportedAsset(total_fee_asset.into()), - BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, - DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), - UniversalOrigin(GlobalConsensus(network)), - ReserveAssetDeposited(asset.clone().into()), - ClearOrigin, - ]; - - match dest_para_id { - Some(dest_para_id) => { - let dest_para_fee_asset: Asset = (Location::parent(), dest_para_fee).into(); - let bridge_location = Location::new(2, GlobalConsensus(network)); - - instructions.extend(vec![ - // After program finishes deposit any leftover assets to the snowbridge - // sovereign. - SetAppendix(Xcm(vec![DepositAsset { - assets: Wild(AllCounted(2)), - beneficiary: bridge_location, - }])), - // Perform a deposit reserve to send to destination chain. - DepositReserveAsset { - assets: Definite(vec![dest_para_fee_asset.clone(), asset].into()), - dest: Location::new(1, [Parachain(dest_para_id)]), - xcm: vec![ - // Buy execution on target. 
- BuyExecution { fees: dest_para_fee_asset, weight_limit: Unlimited }, - // Deposit assets to beneficiary. - DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, - // Forward message id to destination parachain. - SetTopic(message_id.into()), - ] - .into(), - }, - ]); - }, - None => { - instructions.extend(vec![ - // Deposit both asset and fees to beneficiary so the fees will not get - // trapped. Another benefit is when fees left more than ED on AssetHub could be - // used to create the beneficiary account in case it does not exist. - DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, - ]); - }, - } - - // Forward message id to Asset Hub. - instructions.push(SetTopic(message_id.into())); - - // The `instructions` to forward to AssetHub, and the `total_fees` to locally burn (since - // they are teleported within `instructions`). - (instructions.into(), total_fees.into()) - } - - // Convert ERC20 token address to a location that can be understood by Assets Hub. - fn convert_token_address(network: NetworkId, token: H160) -> Location { - Location::new( - 2, - [GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }], - ) - } - - /// Constructs an XCM message destined for AssetHub that withdraws assets from the sovereign - /// account of the Gateway contract and either deposits those assets into a recipient account or - /// forwards the assets to another parachain. - fn convert_send_native_token( - message_id: H256, - chain_id: u64, - token_id: TokenId, - destination: Destination, - amount: u128, - asset_hub_fee: u128, - ) -> Result<(Xcm<()>, Balance), ConvertMessageError> { - let network = Ethereum { chain_id }; - let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); - - let beneficiary = match destination { - // Final destination is a 32-byte account on AssetHub - Destination::AccountId32 { id } => - Ok(Location::new(0, [AccountId32 { network: None, id }])), - _ => Err(ConvertMessageError::InvalidDestination), - }?; - - let total_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); - - let asset_loc = - ConvertAssetId::convert(&token_id).ok_or(ConvertMessageError::InvalidToken)?; - - let mut reanchored_asset_loc = asset_loc.clone(); - reanchored_asset_loc - .reanchor(&GlobalAssetHubLocation::get(), &EthereumUniversalLocation::get()) - .map_err(|_| ConvertMessageError::CannotReanchor)?; - - let asset: Asset = (reanchored_asset_loc, amount).into(); - - let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); - - let instructions = vec![ - ReceiveTeleportedAsset(total_fee_asset.clone().into()), - BuyExecution { fees: asset_hub_fee_asset, weight_limit: Unlimited }, - DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), - UniversalOrigin(GlobalConsensus(network)), - WithdrawAsset(asset.clone().into()), - // Deposit both asset and fees to beneficiary so the fees will not get - // trapped. Another benefit is when fees left more than ED on AssetHub could be - // used to create the beneficiary account in case it does not exist. 
- DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, - SetTopic(message_id.into()), - ]; - - // `total_fees` to burn on this chain when sending `instructions` to run on AH (which also - // teleport fees) - Ok((instructions.into(), asset_hub_fee.into())) - } -} - -#[cfg(test)] -mod tests { - use crate::inbound::{CallIndex, EthereumLocationsConverterFor}; - use frame_support::{assert_ok, parameter_types}; - use hex_literal::hex; - use xcm::prelude::*; - use xcm_executor::traits::ConvertLocation; - - const NETWORK: NetworkId = Ethereum { chain_id: 11155111 }; - - parameter_types! { - pub EthereumNetwork: NetworkId = NETWORK; - - pub const CreateAssetCall: CallIndex = [1, 1]; - pub const CreateAssetExecutionFee: u128 = 123; - pub const CreateAssetDeposit: u128 = 891; - pub const SendTokenExecutionFee: u128 = 592; - } - - #[test] - fn test_contract_location_with_network_converts_successfully() { - let expected_account: [u8; 32] = - hex!("ce796ae65569a670d0c1cc1ac12515a3ce21b5fbf729d63d7b289baad070139d"); - let contract_location = Location::new(2, [GlobalConsensus(NETWORK)]); - - let account = - EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location) - .unwrap(); - - assert_eq!(account, expected_account); - } - - #[test] - fn test_contract_location_with_incorrect_location_fails_convert() { - let contract_location = Location::new(2, [GlobalConsensus(Polkadot), Parachain(1000)]); - - assert_eq!( - EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location), - None, - ); - } - - #[test] - fn test_reanchor_all_assets() { - let ethereum_context: InteriorLocation = [GlobalConsensus(Ethereum { chain_id: 1 })].into(); - let ethereum = Location::new(2, ethereum_context.clone()); - let ah_context: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into(); - let global_ah = Location::new(1, ah_context.clone()); - let assets = vec![ - // DOT - Location::new(1, []), - // GLMR (Some Polkadot parachain currency) - Location::new(1, [Parachain(2004)]), - // AH asset - Location::new(0, [PalletInstance(50), GeneralIndex(42)]), - // KSM - Location::new(2, [GlobalConsensus(Kusama)]), - // KAR (Some Kusama parachain currency) - Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]), - ]; - for asset in assets.iter() { - // reanchor logic in pallet_xcm on AH - let mut reanchored_asset = asset.clone(); - assert_ok!(reanchored_asset.reanchor(ðereum, &ah_context)); - // reanchor back to original location in context of Ethereum - let mut reanchored_asset_with_ethereum_context = reanchored_asset.clone(); - assert_ok!( - reanchored_asset_with_ethereum_context.reanchor(&global_ah, ðereum_context) - ); - assert_eq!(reanchored_asset_with_ethereum_context, asset.clone()); - } - } -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs index 6921f0e870f2..6a6809763471 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs @@ -22,8 +22,7 @@ use hex_literal::hex; use rococo_westend_system_emulated_network::asset_hub_westend_emulated_chain::genesis::AssetHubWestendAssetOwner; use snowbridge_core::{outbound::OperatingMode, AssetMetadata, TokenIdOf}; use snowbridge_router_primitives::inbound::{ - v1::{Command, Destination, 
MessageV1, VersionedMessage}, - EthereumLocationsConverterFor, + Command, Destination, EthereumLocationsConverterFor, MessageV1, VersionedMessage, }; use sp_core::H256; use testnet_parachains_constants::westend::snowbridge::EthereumNetwork; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs index 801e6470512e..4af0e08418c8 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs @@ -25,8 +25,8 @@ use parachains_common::{AccountId, Balance}; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{gwei, meth, AllowSiblingsOnly, PricingParameters, Rewards}; use snowbridge_outbound_router_primitives::v1::EthereumBlobExporter; -use snowbridge_router_primitives::inbound::v1::MessageToXcm; -use sp_core::{H160, H256}; +use snowbridge_router_primitives::inbound::MessageToXcm; +use sp_core::H160; use testnet_parachains_constants::rococo::{ currency::*, fee::WeightToFee, @@ -38,10 +38,6 @@ use crate::xcm_config::RelayNetwork; use benchmark_helpers::DoNothingRouter; use frame_support::{parameter_types, weights::ConstantMultiplier}; use pallet_xcm::EnsureXcm; -use snowbridge_core::outbound::{ - v2::{Message, SendMessage}, - SendError, SendMessageFeeProvider, -}; use sp_runtime::{ traits::{ConstU32, ConstU8, Keccak256}, FixedU128, @@ -182,29 +178,6 @@ impl snowbridge_pallet_ethereum_client::Config for Runtime { type WeightInfo = crate::weights::snowbridge_pallet_ethereum_client::WeightInfo; } -pub struct DefaultOutboundQueue; -impl SendMessage for DefaultOutboundQueue { - type Ticket = (); - - type Balance = Balance; - - fn validate(_: &Message) -> Result<(Self::Ticket, Self::Balance), SendError> { - Ok(((), Default::default())) - } - - fn deliver(_: Self::Ticket) -> Result { - Ok(H256::zero()) - } -} - -impl SendMessageFeeProvider for DefaultOutboundQueue { - type Balance = Balance; - - fn local_fee() -> Self::Balance { - Default::default() - } -} - impl snowbridge_pallet_system::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OutboundQueue = EthereumOutboundQueue; @@ -219,7 +192,6 @@ impl snowbridge_pallet_system::Config for Runtime { type InboundDeliveryCost = EthereumInboundQueue; type UniversalLocation = UniversalLocation; type EthereumLocation = EthereumLocation; - type OutboundQueueV2 = DefaultOutboundQueue; } #[cfg(feature = "runtime-benchmarks")] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index a3fed13fe384..4ec6ff5228cf 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -19,8 +19,8 @@ use crate::XcmRouter; use crate::{ xcm_config, xcm_config::{TreasuryAccount, UniversalLocation}, - Balances, EthereumInboundQueue, EthereumOutboundQueue, EthereumOutboundQueueV2, EthereumSystem, - MessageQueue, Runtime, RuntimeEvent, TransactionByteFee, + Balances, EthereumInboundQueue, EthereumOutboundQueue, EthereumSystem, MessageQueue, Runtime, + RuntimeEvent, TransactionByteFee, }; use parachains_common::{AccountId, Balance}; use snowbridge_beacon_primitives::{Fork, ForkVersions}; @@ 
-28,7 +28,7 @@ use snowbridge_core::{gwei, meth, AllowSiblingsOnly, PricingParameters, Rewards} use snowbridge_outbound_router_primitives::{ v1::EthereumBlobExporter, v2::EthereumBlobExporter as EthereumBlobExporterV2, }; -use snowbridge_router_primitives::inbound::v1::MessageToXcm; +use snowbridge_router_primitives::inbound::MessageToXcm; use sp_core::H160; use testnet_parachains_constants::westend::{ currency::*, @@ -224,7 +224,6 @@ impl snowbridge_pallet_system::Config for Runtime { type InboundDeliveryCost = EthereumInboundQueue; type UniversalLocation = UniversalLocation; type EthereumLocation = EthereumLocation; - type OutboundQueueV2 = EthereumOutboundQueueV2; } #[cfg(feature = "runtime-benchmarks")] From 992740bcd03089111cfc3fd3290393dd1e7d34a5 Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 29 Nov 2024 10:54:34 +0800 Subject: [PATCH 32/68] Rename test --- .../bridges/bridge-hub-westend/src/tests/mod.rs | 2 +- .../bridge-hub-westend/src/tests/snowbridge.rs | 14 ++++++++++++-- ...{snowbridge_v2.rs => snowbridge_v2_outbound.rs} | 0 3 files changed, 13 insertions(+), 3 deletions(-) rename cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/{snowbridge_v2.rs => snowbridge_v2_outbound.rs} (100%) diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs index cd826e3bfb29..4c49614c6a96 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs @@ -20,7 +20,7 @@ mod claim_assets; mod register_bridged_assets; mod send_xcm; mod snowbridge; -mod snowbridge_v2; +mod snowbridge_v2_outbound; mod teleport; mod transact; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs index 6a6809763471..ffa60a4f52e7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs @@ -256,6 +256,7 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { }); BridgeHubWestend::execute_with(|| { + use bridge_hub_westend_runtime::xcm_config::TreasuryAccount; type RuntimeEvent = ::RuntimeEvent; // Check that the transfer token back to Ethereum message was queue in the Ethereum // Outbound Queue @@ -264,12 +265,21 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { vec![RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageQueued{ .. }) => {},] ); let events = BridgeHubWestend::events(); + // Check that the local fee was credited to the Snowbridge sovereign account + assert!( + events.iter().any(|event| matches!( + event, + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) + if *who == TreasuryAccount::get().into() && *amount == 5071000000 + )), + "Snowbridge sovereign takes local fee." + ); // Check that the remote fee was credited to the AssetHub sovereign account assert!( events.iter().any(|event| matches!( event, - RuntimeEvent::Balances(pallet_balances::Event::Minted { who,.. 
}) - if *who == assethub_sovereign + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) + if *who == assethub_sovereign && *amount == 2680000000000, )), "AssetHub sovereign takes remote fee." ); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs similarity index 100% rename from cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs From 0c8badf0b2e44ed727366e53175295855a015d8e Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 29 Nov 2024 13:31:19 +0800 Subject: [PATCH 33/68] Reorgnize code layout --- Cargo.lock | 35 +++++++++++- Cargo.toml | 2 + .../pallets/outbound-queue-v2/Cargo.toml | 5 +- .../outbound-queue-v2/runtime-api/Cargo.toml | 2 + .../outbound-queue-v2/runtime-api/src/lib.rs | 2 +- .../pallets/outbound-queue-v2/src/api.rs | 4 +- .../outbound-queue-v2/src/benchmarking.rs | 6 +- .../pallets/outbound-queue-v2/src/lib.rs | 8 +-- .../pallets/outbound-queue-v2/src/mock.rs | 2 +- .../src/send_message_impl.rs | 2 +- .../pallets/outbound-queue-v2/src/test.rs | 10 ++-- .../pallets/outbound-queue/Cargo.toml | 4 +- .../outbound-queue/runtime-api/Cargo.toml | 2 + .../outbound-queue/runtime-api/src/lib.rs | 6 +- .../pallets/outbound-queue/src/api.rs | 6 +- .../outbound-queue/src/benchmarking.rs | 6 +- .../pallets/outbound-queue/src/lib.rs | 8 +-- .../pallets/outbound-queue/src/mock.rs | 2 +- .../outbound-queue/src/send_message_impl.rs | 10 ++-- .../pallets/outbound-queue/src/test.rs | 10 ++-- bridges/snowbridge/pallets/system/Cargo.toml | 2 + .../pallets/system/src/benchmarking.rs | 3 +- bridges/snowbridge/pallets/system/src/lib.rs | 11 ++-- bridges/snowbridge/pallets/system/src/mock.rs | 5 +- bridges/snowbridge/primitives/core/Cargo.toml | 3 - bridges/snowbridge/primitives/core/src/lib.rs | 1 - .../primitives/core/src/operating_mode.rs | 1 - .../primitives/outbound-router/Cargo.toml | 9 +-- .../primitives/outbound-router/src/lib.rs | 1 - .../primitives/outbound-router/src/v1/mod.rs | 6 +- .../outbound-router/src/v1/tests.rs | 6 +- .../outbound-router/src/v2/convert.rs | 10 ++-- .../primitives/outbound-router/src/v2/mod.rs | 3 +- .../outbound-router/src/v2/tests.rs | 10 ++-- .../snowbridge/primitives/outbound/Cargo.toml | 56 +++++++++++++++++++ .../snowbridge/primitives/outbound/README.md | 4 ++ .../outbound/mod.rs => outbound/src/lib.rs} | 9 +-- .../{core/src/outbound => outbound/src}/v1.rs | 7 +-- .../{core/src/outbound => outbound/src}/v2.rs | 4 +- .../runtime/runtime-common/Cargo.toml | 2 + .../runtime/runtime-common/src/lib.rs | 2 +- .../runtime/runtime-common/src/tests.rs | 2 +- .../bridges/bridge-hub-rococo/Cargo.toml | 1 + .../bridge-hub-rococo/src/tests/snowbridge.rs | 3 +- .../bridges/bridge-hub-westend/Cargo.toml | 1 + .../src/tests/snowbridge.rs | 3 +- .../src/tests/snowbridge_v2_outbound.rs | 3 +- .../assets/asset-hub-westend/Cargo.toml | 1 - .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 3 +- .../src/bridge_to_ethereum_config.rs | 2 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 6 +- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 3 +- .../src/bridge_to_ethereum_config.rs | 7 ++- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 12 ++-- umbrella/Cargo.toml | 1 - 55 files changed, 206 insertions(+), 129 
deletions(-) create mode 100644 bridges/snowbridge/primitives/outbound/Cargo.toml create mode 100644 bridges/snowbridge/primitives/outbound/README.md rename bridges/snowbridge/primitives/{core/src/outbound/mod.rs => outbound/src/lib.rs} (96%) rename bridges/snowbridge/primitives/{core/src/outbound => outbound/src}/v1.rs (99%) rename bridges/snowbridge/primitives/{core/src/outbound => outbound/src}/v2.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index 0ec70bb40a92..53c7032a154c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2512,6 +2512,7 @@ dependencies = [ "rococo-westend-system-emulated-network", "scale-info", "snowbridge-core 0.2.0", + "snowbridge-outbound-primitives", "snowbridge-pallet-inbound-queue-fixtures 0.10.0", "snowbridge-pallet-outbound-queue 0.2.0", "snowbridge-pallet-system 0.2.0", @@ -2594,6 +2595,7 @@ dependencies = [ "snowbridge-beacon-primitives 0.2.0", "snowbridge-core 0.2.0", "snowbridge-merkle-tree", + "snowbridge-outbound-primitives", "snowbridge-outbound-queue-runtime-api 0.2.0", "snowbridge-outbound-router-primitives", "snowbridge-pallet-ethereum-client 0.2.0", @@ -2753,6 +2755,7 @@ dependencies = [ "rococo-westend-system-emulated-network", "scale-info", "snowbridge-core 0.2.0", + "snowbridge-outbound-primitives", "snowbridge-outbound-router-primitives", "snowbridge-pallet-inbound-queue 0.2.0", "snowbridge-pallet-inbound-queue-fixtures 0.10.0", @@ -2835,6 +2838,7 @@ dependencies = [ "snowbridge-beacon-primitives 0.2.0", "snowbridge-core 0.2.0", "snowbridge-merkle-tree", + "snowbridge-outbound-primitives", "snowbridge-outbound-queue-runtime-api 0.2.0", "snowbridge-outbound-queue-runtime-api-v2", "snowbridge-outbound-router-primitives", @@ -24743,7 +24747,6 @@ dependencies = [ "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "scale-info", - "serde", "snowbridge-beacon-primitives 0.2.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", @@ -24852,6 +24855,29 @@ dependencies = [ "zeroize", ] +[[package]] +name = "snowbridge-outbound-primitives" +version = "0.2.0" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "ethabi-decode 2.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "hex", + "hex-literal", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "scale-info", + "snowbridge-core 0.2.0", + "sp-arithmetic 23.0.0", + "sp-core 28.0.0", + "sp-std 14.0.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", +] + [[package]] name = "snowbridge-outbound-queue-merkle-tree" version = "0.9.1" @@ -24872,6 +24898,7 @@ dependencies = [ "parity-scale-codec", "snowbridge-core 0.2.0", "snowbridge-merkle-tree", + "snowbridge-outbound-primitives", "sp-api 26.0.0", "sp-std 14.0.0", ] @@ -24899,6 +24926,7 @@ dependencies = [ "scale-info", "snowbridge-core 0.2.0", "snowbridge-merkle-tree", + "snowbridge-outbound-primitives", "sp-api 26.0.0", "sp-std 14.0.0", "staging-xcm 7.0.0", @@ -24914,6 +24942,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "snowbridge-core 0.2.0", + "snowbridge-outbound-primitives", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", @@ -25095,6 +25124,7 @@ dependencies = [ "serde", "snowbridge-core 0.2.0", "snowbridge-merkle-tree", + "snowbridge-outbound-primitives", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", @@ -25144,6 +25174,7 @@ dependencies = [ "serde", "snowbridge-core 0.2.0", "snowbridge-merkle-tree", + "snowbridge-outbound-primitives", "snowbridge-outbound-router-primitives", "sp-arithmetic 23.0.0", "sp-core 28.0.0", @@ -25172,6 +25203,7 @@ dependencies 
= [ "polkadot-primitives 7.0.0", "scale-info", "snowbridge-core 0.2.0", + "snowbridge-outbound-primitives", "snowbridge-pallet-outbound-queue 0.2.0", "sp-core 28.0.0", "sp-io 30.0.0", @@ -25249,6 +25281,7 @@ dependencies = [ "log", "parity-scale-codec", "snowbridge-core 0.2.0", + "snowbridge-outbound-primitives", "sp-arithmetic 23.0.0", "sp-std 14.0.0", "staging-xcm 7.0.0", diff --git a/Cargo.toml b/Cargo.toml index b753c867b51e..4e6e30552433 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,6 +60,7 @@ members = [ "bridges/snowbridge/primitives/core", "bridges/snowbridge/primitives/ethereum", "bridges/snowbridge/primitives/merkle-tree", + "bridges/snowbridge/primitives/outbound", "bridges/snowbridge/primitives/outbound-router", "bridges/snowbridge/primitives/router", "bridges/snowbridge/runtime/runtime-common", @@ -1226,6 +1227,7 @@ snowbridge-beacon-primitives = { path = "bridges/snowbridge/primitives/beacon", snowbridge-core = { path = "bridges/snowbridge/primitives/core", default-features = false } snowbridge-ethereum = { path = "bridges/snowbridge/primitives/ethereum", default-features = false } snowbridge-merkle-tree = { path = "bridges/snowbridge/primitives/merkle-tree", default-features = false } +snowbridge-outbound-primitives = { path = "bridges/snowbridge/primitives/outbound", default-features = false } snowbridge-outbound-queue-runtime-api = { path = "bridges/snowbridge/pallets/outbound-queue/runtime-api", default-features = false } snowbridge-outbound-queue-runtime-api-v2 = { path = "bridges/snowbridge/pallets/outbound-queue-v2/runtime-api", default-features = false } snowbridge-outbound-router-primitives = { path = "bridges/snowbridge/primitives/outbound-router", default-features = false } diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml index ac8dee02f116..1f5c6c84c766 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue-v2/Cargo.toml @@ -32,11 +32,12 @@ sp-arithmetic = { workspace = true } bridge-hub-common = { workspace = true } -snowbridge-core = { features = ["serde"], workspace = true } +snowbridge-core = { workspace = true } ethabi = { workspace = true } hex-literal = { workspace = true, default-features = true } snowbridge-merkle-tree = { workspace = true } snowbridge-outbound-router-primitives = { workspace = true } +snowbridge-outbound-primitives = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } xcm-builder = { workspace = true } @@ -61,6 +62,7 @@ std = [ "serde/std", "snowbridge-core/std", "snowbridge-merkle-tree/std", + "snowbridge-outbound-primitives/std", "snowbridge-outbound-router-primitives/std", "sp-arithmetic/std", "sp-core/std", @@ -79,7 +81,6 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", - "snowbridge-outbound-router-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/Cargo.toml index 14f4a8d18c19..8d416b667df1 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/Cargo.toml @@ -22,6 +22,7 @@ sp-api = { workspace = true } frame-support = { workspace = true } snowbridge-core = { 
workspace = true } snowbridge-merkle-tree = { workspace = true } +snowbridge-outbound-primitives = { workspace = true } xcm = { workspace = true } [features] @@ -32,6 +33,7 @@ std = [ "scale-info/std", "snowbridge-core/std", "snowbridge-merkle-tree/std", + "snowbridge-outbound-primitives/std", "sp-api/std", "sp-std/std", "xcm/std", diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs index f2c88658c23f..955c37892e7e 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/runtime-api/src/lib.rs @@ -3,8 +3,8 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::traits::tokens::Balance as BalanceT; -use snowbridge_core::outbound::{v2::abi::InboundMessage, DryRunError}; use snowbridge_merkle_tree::MerkleProof; +use snowbridge_outbound_primitives::{v2::abi::InboundMessage, DryRunError}; use xcm::prelude::Xcm; sp_api::decl_runtime_apis! { diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs index 75e51be90112..2b046ed0b883 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/api.rs @@ -4,14 +4,14 @@ use crate::{Config, MessageLeaves}; use frame_support::storage::StorageStreamIter; -use snowbridge_core::outbound::{ +use snowbridge_merkle_tree::{merkle_proof, MerkleProof}; +use snowbridge_outbound_primitives::{ v2::{ abi::{CommandWrapper, InboundMessage}, GasMeter, Message, }, DryRunError, }; -use snowbridge_merkle_tree::{merkle_proof, MerkleProof}; use snowbridge_outbound_router_primitives::v2::convert::XcmConverter; use sp_core::Get; use sp_std::{default::Default, vec::Vec}; diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/benchmarking.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/benchmarking.rs index f6e02844a58d..80ce44532921 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/benchmarking.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/benchmarking.rs @@ -5,10 +5,8 @@ use super::*; use bridge_hub_common::AggregateMessageOrigin; use codec::Encode; use frame_benchmarking::v2::*; -use snowbridge_core::{ - outbound::v1::{Command, Initializer, QueuedMessage}, - ChannelId, -}; +use snowbridge_core::ChannelId; +use snowbridge_outbound_primitives::v1::{Command, Initializer, QueuedMessage}; use sp_core::{H160, H256}; #[allow(unused_imports)] diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs index 6b669a75e5c9..3fdc838e3039 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/lib.rs @@ -73,13 +73,13 @@ use frame_support::{ pub use pallet::*; use snowbridge_core::{ inbound::{Message as DeliveryMessage, VerificationError, Verifier}, - outbound::v2::{ - abi::{CommandWrapper, InboundMessage, InboundMessageWrapper}, - GasMeter, Message, - }, BasicOperatingMode, RewardLedger, TokenId, }; use snowbridge_merkle_tree::merkle_root; +use snowbridge_outbound_primitives::v2::{ + abi::{CommandWrapper, InboundMessage, InboundMessageWrapper}, + GasMeter, Message, +}; use sp_core::{H160, H256}; use sp_runtime::{ traits::{BlockNumberProvider, Hash, MaybeEquivalence}, diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs index 
2215f388b70d..8f3c53c64471 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/mock.rs @@ -14,10 +14,10 @@ use snowbridge_core::{ gwei, inbound::{Log, Proof, VerificationError, Verifier}, meth, - outbound::v2::*, pricing::{PricingParameters, Rewards}, ParaId, }; +use snowbridge_outbound_primitives::v2::*; use sp_core::{ConstU32, H160, H256}; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup, Keccak256}, diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs index 97188c9c4bc2..6c9a34c3d53a 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/send_message_impl.rs @@ -8,7 +8,7 @@ use frame_support::{ ensure, traits::{EnqueueMessage, Get}, }; -use snowbridge_core::outbound::{ +use snowbridge_outbound_primitives::{ v2::{primary_governance_origin, Message, SendMessage}, SendError, SendMessageFeeProvider, }; diff --git a/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs b/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs index abbbfd64f54a..8f53485328d1 100644 --- a/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs +++ b/bridges/snowbridge/pallets/outbound-queue-v2/src/test.rs @@ -11,12 +11,10 @@ use frame_support::{ }; use codec::Encode; -use snowbridge_core::{ - outbound::{ - v2::{abi::InboundMessageWrapper, primary_governance_origin, Command, SendMessage}, - SendError, - }, - ChannelId, ParaId, +use snowbridge_core::{ChannelId, ParaId}; +use snowbridge_outbound_primitives::{ + v2::{abi::InboundMessageWrapper, primary_governance_origin, Command, SendMessage}, + SendError, }; use sp_core::{hexdisplay::HexDisplay, H256}; diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index 5aa10e69a01e..f0316409ab1e 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -30,7 +30,8 @@ sp-arithmetic = { workspace = true } bridge-hub-common = { workspace = true } -snowbridge-core = { features = ["serde"], workspace = true } +snowbridge-core = { workspace = true } +snowbridge-outbound-primitives = { workspace = true } snowbridge-merkle-tree = { workspace = true } ethabi = { workspace = true } @@ -52,6 +53,7 @@ std = [ "serde/std", "snowbridge-core/std", "snowbridge-merkle-tree/std", + "snowbridge-outbound-primitives/std", "sp-arithmetic/std", "sp-core/std", "sp-io/std", diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index f050db9378a9..132dcf6235c7 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -21,6 +21,7 @@ sp-api = { workspace = true } frame-support = { workspace = true } snowbridge-merkle-tree = { workspace = true } snowbridge-core = { workspace = true } +snowbridge-outbound-primitives = { workspace = true } [features] default = ["std"] @@ -29,6 +30,7 @@ std = [ "frame-support/std", "snowbridge-core/std", "snowbridge-merkle-tree/std", + "snowbridge-outbound-primitives/std", "sp-api/std", "sp-std/std", ] diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs index ecd2de682268..cd25f7169bce 100644 --- 
a/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs @@ -3,11 +3,9 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::traits::tokens::Balance as BalanceT; -use snowbridge_core::{ - outbound::v1::{Command, Fee}, - PricingParameters, -}; +use snowbridge_core::PricingParameters; use snowbridge_merkle_tree::MerkleProof; +use snowbridge_outbound_primitives::v1::{Command, Fee}; sp_api::decl_runtime_apis! { pub trait OutboundQueueApi where Balance: BalanceT diff --git a/bridges/snowbridge/pallets/outbound-queue/src/api.rs b/bridges/snowbridge/pallets/outbound-queue/src/api.rs index 08f4f1561968..af2880a67110 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/api.rs @@ -4,11 +4,9 @@ use crate::{Config, MessageLeaves}; use frame_support::storage::StorageStreamIter; -use snowbridge_core::{ - outbound::v1::{Command, Fee, GasMeter}, - PricingParameters, -}; +use snowbridge_core::PricingParameters; use snowbridge_merkle_tree::{merkle_proof, MerkleProof}; +use snowbridge_outbound_primitives::v1::{Command, Fee, GasMeter}; use sp_core::Get; pub fn prove_message(leaf_index: u64) -> Option diff --git a/bridges/snowbridge/pallets/outbound-queue/src/benchmarking.rs b/bridges/snowbridge/pallets/outbound-queue/src/benchmarking.rs index 0eff490b1ae4..99e7ce642aac 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/benchmarking.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/benchmarking.rs @@ -5,10 +5,8 @@ use super::*; use bridge_hub_common::AggregateMessageOrigin; use codec::Encode; use frame_benchmarking::v2::*; -use snowbridge_core::{ - outbound::v1::{Command, Initializer}, - ChannelId, -}; +use snowbridge_core::ChannelId; +use snowbridge_outbound_primitives::v1::{Command, Initializer}; use sp_core::{H160, H256}; #[allow(unused_imports)] diff --git a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs index feb86bce5dd8..08a8937fbc9b 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs @@ -110,11 +110,11 @@ use frame_support::{ traits::{tokens::Balance, Contains, Defensive, EnqueueMessage, Get, ProcessMessageError}, weights::{Weight, WeightToFee}, }; -use snowbridge_core::{ - outbound::v1::{Fee, GasMeter, QueuedMessage, VersionedQueuedMessage, ETHER_DECIMALS}, - BasicOperatingMode, ChannelId, -}; +use snowbridge_core::{BasicOperatingMode, ChannelId}; use snowbridge_merkle_tree::merkle_root; +use snowbridge_outbound_primitives::v1::{ + Fee, GasMeter, QueuedMessage, VersionedQueuedMessage, ETHER_DECIMALS, +}; use sp_core::{H256, U256}; use sp_runtime::{ traits::{CheckedDiv, Hash}, diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index d7bc4a8bcb5d..aae6bbca3adb 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -10,10 +10,10 @@ use frame_support::{ use snowbridge_core::{ gwei, meth, - outbound::v1::*, pricing::{PricingParameters, Rewards}, ParaId, PRIMARY_GOVERNANCE_CHANNEL, }; +use snowbridge_outbound_primitives::v1::*; use sp_core::{ConstU32, ConstU8, H160, H256}; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup, Keccak256}, diff --git a/bridges/snowbridge/pallets/outbound-queue/src/send_message_impl.rs 
b/bridges/snowbridge/pallets/outbound-queue/src/send_message_impl.rs index 39b41b1c792a..f3b79cdf91c4 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/send_message_impl.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/send_message_impl.rs @@ -10,12 +10,10 @@ use frame_support::{ CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use frame_system::unique; -use snowbridge_core::{ - outbound::{ - v1::{Fee, Message, QueuedMessage, SendMessage, VersionedQueuedMessage}, - SendError, SendMessageFeeProvider, - }, - ChannelId, PRIMARY_GOVERNANCE_CHANNEL, +use snowbridge_core::{ChannelId, PRIMARY_GOVERNANCE_CHANNEL}; +use snowbridge_outbound_primitives::{ + v1::{Fee, Message, QueuedMessage, SendMessage, VersionedQueuedMessage}, + SendError, SendMessageFeeProvider, }; use sp_core::H256; use sp_runtime::BoundedVec; diff --git a/bridges/snowbridge/pallets/outbound-queue/src/test.rs b/bridges/snowbridge/pallets/outbound-queue/src/test.rs index 36227817f368..7311f48ed8df 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/test.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/test.rs @@ -9,12 +9,10 @@ use frame_support::{ }; use codec::Encode; -use snowbridge_core::{ - outbound::{ - v1::{Command, SendMessage}, - SendError, - }, - ParaId, PricingParameters, Rewards, +use snowbridge_core::{ParaId, PricingParameters, Rewards}; +use snowbridge_outbound_primitives::{ + v1::{Command, SendMessage}, + SendError, }; use sp_arithmetic::FixedU128; use sp_core::H256; diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml index f1e749afb997..a22f6e3b47bc 100644 --- a/bridges/snowbridge/pallets/system/Cargo.toml +++ b/bridges/snowbridge/pallets/system/Cargo.toml @@ -33,6 +33,7 @@ xcm = { workspace = true } xcm-executor = { workspace = true } snowbridge-core = { workspace = true } +snowbridge-outbound-primitives = { workspace = true } [dev-dependencies] hex = { workspace = true, default-features = true } @@ -53,6 +54,7 @@ std = [ "log/std", "scale-info/std", "snowbridge-core/std", + "snowbridge-outbound-primitives/std", "sp-core/std", "sp-io/std", "sp-runtime/std", diff --git a/bridges/snowbridge/pallets/system/src/benchmarking.rs b/bridges/snowbridge/pallets/system/src/benchmarking.rs index 939de9d40d13..ec6949ed7036 100644 --- a/bridges/snowbridge/pallets/system/src/benchmarking.rs +++ b/bridges/snowbridge/pallets/system/src/benchmarking.rs @@ -7,7 +7,8 @@ use super::*; use crate::Pallet as SnowbridgeControl; use frame_benchmarking::v2::*; use frame_system::RawOrigin; -use snowbridge_core::{eth, outbound::OperatingMode}; +use snowbridge_core::eth; +use snowbridge_outbound_primitives::OperatingMode; use sp_runtime::SaturatedConversion; use xcm::prelude::*; diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 64b093884622..24575a75b14c 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -67,15 +67,14 @@ use frame_support::{ }; use frame_system::pallet_prelude::*; use snowbridge_core::{ - meth, - outbound::{ - v1::{Command, Initializer, Message, SendMessage}, - OperatingMode, SendError, - }, - sibling_sovereign_account, AgentId, AssetMetadata, Channel, ChannelId, ParaId, + meth, sibling_sovereign_account, AgentId, AssetMetadata, Channel, ChannelId, ParaId, PricingParameters as PricingParametersRecord, TokenId, TokenIdOf, PRIMARY_GOVERNANCE_CHANNEL, SECONDARY_GOVERNANCE_CHANNEL, }; +use snowbridge_outbound_primitives::{ + 
v1::{Command, Initializer, Message, SendMessage}, + OperatingMode, SendError, +}; use sp_core::{RuntimeDebug, H160, H256}; use sp_io::hashing::blake2_256; use sp_runtime::{ diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index 5b83c0d856b6..1518326797c5 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -11,9 +11,10 @@ use sp_core::H256; use xcm_executor::traits::ConvertLocation; use snowbridge_core::{ - gwei, meth, outbound::v1::ConstantGasMeter, sibling_sovereign_account, AgentId, - AllowSiblingsOnly, ParaId, PricingParameters, Rewards, + gwei, meth, sibling_sovereign_account, AgentId, AllowSiblingsOnly, ParaId, PricingParameters, + Rewards, }; +use snowbridge_outbound_primitives::v1::ConstantGasMeter; use sp_runtime::{ traits::{AccountIdConversion, BlakeTwo256, IdentityLookup, Keccak256}, AccountId32, BuildStorage, FixedU128, diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index 0e696f0d2256..b5863c50805f 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -12,7 +12,6 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { optional = true, features = ["alloc", "derive"], workspace = true } codec = { workspace = true } scale-info = { features = ["derive"], workspace = true } hex-literal = { workspace = true, default-features = true } @@ -50,7 +49,6 @@ std = [ "frame-system/std", "polkadot-parachain-primitives/std", "scale-info/std", - "serde/std", "snowbridge-beacon-primitives/std", "sp-arithmetic/std", "sp-core/std", @@ -60,7 +58,6 @@ std = [ "xcm-builder/std", "xcm/std", ] -serde = ["dep:serde", "scale-info/serde"] runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", diff --git a/bridges/snowbridge/primitives/core/src/lib.rs b/bridges/snowbridge/primitives/core/src/lib.rs index 88ac8124a15b..e3bfb34897d6 100644 --- a/bridges/snowbridge/primitives/core/src/lib.rs +++ b/bridges/snowbridge/primitives/core/src/lib.rs @@ -11,7 +11,6 @@ mod tests; pub mod inbound; pub mod location; pub mod operating_mode; -pub mod outbound; pub mod pricing; pub mod reward; pub mod ringbuffer; diff --git a/bridges/snowbridge/primitives/core/src/operating_mode.rs b/bridges/snowbridge/primitives/core/src/operating_mode.rs index 9894e587ef5e..8957bc6cc45e 100644 --- a/bridges/snowbridge/primitives/core/src/operating_mode.rs +++ b/bridges/snowbridge/primitives/core/src/operating_mode.rs @@ -4,7 +4,6 @@ use sp_runtime::RuntimeDebug; /// Basic operating modes for a bridges module (Normal/Halted). #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum BasicOperatingMode { /// Normal mode, when all operations are allowed. 
Normal, diff --git a/bridges/snowbridge/primitives/outbound-router/Cargo.toml b/bridges/snowbridge/primitives/outbound-router/Cargo.toml index 17601d440973..5eb9e703cbc0 100644 --- a/bridges/snowbridge/primitives/outbound-router/Cargo.toml +++ b/bridges/snowbridge/primitives/outbound-router/Cargo.toml @@ -27,6 +27,7 @@ xcm-executor = { workspace = true } xcm-builder = { workspace = true } snowbridge-core = { workspace = true } +snowbridge-outbound-primitives = { workspace = true } hex-literal = { workspace = true, default-features = true } @@ -40,6 +41,7 @@ std = [ "log/std", "scale-info/std", "snowbridge-core/std", + "snowbridge-outbound-primitives/std", "sp-core/std", "sp-io/std", "sp-runtime/std", @@ -48,10 +50,3 @@ std = [ "xcm-executor/std", "xcm/std", ] -runtime-benchmarks = [ - "frame-support/runtime-benchmarks", - "snowbridge-core/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] diff --git a/bridges/snowbridge/primitives/outbound-router/src/lib.rs b/bridges/snowbridge/primitives/outbound-router/src/lib.rs index 7ab04608543d..f497ef3742a0 100644 --- a/bridges/snowbridge/primitives/outbound-router/src/lib.rs +++ b/bridges/snowbridge/primitives/outbound-router/src/lib.rs @@ -1,6 +1,5 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork #![cfg_attr(not(feature = "std"), no_std)] - pub mod v1; pub mod v2; diff --git a/bridges/snowbridge/primitives/outbound-router/src/v1/mod.rs b/bridges/snowbridge/primitives/outbound-router/src/v1/mod.rs index 6394ba927d8a..e5f274c1eaac 100644 --- a/bridges/snowbridge/primitives/outbound-router/src/v1/mod.rs +++ b/bridges/snowbridge/primitives/outbound-router/src/v1/mod.rs @@ -10,10 +10,8 @@ use core::slice::Iter; use codec::{Decode, Encode}; use frame_support::{ensure, traits::Get}; -use snowbridge_core::{ - outbound::v1::{AgentExecuteCommand, Command, Message, SendMessage}, - AgentId, ChannelId, ParaId, TokenId, TokenIdOf, -}; +use snowbridge_core::{AgentId, ChannelId, ParaId, TokenId, TokenIdOf}; +use snowbridge_outbound_primitives::v1::{AgentExecuteCommand, Command, Message, SendMessage}; use sp_core::{H160, H256}; use sp_runtime::traits::MaybeEquivalence; use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; diff --git a/bridges/snowbridge/primitives/outbound-router/src/v1/tests.rs b/bridges/snowbridge/primitives/outbound-router/src/v1/tests.rs index 607e2ea611a4..ad889fbd5d35 100644 --- a/bridges/snowbridge/primitives/outbound-router/src/v1/tests.rs +++ b/bridges/snowbridge/primitives/outbound-router/src/v1/tests.rs @@ -1,9 +1,7 @@ use frame_support::parameter_types; use hex_literal::hex; -use snowbridge_core::{ - outbound::{v1::Fee, SendError, SendMessageFeeProvider}, - AgentIdOf, -}; +use snowbridge_core::AgentIdOf; +use snowbridge_outbound_primitives::{v1::Fee, SendError, SendMessageFeeProvider}; use sp_std::default::Default; use xcm::{ latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, diff --git a/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs b/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs index 8253322c34d5..25ecdcee3bc6 100644 --- a/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs +++ b/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs @@ -5,12 +5,10 @@ use codec::DecodeAll; use core::slice::Iter; use frame_support::{ensure, traits::Get, BoundedVec}; -use snowbridge_core::{ - outbound::{ - v2::{Command, Message}, - TransactInfo, - }, - TokenId, TokenIdOf, 
TokenIdOf as LocationIdOf, +use snowbridge_core::{TokenId, TokenIdOf, TokenIdOf as LocationIdOf}; +use snowbridge_outbound_primitives::{ + v2::{Command, Message}, + TransactInfo, }; use sp_core::H160; use sp_runtime::traits::MaybeEquivalence; diff --git a/bridges/snowbridge/primitives/outbound-router/src/v2/mod.rs b/bridges/snowbridge/primitives/outbound-router/src/v2/mod.rs index fe719e68ea04..eeffc7361d34 100644 --- a/bridges/snowbridge/primitives/outbound-router/src/v2/mod.rs +++ b/bridges/snowbridge/primitives/outbound-router/src/v2/mod.rs @@ -13,7 +13,8 @@ use frame_support::{ ensure, traits::{Contains, Get, ProcessMessageError}, }; -use snowbridge_core::{outbound::v2::SendMessage, TokenId}; +use snowbridge_core::TokenId; +use snowbridge_outbound_primitives::v2::SendMessage; use sp_core::{H160, H256}; use sp_runtime::traits::MaybeEquivalence; use sp_std::{marker::PhantomData, ops::ControlFlow, prelude::*}; diff --git a/bridges/snowbridge/primitives/outbound-router/src/v2/tests.rs b/bridges/snowbridge/primitives/outbound-router/src/v2/tests.rs index 835c7abc59aa..e5eaba48c179 100644 --- a/bridges/snowbridge/primitives/outbound-router/src/v2/tests.rs +++ b/bridges/snowbridge/primitives/outbound-router/src/v2/tests.rs @@ -2,12 +2,10 @@ use super::*; use crate::v2::convert::XcmConverterError; use frame_support::{parameter_types, BoundedVec}; use hex_literal::hex; -use snowbridge_core::{ - outbound::{ - v2::{Command, Message}, - SendError, SendMessageFeeProvider, - }, - AgentIdOf, TokenIdOf, +use snowbridge_core::{AgentIdOf, TokenIdOf}; +use snowbridge_outbound_primitives::{ + v2::{Command, Message}, + SendError, SendMessageFeeProvider, }; use sp_std::default::Default; use xcm::{latest::WESTEND_GENESIS_HASH, prelude::SendError as XcmSendError}; diff --git a/bridges/snowbridge/primitives/outbound/Cargo.toml b/bridges/snowbridge/primitives/outbound/Cargo.toml new file mode 100644 index 000000000000..87af3fb3ffe5 --- /dev/null +++ b/bridges/snowbridge/primitives/outbound/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "snowbridge-outbound-primitives" +description = "Snowbridge outbound primitives" +version = "0.2.0" +authors = ["Snowfork "] +edition.workspace = true +repository.workspace = true +license = "Apache-2.0" +categories = ["cryptography::cryptocurrencies"] + +[lints] +workspace = true + +[dependencies] +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } + +polkadot-parachain-primitives = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } + +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-std = { workspace = true } +sp-core = { workspace = true } +sp-arithmetic = { workspace = true } + +ethabi = { workspace = true } +alloy-primitives = { features = ["rlp"], workspace = true } +alloy-sol-types = { workspace = true } + +snowbridge-core = { workspace = true } + +[dev-dependencies] +hex = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } + +[features] +default = ["std"] +std = [ + "alloy-primitives/std", + "alloy-sol-types/std", + "codec/std", + "ethabi/std", + "frame-support/std", + "frame-system/std", + "polkadot-parachain-primitives/std", + "scale-info/std", + "snowbridge-core/std", + "sp-arithmetic/std", + "sp-core/std", + "sp-std/std", + "xcm-builder/std", + "xcm/std", +] diff --git a/bridges/snowbridge/primitives/outbound/README.md 
b/bridges/snowbridge/primitives/outbound/README.md new file mode 100644 index 000000000000..0126be63aeba --- /dev/null +++ b/bridges/snowbridge/primitives/outbound/README.md @@ -0,0 +1,4 @@ +# Core Primitives + +Contains common code core to Snowbridge, such as inbound and outbound queue types, pricing structs, ringbuffer data +types (used in the beacon client). diff --git a/bridges/snowbridge/primitives/core/src/outbound/mod.rs b/bridges/snowbridge/primitives/outbound/src/lib.rs similarity index 96% rename from bridges/snowbridge/primitives/core/src/outbound/mod.rs rename to bridges/snowbridge/primitives/outbound/src/lib.rs index 972f16fb2139..6a4c21d501d4 100644 --- a/bridges/snowbridge/primitives/core/src/outbound/mod.rs +++ b/bridges/snowbridge/primitives/outbound/src/lib.rs @@ -1,17 +1,18 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork +#![cfg_attr(not(feature = "std"), no_std)] //! # Outbound //! //! Common traits and types -use crate::Vec; +pub mod v1; +pub mod v2; + use codec::{Decode, Encode}; use frame_support::PalletError; use scale_info::TypeInfo; use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; use sp_core::{RuntimeDebug, H160}; - -pub mod v1; -pub mod v2; +use sp_std::vec::Vec; /// The operating mode of Channels and Gateway contract on Ethereum. #[derive(Copy, Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)] diff --git a/bridges/snowbridge/primitives/core/src/outbound/v1.rs b/bridges/snowbridge/primitives/outbound/src/v1.rs similarity index 99% rename from bridges/snowbridge/primitives/core/src/outbound/v1.rs rename to bridges/snowbridge/primitives/outbound/src/v1.rs index 037fc21db017..b35e55d524e2 100644 --- a/bridges/snowbridge/primitives/core/src/outbound/v1.rs +++ b/bridges/snowbridge/primitives/outbound/src/v1.rs @@ -2,14 +2,11 @@ // SPDX-FileCopyrightText: 2023 Snowfork //! # Outbound V1 primitives -use crate::{ - outbound::{OperatingMode, SendError, SendMessageFeeProvider}, - pricing::UD60x18, - ChannelId, -}; +use crate::{OperatingMode, SendError, SendMessageFeeProvider}; use codec::{Decode, Encode}; use ethabi::Token; use scale_info::TypeInfo; +use snowbridge_core::{pricing::UD60x18, ChannelId}; use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; use sp_core::{RuntimeDebug, H160, H256, U256}; use sp_std::{borrow::ToOwned, vec, vec::Vec}; diff --git a/bridges/snowbridge/primitives/core/src/outbound/v2.rs b/bridges/snowbridge/primitives/outbound/src/v2.rs similarity index 99% rename from bridges/snowbridge/primitives/core/src/outbound/v2.rs rename to bridges/snowbridge/primitives/outbound/src/v2.rs index a45fcc9eb261..4b0add908528 100644 --- a/bridges/snowbridge/primitives/core/src/outbound/v2.rs +++ b/bridges/snowbridge/primitives/outbound/src/v2.rs @@ -2,7 +2,6 @@ // SPDX-FileCopyrightText: 2023 Snowfork //! 
# Outbound V2 primitives -use crate::outbound::{OperatingMode, SendError}; use codec::{Decode, Encode}; use frame_support::{pallet_prelude::ConstU32, BoundedVec}; use hex_literal::hex; @@ -11,7 +10,8 @@ use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; use sp_core::{RuntimeDebug, H160, H256}; use sp_std::{vec, vec::Vec}; -use crate::outbound::v2::abi::{ +use crate::{OperatingMode, SendError}; +use abi::{ CallContractParams, MintForeignTokenParams, RegisterForeignTokenParams, SetOperatingModeParams, UnlockNativeTokenParams, UpgradeParams, }; diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml index d47cb3cb7101..946932e5d7f9 100644 --- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml +++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml @@ -22,6 +22,7 @@ xcm-builder = { workspace = true } xcm-executor = { workspace = true } snowbridge-core = { workspace = true } +snowbridge-outbound-primitives = { workspace = true } [dev-dependencies] @@ -32,6 +33,7 @@ std = [ "frame-support/std", "log/std", "snowbridge-core/std", + "snowbridge-outbound-primitives/std", "sp-arithmetic/std", "sp-std/std", "xcm-builder/std", diff --git a/bridges/snowbridge/runtime/runtime-common/src/lib.rs b/bridges/snowbridge/runtime/runtime-common/src/lib.rs index 0b1a74b232a0..314156b367b0 100644 --- a/bridges/snowbridge/runtime/runtime-common/src/lib.rs +++ b/bridges/snowbridge/runtime/runtime-common/src/lib.rs @@ -11,7 +11,7 @@ mod tests; use codec::FullCodec; use core::marker::PhantomData; use frame_support::traits::Get; -use snowbridge_core::outbound::SendMessageFeeProvider; +use snowbridge_outbound_primitives::SendMessageFeeProvider; use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; use sp_std::fmt::Debug; use xcm::prelude::*; diff --git a/bridges/snowbridge/runtime/runtime-common/src/tests.rs b/bridges/snowbridge/runtime/runtime-common/src/tests.rs index dea5ad5411c2..72f86d255b4c 100644 --- a/bridges/snowbridge/runtime/runtime-common/src/tests.rs +++ b/bridges/snowbridge/runtime/runtime-common/src/tests.rs @@ -1,6 +1,6 @@ use crate::XcmExportFeeToSibling; use frame_support::{parameter_types, sp_runtime::testing::H256}; -use snowbridge_core::outbound::{ +use snowbridge_outbound_primitives::{ v1::{Fee, Message, SendMessage}, SendError, SendMessageFeeProvider, }; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 9f6fe78a33ee..ea8a986fcd59 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -45,6 +45,7 @@ testnet-parachains-constants = { features = ["rococo", "westend"], workspace = t # Snowbridge snowbridge-core = { workspace = true } snowbridge-router-primitives = { workspace = true } +snowbridge-outbound-primitives = { workspace = true } snowbridge-pallet-system = { workspace = true } snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-pallet-inbound-queue-fixtures = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index d59553574c26..967dc43407be 100644 --- 
a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -18,7 +18,8 @@ use emulated_integration_tests_common::xcm_emulator::ConvertLocation; use frame_support::pallet_prelude::TypeInfo; use hex_literal::hex; use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeHubRococoSender; -use snowbridge_core::{inbound::InboundQueueFixture, outbound::OperatingMode}; +use snowbridge_core::inbound::InboundQueueFixture; +use snowbridge_outbound_primitives::OperatingMode; use snowbridge_pallet_inbound_queue_fixtures::{ register_token::make_register_token_message, send_token::make_send_token_message, send_token_to_penpal::make_send_token_to_penpal_message, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index d375c4a3cc43..fde1e29f9d23 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -47,6 +47,7 @@ bridge-hub-westend-runtime = { workspace = true } # Snowbridge snowbridge-core = { workspace = true } snowbridge-router-primitives = { workspace = true } +snowbridge-outbound-primitives = { workspace = true } snowbridge-outbound-router-primitives = { workspace = true } snowbridge-pallet-system = { workspace = true } snowbridge-pallet-outbound-queue = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs index ffa60a4f52e7..bee5665d56ce 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs @@ -20,7 +20,8 @@ use emulated_integration_tests_common::RESERVABLE_ASSET_ID; use frame_support::pallet_prelude::TypeInfo; use hex_literal::hex; use rococo_westend_system_emulated_network::asset_hub_westend_emulated_chain::genesis::AssetHubWestendAssetOwner; -use snowbridge_core::{outbound::OperatingMode, AssetMetadata, TokenIdOf}; +use snowbridge_core::{AssetMetadata, TokenIdOf}; +use snowbridge_outbound_primitives::OperatingMode; use snowbridge_router_primitives::inbound::{ Command, Destination, EthereumLocationsConverterFor, MessageV1, VersionedMessage, }; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs index b07f7faf554c..21e752a981a2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs @@ -15,7 +15,8 @@ use crate::imports::*; use frame_support::traits::fungibles::Mutate; use hex_literal::hex; -use snowbridge_core::{outbound::TransactInfo, AssetMetadata}; +use snowbridge_core::AssetMetadata; +use snowbridge_outbound_primitives::TransactInfo; use 
snowbridge_router_primitives::inbound::EthereumLocationsConverterFor; use sp_runtime::MultiAddress; use testnet_parachains_constants::westend::snowbridge::EthereumNetwork; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 8a8e62c5c1b9..b5ef949febdf 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -144,7 +144,6 @@ runtime-benchmarks = [ "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", - "snowbridge-outbound-router-primitives/runtime-benchmarks", "snowbridge-router-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index eb4a7d40de6f..a3d3e682801e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -114,6 +114,7 @@ snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-merkle-tree = { workspace = true } snowbridge-router-primitives = { workspace = true } +snowbridge-outbound-primitives = { workspace = true } snowbridge-outbound-router-primitives = { workspace = true } snowbridge-runtime-common = { workspace = true } @@ -193,6 +194,7 @@ std = [ "snowbridge-beacon-primitives/std", "snowbridge-core/std", "snowbridge-merkle-tree/std", + "snowbridge-outbound-primitives/std", "snowbridge-outbound-queue-runtime-api/std", "snowbridge-outbound-router-primitives/std", "snowbridge-pallet-ethereum-client/std", @@ -255,7 +257,6 @@ runtime-benchmarks = [ "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", - "snowbridge-outbound-router-primitives/runtime-benchmarks", "snowbridge-pallet-ethereum-client/runtime-benchmarks", "snowbridge-pallet-inbound-queue/runtime-benchmarks", "snowbridge-pallet-outbound-queue/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs index 4af0e08418c8..98d7db2ad08e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs @@ -108,7 +108,7 @@ impl snowbridge_pallet_outbound_queue::Config for Runtime { type Decimals = ConstU8<12>; type MaxMessagePayloadSize = ConstU32<2048>; type MaxMessagesPerBlock = ConstU32<32>; - type GasMeter = snowbridge_core::outbound::v1::ConstantGasMeter; + type GasMeter = crate::ConstantGasMeter; type Balance = Balance; type WeightToFee = WeightToFee; type WeightInfo = crate::weights::snowbridge_pallet_outbound_queue::WeightInfo; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index e19f9853cb22..a090d1e9799c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -91,10 +91,8 @@ pub use 
sp_runtime::BuildStorage; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use rococo_runtime_constants::system_parachain::{ASSET_HUB_ID, BRIDGE_HUB_ID}; -use snowbridge_core::{ - outbound::v1::{Command, Fee}, - AgentId, PricingParameters, -}; +use snowbridge_core::{AgentId, PricingParameters}; +pub use snowbridge_outbound_primitives::v1::{Command, ConstantGasMeter, Fee}; use xcm::{latest::prelude::*, prelude::*}; use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 40506e99c6f6..dc5ec22ad231 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -113,6 +113,7 @@ snowbridge-pallet-inbound-queue = { workspace = true } snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-router-primitives = { workspace = true } +snowbridge-outbound-primitives = { workspace = true } snowbridge-outbound-router-primitives = { workspace = true } snowbridge-runtime-common = { workspace = true } snowbridge-pallet-outbound-queue-v2 = { workspace = true } @@ -191,6 +192,7 @@ std = [ "snowbridge-beacon-primitives/std", "snowbridge-core/std", "snowbridge-merkle-tree/std", + "snowbridge-outbound-primitives/std", "snowbridge-outbound-queue-runtime-api-v2/std", "snowbridge-outbound-queue-runtime-api/std", "snowbridge-outbound-router-primitives/std", @@ -256,7 +258,6 @@ runtime-benchmarks = [ "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", - "snowbridge-outbound-router-primitives/runtime-benchmarks", "snowbridge-pallet-ethereum-client/runtime-benchmarks", "snowbridge-pallet-inbound-queue/runtime-benchmarks", "snowbridge-pallet-outbound-queue-v2/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index 4ec6ff5228cf..f1b800824fe2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -25,6 +25,9 @@ use crate::{ use parachains_common::{AccountId, Balance}; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{gwei, meth, AllowSiblingsOnly, PricingParameters, Rewards}; +use snowbridge_outbound_primitives::{ + v1::ConstantGasMeter, v2::ConstantGasMeter as ConstantGasMeterV2, +}; use snowbridge_outbound_router_primitives::{ v1::EthereumBlobExporter, v2::EthereumBlobExporter as EthereumBlobExporterV2, }; @@ -125,7 +128,7 @@ impl snowbridge_pallet_outbound_queue::Config for Runtime { type Decimals = ConstU8<12>; type MaxMessagePayloadSize = ConstU32<2048>; type MaxMessagesPerBlock = ConstU32<32>; - type GasMeter = snowbridge_core::outbound::v1::ConstantGasMeter; + type GasMeter = ConstantGasMeter; type Balance = Balance; type WeightToFee = WeightToFee; type WeightInfo = crate::weights::snowbridge_pallet_outbound_queue::WeightInfo; @@ -139,7 +142,7 @@ impl snowbridge_pallet_outbound_queue_v2::Config for Runtime { type MessageQueue = MessageQueue; type MaxMessagePayloadSize = ConstU32<2048>; type 
MaxMessagesPerBlock = ConstU32<32>; - type GasMeter = snowbridge_core::outbound::v2::ConstantGasMeter; + type GasMeter = ConstantGasMeterV2; type Balance = Balance; type WeightToFee = WeightToFee; type Verifier = snowbridge_pallet_ethereum_client::Pallet; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 96faab57f687..bf91526ab079 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -98,13 +98,11 @@ use parachains_common::{ impls::DealWithFees, AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, NORMAL_DISPATCH_RATIO, }; -use snowbridge_core::{ - outbound::{ - v1::{Command, Fee}, - v2::abi::InboundMessage, - DryRunError, - }, - AgentId, PricingParameters, +use snowbridge_core::{AgentId, PricingParameters}; +use snowbridge_outbound_primitives::{ + v1::{Command, Fee}, + v2::abi::InboundMessage, + DryRunError, }; use testnet_parachains_constants::westend::{consensus::*, currency::*, fee::WeightToFee, time::*}; use westend_runtime_constants::system_parachain::{ASSET_HUB_ID, BRIDGE_HUB_ID}; diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 31e7e9fea3a4..664b3a9e46f1 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -504,7 +504,6 @@ serde = [ "pallet-treasury?/serde", "pallet-xcm?/serde", "snowbridge-beacon-primitives?/serde", - "snowbridge-core?/serde", "snowbridge-ethereum?/serde", "snowbridge-pallet-ethereum-client?/serde", "snowbridge-pallet-inbound-queue?/serde", From 10dcaf57980e6c291b8a968bd8605bcb1e30374f Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 29 Nov 2024 16:26:01 +0800 Subject: [PATCH 34/68] More cleanup --- Cargo.lock | 3 +-- bridges/snowbridge/primitives/core/Cargo.toml | 7 +++---- bridges/snowbridge/primitives/core/src/operating_mode.rs | 1 + 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 53c7032a154c..94941beb99f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24737,8 +24737,6 @@ dependencies = [ name = "snowbridge-core" version = "0.2.0" dependencies = [ - "alloy-primitives", - "alloy-sol-types", "ethabi-decode 2.0.0", "frame-support 28.0.0", "frame-system 28.0.0", @@ -24747,6 +24745,7 @@ dependencies = [ "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "scale-info", + "serde", "snowbridge-beacon-primitives 0.2.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index b5863c50805f..fa37c795b2d1 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -12,6 +12,7 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] +serde = { optional = true, features = ["alloc", "derive"], workspace = true } codec = { workspace = true } scale-info = { features = ["derive"], workspace = true } hex-literal = { workspace = true, default-features = true } @@ -31,8 +32,6 @@ sp-arithmetic = { workspace = true } snowbridge-beacon-primitives = { workspace = true } ethabi = { workspace = true } -alloy-primitives = { features = ["rlp"], workspace = true } -alloy-sol-types = { workspace = true } [dev-dependencies] hex = { workspace = true, default-features = true } @@ -41,14 +40,13 @@ xcm-executor = { workspace = true, default-features = true } [features] default = ["std"] std = [ - 
"alloy-primitives/std", - "alloy-sol-types/std", "codec/std", "ethabi/std", "frame-support/std", "frame-system/std", "polkadot-parachain-primitives/std", "scale-info/std", + "serde/std", "snowbridge-beacon-primitives/std", "sp-arithmetic/std", "sp-core/std", @@ -58,6 +56,7 @@ std = [ "xcm-builder/std", "xcm/std", ] +serde = ["dep:serde", "scale-info/serde"] runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", diff --git a/bridges/snowbridge/primitives/core/src/operating_mode.rs b/bridges/snowbridge/primitives/core/src/operating_mode.rs index 8957bc6cc45e..9894e587ef5e 100644 --- a/bridges/snowbridge/primitives/core/src/operating_mode.rs +++ b/bridges/snowbridge/primitives/core/src/operating_mode.rs @@ -4,6 +4,7 @@ use sp_runtime::RuntimeDebug; /// Basic operating modes for a bridges module (Normal/Halted). #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum BasicOperatingMode { /// Normal mode, when all operations are allowed. Normal, From 72fb8bd3cd4a5051bb855415b360657d7ce247fb Mon Sep 17 00:00:00 2001 From: Rodrigo Quelhas <22591718+RomarQ@users.noreply.github.com> Date: Fri, 29 Nov 2024 10:33:46 +0000 Subject: [PATCH 35/68] Expose types from `sc-service` (#5855) # Description At moonbeam we have worked on a `lazy-loading` feature which is a client mode that forks a live parachain and fetches its state on-demand, we have been able to do this by duplicating some code from `sc_service::client`. The objective of this PR is to simplify the implementation by making public some types in polkadot-sdk. - Modules: - `sc_service::client` **I do not see a point to only expose this type when `test-helpers` feature is enabled** ## Integration Not applicable, the PR just makes some types public. ## Review Notes The changes included in this PR give more flexibility for client developers by exposing important types. --- prdoc/pr_5855.prdoc | 15 +++++++ substrate/bin/node/testing/Cargo.toml | 2 +- substrate/client/network/test/Cargo.toml | 2 +- substrate/client/rpc-spec-v2/Cargo.toml | 2 +- .../src/chain_head/subscription/inner.rs | 6 +-- .../rpc-spec-v2/src/chain_head/tests.rs | 6 +-- substrate/client/service/Cargo.toml | 2 - substrate/client/service/src/client/client.rs | 40 +------------------ substrate/client/service/src/client/mod.rs | 3 +- substrate/client/service/src/lib.rs | 5 +-- substrate/client/service/test/Cargo.toml | 2 +- .../client/service/test/src/client/mod.rs | 6 +-- substrate/test-utils/client/Cargo.toml | 4 +- substrate/test-utils/runtime/Cargo.toml | 2 +- 14 files changed, 34 insertions(+), 63 deletions(-) create mode 100644 prdoc/pr_5855.prdoc diff --git a/prdoc/pr_5855.prdoc b/prdoc/pr_5855.prdoc new file mode 100644 index 000000000000..7735cfee9f37 --- /dev/null +++ b/prdoc/pr_5855.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove feature `test-helpers` from sc-service + +doc: + - audience: Node Dev + description: | + Removes feature `test-helpers` from sc-service. 
+ +crates: + - name: sc-service + bump: major + - name: sc-rpc-spec-v2 + bump: major diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 16112386ad7c..1972c03a368b 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -37,7 +37,7 @@ sc-client-api = { workspace = true, default-features = true } sc-client-db = { features = ["rocksdb"], workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-service = { features = ["rocksdb", "test-helpers"], workspace = true, default-features = true } +sc-service = { features = ["rocksdb"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index ebece1762f29..6340d1dfb2f4 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -33,7 +33,7 @@ sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } sc-network-light = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true } +sc-service = { workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index b304bc905925..70f68436767f 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -56,7 +56,7 @@ sp-consensus = { workspace = true, default-features = true } sp-externalities = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true, features = ["test-helpers"] } assert_matches = { workspace = true } pretty_assertions = { workspace = true } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 95a7c7fe1832..3e1bd23776d3 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -784,7 +784,7 @@ mod tests { use super::*; use jsonrpsee::ConnectionId; use sc_block_builder::BlockBuilderBuilder; - use sc_service::client::new_in_mem; + use sc_service::client::new_with_backend; use sp_consensus::BlockOrigin; use sp_core::{testing::TaskExecutor, H256}; use substrate_test_runtime_client::{ @@ -811,13 +811,13 @@ mod tests { ) .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(), diff --git 
a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index c505566d887d..21e8365622a1 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -34,7 +34,7 @@ use jsonrpsee::{ use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_rpc::testing::TokioTestExecutor; -use sc_service::client::new_in_mem; +use sc_service::client::new_with_backend; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{ @@ -2547,13 +2547,13 @@ async fn pin_block_references() { .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TokioTestExecutor::default()), None, None, - Box::new(TokioTestExecutor::default()), client_config, ) .unwrap(), diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index f2fc65ef2439..3981395d9768 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -20,8 +20,6 @@ default = ["rocksdb"] # The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass # a path to a database, an error will be produced at runtime. rocksdb = ["sc-client-db/rocksdb"] -# exposes the client type -test-helpers = [] runtime-benchmarks = [ "sc-client-db/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index ce5b92551bf2..eddbb9260c05 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -85,10 +85,8 @@ use std::{ sync::Arc, }; -#[cfg(feature = "test-helpers")] -use { - super::call_executor::LocalCallExecutor, sc_client_api::in_mem, sp_core::traits::CodeExecutor, -}; +use super::call_executor::LocalCallExecutor; +use sp_core::traits::CodeExecutor; type NotificationSinks = Mutex>>; @@ -152,39 +150,6 @@ enum PrepareStorageChangesResult { Discard(ImportResult), Import(Option>), } - -/// Create an instance of in-memory client. -#[cfg(feature = "test-helpers")] -pub fn new_in_mem( - backend: Arc>, - executor: E, - genesis_block_builder: G, - prometheus_registry: Option, - telemetry: Option, - spawn_handle: Box, - config: ClientConfig, -) -> sp_blockchain::Result< - Client, LocalCallExecutor, E>, Block, RA>, -> -where - E: CodeExecutor + sc_executor::RuntimeVersionOf, - Block: BlockT, - G: BuildGenesisBlock< - Block, - BlockImportOperation = as backend::Backend>::BlockImportOperation, - >, -{ - new_with_backend( - backend, - executor, - genesis_block_builder, - spawn_handle, - prometheus_registry, - telemetry, - config, - ) -} - /// Client configuration items. #[derive(Debug, Clone)] pub struct ClientConfig { @@ -218,7 +183,6 @@ impl Default for ClientConfig { /// Create a client with the explicitly provided backend. /// This is useful for testing backend implementations. 
-#[cfg(feature = "test-helpers")] pub fn new_with_backend( backend: Arc, executor: E, diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index ec77a92f162f..3020b3d296f4 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -56,5 +56,4 @@ pub use call_executor::LocalCallExecutor; pub use client::{Client, ClientConfig}; pub(crate) use code_provider::CodeProvider; -#[cfg(feature = "test-helpers")] -pub use self::client::{new_in_mem, new_with_backend}; +pub use self::client::new_with_backend; diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 9c01d7288a81..b5a38d875e3b 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -23,14 +23,11 @@ #![recursion_limit = "1024"] pub mod chain_ops; +pub mod client; pub mod config; pub mod error; mod builder; -#[cfg(feature = "test-helpers")] -pub mod client; -#[cfg(not(feature = "test-helpers"))] -mod client; mod metrics; mod task_manager; diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 0edfc5b19314..632b98104f6b 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -31,7 +31,7 @@ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index 55bbfcdd8594..ead90c4c65d8 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -29,7 +29,7 @@ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; use sc_executor::WasmExecutor; -use sc_service::client::{new_in_mem, Client, LocalCallExecutor}; +use sc_service::client::{new_with_backend, Client, LocalCallExecutor}; use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; use sp_core::{testing::TaskExecutor, traits::CallContext, H256}; @@ -2087,13 +2087,13 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // NOTE: we need to build the client here instead of using the client // provided by test_runtime_client otherwise we can't access the private // `import_notification_sinks` and `finality_notification_sinks` fields. 
- let mut client = new_in_mem::<_, Block, _, RuntimeApi>( + let mut client = new_with_backend::<_, _, Block, _, RuntimeApi>( backend, executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(); diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index ebd1eab5980d..a67c91fc5f79 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -29,9 +29,7 @@ sc-client-db = { features = [ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-offchain = { workspace = true, default-features = true } -sc-service = { features = [ - "test-helpers", -], workspace = true } +sc-service = { workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 1c82c73072bc..96a888052876 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -45,7 +45,7 @@ sp-consensus-grandpa = { features = ["serde"], workspace = true } sp-trie = { workspace = true } sp-transaction-pool = { workspace = true } trie-db = { workspace = true } -sc-service = { features = ["test-helpers"], optional = true, workspace = true } +sc-service = { optional = true, workspace = true } sp-state-machine = { workspace = true } sp-externalities = { workspace = true } From 1dd21bcc1406e0f07f70e604f9cef4dc2115c989 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Fri, 29 Nov 2024 12:00:52 +0100 Subject: [PATCH 36/68] ci: update nightly in ci-unified to 2024-11-19 (#6691) cc https://github.com/paritytech/ci_cd/issues/1088 --- .github/env | 2 +- .gitlab-ci.yml | 2 +- docs/contributor/container.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/env b/.github/env index bb61e1f4cd99..730c37f1db80 100644 --- a/.github/env +++ b/.github/env @@ -1 +1 @@ -IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" +IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f508404f1efa..42a7e87bda43 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -22,7 +22,7 @@ workflow: variables: # CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ] - CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" + CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" diff --git a/docs/contributor/container.md b/docs/contributor/container.md index ec51b8b9d7cc..e387f568d7b5 100644 --- a/docs/contributor/container.md +++ b/docs/contributor/container.md @@ -24,7 +24,7 @@ The command below allows building a Linux binary without having to even install docker run --rm -it \ -w /polkadot-sdk \ -v $(pwd):/polkadot-sdk \ - docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 \ + docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558 \ cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain sudo chown -R $(id -u):$(id -g) target/ ``` From 
b3ab312724ee8c3a0c7f3d9b5ea6c98513b5c951 Mon Sep 17 00:00:00 2001 From: Xavier Lau Date: Fri, 29 Nov 2024 20:02:59 +0800 Subject: [PATCH 37/68] Migrate pallet-preimage to benchmark v2 (#6277) Part of: - #6202. --------- Co-authored-by: Giuseppe Re Co-authored-by: command-bot <> --- substrate/frame/preimage/src/benchmarking.rs | 296 ++++++++++--------- substrate/frame/preimage/src/weights.rs | 150 +++++----- 2 files changed, 231 insertions(+), 215 deletions(-) diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs index 3d0c5b900579..ea635bf3ef77 100644 --- a/substrate/frame/preimage/src/benchmarking.rs +++ b/substrate/frame/preimage/src/benchmarking.rs @@ -17,14 +17,13 @@ //! Preimage pallet benchmarking. -use super::*; use alloc::vec; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use crate::Pallet as Preimage; +use crate::*; fn funded_account() -> T::AccountId { let caller: T::AccountId = whitelisted_caller(); @@ -43,206 +42,225 @@ fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { (preimage, hash) } -benchmarks! { +fn insert_old_unrequested(s: u32) -> ::Hash { + let acc = account("old", s, 0); + T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); + + // The preimage size does not matter here as it is not touched. + let preimage = s.to_le_bytes(); + let hash = ::Hashing::hash(&preimage[..]); + + #[allow(deprecated)] + StatusFor::::insert( + &hash, + OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, + ); + hash +} + +#[benchmarks] +mod benchmarks { + use super::*; + // Expensive note - will reserve. - note_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - }: _(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it was requested. - note_requested_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_requested_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( + assert_ok!(Pallet::::request_preimage( T::ManagerOrigin::try_successful_origin() .expect("ManagerOrigin has no successful origin required for the benchmark"), hash, )); - }: note_preimage(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + note_preimage(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it's the manager. - note_no_deposit_preimage { - let s in 0 .. 
MAX_SIZE; + #[benchmark] + fn note_no_deposit_preimage(s: Linear<0, MAX_SIZE>) { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: note_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - preimage - ) verify { - assert!(Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + note_preimage(o as T::RuntimeOrigin, preimage); + + assert!(Pallet::::have_preimage(&hash)); } // Expensive unnote - will unreserve. - unnote_preimage { + #[benchmark] + fn unnote_preimage() { let caller = funded_account::(); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); - }: _(RawOrigin::Signed(caller), hash) - verify { - assert!(!Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), hash); + + assert!(!Pallet::::have_preimage(&hash)); } + // Cheap unnote - will not unreserve since there's no deposit held. - unnote_no_deposit_preimage { + #[benchmark] + fn unnote_no_deposit_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: unnote_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - assert!(!Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); + + #[extrinsic_call] + unnote_preimage(o as T::RuntimeOrigin, hash); + + assert!(!Pallet::::have_preimage(&hash)); } // Expensive request - will unreserve the noter's deposit. - request_preimage { + #[benchmark] + fn request_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); let noter = funded_account::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); - let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) }; + assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); + + #[extrinsic_call] + _(o as T::RuntimeOrigin, hash); + + let ticket = + TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); + let s = RequestStatus::Requested { + maybe_ticket: Some((noter, ticket)), + count: 1, + maybe_len: Some(MAX_SIZE), + }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - would unreserve the deposit but none was held. 
- request_no_deposit_preimage { + #[benchmark] + fn request_no_deposit_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; + assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + + let s = + RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - the preimage is not yet noted, so deposit to unreserve. - request_unnoted_preimage { + #[benchmark] + fn request_unnoted_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - the preimage is already requested, so just a counter bump. - request_requested_preimage { + #[benchmark] + fn request_requested_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } // Expensive unrequest - last reference and it's noted, so will destroy the preimage. 
- unrequest_preimage { + #[benchmark] + fn unrequest_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + assert_ok!(Pallet::::note_preimage(o.clone(), preimage)); + + #[extrinsic_call] + _(o as T::RuntimeOrigin, hash); + assert_eq!(RequestStatusFor::::get(&hash), None); } + // Cheap unrequest - last reference, but it's not noted. - unrequest_unnoted_preimage { + #[benchmark] + fn unrequest_unnoted_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + unrequest_preimage(o as T::RuntimeOrigin, hash); + assert_eq!(RequestStatusFor::::get(&hash), None); } + // Cheap unrequest - not the last reference. 
- unrequest_multi_referenced_preimage { + #[benchmark] + fn unrequest_multi_referenced_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + unrequest_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } - ensure_updated { - let n in 1..MAX_HASH_UPGRADE_BULK_COUNT; - + #[benchmark] + fn ensure_updated(n: Linear<1, MAX_HASH_UPGRADE_BULK_COUNT>) { let caller = funded_account::(); let hashes = (0..n).map(|i| insert_old_unrequested::(i)).collect::>(); - }: _(RawOrigin::Signed(caller), hashes) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), hashes); + assert_eq!(RequestStatusFor::::iter_keys().count(), n as usize); #[allow(deprecated)] let c = StatusFor::::iter_keys().count(); assert_eq!(c, 0); } - impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test); -} - -fn insert_old_unrequested(s: u32) -> ::Hash { - let acc = account("old", s, 0); - T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); - - // The preimage size does not matter here as it is not touched. - let preimage = s.to_le_bytes(); - let hash = ::Hashing::hash(&preimage[..]); - - #[allow(deprecated)] - StatusFor::::insert( - &hash, - OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, - ); - hash + impl_benchmark_test_suite! { + Pallet, + mock::new_test_ext(), + mock::Test + } } diff --git a/substrate/frame/preimage/src/weights.rs b/substrate/frame/preimage/src/weights.rs index edb2eed9c75a..a3aec7e7546e 100644 --- a/substrate/frame/preimage/src/weights.rs +++ b/substrate/frame/preimage/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_preimage` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_preimage -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/preimage/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_preimage +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/preimage/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -84,10 +82,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `7` // Estimated: `6012` - // Minimum execution time: 51_981_000 picoseconds. - Weight::from_parts(52_228_000, 6012) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_392, 0).saturating_mul(s.into())) + // Minimum execution time: 51_305_000 picoseconds. + Weight::from_parts(51_670_000, 6012) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -102,10 +100,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_835_000 picoseconds. - Weight::from_parts(16_429_000, 3556) - // Standard Error: 8 - .saturating_add(Weight::from_parts(2_647, 0).saturating_mul(s.into())) + // Minimum execution time: 16_204_000 picoseconds. + Weight::from_parts(16_613_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -120,10 +118,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_578_000, 3556) - // Standard Error: 7 - .saturating_add(Weight::from_parts(2_598, 0).saturating_mul(s.into())) + // Minimum execution time: 15_118_000 picoseconds. + Weight::from_parts(15_412_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -139,8 +137,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `206` // Estimated: `3820` - // Minimum execution time: 64_189_000 picoseconds. - Weight::from_parts(70_371_000, 3820) + // Minimum execution time: 57_218_000 picoseconds. + Weight::from_parts(61_242_000, 3820) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -154,8 +152,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 27_582_000 picoseconds. - Weight::from_parts(31_256_000, 3556) + // Minimum execution time: 25_140_000 picoseconds. 
+ Weight::from_parts(27_682_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -167,8 +165,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `150` // Estimated: `3556` - // Minimum execution time: 27_667_000 picoseconds. - Weight::from_parts(32_088_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -180,8 +178,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_065_000 picoseconds. - Weight::from_parts(20_550_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -193,8 +191,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3556` - // Minimum execution time: 13_638_000 picoseconds. - Weight::from_parts(16_979_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(15_439_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -206,8 +204,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 11_383_000 picoseconds. - Weight::from_parts(12_154_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -221,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 22_832_000 picoseconds. - Weight::from_parts(30_716_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -234,8 +232,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(12_129_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -247,8 +245,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_394_000 picoseconds. - Weight::from_parts(10_951_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -267,10 +265,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 62_203_000 picoseconds. - Weight::from_parts(63_735_000, 6012) - // Standard Error: 59_589 - .saturating_add(Weight::from_parts(59_482_352, 0).saturating_mul(n.into())) + // Minimum execution time: 61_990_000 picoseconds. 
+ Weight::from_parts(62_751_000, 6012) + // Standard Error: 44_079 + .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) @@ -295,10 +293,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `7` // Estimated: `6012` - // Minimum execution time: 51_981_000 picoseconds. - Weight::from_parts(52_228_000, 6012) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_392, 0).saturating_mul(s.into())) + // Minimum execution time: 51_305_000 picoseconds. + Weight::from_parts(51_670_000, 6012) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -313,10 +311,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_835_000 picoseconds. - Weight::from_parts(16_429_000, 3556) - // Standard Error: 8 - .saturating_add(Weight::from_parts(2_647, 0).saturating_mul(s.into())) + // Minimum execution time: 16_204_000 picoseconds. + Weight::from_parts(16_613_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -331,10 +329,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_578_000, 3556) - // Standard Error: 7 - .saturating_add(Weight::from_parts(2_598, 0).saturating_mul(s.into())) + // Minimum execution time: 15_118_000 picoseconds. + Weight::from_parts(15_412_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -350,8 +348,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `206` // Estimated: `3820` - // Minimum execution time: 64_189_000 picoseconds. - Weight::from_parts(70_371_000, 3820) + // Minimum execution time: 57_218_000 picoseconds. + Weight::from_parts(61_242_000, 3820) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -365,8 +363,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 27_582_000 picoseconds. - Weight::from_parts(31_256_000, 3556) + // Minimum execution time: 25_140_000 picoseconds. + Weight::from_parts(27_682_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -378,8 +376,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `150` // Estimated: `3556` - // Minimum execution time: 27_667_000 picoseconds. - Weight::from_parts(32_088_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -391,8 +389,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_065_000 picoseconds. 
- Weight::from_parts(20_550_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -404,8 +402,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3556` - // Minimum execution time: 13_638_000 picoseconds. - Weight::from_parts(16_979_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(15_439_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -417,8 +415,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 11_383_000 picoseconds. - Weight::from_parts(12_154_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -432,8 +430,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 22_832_000 picoseconds. - Weight::from_parts(30_716_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -445,8 +443,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(12_129_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -458,8 +456,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_394_000 picoseconds. - Weight::from_parts(10_951_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -478,10 +476,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 62_203_000 picoseconds. - Weight::from_parts(63_735_000, 6012) - // Standard Error: 59_589 - .saturating_add(Weight::from_parts(59_482_352, 0).saturating_mul(n.into())) + // Minimum execution time: 61_990_000 picoseconds. + Weight::from_parts(62_751_000, 6012) + // Standard Error: 44_079 + .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(n.into()))) From 447902eff4a574e66894ad60cb41999b05bf5e84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Fri, 29 Nov 2024 13:46:31 +0100 Subject: [PATCH 38/68] pallet_revive: Switch to 64bit RISC-V (#6565) This PR updates pallet_revive to the newest PolkaVM version and adapts the test fixtures and syscall interface to work under 64bit. Please note that after this PR no 32bit contracts can be deployed (they will be rejected at deploy time). 
Pre-deployed 32bit contracts are now considered defunct since we changed how parameters are passed for functions with more than 6 arguments. ## Fixtures The fixtures are now built for the 64bit target. I also removed the temporary directory mechanism that triggered a full rebuild every time. It also makes it easier to find the compiled fixtures since they are now always in `target/pallet-revive-fixtures`. ## Syscall interface ### Passing pointers Registers and pointers are now 64bit wide. This allows us to pass u64 arguments in a single register. Before we needed two registers to pass them. This means that just as before we need one register per pointer we pass. We keep pointers as a `u32` argument by truncating the register. This is done since the memory space of PolkaVM is 32bit. ### Functions with more than 6 arguments We only have 6 registers to pass arguments. This is why we pass a pointer to a struct when we need more than 6. Before this PR we expected a packed struct and interpreted it as a SCALE-encoded tuple. However, this was buggy because the `MaxEncodedLen` returned something that was larger than the packed size of the structure. This wasn't a problem before. But now the memory space changed in a way that things were placed at the edges of the memory space and those extra bytes led to an out-of-bounds access. This is why this PR drops SCALE and expects the arguments to be passed as a pointer to a `C` aligned struct. This avoids unaligned accesses. However, revive needs to adapt its codegen to properly align the structure fields. ## TODO - [ ] Add a multi-block migration that wipes all existing contracts as we made breaking changes to the syscall interface --------- Co-authored-by: GitHub Action --- .github/workflows/checks-quick.yml | 1 - Cargo.lock | 72 +++++++------- prdoc/pr_6565.prdoc | 35 +++++++ substrate/frame/revive/Cargo.toml | 2 +- substrate/frame/revive/fixtures/Cargo.toml | 4 +- substrate/frame/revive/fixtures/build.rs | 96 +++++++++++++------ .../build/{Cargo.toml => _Cargo.toml} | 5 +- .../fixtures/build/_rust-toolchain.toml | 4 + .../riscv32emac-unknown-none-polkavm.json | 26 ----- substrate/frame/revive/fixtures/src/lib.rs | 13 +-- substrate/frame/revive/proc-macro/src/lib.rs | 91 ++++++++++-------- substrate/frame/revive/rpc/src/tests.rs | 6 ++ substrate/frame/revive/src/chain_extension.rs | 12 +-- substrate/frame/revive/src/limits.rs | 21 +++- substrate/frame/revive/src/wasm/mod.rs | 20 +++- substrate/frame/revive/src/wasm/runtime.rs | 33 ++----- substrate/frame/revive/uapi/Cargo.toml | 6 +- substrate/frame/revive/uapi/src/host.rs | 4 +- .../uapi/src/host/{riscv32.rs => riscv64.rs} | 86 ++++++++--------- substrate/frame/revive/uapi/src/lib.rs | 6 ++ 20 files changed, 309 insertions(+), 234 deletions(-) create mode 100644 prdoc/pr_6565.prdoc rename substrate/frame/revive/fixtures/build/{Cargo.toml => _Cargo.toml} (80%) create mode 100644 substrate/frame/revive/fixtures/build/_rust-toolchain.toml delete mode 100644 substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json rename substrate/frame/revive/uapi/src/host/{riscv32.rs => riscv64.rs} (93%) diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index c733a2517cb8..4c26b85a6303 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -97,7 +97,6 @@ jobs: --exclude "substrate/frame/contracts/fixtures/build" "substrate/frame/contracts/fixtures/contracts/common" - "substrate/frame/revive/fixtures/build" 
"substrate/frame/revive/fixtures/contracts/common" - name: deny git deps run: python3 .github/scripts/deny-git-deps.py . diff --git a/Cargo.lock b/Cargo.lock index 84477cd05416..e1abeea49283 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5975,6 +5975,15 @@ dependencies = [ "dirs-sys-next", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs-sys" version = "0.4.1" @@ -14646,7 +14655,7 @@ dependencies = [ "pallet-utility 28.0.0", "parity-scale-codec", "paste", - "polkavm 0.13.0", + "polkavm 0.17.0", "pretty_assertions", "rlp 0.6.1", "scale-info", @@ -14742,12 +14751,10 @@ dependencies = [ "anyhow", "frame-system 28.0.0", "log", - "parity-wasm", - "polkavm-linker 0.14.0", + "polkavm-linker 0.17.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "tempfile", "toml 0.8.12", ] @@ -14864,7 +14871,7 @@ dependencies = [ "bitflags 1.3.2", "parity-scale-codec", "paste", - "polkavm-derive 0.14.0", + "polkavm-derive 0.17.0", "scale-info", ] @@ -19699,15 +19706,15 @@ dependencies = [ [[package]] name = "polkavm" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e79a14b15ed38cb5b9a1e38d02e933f19e3d180ae5b325fed606c5e5b9177e" +checksum = "84979be196ba2855f73616413e7b1d18258128aa396b3dc23f520a00a807720e" dependencies = [ "libc", "log", - "polkavm-assembler 0.13.0", - "polkavm-common 0.13.0", - "polkavm-linux-raw 0.13.0", + "polkavm-assembler 0.17.0", + "polkavm-common 0.17.0", + "polkavm-linux-raw 0.17.0", ] [[package]] @@ -19730,9 +19737,9 @@ dependencies = [ [[package]] name = "polkavm-assembler" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e8da55465000feb0a61bbf556ed03024db58f3420eca37721fc726b3b2136bf" +checksum = "0ba7b434ff630b0f73a1560e8baea807246ca22098abe49f97821e0e2d2accc4" dependencies = [ "log", ] @@ -19764,20 +19771,14 @@ dependencies = [ [[package]] name = "polkavm-common" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084b4339aae7dfdaaa5aa7d634110afd95970e0737b6fb2a0cb10db8b56b753c" +checksum = "8f0dbafef4ab6ceecb4982ac3b550df430ef4f9fdbf07c108b7d4f91a0682fce" dependencies = [ "log", - "polkavm-assembler 0.13.0", + "polkavm-assembler 0.17.0", ] -[[package]] -name = "polkavm-common" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711952a783e9c5ad407cdacb1ed147f36d37c5d43417c1091d86456d2999417b" - [[package]] name = "polkavm-derive" version = "0.8.0" @@ -19807,11 +19808,11 @@ dependencies = [ [[package]] name = "polkavm-derive" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4832a0aebf6cefc988bb7b2d74ea8c86c983164672e2fc96300f356a1babfc1" +checksum = "c0c3dbb6c8c7bd3e5f5b05aa7fc9355acf14df7ce5d392911e77d01090a38d0d" dependencies = [ - "polkavm-derive-impl-macro 0.14.0", + "polkavm-derive-impl-macro 0.17.0", ] [[package]] @@ -19852,11 +19853,11 @@ dependencies = [ [[package]] name = "polkavm-derive-impl" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e339fc7c11310fe5adf711d9342278ac44a75c9784947937cce12bd4f30842f2" +checksum = 
"42565aed4adbc4034612d0b17dea8db3681fb1bd1aed040d6edc5455a9f478a1" dependencies = [ - "polkavm-common 0.14.0", + "polkavm-common 0.17.0", "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.87", @@ -19894,11 +19895,11 @@ dependencies = [ [[package]] name = "polkavm-derive-impl-macro" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b569754b15060d03000c09e3bf11509d527f60b75d79b4c30c3625b5071d9702" +checksum = "86d9838e95241b0bce4fe269cdd4af96464160505840ed5a8ac8536119ba19e2" dependencies = [ - "polkavm-derive-impl 0.14.0", + "polkavm-derive-impl 0.17.0", "syn 2.0.87", ] @@ -19934,15 +19935,16 @@ dependencies = [ [[package]] name = "polkavm-linker" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0959ac3b0f4fd5caf5c245c637705f19493efe83dba31a83bbba928b93b0116a" +checksum = "d359dc721d2cc9b555ebb3558c305112ddc5bdac09d26f95f2f7b49c1f2db7e9" dependencies = [ + "dirs", "gimli 0.31.1", "hashbrown 0.14.5", "log", "object 0.36.1", - "polkavm-common 0.14.0", + "polkavm-common 0.17.0", "regalloc2 0.9.3", "rustc-demangle", ] @@ -19961,9 +19963,9 @@ checksum = "26e45fa59c7e1bb12ef5289080601e9ec9b31435f6e32800a5c90c132453d126" [[package]] name = "polkavm-linux-raw" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686c4dd9c9c16cc22565b51bdbb269792318d0fd2e6b966b5f6c788534cad0e9" +checksum = "e64c3d93a58ffbc3099d1227f0da9675a025a9ea6c917038f266920c1de1e568" [[package]] name = "polling" diff --git a/prdoc/pr_6565.prdoc b/prdoc/pr_6565.prdoc new file mode 100644 index 000000000000..f9a75a16a6a7 --- /dev/null +++ b/prdoc/pr_6565.prdoc @@ -0,0 +1,35 @@ +title: 'pallet_revive: Switch to 64bit RISC-V' +doc: +- audience: Runtime Dev + description: |- + This PR updates pallet_revive to the newest PolkaVM version and adapts the test fixtures and syscall interface to work under 64bit. + + Please note that after this PR no 32bit contracts can be deployed (they will be rejected at deploy time). Pre-deployed 32bit contracts are now considered defunct since we changes how parameters are passed for functions with more than 6 arguments. + + ## Fixtures + + The fixtures are now built for the 64bit target. I also removed the temporary directory mechanism that triggered a full rebuild every time. It also makes it easier to find the compiled fixtures since they are now always in `target/pallet-revive-fixtures`. + + ## Syscall interface + + ### Passing pointer + + Registers and pointers are now 64bit wide. This allows us to pass u64 arguments in a single register. Before we needed two registers to pass them. This means that just as before we need one register per pointer we pass. We keep pointers as `u32` argument by truncating the register. This is done since the memory space of PolkaVM is 32bit. + + ### Functions with more than 6 arguments + + We only have 6 registers to pass arguments. This is why we pass a pointer to a struct when we need more than 6. Before this PR we expected a packed struct and interpreted it as SCALE encoded tuple. However, this was buggy because the `MaxEncodedLen` returned something that was larger than the packed size of the structure. This wasn't a problem before. But now the memory space changed in a way that things were placed at the edges of the memory space and those extra bytes lead to an out of bound access. 
+ + This is why this PR drops SCALE and expects the arguments to be passed as a pointer to a `C` aligned struct. This avoids unaligned accesses. However, revive needs to adapt its codegen to properly align the structure fields. + + ## TODO + - [ ] Add multi block migration that wipes all existing contracts as we made breaking changes to the syscall interface +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive-proc-macro + bump: major +- name: pallet-revive-uapi + bump: major diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index 81fbbc8cf38e..677ef0e1367f 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] environmental = { workspace = true } paste = { workspace = true } -polkavm = { version = "0.13.0", default-features = false } +polkavm = { version = "0.17.0", default-features = false } bitflags = { workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } scale-info = { features = ["derive"], workspace = true } diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index 7a5452853d65..798ed8c75a5a 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -18,10 +18,8 @@ anyhow = { workspace = true, default-features = true, optional = true } log = { workspace = true } [build-dependencies] -parity-wasm = { workspace = true } -tempfile = { workspace = true } toml = { workspace = true } -polkavm-linker = { version = "0.14.0" } +polkavm-linker = { version = "0.17.0" } anyhow = { workspace = true, default-features = true } [features] diff --git a/substrate/frame/revive/fixtures/build.rs b/substrate/frame/revive/fixtures/build.rs index 3472e0846efd..46cd5760ca4e 100644 --- a/substrate/frame/revive/fixtures/build.rs +++ b/substrate/frame/revive/fixtures/build.rs @@ -20,7 +20,8 @@ use anyhow::Result; use anyhow::{bail, Context}; use std::{ - cfg, env, fs, + env, fs, + io::Write, path::{Path, PathBuf}, process::Command, }; @@ -82,7 +83,7 @@ fn create_cargo_toml<'a>( entries: impl Iterator, output_dir: &Path, ) -> Result<()> { - let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/Cargo.toml"))?; + let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/_Cargo.toml"))?; let mut set_dep = |name, path| -> Result<()> { cargo_toml["dependencies"][name]["path"] = toml::Value::String( fixtures_dir.join(path).canonicalize()?.to_str().unwrap().to_string(), @@ -108,21 +109,24 @@ fn create_cargo_toml<'a>( let cargo_toml = toml::to_string_pretty(&cargo_toml)?; fs::write(output_dir.join("Cargo.toml"), cargo_toml.clone()) .with_context(|| format!("Failed to write {cargo_toml:?}"))?; + fs::copy( + fixtures_dir.join("build/_rust-toolchain.toml"), + output_dir.join("rust-toolchain.toml"), + ) + .context("Failed to write toolchain file")?; Ok(()) } -fn invoke_build(target: &Path, current_dir: &Path) -> Result<()> { +fn invoke_build(current_dir: &Path) -> Result<()> { let encoded_rustflags = ["-Dwarnings"].join("\x1f"); - let mut build_command = Command::new(env::var("CARGO")?); + let mut build_command = Command::new("cargo"); build_command .current_dir(current_dir) .env_clear() .env("PATH", env::var("PATH").unwrap_or_default()) .env("CARGO_ENCODED_RUSTFLAGS", encoded_rustflags) - .env("RUSTC_BOOTSTRAP", "1") .env("RUSTUP_HOME", 
env::var("RUSTUP_HOME").unwrap_or_default()) - .env("RUSTUP_TOOLCHAIN", env::var("RUSTUP_TOOLCHAIN").unwrap_or_default()) .args([ "build", "--release", @@ -130,7 +134,7 @@ fn invoke_build(target: &Path, current_dir: &Path) -> Result<()> { "-Zbuild-std-features=panic_immediate_abort", ]) .arg("--target") - .arg(target); + .arg(polkavm_linker::target_json_64_path().unwrap()); if let Ok(toolchain) = env::var(OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR) { build_command.env("RUSTUP_TOOLCHAIN", &toolchain); @@ -168,7 +172,7 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result for entry in entries { post_process( &build_dir - .join("target/riscv32emac-unknown-none-polkavm/release") + .join("target/riscv64emac-unknown-none-polkavm/release") .join(entry.name()), &out_dir.join(entry.out_filename()), )?; @@ -177,11 +181,61 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result Ok(()) } +/// Create a directory in the `target` as output directory +fn create_out_dir() -> Result { + let temp_dir: PathBuf = env::var("OUT_DIR")?.into(); + + // this is set in case the user has overriden the target directory + let out_dir = if let Ok(path) = env::var("CARGO_TARGET_DIR") { + path.into() + } else { + // otherwise just traverse up from the out dir + let mut out_dir: PathBuf = temp_dir.clone(); + loop { + if !out_dir.pop() { + bail!("Cannot find project root.") + } + if out_dir.join("Cargo.lock").exists() { + break; + } + } + out_dir.join("target") + } + .join("pallet-revive-fixtures"); + + // clean up some leftover symlink from previous versions of this script + if out_dir.exists() && !out_dir.is_dir() { + fs::remove_file(&out_dir)?; + } + fs::create_dir_all(&out_dir).context("Failed to create output directory")?; + + // write the location of the out dir so it can be found later + let mut file = fs::File::create(temp_dir.join("fixture_location.rs")) + .context("Failed to create fixture_location.rs")?; + write!( + file, + r#" + #[allow(dead_code)] + const FIXTURE_DIR: &str = "{0}"; + macro_rules! 
fixture {{ + ($name: literal) => {{ + include_bytes!(concat!("{0}", "/", $name, ".polkavm")) + }}; + }} + "#, + out_dir.display() + ) + .context("Failed to write to fixture_location.rs")?; + + Ok(out_dir) +} + pub fn main() -> Result<()> { let fixtures_dir: PathBuf = env::var("CARGO_MANIFEST_DIR")?.into(); let contracts_dir = fixtures_dir.join("contracts"); - let out_dir: PathBuf = env::var("OUT_DIR")?.into(); - let target = fixtures_dir.join("riscv32emac-unknown-none-polkavm.json"); + let out_dir = create_out_dir().context("Cannot determine output directory")?; + let build_dir = out_dir.join("build"); + fs::create_dir_all(&build_dir).context("Failed to create build directory")?; println!("cargo::rerun-if-env-changed={OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR}"); println!("cargo::rerun-if-env-changed={OVERRIDE_STRIP_ENV_VAR}"); @@ -199,25 +253,9 @@ pub fn main() -> Result<()> { return Ok(()) } - let tmp_dir = tempfile::tempdir()?; - let tmp_dir_path = tmp_dir.path(); - - create_cargo_toml(&fixtures_dir, entries.iter(), tmp_dir.path())?; - invoke_build(&target, tmp_dir_path)?; - - write_output(tmp_dir_path, &out_dir, entries)?; - - #[cfg(unix)] - if let Ok(symlink_dir) = env::var("CARGO_WORKSPACE_ROOT_DIR") { - let symlink_dir: PathBuf = symlink_dir.into(); - let symlink_dir: PathBuf = symlink_dir.join("target").join("pallet-revive-fixtures"); - if symlink_dir.is_symlink() { - fs::remove_file(&symlink_dir) - .with_context(|| format!("Failed to remove_file {symlink_dir:?}"))?; - } - std::os::unix::fs::symlink(&out_dir, &symlink_dir) - .with_context(|| format!("Failed to symlink {out_dir:?} -> {symlink_dir:?}"))?; - } + create_cargo_toml(&fixtures_dir, entries.iter(), &build_dir)?; + invoke_build(&build_dir)?; + write_output(&build_dir, &out_dir, entries)?; Ok(()) } diff --git a/substrate/frame/revive/fixtures/build/Cargo.toml b/substrate/frame/revive/fixtures/build/_Cargo.toml similarity index 80% rename from substrate/frame/revive/fixtures/build/Cargo.toml rename to substrate/frame/revive/fixtures/build/_Cargo.toml index 5d0e256e2e73..beaabd83403e 100644 --- a/substrate/frame/revive/fixtures/build/Cargo.toml +++ b/substrate/frame/revive/fixtures/build/_Cargo.toml @@ -4,6 +4,9 @@ publish = false version = "1.0.0" edition = "2021" +# Make sure this is not included into the workspace +[workspace] + # Binary targets are injected dynamically by the build script. 
[[bin]] @@ -11,7 +14,7 @@ edition = "2021" [dependencies] uapi = { package = 'pallet-revive-uapi', path = "", default-features = false } common = { package = 'pallet-revive-fixtures-common', path = "" } -polkavm-derive = { version = "0.14.0" } +polkavm-derive = { version = "0.17.0" } [profile.release] opt-level = 3 diff --git a/substrate/frame/revive/fixtures/build/_rust-toolchain.toml b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml new file mode 100644 index 000000000000..4c757c708d58 --- /dev/null +++ b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "nightly-2024-11-19" +components = ["rust-src"] +profile = "minimal" diff --git a/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json b/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json deleted file mode 100644 index bbd54cdefbac..000000000000 --- a/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "arch": "riscv32", - "cpu": "generic-rv32", - "crt-objects-fallback": "false", - "data-layout": "e-m:e-p:32:32-i64:64-n32-S32", - "eh-frame-header": false, - "emit-debug-gdb-scripts": false, - "features": "+e,+m,+a,+c,+lui-addi-fusion,+fast-unaligned-access,+xtheadcondmov", - "linker": "rust-lld", - "linker-flavor": "ld.lld", - "llvm-abiname": "ilp32e", - "llvm-target": "riscv32", - "max-atomic-width": 32, - "panic-strategy": "abort", - "relocation-model": "pie", - "target-pointer-width": "32", - "singlethread": true, - "pre-link-args": { - "ld": [ - "--emit-relocs", - "--unique", - "--relocatable" - ] - }, - "env": "polkavm" -} diff --git a/substrate/frame/revive/fixtures/src/lib.rs b/substrate/frame/revive/fixtures/src/lib.rs index cc84daec9b59..24f6ee547dc7 100644 --- a/substrate/frame/revive/fixtures/src/lib.rs +++ b/substrate/frame/revive/fixtures/src/lib.rs @@ -19,10 +19,13 @@ extern crate alloc; +// generated file that tells us where to find the fixtures +include!(concat!(env!("OUT_DIR"), "/fixture_location.rs")); + /// Load a given wasm module and returns a wasm binary contents along with it's hash. #[cfg(feature = "std")] pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec, sp_core::H256)> { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: std::path::PathBuf = FIXTURE_DIR.into(); let fixture_path = out_dir.join(format!("{fixture_name}.polkavm")); log::debug!("Loading fixture from {fixture_path:?}"); let binary = std::fs::read(fixture_path)?; @@ -36,12 +39,6 @@ pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec, sp_core::H /// available in no-std environments (runtime benchmarks). pub mod bench { use alloc::vec::Vec; - - macro_rules! 
fixture { - ($name: literal) => { - include_bytes!(concat!(env!("OUT_DIR"), "/", $name, ".polkavm")) - }; - } pub const DUMMY: &[u8] = fixture!("dummy"); pub const NOOP: &[u8] = fixture!("noop"); pub const INSTR: &[u8] = fixture!("instr_benchmark"); @@ -61,7 +58,7 @@ pub mod bench { mod test { #[test] fn out_dir_should_have_compiled_mocks() { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: std::path::PathBuf = crate::FIXTURE_DIR.into(); assert!(out_dir.join("dummy.polkavm").exists()); } } diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs index 7232c6342824..6814add128d9 100644 --- a/substrate/frame/revive/proc-macro/src/lib.rs +++ b/substrate/frame/revive/proc-macro/src/lib.rs @@ -79,6 +79,7 @@ use syn::{parse_quote, punctuated::Punctuated, spanned::Spanned, token::Comma, F /// - `Result<(), TrapReason>`, /// - `Result`, /// - `Result`. +/// - `Result`. /// /// The macro expands to `pub struct Env` declaration, with the following traits implementations: /// - `pallet_revive::wasm::Environment> where E: Ext` @@ -127,6 +128,7 @@ struct HostFn { enum HostFnReturn { Unit, U32, + U64, ReturnCode, } @@ -134,8 +136,7 @@ impl HostFnReturn { fn map_output(&self) -> TokenStream2 { match self { Self::Unit => quote! { |_| None }, - Self::U32 => quote! { |ret_val| Some(ret_val) }, - Self::ReturnCode => quote! { |ret_code| Some(ret_code.into()) }, + _ => quote! { |ret_val| Some(ret_val.into()) }, } } @@ -143,6 +144,7 @@ impl HostFnReturn { match self { Self::Unit => syn::ReturnType::Default, Self::U32 => parse_quote! { -> u32 }, + Self::U64 => parse_quote! { -> u64 }, Self::ReturnCode => parse_quote! { -> ReturnErrorCode }, } } @@ -243,7 +245,8 @@ impl HostFn { let msg = r#"Should return one of the following: - Result<(), TrapReason>, - Result, - - Result"#; + - Result, + - Result"#; let ret_ty = match item.clone().sig.output { syn::ReturnType::Type(_, ty) => Ok(ty.clone()), _ => Err(err(span, &msg)), @@ -305,6 +308,7 @@ impl HostFn { let returns = match ok_ty_str.as_str() { "()" => Ok(HostFnReturn::Unit), "u32" => Ok(HostFnReturn::U32), + "u64" => Ok(HostFnReturn::U64), "ReturnErrorCode" => Ok(HostFnReturn::ReturnCode), _ => Err(err(arg1.span(), &msg)), }?; @@ -339,50 +343,61 @@ where P: Iterator> + Clone, I: Iterator> + Clone, { - const ALLOWED_REGISTERS: u32 = 6; - let mut registers_used = 0; - let mut bindings = vec![]; - let mut idx = 0; - for (name, ty) in param_names.clone().zip(param_types.clone()) { + const ALLOWED_REGISTERS: usize = 6; + + // all of them take one register but we truncate them before passing into the function + // it is important to not allow any type which has illegal bit patterns like 'bool' + if !param_types.clone().all(|ty| { let syn::Type::Path(path) = &**ty else { panic!("Type needs to be path"); }; let Some(ident) = path.path.get_ident() else { panic!("Type needs to be ident"); }; - let size = if ident == "i8" || - ident == "i16" || - ident == "i32" || - ident == "u8" || - ident == "u16" || - ident == "u32" - { - 1 - } else if ident == "i64" || ident == "u64" { - 2 - } else { - panic!("Pass by value only supports primitives"); - }; - registers_used += size; - if registers_used > ALLOWED_REGISTERS { - return quote! 
{ - let (#( #param_names, )*): (#( #param_types, )*) = memory.read_as(__a0__)?; - } - } - let this_reg = quote::format_ident!("__a{}__", idx); - let next_reg = quote::format_ident!("__a{}__", idx + 1); - let binding = if size == 1 { + matches!(ident.to_string().as_ref(), "u8" | "u16" | "u32" | "u64") + }) { + panic!("Only primitive unsigned integers are allowed as arguments to syscalls"); + } + + // too many arguments: pass as pointer to a struct in memory + if param_names.clone().count() > ALLOWED_REGISTERS { + let fields = param_names.clone().zip(param_types.clone()).map(|(name, ty)| { quote! { - let #name = #this_reg as #ty; + #name: #ty, } - } else { - quote! { - let #name = (#this_reg as #ty) | ((#next_reg as #ty) << 32); + }); + return quote! { + #[derive(Default)] + #[repr(C)] + struct Args { + #(#fields)* } - }; - bindings.push(binding); - idx += size; + let Args { #(#param_names,)* } = { + let len = ::core::mem::size_of::(); + let mut args = Args::default(); + let ptr = &mut args as *mut Args as *mut u8; + // Safety + // 1. The struct is initialized at all times. + // 2. We only allow primitive integers (no bools) as arguments so every bit pattern is safe. + // 3. The reference doesn't outlive the args field. + // 4. There is only the single reference to the args field. + // 5. The length of the generated slice is the same as the struct. + let reference = unsafe { + ::core::slice::from_raw_parts_mut(ptr, len) + }; + memory.read_into_buf(__a0__ as _, reference)?; + args + }; + } } + + // otherwise: one argument per register + let bindings = param_names.zip(param_types).enumerate().map(|(idx, (name, ty))| { + let reg = quote::format_ident!("__a{}__", idx); + quote! { + let #name = #reg as #ty; + } + }); quote! { #( #bindings )* } @@ -409,7 +424,7 @@ fn expand_env(def: &EnvDef) -> TokenStream2 { memory: &mut M, __syscall_symbol__: &[u8], __available_api_version__: ApiVersion, - ) -> Result, TrapReason> + ) -> Result, TrapReason> { #impls } diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs index 7734c8c57209..920318b26f71 100644 --- a/substrate/frame/revive/rpc/src/tests.rs +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -218,6 +218,8 @@ async fn deploy_and_call() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn revert_call() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); @@ -240,6 +242,8 @@ async fn revert_call() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn event_logs() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); @@ -279,6 +283,8 @@ async fn invalid_transaction() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn native_evm_ratio_works() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); diff --git a/substrate/frame/revive/src/chain_extension.rs b/substrate/frame/revive/src/chain_extension.rs index ccea12945054..5b3e886a5628 100644 --- a/substrate/frame/revive/src/chain_extension.rs +++ b/substrate/frame/revive/src/chain_extension.rs @@ -75,7 +75,7 @@ use crate::{ Error, }; use alloc::vec::Vec; -use codec::{Decode, MaxEncodedLen}; +use codec::Decode; use frame_support::weights::Weight; use sp_runtime::DispatchError; @@ -304,16 +304,6 @@ impl<'a, 'b, E: Ext, M: ?Sized + Memory> Environment<'a, 'b, E, M> { 
Ok(()) } - /// Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// This function is secure and recommended for all input types of fixed size - /// as long as the cost of reading the memory is included in the overall already charged - /// weight of the chain extension. This should usually be the case when fixed input types - /// are used. - pub fn read_as(&mut self) -> Result { - self.memory.read_as(self.input_ptr) - } - /// Reads and decodes a type with a dynamic size from contract memory. /// /// Make sure to include `len` in your weight calculations. diff --git a/substrate/frame/revive/src/limits.rs b/substrate/frame/revive/src/limits.rs index 64e66382b9ab..5ce96f59c14d 100644 --- a/substrate/frame/revive/src/limits.rs +++ b/substrate/frame/revive/src/limits.rs @@ -129,23 +129,36 @@ pub mod code { Error::::CodeRejected })?; + if !program.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + // This scans the whole program but we only do it once on code deployment. // It is safe to do unchecked math in u32 because the size of the program // was already checked above. - use polkavm::program::ISA32_V1_NoSbrk as ISA; + use polkavm::program::ISA64_V1 as ISA; let mut num_instructions: u32 = 0; let mut max_basic_block_size: u32 = 0; let mut basic_block_size: u32 = 0; for inst in program.instructions(ISA) { + use polkavm::program::Instruction; num_instructions += 1; basic_block_size += 1; if inst.kind.opcode().starts_new_basic_block() { max_basic_block_size = max_basic_block_size.max(basic_block_size); basic_block_size = 0; } - if matches!(inst.kind, polkavm::program::Instruction::invalid) { - log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); - return Err(>::InvalidInstruction.into()) + match inst.kind { + Instruction::invalid => { + log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + Instruction::sbrk(_, _) => { + log::debug!(target: LOG_TARGET, "sbrk instruction is not allowed. offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + _ => (), } } diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index f10c4f5fddf8..d87ec7112286 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -293,8 +293,15 @@ impl WasmBlob { ) -> Result, ExecError> { let mut config = polkavm::Config::default(); config.set_backend(Some(polkavm::BackendKind::Interpreter)); - let engine = - polkavm::Engine::new(&config).expect("interpreter is available on all plattforms; qed"); + config.set_cache_enabled(false); + #[cfg(feature = "std")] + if std::env::var_os("REVIVE_USE_COMPILER").is_some() { + config.set_backend(Some(polkavm::BackendKind::Compiler)); + } + let engine = polkavm::Engine::new(&config).expect( + "on-chain (no_std) use of interpreter is hard coded. + interpreter is available on all plattforms; qed", + ); let mut module_config = polkavm::ModuleConfig::new(); module_config.set_page_size(limits::PAGE_SIZE); @@ -306,6 +313,15 @@ impl WasmBlob { Error::::CodeRejected })?; + // This is checked at deploy time but we also want to reject pre-existing + // 32bit programs. + // TODO: Remove when we reset the test net. 
+ // https://github.com/paritytech/contract-issues/issues/11 + if !module.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + let entry_program_counter = module .exports() .find(|export| export.symbol().as_bytes() == entry_point.identifier().as_bytes()) diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index 3e2c83db1ebd..7ea518081e23 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -27,7 +27,7 @@ use crate::{ Config, Error, LOG_TARGET, SENTINEL, }; use alloc::{boxed::Box, vec, vec::Vec}; -use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; +use codec::{Decode, DecodeLimit, Encode}; use core::{fmt, marker::PhantomData, mem}; use frame_support::{ dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, @@ -126,34 +126,13 @@ pub trait Memory { /// /// # Note /// - /// There must be an extra benchmark for determining the influence of `len` with - /// regard to the overall weight. + /// Make sure to charge a proportional amount of weight if `len` is not fixed. fn read_as_unbounded(&self, ptr: u32, len: u32) -> Result { let buf = self.read(ptr, len)?; let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } - - /// Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// # Only use on fixed size types - /// - /// Don't use this for types where the encoded size is not fixed but merely bounded. Otherwise - /// this implementation will out of bound access the buffer declared by the guest. Some examples - /// of those bounded but not fixed types: Enums with data, `BoundedVec` or any compact encoded - /// integer. - /// - /// # Note - /// - /// The weight of reading a fixed value is included in the overall weight of any - /// contract callable function. - fn read_as(&self, ptr: u32) -> Result { - let buf = self.read(ptr, D::max_encoded_len() as u32)?; - let decoded = D::decode_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) - .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; - Ok(decoded) - } } /// Allows syscalls access to the PolkaVM instance they are executing in. @@ -164,8 +143,8 @@ pub trait Memory { pub trait PolkaVmInstance: Memory { fn gas(&self) -> polkavm::Gas; fn set_gas(&mut self, gas: polkavm::Gas); - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32); - fn write_output(&mut self, output: u32); + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64); + fn write_output(&mut self, output: u64); } // Memory implementation used in benchmarking where guest memory is mapped into the host. 
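As a side note, here is a minimal, hypothetical illustration (not the generated pallet-revive code) of the calling convention implied by the widened registers above: `read_input_regs` now yields six `u64` values, each syscall parameter occupies exactly one register and is truncated to its declared unsigned integer type, and the result travels back through `write_output` as a single `u64`.

```rust
// Hypothetical host-side handler, assuming a syscall declared as
// `fn example(flags: u32, len: u64) -> u64`. Not the actual generated code.
fn handle_example(regs: (u64, u64, u64, u64, u64, u64)) -> u64 {
    let (a0, a1, _a2, _a3, _a4, _a5) = regs;

    // Each argument takes exactly one register and is truncated to its
    // declared type; only unsigned integers are permitted, so every bit
    // pattern is a valid value.
    let flags = a0 as u32;
    let len = a1;

    // The (up to 64-bit) result is widened back into A0 via `write_output`.
    u64::from(flags).wrapping_add(len)
}
```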
@@ -214,7 +193,7 @@ impl PolkaVmInstance for polkavm::RawInstance { self.set_gas(gas) } - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32) { + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64) { ( self.reg(polkavm::Reg::A0), self.reg(polkavm::Reg::A1), @@ -225,7 +204,7 @@ impl PolkaVmInstance for polkavm::RawInstance { ) } - fn write_output(&mut self, output: u32) { + fn write_output(&mut self, output: u64) { self.set_reg(polkavm::Reg::A0, output); } } diff --git a/substrate/frame/revive/uapi/Cargo.toml b/substrate/frame/revive/uapi/Cargo.toml index 0c7461a35d69..b55391dd5d6c 100644 --- a/substrate/frame/revive/uapi/Cargo.toml +++ b/substrate/frame/revive/uapi/Cargo.toml @@ -20,11 +20,11 @@ codec = { features = [ "max-encoded-len", ], optional = true, workspace = true } -[target.'cfg(target_arch = "riscv32")'.dependencies] -polkavm-derive = { version = "0.14.0" } +[target.'cfg(target_arch = "riscv64")'.dependencies] +polkavm-derive = { version = "0.17.0" } [package.metadata.docs.rs] -default-target = ["wasm32-unknown-unknown"] +default-target = ["riscv64imac-unknown-none-elf"] [features] default = ["scale"] diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs index 6b3a8b07f040..d3fd4ac8d03e 100644 --- a/substrate/frame/revive/uapi/src/host.rs +++ b/substrate/frame/revive/uapi/src/host.rs @@ -14,8 +14,8 @@ use crate::{CallFlags, Result, ReturnFlags, StorageFlags}; use paste::paste; -#[cfg(target_arch = "riscv32")] -mod riscv32; +#[cfg(target_arch = "riscv64")] +mod riscv64; macro_rules! hash_fn { ( $name:ident, $bytes:literal ) => { diff --git a/substrate/frame/revive/uapi/src/host/riscv32.rs b/substrate/frame/revive/uapi/src/host/riscv64.rs similarity index 93% rename from substrate/frame/revive/uapi/src/host/riscv32.rs rename to substrate/frame/revive/uapi/src/host/riscv64.rs index e8b27057ed18..3cba14db6a04 100644 --- a/substrate/frame/revive/uapi/src/host/riscv32.rs +++ b/substrate/frame/revive/uapi/src/host/riscv64.rs @@ -26,10 +26,10 @@ mod sys { mod abi {} impl abi::FromHost for ReturnCode { - type Regs = (u32,); + type Regs = (u64,); fn from_host((a0,): Self::Regs) -> Self { - ReturnCode(a0) + ReturnCode(a0 as _) } } @@ -207,33 +207,33 @@ impl HostFn for HostFnImpl { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); let salt_ptr = ptr_or_sentinel(&salt); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { - code_hash: *const u8, + code_hash: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - address: *const u8, - output: *mut u8, - output_len: *mut u32, - salt: *const u8, + address: u32, + output: u32, + output_len: u32, + salt: u32, } let args = Args { - code_hash: code_hash.as_ptr(), + code_hash: code_hash.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - address, - output: output_ptr, - output_len: &mut output_len as *mut _, - salt: salt_ptr, + address: address as _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, + salt: salt_ptr as _, }; let ret_code = { unsafe { sys::instantiate(&args as *const Args as *const _) } }; @@ -257,31 +257,31 @@ impl HostFn for 
HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { flags: u32, - callee: *const u8, + callee: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - output: *mut u8, - output_len: *mut u32, + output: u32, + output_len: u32, } let args = Args { flags: flags.bits(), - callee: callee.as_ptr(), + callee: callee.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - output: output_ptr, - output_len: &mut output_len as *mut _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; let ret_code = { unsafe { sys::call(&args as *const Args as *const _) } }; @@ -308,29 +308,29 @@ impl HostFn for HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { flags: u32, - address: *const u8, + address: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - input: *const u8, + deposit_limit: u32, + input: u32, input_len: u32, - output: *mut u8, - output_len: *mut u32, + output: u32, + output_len: u32, } let args = Args { flags: flags.bits(), - address: address.as_ptr(), + address: address.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + input: input.as_ptr() as _, input_len: input.len() as _, - output: output_ptr, - output_len: &mut output_len as *mut _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; let ret_code = { unsafe { sys::delegate_call(&args as *const Args as *const _) } }; diff --git a/substrate/frame/revive/uapi/src/lib.rs b/substrate/frame/revive/uapi/src/lib.rs index e660ce36ef75..91c2543bb719 100644 --- a/substrate/frame/revive/uapi/src/lib.rs +++ b/substrate/frame/revive/uapi/src/lib.rs @@ -65,6 +65,12 @@ impl From for u32 { } } +impl From for u64 { + fn from(error: ReturnErrorCode) -> Self { + u32::from(error).into() + } +} + define_error_codes! { /// The called function trapped and has its state changes reverted. /// In this case no output buffer is returned. From 1e89a311471eba937a9552d7d1f55af1661feb08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 29 Nov 2024 14:09:49 +0100 Subject: [PATCH 39/68] Fix runtime api impl detection by construct runtime (#6665) Construct runtime uses autoref-based specialization to fetch the metadata about the implemented runtime apis. This is done to not fail to compile when there are no runtime apis implemented. However, there was an issue with detecting runtime apis when they were implemented in a different file. The problem is solved by moving the trait implemented by `impl_runtime_apis!` to the metadata ir crate. 
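For illustration only, a minimal, self-contained sketch of the autoref-based specialization pattern (not the actual `frame-support` macro output): a fallback trait is implemented on `&Runtime`, and a more specific impl on `Runtime` wins during method resolution whenever `impl_runtime_apis!` generated it.

```rust
// Hypothetical stand-ins for the macro-generated traits.
trait FallbackRuntimeApis {
    fn runtime_metadata(&self) -> Vec<&'static str> {
        Vec::new() // used when `impl_runtime_apis!` was never invoked
    }
}

trait ImplementedRuntimeApis {
    fn runtime_metadata(&self) -> Vec<&'static str>;
}

struct Runtime;

// The fallback lives on the *reference* type...
impl FallbackRuntimeApis for &Runtime {}

// ...while `impl_runtime_apis!` would generate an impl on the type itself.
impl ImplementedRuntimeApis for Runtime {
    fn runtime_metadata(&self) -> Vec<&'static str> {
        vec!["Core", "Metadata"]
    }
}

fn main() {
    let runtime = Runtime;
    // Method resolution on `&runtime` prefers the impl on `Runtime` (one
    // autoref) over the one on `&Runtime` (two autorefs), so the real
    // metadata is returned whenever it exists.
    assert_eq!((&runtime).runtime_metadata(), vec!["Core", "Metadata"]);
}
```

When the specific impl is missing, the call silently resolves to the empty default, which is why compilation does not fail for runtimes that implement no runtime APIs.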
Closes: https://github.com/paritytech/polkadot-sdk/issues/6659 --------- Co-authored-by: GitHub Action --- Cargo.lock | 1 + prdoc/pr_6665.prdoc | 15 ++++++ .../src/construct_runtime/expand/metadata.rs | 2 + .../procedural/src/construct_runtime/mod.rs | 3 +- .../support/test/tests/runtime_metadata.rs | 49 ++++++++++--------- .../api/proc-macro/src/runtime_metadata.rs | 6 +-- substrate/primitives/api/test/Cargo.toml | 3 +- .../api/test/tests/decl_and_impl.rs | 2 + substrate/primitives/metadata-ir/src/lib.rs | 10 ++++ 9 files changed, 62 insertions(+), 29 deletions(-) create mode 100644 prdoc/pr_6665.prdoc diff --git a/Cargo.lock b/Cargo.lock index e1abeea49283..5e4e9c267b08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25548,6 +25548,7 @@ dependencies = [ "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", + "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", diff --git a/prdoc/pr_6665.prdoc b/prdoc/pr_6665.prdoc new file mode 100644 index 000000000000..b5aaf8a3b184 --- /dev/null +++ b/prdoc/pr_6665.prdoc @@ -0,0 +1,15 @@ +title: Fix runtime api impl detection by construct runtime +doc: +- audience: Runtime Dev + description: |- + Construct runtime uses autoref-based specialization to fetch the metadata about the implemented runtime apis. This is done to not fail to compile when there are no runtime apis implemented. However, there was an issue with detecting runtime apis when they were implemented in a different file. The problem is solved by moving the trait implemented by `impl_runtime_apis!` to the metadata ir crate. + + + Closes: https://github.com/paritytech/polkadot-sdk/issues/6659 +crates: +- name: frame-support-procedural + bump: patch +- name: sp-api-proc-macro + bump: patch +- name: sp-metadata-ir + bump: patch diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 4590a3a7f490..0b3bd5168865 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -113,6 +113,8 @@ pub fn expand_runtime_metadata( <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Extension >(); + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #scrate::__private::metadata_ir::MetadataIR { pallets: #scrate::__private::vec![ #(#pallets),* ], extrinsic: #scrate::__private::metadata_ir::ExtrinsicMetadataIR { diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 17042c248780..087faf37252d 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -466,7 +466,6 @@ fn construct_runtime_final_expansion( // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` // when both macros are called; and will resolve an empty `runtime_metadata` when only the `construct_runtime!` // is called. 
- #[doc(hidden)] trait InternalConstructRuntime { #[inline(always)] @@ -477,6 +476,8 @@ fn construct_runtime_final_expansion( #[doc(hidden)] impl InternalConstructRuntime for &#name {} + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #outer_event #outer_error diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index 7523a415d458..a098643abb91 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -80,34 +80,39 @@ sp_api::decl_runtime_apis! { } } -sp_api::impl_runtime_apis! { - impl self::Api for Runtime { - fn test(_data: u64) { - unimplemented!() - } +// Module to emulate having the implementation in a different file. +mod apis { + use super::{Block, BlockT, Runtime}; - fn something_with_block(_: Block) -> Block { - unimplemented!() - } + sp_api::impl_runtime_apis! { + impl crate::Api for Runtime { + fn test(_data: u64) { + unimplemented!() + } - fn function_with_two_args(_: u64, _: Block) { - unimplemented!() - } + fn something_with_block(_: Block) -> Block { + unimplemented!() + } - fn same_name() {} + fn function_with_two_args(_: u64, _: Block) { + unimplemented!() + } - fn wild_card(_: u32) {} - } + fn same_name() {} - impl sp_api::Core for Runtime { - fn version() -> sp_version::RuntimeVersion { - unimplemented!() - } - fn execute_block(_: Block) { - unimplemented!() + fn wild_card(_: u32) {} } - fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { - unimplemented!() + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { + unimplemented!() + } } } } diff --git a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs index 6be396339259..1706f8ca6fbb 100644 --- a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs +++ b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs @@ -298,18 +298,14 @@ pub fn generate_impl_runtime_metadata(impls: &[ItemImpl]) -> Result #crate_::vec::Vec<#crate_::metadata_ir::RuntimeApiMetadataIR> { #crate_::vec![ #( #metadata, )* ] } } - #[doc(hidden)] - impl InternalImplRuntimeApis for #runtime_name {} } )) } diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 1d21f23eb804..27f6dafa24bf 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -21,6 +21,7 @@ sp-version = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-metadata-ir = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } codec = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } @@ -40,5 +41,5 @@ name = "bench" harness = false [features] -"enable-staging-api" = [] +enable-staging-api = [] disable-ui-tests = [] diff --git a/substrate/primitives/api/test/tests/decl_and_impl.rs b/substrate/primitives/api/test/tests/decl_and_impl.rs index 890cf6eccdbc..2e5a078cb382 100644 --- a/substrate/primitives/api/test/tests/decl_and_impl.rs +++ 
b/substrate/primitives/api/test/tests/decl_and_impl.rs @@ -309,6 +309,8 @@ fn mock_runtime_api_works_with_advanced() { #[test] fn runtime_api_metadata_matches_version_implemented() { + use sp_metadata_ir::InternalImplRuntimeApis; + let rt = Runtime {}; let runtime_metadata = rt.runtime_metadata(); diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs index bf234432a1a6..dc01f7eaadb3 100644 --- a/substrate/primitives/metadata-ir/src/lib.rs +++ b/substrate/primitives/metadata-ir/src/lib.rs @@ -87,6 +87,16 @@ pub fn into_unstable(metadata: MetadataIR) -> RuntimeMetadataPrefixed { latest.into() } +/// INTERNAL USE ONLY +/// +/// Special trait that is used together with `InternalConstructRuntime` by `construct_runtime!` to +/// fetch the runtime api metadata without exploding when there is no runtime api implementation +/// available. +#[doc(hidden)] +pub trait InternalImplRuntimeApis { + fn runtime_metadata(&self) -> alloc::vec::Vec; +} + #[cfg(test)] mod test { use super::*; From 4e7c968ae97c66812df989117ad251cba3864632 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Fri, 29 Nov 2024 16:49:45 +0200 Subject: [PATCH 40/68] archive: Refactor `archive_storage` method into subscription (#6483) This PR adapts the `archive_storage` implementation from a method to a subscription. This keeps the archive APIs uniform and consistent. Builds on: https://github.com/paritytech/polkadot-sdk/pull/5997 cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile Co-authored-by: James Wilson --- .../client/rpc-spec-v2/src/archive/api.rs | 13 +- .../client/rpc-spec-v2/src/archive/archive.rs | 202 +++---- .../src/archive/archive_storage.rs | 105 +--- .../client/rpc-spec-v2/src/archive/mod.rs | 2 +- .../client/rpc-spec-v2/src/archive/tests.rs | 500 +++++++----------- .../rpc-spec-v2/src/chain_head/event.rs | 3 +- .../client/rpc-spec-v2/src/common/events.rs | 59 ++- .../client/rpc-spec-v2/src/common/storage.rs | 151 ++++-- substrate/client/service/src/builder.rs | 2 - 9 files changed, 458 insertions(+), 579 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs index dcfeaecb147b..a205d0502c93 100644 --- a/substrate/client/rpc-spec-v2/src/archive/api.rs +++ b/substrate/client/rpc-spec-v2/src/archive/api.rs @@ -20,8 +20,7 @@ use crate::{ common::events::{ - ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageResult, - PaginatedStorageQuery, + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, }, MethodResult, }; @@ -100,13 +99,17 @@ pub trait ArchiveApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "archive_unstable_storage", blocking)] + #[subscription( + name = "archive_unstable_storage" => "archive_unstable_storageEvent", + unsubscribe = "archive_unstable_stopStorage", + item = ArchiveStorageEvent, + )] fn archive_unstable_storage( &self, hash: Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult; + ); /// Returns the storage difference between two blocks. 
/// diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs index 55054d91d85d..62e44a016241 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -20,13 +20,13 @@ use crate::{ archive::{ - archive_storage::{ArchiveStorage, ArchiveStorageDiff}, - error::Error as ArchiveError, - ArchiveApiServer, + archive_storage::ArchiveStorageDiff, error::Error as ArchiveError, ArchiveApiServer, }, - common::events::{ - ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageResult, - PaginatedStorageQuery, + common::{ + events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, + }, + storage::{QueryResult, StorageSubscriptionClient}, }, hex_string, MethodResult, SubscriptionTaskExecutor, }; @@ -57,42 +57,12 @@ use tokio::sync::mpsc; pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::archive"; -/// The configuration of [`Archive`]. -pub struct ArchiveConfig { - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. - pub max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - pub max_queried_items: usize, -} - -/// The maximum number of items the `archive_storage` can return for a descendant query before -/// pagination is required. -/// -/// Note: this is identical to the `chainHead` value. -const MAX_DESCENDANT_RESPONSES: usize = 5; - -/// The maximum number of queried items allowed for the `archive_storage` at a time. -/// -/// Note: A queried item can also be a descendant query which can return up to -/// `MAX_DESCENDANT_RESPONSES`. -const MAX_QUERIED_ITEMS: usize = 8; - /// The buffer capacity for each storage query. /// /// This is small because the underlying JSON-RPC server has /// its down buffer capacity per connection as well. const STORAGE_QUERY_BUF: usize = 16; -impl Default for ArchiveConfig { - fn default() -> Self { - Self { - max_descendant_responses: MAX_DESCENDANT_RESPONSES, - max_queried_items: MAX_QUERIED_ITEMS, - } - } -} - /// An API for archive RPC calls. pub struct Archive, Block: BlockT, Client> { /// Substrate client. @@ -103,11 +73,6 @@ pub struct Archive, Block: BlockT, Client> { executor: SubscriptionTaskExecutor, /// The hexadecimal encoded hash of the genesis block. genesis_hash: String, - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, /// Phantom member to pin the block type. 
_phantom: PhantomData, } @@ -119,18 +84,9 @@ impl, Block: BlockT, Client> Archive { backend: Arc, genesis_hash: GenesisHash, executor: SubscriptionTaskExecutor, - config: ArchiveConfig, ) -> Self { let genesis_hash = hex_string(&genesis_hash.as_ref()); - Self { - client, - backend, - executor, - genesis_hash, - storage_max_descendant_responses: config.max_descendant_responses, - storage_max_queried_items: config.max_queried_items, - _phantom: PhantomData, - } + Self { client, backend, executor, genesis_hash, _phantom: PhantomData } } } @@ -260,47 +216,53 @@ where fn archive_unstable_storage( &self, + pending: PendingSubscriptionSink, hash: Block::Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult { - let items = items - .into_iter() - .map(|query| { - let key = StorageKey(parse_hex_param(query.key)?); - let pagination_start_key = query - .pagination_start_key - .map(|key| parse_hex_param(key).map(|key| StorageKey(key))) - .transpose()?; - - // Paginated start key is only supported - if pagination_start_key.is_some() && !query.query_type.is_descendant_query() { - return Err(ArchiveError::InvalidParam( - "Pagination start key is only supported for descendants queries" - .to_string(), - )) - } + ) { + let mut storage_client = + StorageSubscriptionClient::::new(self.client.clone()); + + let fut = async move { + let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; - Ok(PaginatedStorageQuery { - key, - query_type: query.query_type, - pagination_start_key, + let items = match items + .into_iter() + .map(|query| { + let key = StorageKey(parse_hex_param(query.key)?); + Ok(StorageQuery { key, query_type: query.query_type }) }) - }) - .collect::, ArchiveError>>()?; + .collect::, ArchiveError>>() + { + Ok(items) => items, + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; - let child_trie = child_trie - .map(|child_trie| parse_hex_param(child_trie)) - .transpose()? - .map(ChildInfo::new_default_from_vec); + let child_trie = child_trie.map(|child_trie| parse_hex_param(child_trie)).transpose(); + let child_trie = match child_trie { + Ok(child_trie) => child_trie.map(ChildInfo::new_default_from_vec), + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; - let storage_client = ArchiveStorage::new( - self.client.clone(), - self.storage_max_descendant_responses, - self.storage_max_queried_items, - ); + let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let storage_fut = storage_client.generate_events(hash, items, child_trie, tx); - Ok(storage_client.handle_query(hash, items, child_trie)) + // We don't care about the return value of this join: + // - process_events might encounter an error (if the client disconnected) + // - storage_fut might encounter an error while processing a trie queries and + // the error is propagated via the sink. + let _ = futures::future::join(storage_fut, process_storage_events(&mut rx, &mut sink)) + .await; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } fn archive_unstable_storage_diff( @@ -337,24 +299,74 @@ where // - process_events might encounter an error (if the client disconnected) // - storage_fut might encounter an error while processing a trie queries and // the error is propagated via the sink. 
- let _ = futures::future::join(storage_fut, process_events(&mut rx, &mut sink)).await; + let _ = + futures::future::join(storage_fut, process_storage_diff_events(&mut rx, &mut sink)) + .await; }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } } -/// Sends all the events to the sink. -async fn process_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { - while let Some(event) = rx.recv().await { - if event.is_done() { - log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); - } else if event.is_err() { - log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); +/// Sends all the events of the storage_diff method to the sink. +async fn process_storage_diff_events( + rx: &mut mpsc::Receiver, + sink: &mut Subscription, +) { + loop { + tokio::select! { + _ = sink.closed() => { + return + }, + + maybe_event = rx.recv() => { + let Some(event) = maybe_event else { + break; + }; + + if event.is_done() { + log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); + } else if event.is_err() { + log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); + } + + if sink.send(&event).await.is_err() { + return + } + } } + } +} + +/// Sends all the events of the storage method to the sink. +async fn process_storage_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { + loop { + tokio::select! { + _ = sink.closed() => { + break + } + + maybe_storage = rx.recv() => { + let Some(event) = maybe_storage else { + break; + }; + + match event { + Ok(None) => continue, + + Ok(Some(event)) => + if sink.send(&ArchiveStorageEvent::result(event)).await.is_err() { + return + }, - if sink.send(&event).await.is_err() { - return + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error)).await; + return + } + } + } } } + + let _ = sink.send(&ArchiveStorageEvent::StorageDone).await; } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs index 5a3920882f00..390db765a48f 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs @@ -33,114 +33,13 @@ use crate::{ common::{ events::{ ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, - ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageResult, - PaginatedStorageQuery, StorageQueryType, StorageResult, + ArchiveStorageDiffResult, ArchiveStorageDiffType, StorageResult, }, - storage::{IterQueryType, QueryIter, Storage}, + storage::Storage, }, }; use tokio::sync::mpsc; -/// Generates the events of the `archive_storage` method. -pub struct ArchiveStorage { - /// Storage client. - client: Storage, - /// The maximum number of responses the API can return for a descendant query at a time. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, -} - -impl ArchiveStorage { - /// Constructs a new [`ArchiveStorage`]. 
- pub fn new( - client: Arc, - storage_max_descendant_responses: usize, - storage_max_queried_items: usize, - ) -> Self { - Self { - client: Storage::new(client), - storage_max_descendant_responses, - storage_max_queried_items, - } - } -} - -impl ArchiveStorage -where - Block: BlockT + 'static, - BE: Backend + 'static, - Client: StorageProvider + 'static, -{ - /// Generate the response of the `archive_storage` method. - pub fn handle_query( - &self, - hash: Block::Hash, - mut items: Vec>, - child_key: Option, - ) -> ArchiveStorageResult { - let discarded_items = items.len().saturating_sub(self.storage_max_queried_items); - items.truncate(self.storage_max_queried_items); - - let mut storage_results = Vec::with_capacity(items.len()); - for item in items { - match item.query_type { - StorageQueryType::Value => { - match self.client.query_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - } - }, - StorageQueryType::Hash => - match self.client.query_hash(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::ClosestDescendantMerkleValue => - match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::DescendantsValues => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Value, - pagination_start_key: item.pagination_start_key, - }, - hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), - } - }, - StorageQueryType::DescendantsHashes => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Hash, - pagination_start_key: item.pagination_start_key, - }, - hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), - } - }, - }; - } - - ArchiveStorageResult::ok(storage_results, discarded_items) - } -} - /// Parse hex-encoded string parameter as raw bytes. /// /// If the parsing fails, returns an error propagated to the RPC method. 
diff --git a/substrate/client/rpc-spec-v2/src/archive/mod.rs b/substrate/client/rpc-spec-v2/src/archive/mod.rs index 5f020c203eab..14fa104c113a 100644 --- a/substrate/client/rpc-spec-v2/src/archive/mod.rs +++ b/substrate/client/rpc-spec-v2/src/archive/mod.rs @@ -32,4 +32,4 @@ pub mod archive; pub mod error; pub use api::ArchiveApiServer; -pub use archive::{Archive, ArchiveConfig}; +pub use archive::Archive; diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 994c5d28bd61..cddaafde6659 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -19,16 +19,13 @@ use crate::{ common::events::{ ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, - ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageMethodOk, - ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType, StorageResultType, + ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageEvent, StorageQuery, + StorageQueryType, StorageResult, StorageResultType, }, hex_string, MethodResult, }; -use super::{ - archive::{Archive, ArchiveConfig}, - *, -}; +use super::{archive::Archive, *}; use assert_matches::assert_matches; use codec::{Decode, Encode}; @@ -55,8 +52,6 @@ use substrate_test_runtime_client::{ const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; -const MAX_PAGINATION_LIMIT: usize = 5; -const MAX_QUERIED_LIMIT: usize = 5; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; const CHILD_STORAGE_KEY: &[u8] = b"child"; @@ -65,10 +60,7 @@ const CHILD_VALUE: &[u8] = b"child value"; type Header = substrate_test_runtime_client::runtime::Header; type Block = substrate_test_runtime_client::runtime::Block; -fn setup_api( - max_descendant_responses: usize, - max_queried_items: usize, -) -> (Arc>, RpcModule>>) { +fn setup_api() -> (Arc>, RpcModule>>) { let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY); let builder = TestClientBuilder::new().add_extra_child_storage( &child_info, @@ -83,7 +75,6 @@ fn setup_api( backend, CHAIN_GENESIS, Arc::new(TokioTestExecutor::default()), - ArchiveConfig { max_descendant_responses, max_queried_items }, ) .into_rpc(); @@ -101,7 +92,7 @@ async fn get_next_event(sub: &mut RpcSubscriptio #[tokio::test] async fn archive_genesis() { - let (_client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (_client, api) = setup_api(); let genesis: String = api.call("archive_unstable_genesisHash", EmptyParams::new()).await.unwrap(); @@ -110,7 +101,7 @@ async fn archive_genesis() { #[tokio::test] async fn archive_body() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); @@ -144,7 +135,7 @@ async fn archive_body() { #[tokio::test] async fn archive_header() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. 
let invalid_hash = hex_string(&INVALID_HASH); @@ -178,7 +169,7 @@ async fn archive_header() { #[tokio::test] async fn archive_finalized_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let client_height: u32 = client.info().finalized_number.saturated_into(); @@ -190,7 +181,7 @@ async fn archive_finalized_height() { #[tokio::test] async fn archive_hash_by_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Genesis height. let hashes: Vec = api.call("archive_unstable_hashByHeight", [0]).await.unwrap(); @@ -296,7 +287,7 @@ async fn archive_hash_by_height() { #[tokio::test] async fn archive_call() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let invalid_hash = hex_string(&INVALID_HASH); // Invalid parameter (non-hex). @@ -355,7 +346,7 @@ async fn archive_call() { #[tokio::test] async fn archive_storage_hashes_values() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -369,42 +360,23 @@ async fn archive_storage_hashes_values() { let block_hash = format!("{:?}", block.header.hash()); let key = hex_string(&KEY); - let items: Vec> = vec![ - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsHashes, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }, ]; - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - // Key has not been imported yet. - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; + // Key has not been imported yet. + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); // Import a block with the given key value pair. 
let mut builder = BlockBuilderBuilder::new(&*client) @@ -420,32 +392,103 @@ async fn archive_storage_hashes_values() { let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE)); let expected_value = hex_string(&VALUE); - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 4); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, key); - assert_eq!(result[0].result, StorageResultType::Hash(expected_hash.clone())); - assert_eq!(result[1].key, key); - assert_eq!(result[1].result, StorageResultType::Value(expected_value.clone())); - assert_eq!(result[2].key, key); - assert_eq!(result[2].result, StorageResultType::Hash(expected_hash)); - assert_eq!(result[3].key, key); - assert_eq!(result[3].result, StorageResultType::Value(expected_value)); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value), + child_trie_key: None, + }), + ); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + +#[tokio::test] +async fn archive_storage_hashes_values_child_trie() { + let (client, api) = setup_api(); + + // Get child storage values set in `setup_api`. 
+ let child_info = hex_string(&CHILD_STORAGE_KEY); + let key = hex_string(&KEY); + let genesis_hash = format!("{:?}", client.genesis_hash()); + let expected_hash = format!("{:?}", Blake2Hasher::hash(&CHILD_VALUE)); + let expected_value = hex_string(&CHILD_VALUE); + + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + ]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storage", + rpc_params![&genesis_hash, items, &child_info], + ) + .await + .unwrap(); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); } #[tokio::test] async fn archive_storage_closest_merkle_value() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); /// The core of this test. /// @@ -457,55 +500,47 @@ async fn archive_storage_closest_merkle_value() { api: &RpcModule>>, block_hash: String, ) -> HashMap { - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, vec![ - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAB"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key with descendant. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Keys below this comment do not produce a result. // Key that exceed the keyspace of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAABX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key that are not part of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, ] ], @@ -513,19 +548,21 @@ async fn archive_storage_closest_merkle_value() { .await .unwrap(); - let merkle_values: HashMap<_, _> = match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, .. 
}) => result - .into_iter() - .map(|res| { - let value = match res.result { + let mut merkle_values = HashMap::new(); + loop { + let event = get_next_event::(&mut sub).await; + match event { + ArchiveStorageEvent::Storage(result) => { + let str_result = match result.result { StorageResultType::ClosestDescendantMerkleValue(value) => value, - _ => panic!("Unexpected StorageResultType"), + _ => panic!("Unexpected result type"), }; - (res.key, value) - }) - .collect(), - _ => panic!("Unexpected result"), - }; + merkle_values.insert(result.key, str_result); + }, + ArchiveStorageEvent::StorageError(err) => panic!("Unexpected error {err:?}"), + ArchiveStorageEvent::StorageDone => break, + } + } // Response for AAAA, AAAB, A and AA. assert_eq!(merkle_values.len(), 4); @@ -604,9 +641,9 @@ async fn archive_storage_closest_merkle_value() { } #[tokio::test] -async fn archive_storage_paginate_iterations() { +async fn archive_storage_iterations() { // 1 iteration allowed before pagination kicks in. - let (client, api) = setup_api(1, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Import a new block with storage changes. let mut builder = BlockBuilderBuilder::new(&*client) @@ -625,237 +662,94 @@ async fn archive_storage_paginate_iterations() { // Calling with an invalid hash. let invalid_hash = hex_string(&INVALID_HASH); - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &invalid_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Err(_) => (), - _ => panic!("Unexpected result"), - }; - - // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; - - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":m")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":mo")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"ab"))); - }, - _ => panic!("Unexpected result"), - }; - - // Continue with pagination. 
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mo")), }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":moD")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcmoD"))); - }, - _ => panic!("Unexpected result"), - }; - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moD")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":moc")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abc"))); - }, - _ => panic!("Unexpected result"), - }; + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageError(_) + ); - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( + // Valid call with storage at the key. + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moc")), }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - assert_eq!(result[0].key, hex_string(b":mock")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcd"))); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":m"), + result: StorageResultType::Value(hex_string(b"a")), + child_trie_key: None, + }) + ); - // Continue with pagination until no keys are returned. 
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mock")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; -} + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mo"), + result: StorageResultType::Value(hex_string(b"ab")), + child_trie_key: None, + }) + ); -#[tokio::test] -async fn archive_storage_discarded_items() { - // One query at a time - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, 1); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moD"), + result: StorageResultType::Value(hex_string(b"abcmoD")), + child_trie_key: None, + }) + ); - // Import a new block with storage changes. - let mut builder = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap(); - builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap(); - let block = builder.build().unwrap().block; - let block_hash = format!("{:?}", block.header.hash()); - client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moc"), + result: StorageResultType::Value(hex_string(b"abc")), + child_trie_key: None, + }) + ); - // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![ - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - } - ] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 2); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mock"), + result: StorageResultType::Value(hex_string(b"abcd")), + child_trie_key: None, + }) + ); - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); } #[tokio::test] async fn archive_storage_diff_main_trie() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let mut builder = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -965,7 +859,7 @@ async fn archive_storage_diff_main_trie() { #[tokio::test] async fn archive_storage_diff_no_changes() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Build 2 identical blocks. 
let mut builder = BlockBuilderBuilder::new(&*client) @@ -1012,7 +906,7 @@ async fn archive_storage_diff_no_changes() { #[tokio::test] async fn archive_storage_diff_deleted_changes() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Blocks are imported as forks. let mut builder = BlockBuilderBuilder::new(&*client) @@ -1079,7 +973,7 @@ async fn archive_storage_diff_deleted_changes() { #[tokio::test] async fn archive_storage_diff_invalid_params() { let invalid_hash = hex_string(&INVALID_HASH); - let (_, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (_, api) = setup_api(); // Invalid shape for parameters. let items: Vec> = Vec::new(); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs index bd9863060910..de74145a3f08 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs @@ -235,7 +235,7 @@ pub struct OperationCallDone { pub output: String, } -/// The response of the `chainHead_call` method. +/// The response of the `chainHead_storage` method. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct OperationStorageItems { @@ -536,6 +536,7 @@ mod tests { items: vec![StorageResult { key: "0x1".into(), result: StorageResultType::Value("0x123".to_string()), + child_trie_key: None, }], }); diff --git a/substrate/client/rpc-spec-v2/src/common/events.rs b/substrate/client/rpc-spec-v2/src/common/events.rs index 198a60bf4cac..44f722c0c61b 100644 --- a/substrate/client/rpc-spec-v2/src/common/events.rs +++ b/substrate/client/rpc-spec-v2/src/common/events.rs @@ -78,6 +78,10 @@ pub struct StorageResult { /// The result of the query. #[serde(flatten)] pub result: StorageResultType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option, } /// The type of the storage query. @@ -105,23 +109,41 @@ pub struct StorageResultErr { /// The result of a storage call. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(untagged)] -pub enum ArchiveStorageResult { +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum ArchiveStorageEvent { /// Query generated a result. - Ok(ArchiveStorageMethodOk), + Storage(StorageResult), /// Query encountered an error. - Err(ArchiveStorageMethodErr), + StorageError(ArchiveStorageMethodErr), + /// Operation storage is done. + StorageDone, } -impl ArchiveStorageResult { - /// Create a new `ArchiveStorageResult::Ok` result. - pub fn ok(result: Vec, discarded_items: usize) -> Self { - Self::Ok(ArchiveStorageMethodOk { result, discarded_items }) +impl ArchiveStorageEvent { + /// Create a new `ArchiveStorageEvent::StorageErr` event. + pub fn err(error: String) -> Self { + Self::StorageError(ArchiveStorageMethodErr { error }) } - /// Create a new `ArchiveStorageResult::Err` result. - pub fn err(error: String) -> Self { - Self::Err(ArchiveStorageMethodErr { error }) + /// Create a new `ArchiveStorageEvent::StorageResult` event. + pub fn result(result: StorageResult) -> Self { + Self::Storage(result) + } + + /// Checks if the event is a `StorageDone` event. + pub fn is_done(&self) -> bool { + matches!(self, Self::StorageDone) + } + + /// Checks if the event is a `StorageErr` event. + pub fn is_err(&self) -> bool { + matches!(self, Self::StorageError(_)) + } + + /// Checks if the event is a `StorageResult` event. 
+ pub fn is_result(&self) -> bool { + matches!(self, Self::Storage(_)) } } @@ -354,8 +376,11 @@ mod tests { #[test] fn storage_result() { // Item with Value. - let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Value("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Value("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","value":"res"}"#; @@ -365,8 +390,11 @@ mod tests { assert_eq!(dec, item); // Item with Hash. - let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Hash("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","hash":"res"}"#; @@ -379,6 +407,7 @@ mod tests { let item = StorageResult { key: "0x1".into(), result: StorageResultType::ClosestDescendantMerkleValue("res".into()), + child_trie_key: None, }; // Encode let ser = serde_json::to_string(&item).unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/common/storage.rs b/substrate/client/rpc-spec-v2/src/common/storage.rs index 673e20b2bc78..a1e34d51530e 100644 --- a/substrate/client/rpc-spec-v2/src/common/storage.rs +++ b/substrate/client/rpc-spec-v2/src/common/storage.rs @@ -24,7 +24,7 @@ use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; use tokio::sync::mpsc; -use super::events::{StorageResult, StorageResultType}; +use super::events::{StorageQuery, StorageQueryType, StorageResult, StorageResultType}; use crate::hex_string; /// Call into the storage of blocks. @@ -70,9 +70,6 @@ pub enum IterQueryType { /// The result of making a query call. pub type QueryResult = Result, String>; -/// The result of iterating over keys. -pub type QueryIterResult = Result<(Vec, Option), String>; - impl Storage where Block: BlockT + 'static, @@ -97,6 +94,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Value(hex_string(&storage_data.0)), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -120,6 +118,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Hash(hex_string(&storage_data.as_ref())), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -149,6 +148,7 @@ where StorageResult { key: hex_string(&key.0), result: StorageResultType::ClosestDescendantMerkleValue(result), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), } })) }) @@ -199,56 +199,6 @@ where } } - /// Iterate over at most the provided number of keys. - /// - /// Returns the storage result with a potential next key to resume iteration. 
- pub fn query_iter_pagination( - &self, - query: QueryIter, - hash: Block::Hash, - child_key: Option<&ChildInfo>, - count: usize, - ) -> QueryIterResult { - let QueryIter { ty, query_key, pagination_start_key } = query; - - let mut keys_iter = if let Some(child_key) = child_key { - self.client.child_storage_keys( - hash, - child_key.to_owned(), - Some(&query_key), - pagination_start_key.as_ref(), - ) - } else { - self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref()) - } - .map_err(|err| err.to_string())?; - - let mut ret = Vec::with_capacity(count); - let mut next_pagination_key = None; - for _ in 0..count { - let Some(key) = keys_iter.next() else { break }; - - next_pagination_key = Some(key.clone()); - - let result = match ty { - IterQueryType::Value => self.query_value(hash, &key, child_key), - IterQueryType::Hash => self.query_hash(hash, &key, child_key), - }?; - - if let Some(value) = result { - ret.push(value); - } - } - - // Save the next key if any to continue the iteration. - let maybe_next_query = keys_iter.next().map(|_| QueryIter { - ty, - query_key, - pagination_start_key: next_pagination_key, - }); - Ok((ret, maybe_next_query)) - } - /// Raw iterator over the keys. pub fn raw_keys_iter( &self, @@ -264,3 +214,96 @@ where keys_iter.map_err(|err| err.to_string()) } } + +/// Generates storage events for `chainHead_storage` and `archive_storage` subscriptions. +pub struct StorageSubscriptionClient { + /// Storage client. + client: Storage, + _phandom: PhantomData<(BE, Block)>, +} + +impl Clone for StorageSubscriptionClient { + fn clone(&self) -> Self { + Self { client: self.client.clone(), _phandom: PhantomData } + } +} + +impl StorageSubscriptionClient { + /// Constructs a new [`StorageSubscriptionClient`]. + pub fn new(client: Arc) -> Self { + Self { client: Storage::new(client), _phandom: PhantomData } + } +} + +impl StorageSubscriptionClient +where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: StorageProvider + Send + Sync + 'static, +{ + /// Generate storage events to the provided sender. 
+ pub async fn generate_events( + &mut self, + hash: Block::Hash, + items: Vec>, + child_key: Option, + tx: mpsc::Sender, + ) -> Result<(), tokio::task::JoinError> { + let this = self.clone(); + + tokio::task::spawn_blocking(move || { + for item in items { + match item.query_type { + StorageQueryType::Value => { + let rp = this.client.query_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::Hash => { + let rp = this.client.query_hash(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::ClosestDescendantMerkleValue => { + let rp = + this.client.query_merkle_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::DescendantsValues => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Value, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + StorageQueryType::DescendantsHashes => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Hash, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + } + } + }) + .await?; + + Ok(()) + } +} diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 027a444012af..a47a05c0a190 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -756,8 +756,6 @@ where backend.clone(), genesis_hash, task_executor.clone(), - // Defaults to sensible limits for the `Archive`. - sc_rpc_spec_v2::archive::ArchiveConfig::default(), ) .into_rpc(); rpc_api.merge(archive_v2).map_err(|e| Error::Application(e.into()))?; From 1d519a1054d2edb8fc0b868eba6318fb3d448b33 Mon Sep 17 00:00:00 2001 From: Pavlo Khrystenko <45178695+pkhry@users.noreply.github.com> Date: Fri, 29 Nov 2024 16:24:58 +0100 Subject: [PATCH 41/68] Update scale-info to 2.11.6 (#6681) # Description Updates scale-info from 2.11.5 to 2.11.6, so that generated code is annotated with `allow(deprecated)`. Pre-requisite for https://github.com/paritytech/polkadot-sdk/pull/6312 --- Cargo.lock | 8 +- Cargo.toml | 2 +- prdoc/pr_6681.prdoc | 406 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 411 insertions(+), 5 deletions(-) create mode 100644 prdoc/pr_6681.prdoc diff --git a/Cargo.lock b/Cargo.lock index 5e4e9c267b08..1fe2d766f16a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23715,9 +23715,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.5" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aa7ffc1c0ef49b0452c6e2986abf2b07743320641ffd5fc63d552458e3b779b" +checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ "bitvec", "cfg-if", @@ -23729,9 +23729,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.5" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46385cc24172cf615450267463f937c10072516359b3ff1cb24228a4a08bf951" +checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", diff --git a/Cargo.toml b/Cargo.toml index 964964908a9b..ecc385504181 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1197,7 +1197,7 @@ sc-tracing-proc-macro = { path =
"substrate/client/tracing/proc-macro", default- sc-transaction-pool = { path = "substrate/client/transaction-pool", default-features = false } sc-transaction-pool-api = { path = "substrate/client/transaction-pool/api", default-features = false } sc-utils = { path = "substrate/client/utils", default-features = false } -scale-info = { version = "2.11.1", default-features = false } +scale-info = { version = "2.11.6", default-features = false } schemars = { version = "0.8.13", default-features = false } schnellru = { version = "0.2.3" } schnorrkel = { version = "0.11.4", default-features = false } diff --git a/prdoc/pr_6681.prdoc b/prdoc/pr_6681.prdoc new file mode 100644 index 000000000000..93a967d4a66c --- /dev/null +++ b/prdoc/pr_6681.prdoc @@ -0,0 +1,406 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: update scale-info to 2.11.6 + +doc: + - audience: Runtime Dev + description: | + Updates scale-info to 2.11.1 from 2.11.5. + Updated version of scale-info annotates generated code with `allow(deprecated)` + +crates: + - name: bridge-runtime-common + bump: none + - name: bp-header-chain + bump: none + - name: bp-runtime + bump: none + - name: frame-support + bump: none + - name: sp-core + bump: none + - name: sp-trie + bump: none + - name: sp-runtime + bump: none + - name: sp-application-crypto + bump: none + - name: sp-arithmetic + bump: none + - name: sp-weights + bump: none + - name: sp-api + bump: none + - name: sp-metadata-ir + bump: none + - name: sp-version + bump: none + - name: sp-inherents + bump: none + - name: frame-executive + bump: none + - name: frame-system + bump: none + - name: pallet-balances + bump: none + - name: frame-benchmarking + bump: none + - name: pallet-migrations + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: cumulus-primitives-core + bump: none + - name: polkadot-core-primitives + bump: none + - name: polkadot-parachain-primitives + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-slots + bump: none + - name: sp-staking + bump: none + - name: staging-xcm + bump: none + - name: cumulus-primitives-parachain-inherent + bump: none + - name: pallet-message-queue + bump: none + - name: polkadot-runtime-common + bump: none + - name: frame-election-provider-support + bump: none + - name: sp-npos-elections + bump: none + - name: sp-consensus-grandpa + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-grandpa + bump: none + - name: sp-genesis-builder + bump: none + - name: sp-consensus-babe + bump: none + - name: sp-mixnet + bump: none + - name: sc-rpc-api + bump: none + - name: sp-session + bump: none + - name: sp-statement-store + bump: none + - name: sp-transaction-storage-proof + bump: none + - name: pallet-asset-rate + bump: none + - name: pallet-authorship + bump: none + - name: pallet-babe + bump: none + - name: pallet-session + bump: none + - name: pallet-timestamp + bump: none + - name: pallet-offences + bump: none + - name: pallet-staking + bump: none + - name: pallet-bags-list + bump: none + - name: pallet-broker + bump: none + - name: pallet-election-provider-multi-phase + bump: none + - name: pallet-fast-unstake + bump: none + - name: pallet-identity + bump: none + - name: pallet-transaction-payment + bump: none + - name: pallet-treasury + bump: none + - name: 
pallet-utility + bump: none + - name: pallet-collective + bump: none + - name: pallet-root-testing + bump: none + - name: pallet-vesting + bump: none + - name: polkadot-runtime-parachains + bump: none + - name: pallet-authority-discovery + bump: none + - name: pallet-mmr + bump: none + - name: sp-mmr-primitives + bump: none + - name: staging-xcm-executor + bump: none + - name: staging-xcm-builder + bump: none + - name: pallet-asset-conversion + bump: none + - name: pallet-assets + bump: none + - name: pallet-salary + bump: none + - name: pallet-ranked-collective + bump: none + - name: pallet-xcm + bump: none + - name: xcm-runtime-apis + bump: none + - name: pallet-grandpa + bump: none + - name: pallet-indices + bump: none + - name: pallet-sudo + bump: none + - name: sp-consensus-beefy + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + - name: cumulus-pallet-aura-ext + bump: none + - name: pallet-aura + bump: none + - name: sp-consensus-aura + bump: none + - name: pallet-collator-selection + bump: none + - name: pallet-glutton + bump: none + - name: staging-parachain-info + bump: none + - name: westend-runtime + bump: none + - name: frame-metadata-hash-extension + bump: none + - name: frame-system-benchmarking + bump: none + - name: pallet-beefy + bump: none + - name: pallet-beefy-mmr + bump: none + - name: pallet-conviction-voting + bump: none + - name: pallet-scheduler + bump: none + - name: pallet-preimage + bump: none + - name: pallet-delegated-staking + bump: none + - name: pallet-nomination-pools + bump: none + - name: pallet-democracy + bump: none + - name: pallet-elections-phragmen + bump: none + - name: pallet-membership + bump: none + - name: pallet-multisig + bump: none + - name: polkadot-sdk-frame + bump: none + - name: pallet-dev-mode + bump: none + - name: pallet-verify-signature + bump: none + - name: pallet-nomination-pools-benchmarking + bump: none + - name: pallet-offences-benchmarking + bump: none + - name: pallet-im-online + bump: none + - name: pallet-parameters + bump: none + - name: pallet-proxy + bump: none + - name: pallet-recovery + bump: none + - name: pallet-referenda + bump: none + - name: pallet-society + bump: none + - name: pallet-state-trie-migration + bump: none + - name: pallet-whitelist + bump: none + - name: pallet-xcm-benchmarks + bump: none + - name: rococo-runtime + bump: none + - name: pallet-bounties + bump: none + - name: pallet-child-bounties + bump: none + - name: pallet-nis + bump: none + - name: pallet-tips + bump: none + - name: parachains-common + bump: none + - name: pallet-asset-tx-payment + bump: none + - name: cumulus-pallet-xcmp-queue + bump: none + - name: bp-xcm-bridge-hub-router + bump: none + - name: pallet-xcm-bridge-hub-router + bump: none + - name: assets-common + bump: none + - name: bp-messages + bump: none + - name: bp-parachains + bump: none + - name: bp-polkadot-core + bump: none + - name: bp-relayers + bump: none + - name: bp-xcm-bridge-hub + bump: none + - name: bridge-hub-common + bump: none + - name: snowbridge-core + bump: none + - name: snowbridge-beacon-primitives + bump: none + - name: snowbridge-ethereum + bump: none + - name: pallet-bridge-grandpa + bump: none + - name: pallet-bridge-messages + bump: none + - name: pallet-bridge-parachains + bump: none + - name: pallet-bridge-relayers + bump: none + - name: pallet-xcm-bridge-hub + bump: none + - name: cumulus-pallet-dmp-queue + bump: none + - name: cumulus-pallet-solo-to-para + bump: none + - name: cumulus-pallet-xcm + bump: none + - 
name: cumulus-ping + bump: none + - name: frame-benchmarking-pallet-pov + bump: none + - name: pallet-alliance + bump: none + - name: pallet-asset-conversion-ops + bump: none + - name: pallet-asset-conversion-tx-payment + bump: none + - name: pallet-assets-freezer + bump: none + - name: pallet-atomic-swap + bump: none + - name: pallet-collective-content + bump: none + - name: pallet-contracts + bump: none + - name: pallet-contracts-uapi + bump: none + - name: pallet-insecure-randomness-collective-flip + bump: none + - name: pallet-contracts-mock-network + bump: none + - name: xcm-simulator + bump: none + - name: pallet-core-fellowship + bump: none + - name: pallet-lottery + bump: none + - name: pallet-mixnet + bump: none + - name: pallet-nft-fractionalization + bump: none + - name: pallet-nfts + bump: none + - name: pallet-node-authorization + bump: none + - name: pallet-paged-list + bump: none + - name: pallet-remark + bump: none + - name: pallet-revive + bump: none + - name: pallet-revive-uapi + bump: none + - name: pallet-revive-eth-rpc + bump: none + - name: pallet-skip-feeless-payment + bump: none + - name: pallet-revive-mock-network + bump: none + - name: pallet-root-offences + bump: none + - name: pallet-safe-mode + bump: none + - name: pallet-scored-pool + bump: none + - name: pallet-statement + bump: none + - name: pallet-transaction-storage + bump: none + - name: pallet-tx-pause + bump: none + - name: pallet-uniques + bump: none + - name: snowbridge-outbound-queue-merkle-tree + bump: none + - name: snowbridge-pallet-ethereum-client + bump: none + - name: snowbridge-pallet-inbound-queue + bump: none + - name: snowbridge-router-primitives + bump: none + - name: snowbridge-pallet-outbound-queue + bump: none + - name: snowbridge-pallet-system + bump: none + - name: bp-asset-hub-rococo + bump: none + - name: bp-asset-hub-westend + bump: none + - name: bp-polkadot-bulletin + bump: none + - name: asset-hub-rococo-runtime + bump: none + - name: asset-hub-westend-runtime + bump: none + - name: bridge-hub-rococo-runtime + bump: none + - name: bridge-hub-westend-runtime + bump: none + - name: collectives-westend-runtime + bump: none + - name: coretime-rococo-runtime + bump: none + - name: coretime-westend-runtime + bump: none + - name: people-rococo-runtime + bump: none + - name: people-westend-runtime + bump: none + - name: penpal-runtime + bump: none + - name: contracts-rococo-runtime + bump: none + - name: glutton-westend-runtime + bump: none + - name: rococo-parachain-runtime + bump: none + - name: xcm-simulator-example + bump: none \ No newline at end of file From 5ad8780b653350050c6a854205de20c439aa7b65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexandre=20R=2E=20Bald=C3=A9?= Date: Fri, 29 Nov 2024 19:35:06 +0000 Subject: [PATCH 42/68] People chain integration tests (#6377) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Made as a follow-up of https://github.com/polkadot-fellows/runtimes/pull/499 ## Integration N/A ## Review Notes N/A --------- Co-authored-by: Dónal Murray --- Cargo.lock | 1 + .../tests/people/people-westend/Cargo.toml | 1 + .../people-westend/src/tests/governance.rs | 503 ++++++++++++++++++ .../people/people-westend/src/tests/mod.rs | 1 + 4 files changed, 506 insertions(+) create mode 100644 cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs diff --git a/Cargo.lock b/Cargo.lock index 1fe2d766f16a..a945d148e051 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16662,6 
+16662,7 @@ dependencies = [ "sp-runtime 31.0.1", "staging-xcm 7.0.0", "staging-xcm-executor 7.0.0", + "westend-runtime", "westend-runtime-constants 7.0.0", "westend-system-emulated-network", ] diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index aa6eebc5458f..53acd038cdf5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -21,6 +21,7 @@ sp-runtime = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true, default-features = true } westend-runtime-constants = { workspace = true, default-features = true } +westend-runtime = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs new file mode 100644 index 000000000000..3dadcdd94870 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs @@ -0,0 +1,503 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::imports::*; +use frame_support::traits::ProcessMessageError; + +use codec::Encode; +use frame_support::sp_runtime::traits::Dispatchable; +use parachains_common::AccountId; +use people_westend_runtime::people::IdentityInfo; +use westend_runtime::governance::pallet_custom_origins::Origin::GeneralAdmin as GeneralAdminOrigin; +use westend_system_emulated_network::people_westend_emulated_chain::people_westend_runtime; + +use pallet_identity::Data; + +use emulated_integration_tests_common::accounts::{ALICE, BOB}; + +#[test] +fn relay_commands_add_registrar() { + let (origin_kind, origin) = (OriginKind::Superuser, ::RuntimeOrigin::root()); + + let registrar: AccountId = [1; 32].into(); + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let add_registrar_call = + PeopleCall::Identity(pallet_identity::Call::::add_registrar { + account: registrar.into(), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: add_registrar_call.encode().into() } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. 
}) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::RegistrarAdded { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_add_registrar_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + let mut signed_origin = true; + + for (origin_kind, origin) in origins { + let registrar: AccountId = [1; 32].into(); + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let add_registrar_call = + PeopleCall::Identity(pallet_identity::Call::::add_registrar { + account: registrar.into(), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: add_registrar_call.encode().into() } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + if signed_origin { + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { error: ProcessMessageError::Unsupported, .. }) => {}, + ] + ); + } else { + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + } + }); + + signed_origin = false; + } +} + +#[test] +fn relay_commands_kill_identity() { + // To kill an identity, first one must be set + PeopleWestend::execute_with(|| { + type PeopleRuntime = ::Runtime; + type PeopleRuntimeEvent = ::RuntimeEvent; + + let people_westend_alice = + ::RuntimeOrigin::signed(PeopleWestend::account_id_of(ALICE)); + + let identity_info = IdentityInfo { + email: Data::Raw(b"test@test.io".to_vec().try_into().unwrap()), + ..Default::default() + }; + let identity: Box<::IdentityInformation> = + Box::new(identity_info); + + assert_ok!(::Identity::set_identity( + people_westend_alice, + identity + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::IdentitySet { .. 
}) => {}, + ] + ); + }); + + let (origin_kind, origin) = (OriginKind::Superuser, ::RuntimeOrigin::root()); + + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type PeopleCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleRuntime = ::Runtime; + + let kill_identity_call = + PeopleCall::Identity(pallet_identity::Call::::kill_identity { + target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of( + ALICE, + )), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: kill_identity_call.encode().into() } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::IdentityKilled { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_kill_identity_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(BOB); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + for (origin_kind, origin) in origins { + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type PeopleCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleRuntime = ::Runtime; + + let kill_identity_call = + PeopleCall::Identity(pallet_identity::Call::::kill_identity { + target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of( + ALICE, + )), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: kill_identity_call.encode().into() } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + } +} + +#[test] +fn relay_commands_add_remove_username_authority() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + let people_westend_bob = PeopleWestend::account_id_of(BOB); + + let (origin_kind, origin, usr) = + (OriginKind::Superuser, ::RuntimeOrigin::root(), "rootusername"); + + // First, add a username authority. 
+ Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let add_username_authority = + PeopleCall::Identity(pallet_identity::Call::::add_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + allocation: 10, + }); + + let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: add_username_authority.encode().into() } + ]))), + }); + + assert_ok!(add_authority_xcm_msg.dispatch(origin.clone())); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + // Check events system-parachain-side + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::AuthorityAdded { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); + + // Now, use the previously added username authority to concede a username to an account. + PeopleWestend::execute_with(|| { + type PeopleRuntimeEvent = ::RuntimeEvent; + let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes(); + + assert_ok!(::Identity::set_username_for( + ::RuntimeOrigin::signed(people_westend_alice.clone()), + people_westend_runtime::MultiAddress::Id(people_westend_bob.clone()), + full_username, + None, + true + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameQueued { .. }) => {}, + ] + ); + }); + + // Accept the given username + PeopleWestend::execute_with(|| { + type PeopleRuntimeEvent = ::RuntimeEvent; + let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes(); + + assert_ok!(::Identity::accept_username( + ::RuntimeOrigin::signed(people_westend_bob.clone()), + full_username.try_into().unwrap(), + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameSet { .. }) => {}, + ] + ); + }); + + // Now, remove the username authority with another priviledged XCM call. + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::remove_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + }); + + let remove_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: remove_username_authority.encode().into() } + ]))), + }); + + assert_ok!(remove_authority_xcm_msg.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. 
}) => {}, + ] + ); + }); + + // Final event check. + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::AuthorityRemoved { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_add_remove_username_authority_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice.clone()), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + for (origin_kind, origin) in origins { + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let add_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::add_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + allocation: 10, + }); + + let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: add_username_authority.encode().into() } + ]))), + }); + + assert_ok!(add_authority_xcm_msg.dispatch(origin.clone())); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + // Check events system-parachain-side + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::remove_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + }); + + let remove_authority_xcm_msg = + RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::SovereignAccount, + call: remove_username_authority.encode().into(), + } + ]))), + }); + + assert_ok!(remove_authority_xcm_msg.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + } +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs index 08749b295dc2..b9ad9e3db467 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs @@ -14,4 +14,5 @@ // limitations under the License. 
mod claim_assets; +mod governance; mod teleport; From 8eac4e887c827ea0bac8915901c305a05457a8d9 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 29 Nov 2024 23:28:34 +0200 Subject: [PATCH 43/68] network/libp2p-backend: Suppress warning adding already reserved node as reserved (#6703) Fixes https://github.com/paritytech/polkadot-sdk/issues/6598. --------- Co-authored-by: GitHub Action --- prdoc/pr_6703.prdoc | 7 +++++++ substrate/client/network/src/protocol_controller.rs | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 prdoc/pr_6703.prdoc diff --git a/prdoc/pr_6703.prdoc b/prdoc/pr_6703.prdoc new file mode 100644 index 000000000000..2dd0962a3eea --- /dev/null +++ b/prdoc/pr_6703.prdoc @@ -0,0 +1,7 @@ +title: 'network/libp2p-backend: Suppress warning adding already reserved node as reserved' +doc: +- audience: Node Dev + description: Fixes https://github.com/paritytech/polkadot-sdk/issues/6598. +crates: +- name: sc-network + bump: patch diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs index af7adb50907f..11f5321294d0 100644 --- a/substrate/client/network/src/protocol_controller.rs +++ b/substrate/client/network/src/protocol_controller.rs @@ -464,7 +464,7 @@ impl ProtocolController { /// maintain connections with such peers. fn on_add_reserved_peer(&mut self, peer_id: PeerId) { if self.reserved_nodes.contains_key(&peer_id) { - warn!( + debug!( target: LOG_TARGET, "Trying to add an already reserved node {peer_id} as reserved on {:?}.", self.set_id, From 5e0bcb0ee9788b7bb16ccfbda4fdc153b24c6386 Mon Sep 17 00:00:00 2001 From: eskimor Date: Sat, 30 Nov 2024 00:31:27 +0100 Subject: [PATCH 44/68] Let's be a bit less strict here. (#6662) This might actually happen in non malicious cases. 
Co-authored-by: eskimor --- polkadot/node/network/collator-protocol/src/error.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/network/collator-protocol/src/error.rs b/polkadot/node/network/collator-protocol/src/error.rs index ae7f9a8c1fbc..598cdcf43900 100644 --- a/polkadot/node/network/collator-protocol/src/error.rs +++ b/polkadot/node/network/collator-protocol/src/error.rs @@ -122,7 +122,7 @@ impl SecondingError { PersistedValidationDataMismatch | CandidateHashMismatch | RelayParentMismatch | - Duplicate | ParentHeadDataMismatch | + ParentHeadDataMismatch | InvalidCoreIndex(_, _) | InvalidSessionIndex(_, _) | InvalidReceiptVersion(_) From d1fafa85fa1254af143b8e9b0ebf5d2731f8d91a Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Sun, 1 Dec 2024 17:30:09 +0100 Subject: [PATCH 45/68] [pallet-revive] eth-prc fix geth diff (#6608) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add a bunch of differential tests to ensure that responses from eth-rpc match those from `geth` - These [tests](https://github.com/paritytech/polkadot-sdk/blob/pg/fix-geth-diff/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts) are not run in CI for now but can be run locally with ```bash cd revive/rpc/examples/js bun test ``` * EVM RPC server will not fail gas_estimation if no gas is specified; I updated pallet-revive to add an extra `skip_transfer` boolean check to replicate this behavior in our pallet * `eth_transact` and `bare_eth_transact` api have been updated to use `GenericTransaction` directly as this is what is used by `eth_estimateGas` and `eth_call` ## TODO - [ ] Add tests for the new `skip_transfer` flag --------- Co-authored-by: GitHub Action Co-authored-by: Alexander Theißen --- Cargo.lock | 1 + .../assets/asset-hub-westend/src/lib.rs | 30 +- prdoc/pr_6608.prdoc | 14 + substrate/bin/node/runtime/src/lib.rs | 25 +- substrate/frame/revive/Cargo.toml | 1 + .../frame/revive/mock-network/src/tests.rs | 4 +- substrate/frame/revive/rpc/Cargo.toml | 2 +- .../revive/rpc/examples/js/abi/errorTester.ts | 106 ++++++ .../revive/rpc/examples/js/abi/event.json | 34 -- .../frame/revive/rpc/examples/js/abi/event.ts | 34 ++ .../revive/rpc/examples/js/abi/piggyBank.json | 65 ---- .../piggyBank.ts} | 19 +- .../revive/rpc/examples/js/abi/revert.json | 14 - .../frame/revive/rpc/examples/js/bun.lockb | Bin 45391 -> 33662 bytes .../rpc/examples/js/contracts/.solhint.json | 3 + .../rpc/examples/js/contracts/ErrorTester.sol | 51 +++ .../rpc/examples/js/contracts/PiggyBank.sol | 8 +- .../frame/revive/rpc/examples/js/package.json | 41 ++- .../rpc/examples/js/pvm/errorTester.polkavm | Bin 0 -> 12890 bytes .../revive/rpc/examples/js/src/balance.ts | 8 + .../rpc/examples/js/src/build-contracts.ts | 27 +- .../frame/revive/rpc/examples/js/src/event.ts | 40 +- .../rpc/examples/js/src/geth-diff-setup.ts | 162 ++++++++ .../rpc/examples/js/src/geth-diff.test.ts | 245 +++++++++++++ .../frame/revive/rpc/examples/js/src/lib.ts | 126 ++++--- .../revive/rpc/examples/js/src/piggy-bank.ts | 81 +++- .../revive/rpc/examples/js/src/revert.ts | 10 - .../revive/rpc/examples/js/src/transfer.ts | 15 +- .../js/types/ethers-contracts/Event.ts | 117 ------ .../js/types/ethers-contracts/PiggyBank.ts | 96 ----- .../js/types/ethers-contracts/Revert.ts | 78 ---- .../js/types/ethers-contracts/common.ts | 100 ----- .../factories/Event__factory.ts | 51 --- .../factories/Revert__factory.ts | 31 -- .../types/ethers-contracts/factories/index.ts | 6 -
.../js/types/ethers-contracts/index.ts | 10 - .../frame/revive/rpc/revive_chain.metadata | Bin 658056 -> 659977 bytes substrate/frame/revive/rpc/src/client.rs | 125 ++++--- substrate/frame/revive/rpc/src/lib.rs | 44 +-- .../frame/revive/rpc/src/rpc_methods_gen.rs | 1 + .../frame/revive/rpc/src/subxt_client.rs | 12 +- substrate/frame/revive/rpc/src/tests.rs | 3 +- .../frame/revive/src/benchmarking/mod.rs | 2 +- .../frame/revive/src/evm/api/rlp_codec.rs | 18 +- .../frame/revive/src/evm/api/rpc_types.rs | 148 ++++---- .../frame/revive/src/evm/api/rpc_types_gen.rs | 24 +- substrate/frame/revive/src/evm/runtime.rs | 345 ++++++++++-------- substrate/frame/revive/src/exec.rs | 73 +++- substrate/frame/revive/src/lib.rs | 215 +++++++---- substrate/frame/revive/src/primitives.rs | 45 ++- substrate/frame/revive/src/storage/meter.rs | 52 ++- .../frame/revive/src/test_utils/builder.rs | 11 +- substrate/frame/revive/src/tests.rs | 12 +- .../frame/revive/src/tests/test_debug.rs | 5 +- substrate/frame/revive/src/wasm/mod.rs | 11 +- 55 files changed, 1553 insertions(+), 1248 deletions(-) create mode 100644 prdoc/pr_6608.prdoc create mode 100644 substrate/frame/revive/rpc/examples/js/abi/errorTester.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/event.json create mode 100644 substrate/frame/revive/rpc/examples/js/abi/event.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/piggyBank.json rename substrate/frame/revive/rpc/examples/js/{types/ethers-contracts/factories/PiggyBank__factory.ts => abi/piggyBank.ts} (62%) delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/revert.json create mode 100644 substrate/frame/revive/rpc/examples/js/contracts/.solhint.json create mode 100644 substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol create mode 100644 substrate/frame/revive/rpc/examples/js/pvm/errorTester.polkavm create mode 100644 substrate/frame/revive/rpc/examples/js/src/balance.ts create mode 100644 substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts create mode 100644 substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/src/revert.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Event.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/PiggyBank.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Revert.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/common.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Event__factory.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Revert__factory.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/index.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/index.ts diff --git a/Cargo.lock b/Cargo.lock index a945d148e051..bc2ebb2a057d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14633,6 +14633,7 @@ dependencies = [ "assert_matches", "bitflags 1.3.2", "derive_more 0.99.17", + "env_logger 0.11.3", "environmental", "ethereum-types 0.15.1", "frame-benchmarking 28.0.0", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index f20b6b1fece0..98d647d868db 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ 
b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -124,7 +124,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("westmint"), impl_name: alloc::borrow::Cow::Borrowed("westmint"), authoring_version: 1, - spec_version: 1_016_006, + spec_version: 1_016_008, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -2081,18 +2081,10 @@ impl_runtime_apis! { let account = ::AddressMapper::to_account_id(&address); System::account_nonce(account) } - fn eth_transact( - from: H160, - dest: Option, - value: U256, - input: Vec, - gas_limit: Option, - storage_deposit_limit: Option, - ) -> pallet_revive::EthContractResult + + fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result, pallet_revive::EthTransactError> { - use pallet_revive::AddressMapper; - let blockweights = ::BlockWeights::get(); - let origin = ::AddressMapper::to_account_id(&from); + let blockweights: BlockWeights = ::BlockWeights::get(); let encoded_size = |pallet_call| { let call = RuntimeCall::Revive(pallet_call); @@ -2101,15 +2093,9 @@ impl_runtime_apis! { }; Revive::bare_eth_transact( - origin, - dest, - value, - input, - gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + tx, + blockweights.max_block, encoded_size, - pallet_revive::DebugInfo::UnsafeDebug, - pallet_revive::CollectEvents::UnsafeCollect, ) } @@ -2127,7 +2113,7 @@ impl_runtime_apis! { dest, value, gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), input_data, pallet_revive::DebugInfo::UnsafeDebug, pallet_revive::CollectEvents::UnsafeCollect, @@ -2149,7 +2135,7 @@ impl_runtime_apis! { RuntimeOrigin::signed(origin), value, gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), code, data, salt, diff --git a/prdoc/pr_6608.prdoc b/prdoc/pr_6608.prdoc new file mode 100644 index 000000000000..b9cd7008de47 --- /dev/null +++ b/prdoc/pr_6608.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-revive] eth-prc fix geth diff' +doc: +- audience: Runtime Dev + description: |- + * Add a bunch of differential tests to ensure that responses from eth-rpc matches the one from `geth` + * EVM RPC server will not fail gas_estimation if no gas is specified, I updated pallet-revive to add an extra `skip_transfer` boolean check to replicate this behavior in our pallet + * `eth_transact` and `bare_eth_transact` api have been updated to use `GenericTransaction` directly as this is what is used by `eth_estimateGas` and `eth_call` +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor +- name: asset-hub-westend-runtime + bump: minor diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index bff263548087..faffcd23fbcf 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -3218,18 +3218,9 @@ impl_runtime_apis! 
{ System::account_nonce(account) } - fn eth_transact( - from: H160, - dest: Option, - value: U256, - input: Vec, - gas_limit: Option, - storage_deposit_limit: Option, - ) -> pallet_revive::EthContractResult + fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result, pallet_revive::EthTransactError> { - use pallet_revive::AddressMapper; let blockweights: BlockWeights = ::BlockWeights::get(); - let origin = ::AddressMapper::to_account_id(&from); let encoded_size = |pallet_call| { let call = RuntimeCall::Revive(pallet_call); @@ -3238,15 +3229,9 @@ impl_runtime_apis! { }; Revive::bare_eth_transact( - origin, - dest, - value, - input, - gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + tx, + blockweights.max_block, encoded_size, - pallet_revive::DebugInfo::UnsafeDebug, - pallet_revive::CollectEvents::UnsafeCollect, ) } @@ -3263,7 +3248,7 @@ impl_runtime_apis! { dest, value, gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), input_data, pallet_revive::DebugInfo::UnsafeDebug, pallet_revive::CollectEvents::UnsafeCollect, @@ -3284,7 +3269,7 @@ impl_runtime_apis! { RuntimeOrigin::signed(origin), value, gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), code, data, salt, diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index 677ef0e1367f..098a66df8dee 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -65,6 +65,7 @@ pallet-revive-fixtures = { workspace = true, default-features = true } secp256k1 = { workspace = true, features = ["recovery"] } serde_json = { workspace = true } hex-literal = { workspace = true } +env_logger = { workspace = true } # Polkadot SDK Dependencies pallet-balances = { workspace = true, default-features = true } diff --git a/substrate/frame/revive/mock-network/src/tests.rs b/substrate/frame/revive/mock-network/src/tests.rs index bd05726a1a45..34f797c2b530 100644 --- a/substrate/frame/revive/mock-network/src/tests.rs +++ b/substrate/frame/revive/mock-network/src/tests.rs @@ -24,7 +24,7 @@ use frame_support::traits::{fungibles::Mutate, Currency}; use frame_system::RawOrigin; use pallet_revive::{ test_utils::{self, builder::*}, - Code, + Code, DepositLimit, }; use pallet_revive_fixtures::compile_module; use pallet_revive_uapi::ReturnErrorCode; @@ -52,7 +52,7 @@ fn instantiate_test_contract(name: &str) -> Contract { RawOrigin::Signed(ALICE).into(), Code::Upload(wasm), ) - .storage_deposit_limit(1_000_000_000_000) + .storage_deposit_limit(DepositLimit::Balance(1_000_000_000_000)) .build_and_unwrap_contract() }); diff --git a/substrate/frame/revive/rpc/Cargo.toml b/substrate/frame/revive/rpc/Cargo.toml index 9f89b74c668f..fe9cc82dd4d9 100644 --- a/substrate/frame/revive/rpc/Cargo.toml +++ b/substrate/frame/revive/rpc/Cargo.toml @@ -67,13 +67,13 @@ hex = { workspace = true } hex-literal = { workspace = true, optional = true } scale-info = { workspace = true } secp256k1 = { workspace = true, optional = true, features = ["recovery"] } -env_logger = { workspace = true } ethabi = { version = "18.0.0" } [features] example = ["hex-literal", "rlp", "secp256k1", "subxt-signer"] [dev-dependencies] +env_logger = { workspace = true } static_init = { workspace = true } hex-literal = { workspace = 
true } pallet-revive-fixtures = { workspace = true } diff --git a/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts new file mode 100644 index 000000000000..93daf34e02b6 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts @@ -0,0 +1,106 @@ +export const abi = [ + { + inputs: [ + { + internalType: 'string', + name: 'message', + type: 'string', + }, + ], + name: 'CustomError', + type: 'error', + }, + { + inputs: [ + { + internalType: 'bool', + name: 'newState', + type: 'bool', + }, + ], + name: 'setState', + outputs: [], + stateMutability: 'nonpayable', + type: 'function', + }, + { + inputs: [], + name: 'state', + outputs: [ + { + internalType: 'bool', + name: '', + type: 'bool', + }, + ], + stateMutability: 'view', + type: 'function', + }, + { + inputs: [], + name: 'triggerAssertError', + outputs: [], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [], + name: 'triggerCustomError', + outputs: [], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [], + name: 'triggerDivisionByZero', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256', + }, + ], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [], + name: 'triggerOutOfBoundsError', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256', + }, + ], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [], + name: 'triggerRequireError', + outputs: [], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [], + name: 'triggerRevertError', + outputs: [], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [ + { + internalType: 'uint256', + name: 'value', + type: 'uint256', + }, + ], + name: 'valueMatch', + outputs: [], + stateMutability: 'payable', + type: 'function', + }, +] as const diff --git a/substrate/frame/revive/rpc/examples/js/abi/event.json b/substrate/frame/revive/rpc/examples/js/abi/event.json deleted file mode 100644 index d36089fbc84e..000000000000 --- a/substrate/frame/revive/rpc/examples/js/abi/event.json +++ /dev/null @@ -1,34 +0,0 @@ -[ - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "sender", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "value", - "type": "uint256" - }, - { - "indexed": false, - "internalType": "string", - "name": "message", - "type": "string" - } - ], - "name": "ExampleEvent", - "type": "event" - }, - { - "inputs": [], - "name": "triggerEvent", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - } -] diff --git a/substrate/frame/revive/rpc/examples/js/abi/event.ts b/substrate/frame/revive/rpc/examples/js/abi/event.ts new file mode 100644 index 000000000000..c389e2daf1da --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/event.ts @@ -0,0 +1,34 @@ +export const abi = [ + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'address', + name: 'sender', + type: 'address', + }, + { + indexed: false, + internalType: 'uint256', + name: 'value', + type: 'uint256', + }, + { + indexed: false, + internalType: 'string', + name: 'message', + type: 'string', + }, + ], + name: 'ExampleEvent', + type: 'event', + }, + { + inputs: [], + name: 'triggerEvent', + outputs: [], + stateMutability: 'nonpayable', + type: 'function', + }, +] as const diff --git a/substrate/frame/revive/rpc/examples/js/abi/piggyBank.json 
b/substrate/frame/revive/rpc/examples/js/abi/piggyBank.json deleted file mode 100644 index 2c2cfd5f7533..000000000000 --- a/substrate/frame/revive/rpc/examples/js/abi/piggyBank.json +++ /dev/null @@ -1,65 +0,0 @@ -[ - { - "inputs": [], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "deposit", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "payable", - "type": "function" - }, - { - "inputs": [], - "name": "getDeposit", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "owner", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "withdrawAmount", - "type": "uint256" - } - ], - "name": "withdraw", - "outputs": [ - { - "internalType": "uint256", - "name": "remainingBal", - "type": "uint256" - } - ], - "stateMutability": "nonpayable", - "type": "function" - } -] diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/PiggyBank__factory.ts b/substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts similarity index 62% rename from substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/PiggyBank__factory.ts rename to substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts index 0efea80ed2dc..3d44cd998ad1 100644 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/PiggyBank__factory.ts +++ b/substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts @@ -1,11 +1,4 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ - -import { Contract, Interface, type ContractRunner } from 'ethers' -import type { PiggyBank, PiggyBankInterface } from '../PiggyBank' - -const _abi = [ +export const abi = [ { inputs: [], stateMutability: 'nonpayable', @@ -70,13 +63,3 @@ const _abi = [ type: 'function', }, ] as const - -export class PiggyBank__factory { - static readonly abi = _abi - static createInterface(): PiggyBankInterface { - return new Interface(_abi) as PiggyBankInterface - } - static connect(address: string, runner?: ContractRunner | null): PiggyBank { - return new Contract(address, _abi, runner) as unknown as PiggyBank - } -} diff --git a/substrate/frame/revive/rpc/examples/js/abi/revert.json b/substrate/frame/revive/rpc/examples/js/abi/revert.json deleted file mode 100644 index be2945fcc0a5..000000000000 --- a/substrate/frame/revive/rpc/examples/js/abi/revert.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "inputs": [], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "doRevert", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - } -] diff --git a/substrate/frame/revive/rpc/examples/js/bun.lockb b/substrate/frame/revive/rpc/examples/js/bun.lockb index 700dca51da2ad3f843e890258b59c16fd4df6457..0ff3d54157db21a636e30076634343a24c812dca 100755 GIT binary patch delta 9083 zcmeG?X;@UpvgZr~3@FH^4u~MAfFQ##AUh)niZIIJ!ib8>GK5h!83aV*0Ad7{1S~O% zpc2<;Hkah8i3@5pafwT!*EL=f;~HENm*9U39E zSNC*xaq0&_$6JCr$)KX9gr5`ge;?ae+b?_Z>T92NPgpE(-Sy$9H=3Pp4H{e(a=(({ zQgXf0SvzmMuD~#vs@!~{i`A-J!lein3{yTskEiJI7{uMNzJz6%Ziq+d%W3KAqS*y1 zMy<&&(O?~yA8RuV9yC`$A=O^+2i ztSt{fq9+QtBDO2(bcOo8ucp2h;h>P|0 zV-feqvM*v+#5RbX5&w#v5I%?4NsGj1`ie%x)R7i(AH;=-Y2r~o&crkgNS8l%(9P!` zr%sL^(LC3GH_>Wj?&=*KjV(s|e(lw|es;}r@y@5BfP?!TFPlEE{BZBowl{~b*)Z?+ z#gWlTM~i#?J;ZbHyc6;jvyM&vt?%Yi^KmI#bC!AuH%)7G5~O@~Ol2yJeRJ2_VaIp0 zx3s@EO`iRZ`ISJ6+)|%SHEk-lG>;hghp~r8M`YyDss0=9^l>#>leE_Vr}LJTq#}{oS6B{34hwAeKrN`>VG$@;W(?1<@-|imVWknU4uOJxCJ@qFF32%~GDIs)U=O0>Ch#1Qi7AAL z<$^d(Lh=bhhJ`R!eXQ2XZDMfb+ z<4UY$pjjD~{5d5m>;eV5%^;+Y+;|Tz=XeP36KX8NO>h)Pjq9Y=c2YlgQbFkRahzNw zQX@I)SSMwJJD9F+Vkh-pCv~@zlJ(%_$~vioNR80xg5IK|;ybBjozx|yqB%JyYo1E) zq&9X^cXgCg7;6I|E^=cPI_D$^cL_BWYvW%her4Di!v+gb8tHRC86e zNTqSqeWX%3N`d?58yvMADHTWc7BkFLjwaesEOnd#X5Enyi462a z#sOEF7vS2nK8UsC6SOjjLtS`j(a#tH+S$;C46YQ~ySYS|6rv8O9?iie!nk;7>*eaP zVNMQrOsO|0wxFKAe&X;+2uXN;MtA3!J8_y0QM zkAi1`1Hs?qN6D|ZFK9Y&>!i~c)!*3kxjVq;$&QrN z@rs?ygCA0#{cwZ3We+^jm`q`2PIC_j7o z*4XKn1CFjs+~zdSwsg&+I@fL!8w#&UZJY}Vd%SgfQ{auW|M+C8&0UZ5yPH(ILi2mt z;c8)-jvgI|X`|+o@VtC|MDjl)9qzaNPy?zv9+#_)Q|49{REC$| zYjCqqsQL%n^GHLf`-rG|Z|S!mw7r=b*Y@B>?{aukBG$$yFC1R*ebuV^A1#51#eX+WIbz=Ftq1UGV@cgNL$=#-( zjYo73Ef{{ctmbj))rHNEqsKS8+MWV$AF=jPWy`y*3#-o#-7vbrcZ>6k%-bLD_&Qqj zFyfQQJ%cjc&$jgRQSDUGiO}_VXo)C;!&lvx|1Ldif-L z82e>n>NlOEoZ?j-N_v|)2?edLBMdA|gCIRwu%EB_;SYg<+wl2~1cUY@flYwXVVN9_;ZElk?b=DE@R=$QAfbkPpac%4bO?9ss)?xklJ z?EUo@|2@AsIBuwnYxm2@J6@Z9`EPz3?=7kOE%)@zvJCs$zo|FG@2+XM;gy{GJo3ec zXKR;xGikqzGoV(AwoAU2^5}T(7m{l~mIZCKl+~ps)btLMwmvzSd2xGLqjR37x2oy# z(Q^;Bw$zxfxHkHz#rAjP=T82(ckRzlPc#P&?V=qXHanAWdcyBs(YYJN;rohW8+J6S zwMlJXZ|jla8h0UMLRMYT*MU8D%@8Esw)eZg_{hBLfz2yt%(<8PbWT~xjX533Z`VW} zf^c6kv_&gM;B7X}Q7^z@;!puk|?84HyNX2M65b#O3ct(`Jh2@P!CqnnlX?T7$ zvaacp-${#jHO%E8zl`MvqXi{bA(@B~%AnzzZx5gxDA*1cg|@hcOD)9ol1*tOq2GRI-EMeat<9 zja9NV2lk_stUt`eduVK zRv|t_s?U;{*q_Iji7T(7^4%l%@RskuPkAiKOj|Q{x zN;V$SFi(Kp;}gvIO)(KJdMmX_{L~>9dS?2L6!)AksBE~`>W{`8$r{t^%`2In8$Ucd zcl5ofTRva%sYmc*N$QXHUnIQSx+#0_r5EvU*{jC7Ikit;{aJnA=to^#e@xeQ^Y$ek z6CU)^f{>o%7;jrgt}=^3J6?=fySEHrl{#=HIWc 
[... base85-encoded binary delta payload omitted ...] delta 15944 [... base85-encoded binary delta payload omitted ...]
zivf-!xL(0k5iXu3WaQ~UCo)u=j7_CcVHD!P_li{rd#};o_G8~-@fCUcdhL_f#`_M&46I@!!5Os)&9YF-eLMME}!y+c5 z&Z~7k25&ezD;PL1j1^py3lxw|tR_vUfP!P~XcY>mI5vcKUIAT<)zl_~&dE4hVowk* zTCcb)PE(rBGt*Nh?tr`Ul0|ku`ZycwsVNG|pcU5cO zKm+pAgF$&$llYh4(FQnuI%r0crZ)Mlg3Igap$dxgS_c}q>!JpbO@7?a8(UfIuFe$TQg&6(`#b ztpW<(N3&fVFH(yX`fL`8n(S{t=aWsG;KIulBK7DTyb@ zvrgpmFj%Dt5S2eHbf7)Uy1&fA``*qfFJ&4bPhd^nPUZ<%iCGyLoXjK^pO?vHadJ}x zK;sGo+{`>4hc7_<(nP24DApiXvIPs@Y}#Yl3A{`eDOjuyddF8~!D$PB(4Y{$N8++F zIJxObS%sN|_TQ7ly|_P8OEm6Zv<2z-E)=Byh4mdFU%*M>3i5J!nJKK~tlSJv9(I?( zcN7@nyE@3WK|5y{ppASPFTy{B6lnHW(JXw!OSplPo6DKXO5h}>=W!DFTo#`*H7lPO zZKNat+atke!67M{qQb1)Bz#Rx6C`oD+4w1ho0)`_)sd(}Z;2WUZb7l4)&qT^!9azD zueyYja=A(QiClb-m;_TYh$SRxUB^{H$D9`+cC*tRps7gS=^k7G#kHcX^m+BcUmXEU zKkZ*UeIlKbd9bf@x!HV9B3E>4VU-BN_=zYhRoC=83P*qk{4)t=&>lNLPf`-JldO_X z`rB0i4+dDq57rr8AHY%ietp3GJzh2c&xvyutVn&kq_)-%+;Dazxf!Nv(c=9wv4hD{>09wEs0Xth_VxOlw zx!`vTz{Lp8Rk)4E(ldCOJUFisQv z63Z4Fq$42+8cEcMT?12M)juZi`^zxZ_}&>CoC`||j;3tv(k$W}@&6MII{t}5EMi+? zwvZ1zE%g zmK8A(D&(UZ!4-WVi^lLwl!=dIfhc;YA%!X6=OUnD1T@qz!_a|zK?G#7NL&gD`%L14Vpt(@@^lg|(UF+%`;z^BPEp+34&I!KGWW&@8bLJMcnd9!<2S{M~+ z^Xrjgnl`e`7^=oe%H<>{PqpS|3Rs!=O3LR=Q6GH?R}1NGm`fYEUM-rbccR7Mj#SN|7sAwr`7 diff --git a/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json b/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json new file mode 100644 index 000000000000..ce2220e0b756 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json @@ -0,0 +1,3 @@ +{ + "extends": "solhint:recommended" +} diff --git a/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol b/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol new file mode 100644 index 000000000000..f1fdd219624a --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract ErrorTester { + bool public state; + + // Payable function that can be used to test insufficient funds errors + function valueMatch(uint256 value) public payable { + require(msg.value == value , "msg.value does not match value"); + } + + function setState(bool newState) public { + state = newState; + } + + // Trigger a require statement failure with a custom error message + function triggerRequireError() public pure { + require(false, "This is a require error"); + } + + // Trigger an assert statement failure + function triggerAssertError() public pure { + assert(false); + } + + // Trigger a revert statement with a custom error message + function triggerRevertError() public pure { + revert("This is a revert error"); + } + + // Trigger a division by zero error + function triggerDivisionByZero() public pure returns (uint256) { + uint256 a = 1; + uint256 b = 0; + return a / b; + } + + // Trigger an out-of-bounds array access + function triggerOutOfBoundsError() public pure returns (uint256) { + uint256[] memory arr = new uint256[](1); + return arr[2]; + } + + // Trigger a custom error + error CustomError(string message); + + function triggerCustomError() public pure { + revert CustomError("This is a custom error"); + } +} + diff --git a/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol b/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol index 1906c4658889..0c8a4d26f4dc 100644 --- a/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol +++ b/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol @@ -3,7 +3,7 @@ pragma solidity ^0.8.0; contract PiggyBank { - uint private balance; + uint256 private balance; 
address public owner; constructor() { @@ -11,16 +11,16 @@ contract PiggyBank { balance = 0; } - function deposit() public payable returns (uint) { + function deposit() public payable returns (uint256) { balance += msg.value; return balance; } - function getDeposit() public view returns (uint) { + function getDeposit() public view returns (uint256) { return balance; } - function withdraw(uint withdrawAmount) public returns (uint remainingBal) { + function withdraw(uint256 withdrawAmount) public returns (uint256 remainingBal) { require(msg.sender == owner); balance -= withdrawAmount; (bool success, ) = payable(msg.sender).call{value: withdrawAmount}(""); diff --git a/substrate/frame/revive/rpc/examples/js/package.json b/substrate/frame/revive/rpc/examples/js/package.json index 3ae1f0fbd799..6d8d00fd4214 100644 --- a/substrate/frame/revive/rpc/examples/js/package.json +++ b/substrate/frame/revive/rpc/examples/js/package.json @@ -1,22 +1,23 @@ { - "name": "demo", - "private": true, - "version": "0.0.0", - "type": "module", - "scripts": { - "dev": "vite", - "build": "tsc && vite build", - "preview": "vite preview", - "generate-types": "typechain --target=ethers-v6 'abi/*.json'" - }, - "dependencies": { - "@typechain/ethers-v6": "^0.5.1", - "ethers": "^6.13.4", - "solc": "^0.8.28", - "typechain": "^8.3.2" - }, - "devDependencies": { - "typescript": "^5.5.3", - "vite": "^5.4.8" - } + "name": "demo", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview" + }, + "dependencies": { + "ethers": "^6.13.4", + "solc": "^0.8.28", + "viem": "^2.21.47", + "@parity/revive": "^0.0.5" + }, + "devDependencies": { + "prettier": "^3.3.3", + "@types/bun": "^1.1.13", + "typescript": "^5.5.3", + "vite": "^5.4.8" + } } diff --git a/substrate/frame/revive/rpc/examples/js/pvm/errorTester.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/errorTester.polkavm new file mode 100644 index 0000000000000000000000000000000000000000..aebe24c4c0f597fb3d0171a9f527bc86d2d77b93 GIT binary patch literal 12890 zcmds73v?URnVvg$tb0cq$+9w*M$$m!Fl3xG(7>8vsJkmtQ>0*Wyw+^0a=c3tVr(ZN zq_$(rk3(r<;z!cfh?F=Xv?&QmoP;gqWE%o$PulL1@Yu8MS^5GBp}b$G910X@+U$2n zvh!##ER8g&lh0EKoX=-b0T3k{l?_RH5y>QvG-J3m&n^r8luG{my2mVRky}eo5E!`#k zm$Zb8$@j~@mEV#LT2^v)NvUTEyPj|4>(raQ8+^BETm2oShlBIVE)D&2s4H}H=)0kZ zLeGcjl&Mp`QZ9wh46lro8STa+h8F#7v^lya`b5khzbKxKf7iVHj62T=oO$rfOQ+_h z4o$sh>N8Vm;>wD*Dzek|PgBlHoOR(@4QKt)+JE*1(|>A5DsQWNu9D0+Z^ptI-7^Mf z+&1II8T~V#oVoc^cYLa$JJ1tUSgH3C#VbkV)590>AubP04Rsy)?1kv?Dr}16<&q@N z$mwB*?}nVhf|AS!066|@iX@jEp+S?D8#JuT<%v+)kx(a5n8H0l`L;0QlI$mvBHo}4 z#ynC+BGc(Cg-)lH;$ewLLOe|IFvLSwXqW{R$~?>yR8&vUbGa;+9i+h;T3$`VRdRU} zR2sqxODa6!3G&YzV=OEy9JHp>#gdH8zF|6DApUCNBP}Lb6(p<4kWPk8vQ{JO6f$6tL9X5F z)gDl_F;yF2+F4vYH?UH6J=R~Cn)lW}GuBH;vq3uQ$%-WD&62(p>2XL~9cix(RPxZE zbW94&Ftkw~nt`9{My^gXw7iNA+F}N(ix?NV7#9^W{?5>DR<$7-*jWURx!|!Pc$XpO zG$aQO7r_s>;0KD}hYd07AuVuE5xn08?=OPyGqnAJd_d))jpz&bL`TRc!1r-gGPJvV z=m=cSL-z^rTmyUvU0wv&8roi;U<_t(m}402e3dLQ#lNGxlrJYO;9O&1s5thpm}+Q1 zyxhgrL8HJ!?1&H`t?^Hc)p)rk$&C9HHd~?dXc;L01OEWvR54t$$#Wbcr_Phh3X&H* z>rbUjkzA2b7*SM33IQmWZTT||HWFH%B{b|1xqJbED2?xy{_4@fTM$04kU$z~^m-tZ zUJcUeRUo!r31aK@LSnr_cI~w7bJ9NhLVL`)P}P#m=~bzb^>WCteaK$q9Fj?-G2W$2 z?IrdAHR_xJ9}U-Q_cN!>r+uE$n4@8N1I+36DNr{^=LeEsS9qE4r-~v&uP|4M26DyC z>@_s(Yq#JiWU0%5NNNKrjit^_zkBm-KRS<`o4)hKTa>R*mi||KZ}G77aQqhQc3zr} z8vmHNLGf4A5PwBAS{2%Cv{`7CXck(6$dLpQY7v@Eh>;*9T17~#hLHFyLQIR0a5W*3 zN^)E|nweHPp&Y9MB$6dfqoeJsV^4g=;TF%$_oCtO;3ihr%rd=P-f;|4VC7hRC(-*k&#A!Aba8Di 
zeGVb3qKFkhSqf(iGmHBPS-pMy6k@!#k17~2E-qrAw!gK2k)4po2VIN@ix@a#K-H{= zPI`=y;}gQZ$$fiM@ofk@viNR!LYkMj;3Y*cq#0HA9?!(-1;iDz^A^DcaqT&t=+wFO zvy(48?PGSYtvS7#z0{uREcI$De9i%{z1FUAaA7Q-wfDNW2dKC`uvf)VhO+PM0U|?ehW_LYG|_p z+ew>I8mG-1+E1H>^`y-~SaaH}L#HV2*hL!Mi(`p~4W7h$Dd7%u{-cVDOuwN;*mpPQ zPuwDvxS)ub+G)=X{QQKZKQSu({1<{V&;Q!R7m5G*r+uxE_$M|=cewcbetq15ScGaag_>OD6ge)6Yc{AAR4WrdZm{3X-idV zCTL9P1ueI@xZkNvZ1!KDqHzNe35uJZ=lDbZ`%HBlFXUDTg zPY=Ir)&Mhbjm2w=eMP|z@g)I|P+a|60IgZEBsq>X3{&Hb<&-9lY{&L-4 z9`u)M{&Maw$EMGYcO7&!0`ebp6+t6W#TkWaaAytkmz1&xP`;flH;Jl>7<#*8W9Yro z#Lz37FmM9bcrp0Gj;mWh;JS{)wII^eo>&e-O^Lo2*sz4=^u(E@7`#H@-B z;PRF?Gw3A|gYUrYGPr7!@58~6Cv%@CvkS7BK-_P20-=)c!1+;0W>*P`SXh*`5}mVn z51zD*cEgr(iW^>?L1AuJ8nnkG!6YfK!F@TotagfoEXR}BDeZj(5s4?UOWOU2bVt9X z&b0J~)Y<9$aPT|fIlQ8Ogs1cCQSlSM`z-)At+Ysjk2Tj@EWyrrg7`F^YLyFyLp zNBXUFY7T$}kRA$cnv-`vwK&yUTWhJ&Ibxu>XQy{sGX#b^Zm`0#D$=2#_>|%EwZXx` zRNlJDMFG~tU-`6cRh)CcCc4CrhVw6@Jpo#t!$ z$ukvOpLZWZazIqjPnU~d?oJBc;*$uuTuuo_xSyLg70|I%3?SC)O*dTO&P)vSUZRO% z@h1cn61Q#r`usN0Beot5l)vi2#XqUMQb2CVZv$t!vPyh`4=EW#+-O+^VkdE0Ol4dF z1Oc%n7^LAQF|QEVihuR|NELSI_!M2?KV<_1+}#GOEIrcDFgGvuqoLSNn3|wc)Q5B) zR4z#bt#KCNdsV9TESDJGObNyoR*tb}s5j~OKc7(3i8NN&zYQxsMT$WYOoKT?}1i8fRf-*mjwgy>*{?hrx+#BHgDpx~@{`_lZX)SN_+EAl1o P|DsnIq4bp_3F&_TlmMih literal 0 HcmV?d00001 diff --git a/substrate/frame/revive/rpc/examples/js/src/balance.ts b/substrate/frame/revive/rpc/examples/js/src/balance.ts new file mode 100644 index 000000000000..1261dcab7812 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/balance.ts @@ -0,0 +1,8 @@ +import { walletClient } from './lib.ts' + +const recipient = '0x8D97689C9818892B700e27F316cc3E41e17fBeb9' +try { + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) +} catch (err) { + console.error(err) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts index c6b7700d1ccf..b25b5a7f2199 100644 --- a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts +++ b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts @@ -1,11 +1,23 @@ import { compile } from '@parity/revive' +import { format } from 'prettier' +import { parseArgs } from 'node:util' import solc from 'solc' import { readFileSync, writeFileSync } from 'fs' import { join } from 'path' type CompileInput = Parameters[0] -type CompileOutput = Awaited> -type Abi = CompileOutput['contracts'][string][string]['abi'] + +const { + values: { filter }, +} = parseArgs({ + args: process.argv.slice(2), + options: { + filter: { + type: 'string', + short: 'f', + }, + }, +}) function evmCompile(sources: CompileInput) { const input = { @@ -27,9 +39,9 @@ console.log('Compiling contracts...') const input = [ { file: 'Event.sol', contract: 'EventExample', keypath: 'event' }, - { file: 'Revert.sol', contract: 'RevertExample', keypath: 'revert' }, { file: 'PiggyBank.sol', contract: 'PiggyBank', keypath: 'piggyBank' }, -] + { file: 'ErrorTester.sol', contract: 'ErrorTester', keypath: 'errorTester' }, +].filter(({ keypath }) => !filter || keypath.includes(filter)) for (const { keypath, contract, file } of input) { const input = { @@ -41,7 +53,12 @@ for (const { keypath, contract, file } of input) { const out = JSON.parse(evmCompile(input)) const entry = out.contracts[file][contract] writeFileSync(join('evm', `${keypath}.bin`), Buffer.from(entry.evm.bytecode.object, 'hex')) - writeFileSync(join('abi', `${keypath}.json`), JSON.stringify(entry.abi, null, 2)) + writeFileSync( + join('abi', `${keypath}.ts`), + await 
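+			// The generated ABI is written as a prettier-formatted `export const abi = ... as const` TypeScript module rather than a JSON file, so the example scripts can import it with type inference.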
format(`export const abi = ${JSON.stringify(entry.abi, null, 2)} as const`, { + parser: 'typescript', + }) + ) } { diff --git a/substrate/frame/revive/rpc/examples/js/src/event.ts b/substrate/frame/revive/rpc/examples/js/src/event.ts index 94cc2560272e..2e672a9772ff 100644 --- a/substrate/frame/revive/rpc/examples/js/src/event.ts +++ b/substrate/frame/revive/rpc/examples/js/src/event.ts @@ -1,15 +1,29 @@ //! Run with bun run script-event.ts -import { call, getContract, deploy } from './lib.ts' - -try { - const { abi, bytecode } = getContract('event') - const contract = await deploy(bytecode, abi) - const receipt = await call('triggerEvent', await contract.getAddress(), abi) - if (receipt) { - for (const log of receipt.logs) { - console.log('Event log:', JSON.stringify(log, null, 2)) - } - } -} catch (err) { - console.error(err) + +import { abi } from '../abi/event.ts' +import { assert, getByteCode, walletClient } from './lib.ts' + +const deployHash = await walletClient.deployContract({ + abi, + bytecode: getByteCode('event'), +}) +const deployReceipt = await walletClient.waitForTransactionReceipt({ hash: deployHash }) +const contractAddress = deployReceipt.contractAddress +console.log('Contract deployed:', contractAddress) +assert(contractAddress, 'Contract address should be set') + +const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'triggerEvent', +}) + +const hash = await walletClient.writeContract(request) +const receipt = await walletClient.waitForTransactionReceipt({ hash }) +console.log(`Receipt: ${receipt.status}`) +console.log(`Logs receipt: ${receipt.status}`) + +for (const log of receipt.logs) { + console.log('Event log:', log) } diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts new file mode 100644 index 000000000000..92b20473d165 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts @@ -0,0 +1,162 @@ +import { spawn, spawnSync, Subprocess } from 'bun' +import { join, resolve } from 'path' +import { readFileSync } from 'fs' +import { createWalletClient, defineChain, Hex, http, publicActions } from 'viem' +import { privateKeyToAccount } from 'viem/accounts' + +export function getByteCode(name: string, evm: boolean): Hex { + const bytecode = evm ? readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) + return `0x${Buffer.from(bytecode).toString('hex')}` +} + +export type JsonRpcError = { + code: number + message: string + data: Hex +} + +export function killProcessOnPort(port: number) { + // Check which process is using the specified port + const result = spawnSync(['lsof', '-ti', `:${port}`]) + const output = result.stdout.toString().trim() + + if (output) { + console.log(`Port ${port} is in use. Killing process...`) + const pids = output.split('\n') + + // Kill each process using the port + for (const pid of pids) { + spawnSync(['kill', '-9', pid]) + console.log(`Killed process with PID: ${pid}`) + } + } +} + +export let jsonRpcErrors: JsonRpcError[] = [] +export async function createEnv(name: 'geth' | 'kitchensink') { + const gethPort = process.env.GETH_PORT || '8546' + const kitchensinkPort = process.env.KITCHENSINK_PORT || '8545' + const url = `http://localhost:${name == 'geth' ? gethPort : kitchensinkPort}` + const chain = defineChain({ + id: name == 'geth' ? 
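+		// 1337 is geth's --dev chain id; 420420420 is the chain id these examples use for the kitchensink/eth-rpc node.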
1337 : 420420420, + name, + nativeCurrency: { + name: 'Westie', + symbol: 'WST', + decimals: 18, + }, + rpcUrls: { + default: { + http: [url], + }, + }, + testnet: true, + }) + + const transport = http(url, { + onFetchResponse: async (response) => { + const raw = await response.clone().json() + if (raw.error) { + jsonRpcErrors.push(raw.error as JsonRpcError) + } + }, + }) + + const wallet = createWalletClient({ + transport, + chain, + }) + + const [account] = await wallet.getAddresses() + const serverWallet = createWalletClient({ + account, + transport, + chain, + }).extend(publicActions) + + const accountWallet = createWalletClient({ + account: privateKeyToAccount( + '0xa872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f' + ), + transport, + chain, + }).extend(publicActions) + + return { serverWallet, accountWallet, evm: name == 'geth' } +} + +// wait for http request to return 200 +export function waitForHealth(url: string) { + return new Promise((resolve, reject) => { + const start = Date.now() + const interval = setInterval(() => { + fetch(url) + .then((res) => { + if (res.status === 200) { + clearInterval(interval) + resolve() + } + }) + .catch(() => { + const elapsed = Date.now() - start + if (elapsed > 30_000) { + clearInterval(interval) + reject(new Error('hit timeout')) + } + }) + }, 1000) + }) +} + +export const procs: Subprocess[] = [] +const polkadotSdkPath = resolve(__dirname, '../../../../../../..') +if (!process.env.USE_LIVE_SERVERS) { + procs.push( + // Run geth on port 8546 + // + (() => { + killProcessOnPort(8546) + return spawn( + 'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split( + ' ' + ), + { stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') } + ) + })(), + //Run the substate node + (() => { + killProcessOnPort(9944) + return spawn( + [ + './target/debug/substrate-node', + '--dev', + '-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug', + ], + { + stdout: Bun.file('/tmp/kitchensink.out.log'), + stderr: Bun.file('/tmp/kitchensink.err.log'), + cwd: polkadotSdkPath, + } + ) + })(), + // Run eth-rpc on 8545 + await (async () => { + killProcessOnPort(8545) + const proc = spawn( + [ + './target/debug/eth-rpc', + '--dev', + '--node-rpc-url=ws://localhost:9944', + '-l=rpc-metrics=debug,eth-rpc=debug', + ], + { + stdout: Bun.file('/tmp/eth-rpc.out.log'), + stderr: Bun.file('/tmp/eth-rpc.err.log'), + cwd: polkadotSdkPath, + } + ) + await waitForHealth('http://localhost:8545/health').catch() + return proc + })() + ) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts new file mode 100644 index 000000000000..468e7860bb9a --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts @@ -0,0 +1,245 @@ +import { jsonRpcErrors, procs, createEnv, getByteCode } from './geth-diff-setup.ts' +import { afterAll, afterEach, beforeAll, describe, expect, test } from 'bun:test' +import { encodeFunctionData, Hex, parseEther } from 'viem' +import { abi } from '../abi/errorTester' + +afterEach(() => { + jsonRpcErrors.length = 0 +}) + +afterAll(async () => { + procs.forEach((proc) => proc.kill()) +}) + +const envs = await Promise.all([createEnv('geth'), createEnv('kitchensink')]) + +for (const env of envs) { + describe(env.serverWallet.chain.name, () => { + let errorTesterAddr: Hex = '0x' + beforeAll(async () => { + const hash = await env.serverWallet.deployContract({ + abi, + bytecode: 
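+				// getByteCode (defined in geth-diff-setup.ts) loads the Solidity EVM build for geth and the PolkaVM build for kitchensink, so the same test suite exercises both backends.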
getByteCode('errorTester', env.evm), + }) + const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) + if (!deployReceipt.contractAddress) throw new Error('Contract address should be set') + errorTesterAddr = deployReceipt.contractAddress + }) + + test('triggerAssertError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi, + functionName: 'triggerAssertError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000001' + ) + expect(lastJsonRpcError?.message).toBe('execution reverted: assert(false)') + } + }) + + test('triggerRevertError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi, + functionName: 'triggerRevertError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.message).toBe('execution reverted: This is a revert error') + expect(lastJsonRpcError?.data).toBe( + '0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001654686973206973206120726576657274206572726f7200000000000000000000' + ) + } + }) + + test('triggerDivisionByZero', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi, + functionName: 'triggerDivisionByZero', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000012' + ) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: division or modulo by zero' + ) + } + }) + + test('triggerOutOfBoundsError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi, + functionName: 'triggerOutOfBoundsError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000032' + ) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: out-of-bounds access of an array or bytesN' + ) + } + }) + + test('triggerCustomError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi, + functionName: 'triggerCustomError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x8d6ea8be0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001654686973206973206120637573746f6d206572726f7200000000000000000000' + ) + expect(lastJsonRpcError?.message).toBe('execution reverted') + } + }) + + test('eth_call (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.simulateContract({ + address: errorTesterAddr, + abi, + functionName: 'valueMatch', + value: parseEther('10'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + 
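+				// jsonRpcErrors is filled by the transport's onFetchResponse hook in geth-diff-setup.ts, so these assertions compare the raw JSON-RPC error object returned by the node.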
expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_call transfer (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.sendTransaction({ + to: '0x75E480dB528101a381Ce68544611C169Ad7EB342', + value: parseEther('10'), + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.estimateContractGas({ + address: errorTesterAddr, + abi, + functionName: 'valueMatch', + value: parseEther('10'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (revert)', async () => { + expect.assertions(3) + try { + await env.serverWallet.estimateContractGas({ + address: errorTesterAddr, + abi, + functionName: 'valueMatch', + value: parseEther('11'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: msg.value does not match value' + ) + expect(lastJsonRpcError?.data).toBe( + '0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e6d73672e76616c756520646f6573206e6f74206d617463682076616c75650000' + ) + } + }) + + test('eth_get_balance (no account)', async () => { + const balance = await env.serverWallet.getBalance({ + address: '0x0000000000000000000000000000000000000123', + }) + expect(balance).toBe(0n) + }) + + test('eth_estimate (not enough funds to cover gas specified)', async () => { + expect.assertions(4) + try { + let balance = await env.serverWallet.getBalance(env.accountWallet.account) + expect(balance).toBe(0n) + + await env.accountWallet.estimateContractGas({ + address: errorTesterAddr, + abi, + functionName: 'setState', + args: [true], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (no gas specified)', async () => { + let balance = await env.serverWallet.getBalance(env.accountWallet.account) + expect(balance).toBe(0n) + + const data = encodeFunctionData({ + abi, + functionName: 'setState', + args: [true], + }) + + await env.accountWallet.request({ + method: 'eth_estimateGas', + params: [ + { + data, + from: env.accountWallet.account.address, + to: errorTesterAddr, + }, + ], + }) + }) + }) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/lib.ts b/substrate/frame/revive/rpc/examples/js/src/lib.ts index 975d8faf15b3..e1f0e780d95b 100644 --- a/substrate/frame/revive/rpc/examples/js/src/lib.ts +++ b/substrate/frame/revive/rpc/examples/js/src/lib.ts @@ -1,22 +1,11 @@ -import { - Contract, - ContractFactory, - JsonRpcProvider, - TransactionReceipt, - TransactionResponse, - Wallet, -} from 'ethers' import { readFileSync } from 'node:fs' -import type { compile } from '@parity/revive' import { spawn } from 
'node:child_process' import { parseArgs } from 'node:util' -import { BaseContract } from 'ethers' - -type CompileOutput = Awaited> -type Abi = CompileOutput['contracts'][string][string]['abi'] +import { createWalletClient, defineChain, Hex, http, parseEther, publicActions } from 'viem' +import { privateKeyToAccount } from 'viem/accounts' const { - values: { geth, westend, ['private-key']: privateKey }, + values: { geth, proxy, westend, endowment, ['private-key']: privateKey }, } = parseArgs({ args: process.argv.slice(2), options: { @@ -24,6 +13,13 @@ const { type: 'string', short: 'k', }, + endowment: { + type: 'string', + short: 'e', + }, + proxy: { + type: 'boolean', + }, geth: { type: 'boolean', }, @@ -42,7 +38,7 @@ if (geth) { '--http.api', 'web3,eth,debug,personal,net', '--http.port', - '8546', + process.env.GETH_PORT ?? '8546', '--dev', '--verbosity', '0', @@ -55,56 +51,78 @@ if (geth) { await new Promise((resolve) => setTimeout(resolve, 500)) } -export const provider = new JsonRpcProvider( - westend +const rpcUrl = proxy + ? 'http://localhost:8080' + : westend ? 'https://westend-asset-hub-eth-rpc.polkadot.io' : geth ? 'http://localhost:8546' : 'http://localhost:8545' -) -export const signer = privateKey ? new Wallet(privateKey, provider) : await provider.getSigner() -console.log(`Signer address: ${await signer.getAddress()}, Nonce: ${await signer.getNonce()}`) +export const chain = defineChain({ + id: geth ? 1337 : 420420420, + name: 'Asset Hub Westend', + network: 'asset-hub', + nativeCurrency: { + name: 'Westie', + symbol: 'WST', + decimals: 18, + }, + rpcUrls: { + default: { + http: [rpcUrl], + }, + }, + testnet: true, +}) + +const wallet = createWalletClient({ + transport: http(), + chain, +}) +const [account] = await wallet.getAddresses() +export const serverWalletClient = createWalletClient({ + account, + transport: http(), + chain, +}) + +export const walletClient = await (async () => { + if (privateKey) { + const account = privateKeyToAccount(`0x${privateKey}`) + console.log(`Wallet address ${account.address}`) + + const wallet = createWalletClient({ + account, + transport: http(), + chain, + }) + + if (endowment) { + await serverWalletClient.sendTransaction({ + to: account.address, + value: parseEther(endowment), + }) + console.log(`Endowed address ${account.address} with: ${endowment}`) + } + + return wallet.extend(publicActions) + } else { + return serverWalletClient.extend(publicActions) + } +})() /** * Get one of the pre-built contracts * @param name - the contract name */ -export function getContract(name: string): { abi: Abi; bytecode: string } { +export function getByteCode(name: string): Hex { const bytecode = geth ? 
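+	// With --geth the Solidity EVM build (evm/*.bin) is loaded; otherwise the PolkaVM blob (pvm/*.polkavm) is used.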
readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) - const abi = JSON.parse(readFileSync(`abi/${name}.json`, 'utf8')) as Abi - return { abi, bytecode: Buffer.from(bytecode).toString('hex') } + return `0x${Buffer.from(bytecode).toString('hex')}` } -/** - * Deploy a contract - * @returns the contract address - **/ -export async function deploy(bytecode: string, abi: Abi, args: any[] = []): Promise { - console.log('Deploying contract with', args) - const contractFactory = new ContractFactory(abi, bytecode, signer) - - const contract = await contractFactory.deploy(args) - await contract.waitForDeployment() - const address = await contract.getAddress() - console.log(`Contract deployed: ${address}`) - - return contract -} - -/** - * Call a contract - **/ -export async function call( - method: string, - address: string, - abi: Abi, - args: any[] = [], - opts: { value?: bigint } = {} -): Promise { - console.log(`Calling ${method} at ${address} with`, args, opts) - const contract = new Contract(address, abi, signer) - const tx = (await contract[method](...args, opts)) as TransactionResponse - console.log('Call transaction hash:', tx.hash) - return tx.wait() +export function assert(condition: any, message: string): asserts condition { + if (!condition) { + throw new Error(message) + } } diff --git a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts index 7a8edbde3662..0040b0c78dc4 100644 --- a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts +++ b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts @@ -1,24 +1,69 @@ -import { provider, call, getContract, deploy } from './lib.ts' -import { parseEther } from 'ethers' -import { PiggyBank } from '../types/ethers-contracts/PiggyBank' +import { assert, getByteCode, walletClient } from './lib.ts' +import { abi } from '../abi/piggyBank.ts' +import { parseEther } from 'viem' -try { - const { abi, bytecode } = getContract('piggyBank') - const contract = (await deploy(bytecode, abi)) as PiggyBank - const address = await contract.getAddress() +const hash = await walletClient.deployContract({ + abi, + bytecode: getByteCode('piggyBank'), +}) +const deployReceipt = await walletClient.waitForTransactionReceipt({ hash }) +const contractAddress = deployReceipt.contractAddress +console.log('Contract deployed:', contractAddress) +assert(contractAddress, 'Contract address should be set') - let receipt = await call('deposit', address, abi, [], { - value: parseEther('10.0'), +// Deposit 10 WST +{ + const result = await walletClient.estimateContractGas({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'deposit', + value: parseEther('10'), }) - console.log('Deposit receipt:', receipt?.status) - console.log(`Contract balance: ${await provider.getBalance(address)}`) - console.log('deposit: ', await contract.getDeposit()) + console.log(`Gas estimate: ${result}`) - receipt = await call('withdraw', address, abi, [parseEther('5.0')]) - console.log('Withdraw receipt:', receipt?.status) - console.log(`Contract balance: ${await provider.getBalance(address)}`) - console.log('deposit: ', await contract.getDeposit()) -} catch (err) { - console.error(err) + const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'deposit', + value: parseEther('10'), + }) + + request.nonce = 0 + const hash = await walletClient.writeContract(request) + + const receipt = await 
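+	// Wait for the deposit transaction to be included; the receipt status is logged just below.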
walletClient.waitForTransactionReceipt({ hash }) + console.log(`Deposit receipt: ${receipt.status}`) + if (process.env.STOP) { + process.exit(0) + } +} + +// Withdraw 5 WST +{ + const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'withdraw', + args: [parseEther('5')], + }) + + const hash = await walletClient.writeContract(request) + const receipt = await walletClient.waitForTransactionReceipt({ hash }) + console.log(`Withdraw receipt: ${receipt.status}`) + + // Check remaining balance + const balance = await walletClient.readContract({ + address: contractAddress, + abi, + functionName: 'getDeposit', + }) + + console.log(`Get deposit: ${balance}`) + console.log( + `Get contract balance: ${await walletClient.getBalance({ address: contractAddress })}` + ) } diff --git a/substrate/frame/revive/rpc/examples/js/src/revert.ts b/substrate/frame/revive/rpc/examples/js/src/revert.ts deleted file mode 100644 index ea1bf4eceeb9..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/revert.ts +++ /dev/null @@ -1,10 +0,0 @@ -//! Run with bun run script-revert.ts -import { call, getContract, deploy } from './lib.ts' - -try { - const { abi, bytecode } = getContract('revert') - const contract = await deploy(bytecode, abi) - await call('doRevert', await contract.getAddress(), abi) -} catch (err) { - console.error(err) -} diff --git a/substrate/frame/revive/rpc/examples/js/src/transfer.ts b/substrate/frame/revive/rpc/examples/js/src/transfer.ts index ae2dd50f2af8..aef9a487b0c0 100644 --- a/substrate/frame/revive/rpc/examples/js/src/transfer.ts +++ b/substrate/frame/revive/rpc/examples/js/src/transfer.ts @@ -1,17 +1,18 @@ -import { parseEther } from 'ethers' -import { provider, signer } from './lib.ts' +import { parseEther } from 'viem' +import { walletClient } from './lib.ts' const recipient = '0x75E480dB528101a381Ce68544611C169Ad7EB342' try { - console.log(`Signer balance: ${await provider.getBalance(signer.address)}`) - console.log(`Recipient balance: ${await provider.getBalance(recipient)}`) - await signer.sendTransaction({ + console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) + + await walletClient.sendTransaction({ to: recipient, value: parseEther('1.0'), }) console.log(`Sent: ${parseEther('1.0')}`) - console.log(`Signer balance: ${await provider.getBalance(signer.address)}`) - console.log(`Recipient balance: ${await provider.getBalance(recipient)}`) + console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) } catch (err) { console.error(err) } diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Event.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Event.ts deleted file mode 100644 index d65f953969f0..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Event.ts +++ /dev/null @@ -1,117 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ -import type { - BaseContract, - BigNumberish, - BytesLike, - FunctionFragment, - Result, - Interface, - EventFragment, - AddressLike, - ContractRunner, - ContractMethod, - Listener, -} from 'ethers' -import type { - TypedContractEvent, - TypedDeferredTopicFilter, - TypedEventLog, - TypedLogDescription, - TypedListener, - TypedContractMethod, -} from './common' - -export interface EventInterface extends Interface { - getFunction(nameOrSignature: 'triggerEvent'): FunctionFragment - - getEvent(nameOrSignatureOrTopic: 'ExampleEvent'): EventFragment - - encodeFunctionData(functionFragment: 'triggerEvent', values?: undefined): string - - decodeFunctionResult(functionFragment: 'triggerEvent', data: BytesLike): Result -} - -export namespace ExampleEventEvent { - export type InputTuple = [sender: AddressLike, value: BigNumberish, message: string] - export type OutputTuple = [sender: string, value: bigint, message: string] - export interface OutputObject { - sender: string - value: bigint - message: string - } - export type Event = TypedContractEvent - export type Filter = TypedDeferredTopicFilter - export type Log = TypedEventLog - export type LogDescription = TypedLogDescription -} - -export interface Event extends BaseContract { - connect(runner?: ContractRunner | null): Event - waitForDeployment(): Promise - - interface: EventInterface - - queryFilter( - event: TCEvent, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - queryFilter( - filter: TypedDeferredTopicFilter, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - - on( - event: TCEvent, - listener: TypedListener - ): Promise - on( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - once( - event: TCEvent, - listener: TypedListener - ): Promise - once( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - listeners( - event: TCEvent - ): Promise>> - listeners(eventName?: string): Promise> - removeAllListeners(event?: TCEvent): Promise - - triggerEvent: TypedContractMethod<[], [void], 'nonpayable'> - - getFunction(key: string | FunctionFragment): T - - getFunction(nameOrSignature: 'triggerEvent'): TypedContractMethod<[], [void], 'nonpayable'> - - getEvent( - key: 'ExampleEvent' - ): TypedContractEvent< - ExampleEventEvent.InputTuple, - ExampleEventEvent.OutputTuple, - ExampleEventEvent.OutputObject - > - - filters: { - 'ExampleEvent(address,uint256,string)': TypedContractEvent< - ExampleEventEvent.InputTuple, - ExampleEventEvent.OutputTuple, - ExampleEventEvent.OutputObject - > - ExampleEvent: TypedContractEvent< - ExampleEventEvent.InputTuple, - ExampleEventEvent.OutputTuple, - ExampleEventEvent.OutputObject - > - } -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/PiggyBank.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/PiggyBank.ts deleted file mode 100644 index ca137fcc8b30..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/PiggyBank.ts +++ /dev/null @@ -1,96 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ -import type { - BaseContract, - BigNumberish, - BytesLike, - FunctionFragment, - Result, - Interface, - ContractRunner, - ContractMethod, - Listener, -} from 'ethers' -import type { - TypedContractEvent, - TypedDeferredTopicFilter, - TypedEventLog, - TypedListener, - TypedContractMethod, -} from './common' - -export interface PiggyBankInterface extends Interface { - getFunction(nameOrSignature: 'deposit' | 'getDeposit' | 'owner' | 'withdraw'): FunctionFragment - - encodeFunctionData(functionFragment: 'deposit', values?: undefined): string - encodeFunctionData(functionFragment: 'getDeposit', values?: undefined): string - encodeFunctionData(functionFragment: 'owner', values?: undefined): string - encodeFunctionData(functionFragment: 'withdraw', values: [BigNumberish]): string - - decodeFunctionResult(functionFragment: 'deposit', data: BytesLike): Result - decodeFunctionResult(functionFragment: 'getDeposit', data: BytesLike): Result - decodeFunctionResult(functionFragment: 'owner', data: BytesLike): Result - decodeFunctionResult(functionFragment: 'withdraw', data: BytesLike): Result -} - -export interface PiggyBank extends BaseContract { - connect(runner?: ContractRunner | null): PiggyBank - waitForDeployment(): Promise - - interface: PiggyBankInterface - - queryFilter( - event: TCEvent, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - queryFilter( - filter: TypedDeferredTopicFilter, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - - on( - event: TCEvent, - listener: TypedListener - ): Promise - on( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - once( - event: TCEvent, - listener: TypedListener - ): Promise - once( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - listeners( - event: TCEvent - ): Promise>> - listeners(eventName?: string): Promise> - removeAllListeners(event?: TCEvent): Promise - - deposit: TypedContractMethod<[], [bigint], 'payable'> - - getDeposit: TypedContractMethod<[], [bigint], 'view'> - - owner: TypedContractMethod<[], [string], 'view'> - - withdraw: TypedContractMethod<[withdrawAmount: BigNumberish], [bigint], 'nonpayable'> - - getFunction(key: string | FunctionFragment): T - - getFunction(nameOrSignature: 'deposit'): TypedContractMethod<[], [bigint], 'payable'> - getFunction(nameOrSignature: 'getDeposit'): TypedContractMethod<[], [bigint], 'view'> - getFunction(nameOrSignature: 'owner'): TypedContractMethod<[], [string], 'view'> - getFunction( - nameOrSignature: 'withdraw' - ): TypedContractMethod<[withdrawAmount: BigNumberish], [bigint], 'nonpayable'> - - filters: {} -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Revert.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Revert.ts deleted file mode 100644 index ad6e23b38a65..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Revert.ts +++ /dev/null @@ -1,78 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ -import type { - BaseContract, - BytesLike, - FunctionFragment, - Result, - Interface, - ContractRunner, - ContractMethod, - Listener, -} from 'ethers' -import type { - TypedContractEvent, - TypedDeferredTopicFilter, - TypedEventLog, - TypedListener, - TypedContractMethod, -} from './common' - -export interface RevertInterface extends Interface { - getFunction(nameOrSignature: 'doRevert'): FunctionFragment - - encodeFunctionData(functionFragment: 'doRevert', values?: undefined): string - - decodeFunctionResult(functionFragment: 'doRevert', data: BytesLike): Result -} - -export interface Revert extends BaseContract { - connect(runner?: ContractRunner | null): Revert - waitForDeployment(): Promise - - interface: RevertInterface - - queryFilter( - event: TCEvent, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - queryFilter( - filter: TypedDeferredTopicFilter, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - - on( - event: TCEvent, - listener: TypedListener - ): Promise - on( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - once( - event: TCEvent, - listener: TypedListener - ): Promise - once( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - listeners( - event: TCEvent - ): Promise>> - listeners(eventName?: string): Promise> - removeAllListeners(event?: TCEvent): Promise - - doRevert: TypedContractMethod<[], [void], 'nonpayable'> - - getFunction(key: string | FunctionFragment): T - - getFunction(nameOrSignature: 'doRevert'): TypedContractMethod<[], [void], 'nonpayable'> - - filters: {} -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/common.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/common.ts deleted file mode 100644 index 247b9468ece2..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/common.ts +++ /dev/null @@ -1,100 +0,0 @@ -/* Autogenerated file. Do not edit manually. */ -/* tslint:disable */ -/* eslint-disable */ -import type { - FunctionFragment, - Typed, - EventFragment, - ContractTransaction, - ContractTransactionResponse, - DeferredTopicFilter, - EventLog, - TransactionRequest, - LogDescription, -} from 'ethers' - -export interface TypedDeferredTopicFilter<_TCEvent extends TypedContractEvent> - extends DeferredTopicFilter {} - -export interface TypedContractEvent< - InputTuple extends Array = any, - OutputTuple extends Array = any, - OutputObject = any, -> { - ( - ...args: Partial - ): TypedDeferredTopicFilter> - name: string - fragment: EventFragment - getFragment(...args: Partial): EventFragment -} - -type __TypechainAOutputTuple = T extends TypedContractEvent ? W : never -type __TypechainOutputObject = - T extends TypedContractEvent ? V : never - -export interface TypedEventLog extends Omit { - args: __TypechainAOutputTuple & __TypechainOutputObject -} - -export interface TypedLogDescription - extends Omit { - args: __TypechainAOutputTuple & __TypechainOutputObject -} - -export type TypedListener = ( - ...listenerArg: [...__TypechainAOutputTuple, TypedEventLog, ...undefined[]] -) => void - -export type MinEthersFactory = { - deploy(...a: ARGS[]): Promise -} - -export type GetContractTypeFromFactory = F extends MinEthersFactory ? C : never -export type GetARGsTypeFromFactory = - F extends MinEthersFactory ? 
Parameters : never - -export type StateMutability = 'nonpayable' | 'payable' | 'view' - -export type BaseOverrides = Omit -export type NonPayableOverrides = Omit -export type PayableOverrides = Omit -export type ViewOverrides = Omit -export type Overrides = S extends 'nonpayable' - ? NonPayableOverrides - : S extends 'payable' - ? PayableOverrides - : ViewOverrides - -export type PostfixOverrides, S extends StateMutability> = - | A - | [...A, Overrides] -export type ContractMethodArgs, S extends StateMutability> = PostfixOverrides< - { [I in keyof A]-?: A[I] | Typed }, - S -> - -export type DefaultReturnType = R extends Array ? R[0] : R - -// export interface ContractMethod = Array, R = any, D extends R | ContractTransactionResponse = R | ContractTransactionResponse> { -export interface TypedContractMethod< - A extends Array = Array, - R = any, - S extends StateMutability = 'payable', -> { - ( - ...args: ContractMethodArgs - ): S extends 'view' ? Promise> : Promise - - name: string - - fragment: FunctionFragment - - getFragment(...args: ContractMethodArgs): FunctionFragment - - populateTransaction(...args: ContractMethodArgs): Promise - staticCall(...args: ContractMethodArgs): Promise> - send(...args: ContractMethodArgs): Promise - estimateGas(...args: ContractMethodArgs): Promise - staticCallResult(...args: ContractMethodArgs): Promise -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Event__factory.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Event__factory.ts deleted file mode 100644 index 2e16b18a7ed8..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Event__factory.ts +++ /dev/null @@ -1,51 +0,0 @@ -/* Autogenerated file. Do not edit manually. */ -/* tslint:disable */ -/* eslint-disable */ - -import { Contract, Interface, type ContractRunner } from 'ethers' -import type { Event, EventInterface } from '../Event' - -const _abi = [ - { - anonymous: false, - inputs: [ - { - indexed: true, - internalType: 'address', - name: 'sender', - type: 'address', - }, - { - indexed: false, - internalType: 'uint256', - name: 'value', - type: 'uint256', - }, - { - indexed: false, - internalType: 'string', - name: 'message', - type: 'string', - }, - ], - name: 'ExampleEvent', - type: 'event', - }, - { - inputs: [], - name: 'triggerEvent', - outputs: [], - stateMutability: 'nonpayable', - type: 'function', - }, -] as const - -export class Event__factory { - static readonly abi = _abi - static createInterface(): EventInterface { - return new Interface(_abi) as EventInterface - } - static connect(address: string, runner?: ContractRunner | null): Event { - return new Contract(address, _abi, runner) as unknown as Event - } -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Revert__factory.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Revert__factory.ts deleted file mode 100644 index ece1c6b5426e..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Revert__factory.ts +++ /dev/null @@ -1,31 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ - -import { Contract, Interface, type ContractRunner } from 'ethers' -import type { Revert, RevertInterface } from '../Revert' - -const _abi = [ - { - inputs: [], - stateMutability: 'nonpayable', - type: 'constructor', - }, - { - inputs: [], - name: 'doRevert', - outputs: [], - stateMutability: 'nonpayable', - type: 'function', - }, -] as const - -export class Revert__factory { - static readonly abi = _abi - static createInterface(): RevertInterface { - return new Interface(_abi) as RevertInterface - } - static connect(address: string, runner?: ContractRunner | null): Revert { - return new Contract(address, _abi, runner) as unknown as Revert - } -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/index.ts deleted file mode 100644 index 67370dba411c..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/index.ts +++ /dev/null @@ -1,6 +0,0 @@ -/* Autogenerated file. Do not edit manually. */ -/* tslint:disable */ -/* eslint-disable */ -export { Event__factory } from './Event__factory' -export { PiggyBank__factory } from './PiggyBank__factory' -export { Revert__factory } from './Revert__factory' diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/index.ts deleted file mode 100644 index 3e324e80dcb1..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/index.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* Autogenerated file. Do not edit manually. */ -/* tslint:disable */ -/* eslint-disable */ -export type { Event } from './Event' -export type { PiggyBank } from './PiggyBank' -export type { Revert } from './Revert' -export * as factories from './factories' -export { Event__factory } from './factories/Event__factory' -export { PiggyBank__factory } from './factories/PiggyBank__factory' -export { Revert__factory } from './factories/Revert__factory' diff --git a/substrate/frame/revive/rpc/revive_chain.metadata index 3560b3b90407acce7f602ce91ac089843be8dea8..64b1f2014dd06815fcea6a87bc96306eb00eda8b 100644 Binary files a/substrate/frame/revive/rpc/revive_chain.metadata and b/substrate/frame/revive/rpc/revive_chain.metadata differ
diff --git a/substrate/frame/revive/rpc/src/client.rs index d37f1d760065..901c15e9756b 100644 --- a/substrate/frame/revive/rpc/src/client.rs +++ b/substrate/frame/revive/rpc/src/client.rs @@ -32,7 +32,7 @@ use pallet_revive::{ Block, BlockNumberOrTag, BlockNumberOrTagOrHash, Bytes256, GenericTransaction, Log, ReceiptInfo, SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256, }, - EthContractResult, + EthTransactError, EthTransactInfo, }; use
sp_core::keccak_256; use sp_weights::Weight; @@ -116,18 +116,42 @@ fn unwrap_call_err(err: &subxt::error::RpcError) -> Option { /// Extract the revert message from a revert("msg") solidity statement. fn extract_revert_message(exec_data: &[u8]) -> Option { - let function_selector = exec_data.get(0..4)?; - - // keccak256("Error(string)") - let expected_selector = [0x08, 0xC3, 0x79, 0xA0]; - if function_selector != expected_selector { - return None; - } + let error_selector = exec_data.get(0..4)?; + + match error_selector { + // assert(false) + [0x4E, 0x48, 0x7B, 0x71] => { + let panic_code: u32 = U256::from_big_endian(exec_data.get(4..36)?).try_into().ok()?; + + // See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require + let msg = match panic_code { + 0x00 => "generic panic", + 0x01 => "assert(false)", + 0x11 => "arithmetic underflow or overflow", + 0x12 => "division or modulo by zero", + 0x21 => "enum overflow", + 0x22 => "invalid encoded storage byte array accessed", + 0x31 => "out-of-bounds array access; popping on an empty array", + 0x32 => "out-of-bounds access of an array or bytesN", + 0x41 => "out of memory", + 0x51 => "uninitialized function", + code => return Some(format!("execution reverted: unknown panic code: {code:#x}")), + }; - let decoded = ethabi::decode(&[ethabi::ParamType::String], &exec_data[4..]).ok()?; - match decoded.first()? { - ethabi::Token::String(msg) => Some(msg.to_string()), - _ => None, + Some(format!("execution reverted: {msg}")) + }, + // revert(string) + [0x08, 0xC3, 0x79, 0xA0] => { + let decoded = ethabi::decode(&[ethabi::ParamType::String], &exec_data[4..]).ok()?; + if let Some(ethabi::Token::String(msg)) = decoded.first() { + return Some(format!("execution reverted: {msg}")) + } + Some("execution reverted".to_string()) + }, + _ => { + log::debug!(target: LOG_TARGET, "Unknown revert function selector: {error_selector:?}"); + Some("execution reverted".to_string()) + }, } } @@ -146,42 +170,46 @@ pub enum ClientError { /// A [`codec::Error`] wrapper error. #[error(transparent)] CodecError(#[from] codec::Error), - /// The dry run failed. - #[error("Dry run failed: {0}")] - DryRunFailed(String), /// Contract reverted - #[error("Execution reverted: {}", extract_revert_message(.0).unwrap_or_default())] - Reverted(Vec), + #[error("contract reverted")] + Reverted(EthTransactError), /// A decimal conversion failed. - #[error("Conversion failed")] + #[error("conversion failed")] ConversionFailed, /// The block hash was not found. - #[error("Hash not found")] + #[error("hash not found")] BlockNotFound, /// The transaction fee could not be found - #[error("TransactionFeePaid event not found")] + #[error("transactionFeePaid event not found")] TxFeeNotFound, /// The cache is empty. 
- #[error("Cache is empty")] + #[error("cache is empty")] CacheEmpty, } -// TODO convert error code to https://eips.ethereum.org/EIPS/eip-1474#error-codes +const REVERT_CODE: i32 = 3; impl From for ErrorObjectOwned { fn from(err: ClientError) -> Self { - let msg = err.to_string(); match err { ClientError::SubxtError(subxt::Error::Rpc(err)) | ClientError::RpcError(err) => { if let Some(err) = unwrap_call_err(&err) { return err; } - ErrorObjectOwned::owned::>(CALL_EXECUTION_FAILED_CODE, msg, None) + ErrorObjectOwned::owned::>( + CALL_EXECUTION_FAILED_CODE, + err.to_string(), + None, + ) }, - ClientError::Reverted(data) => { + ClientError::Reverted(EthTransactError::Data(data)) => { + let msg = extract_revert_message(&data).unwrap_or_default(); let data = format!("0x{}", hex::encode(data)); - ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, msg, Some(data)) + ErrorObjectOwned::owned::(REVERT_CODE, msg, Some(data)) }, - _ => ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, msg, None), + ClientError::Reverted(EthTransactError::Message(msg)) => + ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, msg, None), + _ => + ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, err.to_string(), None), } } } @@ -634,54 +662,25 @@ impl Client { Ok(result) } - /// Dry run a transaction and returns the [`EthContractResult`] for the transaction. + /// Dry run a transaction and returns the [`EthTransactInfo`] for the transaction. pub async fn dry_run( &self, - tx: &GenericTransaction, + tx: GenericTransaction, block: BlockNumberOrTagOrHash, - ) -> Result>, ClientError> { + ) -> Result, ClientError> { let runtime_api = self.runtime_api(&block).await?; + let payload = subxt_client::apis().revive_api().eth_transact(tx.into()); - // TODO: remove once subxt is updated - let value = subxt::utils::Static(tx.value.unwrap_or_default()); - let from = tx.from.map(|v| v.0.into()); - let to = tx.to.map(|v| v.0.into()); - - let payload = subxt_client::apis().revive_api().eth_transact( - from.unwrap_or_default(), - to, - value, - tx.input.clone().unwrap_or_default().0, - None, - None, - ); - - let EthContractResult { fee, gas_required, storage_deposit, result } = - runtime_api.call(payload).await?.0; + let result = runtime_api.call(payload).await?; match result { Err(err) => { log::debug!(target: LOG_TARGET, "Dry run failed {err:?}"); - Err(ClientError::DryRunFailed(format!("{err:?}"))) + Err(ClientError::Reverted(err.0)) }, - Ok(result) if result.did_revert() => { - log::debug!(target: LOG_TARGET, "Dry run reverted"); - Err(ClientError::Reverted(result.0.data)) - }, - Ok(result) => - Ok(EthContractResult { fee, gas_required, storage_deposit, result: result.0.data }), + Ok(result) => Ok(result.0), } } - /// Dry run a transaction and returns the gas estimate for the transaction. - pub async fn estimate_gas( - &self, - tx: &GenericTransaction, - block: BlockNumberOrTagOrHash, - ) -> Result { - let dry_run = self.dry_run(tx, block).await?; - Ok(U256::from(dry_run.fee / GAS_PRICE as u128) + GAS_PRICE) - } - /// Get the nonce of the given address. 
pub async fn nonce( &self, diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs index 6a324e63a857..ccd8bb043e90 100644 --- a/substrate/frame/revive/rpc/src/lib.rs +++ b/substrate/frame/revive/rpc/src/lib.rs @@ -23,7 +23,7 @@ use jsonrpsee::{ core::{async_trait, RpcResult}, types::{ErrorCode, ErrorObjectOwned}, }; -use pallet_revive::{evm::*, EthContractResult}; +use pallet_revive::evm::*; use sp_core::{keccak_256, H160, H256, U256}; use thiserror::Error; @@ -128,10 +128,22 @@ impl EthRpcServer for EthRpcServerImpl { async fn estimate_gas( &self, transaction: GenericTransaction, - _block: Option, + block: Option, ) -> RpcResult { - let result = self.client.estimate_gas(&transaction, BlockTag::Latest.into()).await?; - Ok(result) + let dry_run = self.client.dry_run(transaction, block.unwrap_or_default().into()).await?; + Ok(dry_run.eth_gas) + } + + async fn call( + &self, + transaction: GenericTransaction, + block: Option, + ) -> RpcResult { + let dry_run = self + .client + .dry_run(transaction, block.unwrap_or_else(|| BlockTag::Latest.into())) + .await?; + Ok(dry_run.data.into()) } async fn send_raw_transaction(&self, transaction: Bytes) -> RpcResult { @@ -150,15 +162,17 @@ impl EthRpcServer for EthRpcServerImpl { let tx = GenericTransaction::from_signed(tx, Some(eth_addr)); // Dry run the transaction to get the weight limit and storage deposit limit - let dry_run = self.client.dry_run(&tx, BlockTag::Latest.into()).await?; + let dry_run = self.client.dry_run(tx, BlockTag::Latest.into()).await?; - let EthContractResult { gas_required, storage_deposit, .. } = dry_run; let call = subxt_client::tx().revive().eth_transact( transaction.0, - gas_required.into(), - storage_deposit, + dry_run.gas_required.into(), + dry_run.storage_deposit, ); - self.client.submit(call).await?; + self.client.submit(call).await.map_err(|err| { + log::debug!(target: LOG_TARGET, "submit call failed: {err:?}"); + err + })?; log::debug!(target: LOG_TARGET, "send_raw_transaction hash: {hash:?}"); Ok(hash) } @@ -234,18 +248,6 @@ impl EthRpcServer for EthRpcServerImpl { Ok(self.accounts.iter().map(|account| account.address()).collect()) } - async fn call( - &self, - transaction: GenericTransaction, - block: Option, - ) -> RpcResult { - let dry_run = self - .client - .dry_run(&transaction, block.unwrap_or_else(|| BlockTag::Latest.into())) - .await?; - Ok(dry_run.result.into()) - } - async fn get_block_by_number( &self, block: BlockNumberOrTag, diff --git a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs index 339080368969..ad34dbfdfb49 100644 --- a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs +++ b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs @@ -14,6 +14,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. + //! Generated JSON-RPC methods. 
#![allow(missing_docs)] diff --git a/substrate/frame/revive/rpc/src/subxt_client.rs b/substrate/frame/revive/rpc/src/subxt_client.rs index a232b231bc7c..1e1c395028a4 100644 --- a/substrate/frame/revive/rpc/src/subxt_client.rs +++ b/substrate/frame/revive/rpc/src/subxt_client.rs @@ -27,8 +27,16 @@ use subxt::config::{signed_extensions, Config, PolkadotConfig}; with = "::subxt::utils::Static<::sp_core::U256>" ), substitute_type( - path = "pallet_revive::primitives::EthContractResult", - with = "::subxt::utils::Static<::pallet_revive::EthContractResult>" + path = "pallet_revive::evm::api::rpc_types_gen::GenericTransaction", + with = "::subxt::utils::Static<::pallet_revive::evm::GenericTransaction>" + ), + substitute_type( + path = "pallet_revive::primitives::EthTransactInfo", + with = "::subxt::utils::Static<::pallet_revive::EthTransactInfo>" + ), + substitute_type( + path = "pallet_revive::primitives::EthTransactError", + with = "::subxt::utils::Static<::pallet_revive::EthTransactError>" ), substitute_type( path = "pallet_revive::primitives::ExecReturnValue", diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs index 920318b26f71..7f2d4e683c31 100644 --- a/substrate/frame/revive/rpc/src/tests.rs +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -238,7 +238,8 @@ async fn revert_call() -> anyhow::Result<()> { .unwrap_err(); let call_err = unwrap_call_err!(err.source().unwrap()); - assert_eq!(call_err.message(), "Execution reverted: revert message"); + assert_eq!(call_err.message(), "execution reverted: revert message"); + assert_eq!(call_err.code(), 3); Ok(()) } diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs index 9c4d817a07de..b73815bfb9ea 100644 --- a/substrate/frame/revive/src/benchmarking/mod.rs +++ b/substrate/frame/revive/src/benchmarking/mod.rs @@ -103,7 +103,7 @@ where origin, 0u32.into(), Weight::MAX, - default_deposit_limit::(), + DepositLimit::Balance(default_deposit_limit::()), Code::Upload(module.code), data, salt, diff --git a/substrate/frame/revive/src/evm/api/rlp_codec.rs b/substrate/frame/revive/src/evm/api/rlp_codec.rs index 3442ed73acca..9b61cd042ec5 100644 --- a/substrate/frame/revive/src/evm/api/rlp_codec.rs +++ b/substrate/frame/revive/src/evm/api/rlp_codec.rs @@ -88,14 +88,14 @@ impl TransactionSigned { } } -impl TransactionLegacyUnsigned { - /// Get the rlp encoded bytes of a signed transaction with a dummy 65 bytes signature. +impl TransactionUnsigned { + /// Get a signed transaction payload with a dummy 65 bytes signature. 
pub fn dummy_signed_payload(&self) -> Vec { - let mut s = rlp::RlpStream::new(); - s.append(self); const DUMMY_SIGNATURE: [u8; 65] = [0u8; 65]; - s.append_raw(&DUMMY_SIGNATURE.as_ref(), 1); - s.out().to_vec() + self.unsigned_payload() + .into_iter() + .chain(DUMMY_SIGNATURE.iter().copied()) + .collect::>() } } @@ -567,7 +567,7 @@ mod test { #[test] fn dummy_signed_payload_works() { - let tx = TransactionLegacyUnsigned { + let tx: TransactionUnsigned = TransactionLegacyUnsigned { chain_id: Some(596.into()), gas: U256::from(21000), nonce: U256::from(1), @@ -576,10 +576,10 @@ mod test { value: U256::from(123123), input: Bytes(vec![]), r#type: TypeLegacy, - }; + } + .into(); let dummy_signed_payload = tx.dummy_signed_payload(); - let tx: TransactionUnsigned = tx.into(); let payload = Account::default().sign_transaction(tx).signed_payload(); assert_eq!(dummy_signed_payload.len(), payload.len()); } diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs index 1cf8d984b68b..ed046cb4da44 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types.rs @@ -19,6 +19,27 @@ use super::*; use alloc::vec::Vec; use sp_core::{H160, U256}; +impl From for BlockNumberOrTagOrHash { + fn from(b: BlockNumberOrTag) -> Self { + match b { + BlockNumberOrTag::U256(n) => BlockNumberOrTagOrHash::U256(n), + BlockNumberOrTag::BlockTag(t) => BlockNumberOrTagOrHash::BlockTag(t), + } + } +} + +impl From for TransactionUnsigned { + fn from(tx: TransactionSigned) -> Self { + use TransactionSigned::*; + match tx { + Transaction4844Signed(tx) => tx.transaction_4844_unsigned.into(), + Transaction1559Signed(tx) => tx.transaction_1559_unsigned.into(), + Transaction2930Signed(tx) => tx.transaction_2930_unsigned.into(), + TransactionLegacySigned(tx) => tx.transaction_legacy_unsigned.into(), + } + } +} + impl TransactionInfo { /// Create a new [`TransactionInfo`] from a receipt and a signed transaction. pub fn new(receipt: ReceiptInfo, transaction_signed: TransactionSigned) -> Self { @@ -143,76 +164,69 @@ fn logs_bloom_works() { impl GenericTransaction { /// Create a new [`GenericTransaction`] from a signed transaction. pub fn from_signed(tx: TransactionSigned, from: Option) -> Self { - use TransactionSigned::*; + Self::from_unsigned(tx.into(), from) + } + + /// Create a new [`GenericTransaction`] from a unsigned transaction. 
+ pub fn from_unsigned(tx: TransactionUnsigned, from: Option) -> Self { + use TransactionUnsigned::*; match tx { - TransactionLegacySigned(tx) => { - let tx = tx.transaction_legacy_unsigned; - GenericTransaction { - from, - r#type: Some(tx.r#type.as_byte()), - chain_id: tx.chain_id, - input: Some(tx.input), - nonce: Some(tx.nonce), - value: Some(tx.value), - to: tx.to, - gas: Some(tx.gas), - gas_price: Some(tx.gas_price), - ..Default::default() - } + TransactionLegacyUnsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: tx.chain_id, + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + ..Default::default() }, - Transaction4844Signed(tx) => { - let tx = tx.transaction_4844_unsigned; - GenericTransaction { - from, - r#type: Some(tx.r#type.as_byte()), - chain_id: Some(tx.chain_id), - input: Some(tx.input), - nonce: Some(tx.nonce), - value: Some(tx.value), - to: Some(tx.to), - gas: Some(tx.gas), - gas_price: Some(tx.max_fee_per_blob_gas), - access_list: Some(tx.access_list), - blob_versioned_hashes: Some(tx.blob_versioned_hashes), - max_fee_per_blob_gas: Some(tx.max_fee_per_blob_gas), - max_fee_per_gas: Some(tx.max_fee_per_gas), - max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), - ..Default::default() - } + Transaction4844Unsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: Some(tx.to), + gas: Some(tx.gas), + gas_price: Some(tx.max_fee_per_blob_gas), + access_list: Some(tx.access_list), + blob_versioned_hashes: tx.blob_versioned_hashes, + max_fee_per_blob_gas: Some(tx.max_fee_per_blob_gas), + max_fee_per_gas: Some(tx.max_fee_per_gas), + max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), + ..Default::default() }, - Transaction1559Signed(tx) => { - let tx = tx.transaction_1559_unsigned; - GenericTransaction { - from, - r#type: Some(tx.r#type.as_byte()), - chain_id: Some(tx.chain_id), - input: Some(tx.input), - nonce: Some(tx.nonce), - value: Some(tx.value), - to: tx.to, - gas: Some(tx.gas), - gas_price: Some(tx.gas_price), - access_list: Some(tx.access_list), - max_fee_per_gas: Some(tx.max_fee_per_gas), - max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), - ..Default::default() - } + Transaction1559Unsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + access_list: Some(tx.access_list), + max_fee_per_gas: Some(tx.max_fee_per_gas), + max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), + ..Default::default() }, - Transaction2930Signed(tx) => { - let tx = tx.transaction_2930_unsigned; - GenericTransaction { - from, - r#type: Some(tx.r#type.as_byte()), - chain_id: Some(tx.chain_id), - input: Some(tx.input), - nonce: Some(tx.nonce), - value: Some(tx.value), - to: tx.to, - gas: Some(tx.gas), - gas_price: Some(tx.gas_price), - access_list: Some(tx.access_list), - ..Default::default() - } + Transaction2930Unsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + access_list: Some(tx.access_list), + 
..Default::default() }, } } @@ -269,7 +283,7 @@ impl GenericTransaction { max_fee_per_blob_gas: self.max_fee_per_blob_gas.unwrap_or_default(), max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(), access_list: self.access_list.unwrap_or_default(), - blob_versioned_hashes: self.blob_versioned_hashes.unwrap_or_default(), + blob_versioned_hashes: self.blob_versioned_hashes, } .into()), _ => Err(()), diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs index 5037ec05d881..1d65fdefdde6 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs @@ -94,8 +94,8 @@ pub struct Block { /// Uncles pub uncles: Vec, /// Withdrawals - #[serde(skip_serializing_if = "Option::is_none")] - pub withdrawals: Option>, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub withdrawals: Vec, /// Withdrawals root #[serde(rename = "withdrawalsRoot", skip_serializing_if = "Option::is_none")] pub withdrawals_root: Option, @@ -114,7 +114,7 @@ pub enum BlockNumberOrTag { } impl Default for BlockNumberOrTag { fn default() -> Self { - BlockNumberOrTag::U256(Default::default()) + BlockNumberOrTag::BlockTag(Default::default()) } } @@ -133,7 +133,7 @@ pub enum BlockNumberOrTagOrHash { } impl Default for BlockNumberOrTagOrHash { fn default() -> Self { - BlockNumberOrTagOrHash::U256(Default::default()) + BlockNumberOrTagOrHash::BlockTag(Default::default()) } } @@ -148,12 +148,12 @@ pub struct GenericTransaction { pub access_list: Option, /// blobVersionedHashes /// List of versioned blob hashes associated with the transaction's EIP-4844 data blobs. - #[serde(rename = "blobVersionedHashes", skip_serializing_if = "Option::is_none")] - pub blob_versioned_hashes: Option>, + #[serde(rename = "blobVersionedHashes", default, skip_serializing_if = "Vec::is_empty")] + pub blob_versioned_hashes: Vec, /// blobs /// Raw blob data. - #[serde(skip_serializing_if = "Option::is_none")] - pub blobs: Option>, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub blobs: Vec, /// chainId /// Chain ID that this transaction is valid on. 
#[serde(rename = "chainId", skip_serializing_if = "Option::is_none")] @@ -319,7 +319,7 @@ pub enum TransactionUnsigned { } impl Default for TransactionUnsigned { fn default() -> Self { - TransactionUnsigned::Transaction4844Unsigned(Default::default()) + TransactionUnsigned::TransactionLegacyUnsigned(Default::default()) } } @@ -341,13 +341,13 @@ pub type AccessList = Vec; )] pub enum BlockTag { #[serde(rename = "earliest")] - #[default] Earliest, #[serde(rename = "finalized")] Finalized, #[serde(rename = "safe")] Safe, #[serde(rename = "latest")] + #[default] Latest, #[serde(rename = "pending")] Pending, @@ -392,7 +392,7 @@ pub struct Log { #[serde(skip_serializing_if = "Option::is_none")] pub removed: Option, /// topics - #[serde(skip_serializing_if = "Vec::is_empty")] + #[serde(default, skip_serializing_if = "Vec::is_empty")] pub topics: Vec, /// transaction hash #[serde(rename = "transactionHash")] @@ -574,7 +574,7 @@ pub enum TransactionSigned { } impl Default for TransactionSigned { fn default() -> Self { - TransactionSigned::Transaction4844Signed(Default::default()) + TransactionSigned::TransactionLegacySigned(Default::default()) } } diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs index b5dc9a36065b..24b75de83569 100644 --- a/substrate/frame/revive/src/evm/runtime.rs +++ b/substrate/frame/revive/src/evm/runtime.rs @@ -455,236 +455,265 @@ mod test { /// A builder for creating an unchecked extrinsic, and test that the check function works. #[derive(Clone)] struct UncheckedExtrinsicBuilder { - tx: TransactionLegacyUnsigned, + tx: GenericTransaction, gas_limit: Weight, storage_deposit_limit: BalanceOf, + before_validate: Option>, } impl UncheckedExtrinsicBuilder { /// Create a new builder with default values. fn new() -> Self { Self { - tx: TransactionLegacyUnsigned { + tx: GenericTransaction { + from: Some(Account::default().address()), chain_id: Some(::ChainId::get().into()), - gas_price: U256::from(GAS_PRICE), + gas_price: Some(U256::from(GAS_PRICE)), ..Default::default() }, gas_limit: Weight::zero(), storage_deposit_limit: 0, + before_validate: None, } } fn estimate_gas(&mut self) { - let dry_run = crate::Pallet::::bare_eth_transact( - Account::default().substrate_account(), - self.tx.to, - self.tx.value.try_into().unwrap(), - self.tx.input.clone().0, - Weight::MAX, - u64::MAX, - |call| { + let dry_run = + crate::Pallet::::bare_eth_transact(self.tx.clone(), Weight::MAX, |call| { let call = RuntimeCall::Contracts(call); let uxt: Ex = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into(); uxt.encoded_size() as u32 + }); + + match dry_run { + Ok(dry_run) => { + log::debug!(target: LOG_TARGET, "Estimated gas: {:?}", dry_run.eth_gas); + self.tx.gas = Some(dry_run.eth_gas); + }, + Err(err) => { + log::debug!(target: LOG_TARGET, "Failed to estimate gas: {:?}", err); }, - crate::DebugInfo::Skip, - crate::CollectEvents::Skip, - ); - self.tx.gas = ((dry_run.fee + GAS_PRICE as u64) / (GAS_PRICE as u64)).into(); + } } /// Create a new builder with a call to the given address. fn call_with(dest: H160) -> Self { let mut builder = Self::new(); builder.tx.to = Some(dest); - builder.estimate_gas(); + ExtBuilder::default().build().execute_with(|| builder.estimate_gas()); builder } /// Create a new builder with an instantiate call. 
fn instantiate_with(code: Vec, data: Vec) -> Self { let mut builder = Self::new(); - builder.tx.input = Bytes(code.into_iter().chain(data.into_iter()).collect()); - builder.estimate_gas(); + builder.tx.input = Some(Bytes(code.into_iter().chain(data.into_iter()).collect())); + ExtBuilder::default().build().execute_with(|| builder.estimate_gas()); builder } /// Update the transaction with the given function. - fn update(mut self, f: impl FnOnce(&mut TransactionLegacyUnsigned) -> ()) -> Self { + fn update(mut self, f: impl FnOnce(&mut GenericTransaction) -> ()) -> Self { f(&mut self.tx); self } + /// Set before_validate function. + fn before_validate(mut self, f: impl Fn() + Send + Sync + 'static) -> Self { + self.before_validate = Some(std::sync::Arc::new(f)); + self + } /// Call `check` on the unchecked extrinsic, and `pre_dispatch` on the signed extension. fn check(&self) -> Result<(RuntimeCall, SignedExtra), TransactionValidityError> { - let UncheckedExtrinsicBuilder { tx, gas_limit, storage_deposit_limit } = self.clone(); - - // Fund the account. - let account = Account::default(); - let _ = ::Currency::set_balance( - &account.substrate_account(), - 100_000_000_000_000, - ); - - let payload = account.sign_transaction(tx.into()).signed_payload(); - let call = RuntimeCall::Contracts(crate::Call::eth_transact { - payload, - gas_limit, - storage_deposit_limit, - }); - - let encoded_len = call.encoded_size(); - let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into(); - let result: CheckedExtrinsic<_, _, _> = uxt.check(&TestContext {})?; - let (account_id, extra): (AccountId32, SignedExtra) = match result.format { - ExtrinsicFormat::Signed(signer, extra) => (signer, extra), - _ => unreachable!(), - }; - - extra.clone().validate_and_prepare( - RuntimeOrigin::signed(account_id), - &result.function, - &result.function.get_dispatch_info(), - encoded_len, - 0, - )?; + ExtBuilder::default().build().execute_with(|| { + let UncheckedExtrinsicBuilder { + tx, + gas_limit, + storage_deposit_limit, + before_validate, + } = self.clone(); + + // Fund the account. 
+ let account = Account::default(); + let _ = ::Currency::set_balance( + &account.substrate_account(), + 100_000_000_000_000, + ); + + let payload = + account.sign_transaction(tx.try_into_unsigned().unwrap()).signed_payload(); + let call = RuntimeCall::Contracts(crate::Call::eth_transact { + payload, + gas_limit, + storage_deposit_limit, + }); + + let encoded_len = call.encoded_size(); + let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into(); + let result: CheckedExtrinsic<_, _, _> = uxt.check(&TestContext {})?; + let (account_id, extra): (AccountId32, SignedExtra) = match result.format { + ExtrinsicFormat::Signed(signer, extra) => (signer, extra), + _ => unreachable!(), + }; - Ok((result.function, extra)) + before_validate.map(|f| f()); + extra.clone().validate_and_prepare( + RuntimeOrigin::signed(account_id), + &result.function, + &result.function.get_dispatch_info(), + encoded_len, + 0, + )?; + + Ok((result.function, extra)) + }) } } #[test] fn check_eth_transact_call_works() { - ExtBuilder::default().build().execute_with(|| { - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); - assert_eq!( - builder.check().unwrap().0, - crate::Call::call:: { - dest: builder.tx.to.unwrap(), - value: builder.tx.value.as_u64(), - gas_limit: builder.gas_limit, - storage_deposit_limit: builder.storage_deposit_limit, - data: builder.tx.input.0 - } - .into() - ); - }); + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); + assert_eq!( + builder.check().unwrap().0, + crate::Call::call:: { + dest: builder.tx.to.unwrap(), + value: builder.tx.value.unwrap_or_default().as_u64(), + gas_limit: builder.gas_limit, + storage_deposit_limit: builder.storage_deposit_limit, + data: builder.tx.input.unwrap_or_default().0 + } + .into() + ); } #[test] fn check_eth_transact_instantiate_works() { - ExtBuilder::default().build().execute_with(|| { - let (code, _) = compile_module("dummy").unwrap(); - let data = vec![]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); - - assert_eq!( - builder.check().unwrap().0, - crate::Call::instantiate_with_code:: { - value: builder.tx.value.as_u64(), - gas_limit: builder.gas_limit, - storage_deposit_limit: builder.storage_deposit_limit, - code, - data, - salt: None - } - .into() - ); - }); + let (code, _) = compile_module("dummy").unwrap(); + let data = vec![]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + + assert_eq!( + builder.check().unwrap().0, + crate::Call::instantiate_with_code:: { + value: builder.tx.value.unwrap_or_default().as_u64(), + gas_limit: builder.gas_limit, + storage_deposit_limit: builder.storage_deposit_limit, + code, + data, + salt: None + } + .into() + ); } #[test] fn check_eth_transact_nonce_works() { - ExtBuilder::default().build().execute_with(|| { - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) - .update(|tx| tx.nonce = 1u32.into()); - - assert_eq!( - builder.check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Future)) - ); - - >::inc_account_nonce(Account::default().substrate_account()); - - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); - assert_eq!( - builder.check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) - ); - }); + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .update(|tx| tx.nonce = Some(1u32.into())); + + assert_eq!( + builder.check(), + 
Err(TransactionValidityError::Invalid(InvalidTransaction::Future)) + ); + + let builder = + UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).before_validate(|| { + >::inc_account_nonce(Account::default().substrate_account()); + }); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) + ); } #[test] fn check_eth_transact_chain_id_works() { - ExtBuilder::default().build().execute_with(|| { - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) - .update(|tx| tx.chain_id = Some(42.into())); - - assert_eq!( - builder.check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - ); - }); + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .update(|tx| tx.chain_id = Some(42.into())); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); } #[test] fn check_instantiate_data() { - ExtBuilder::default().build().execute_with(|| { - let code = b"invalid code".to_vec(); - let data = vec![1]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); - - // Fail because the tx input fail to get the blob length - assert_eq!( - builder.clone().update(|tx| tx.input = Bytes(vec![1, 2, 3])).check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - ); - }); + let code = b"invalid code".to_vec(); + let data = vec![1]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + + // Fail because the tx input fail to get the blob length + assert_eq!( + builder.clone().update(|tx| tx.input = Some(Bytes(vec![1, 2, 3]))).check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); } #[test] fn check_transaction_fees() { - ExtBuilder::default().build().execute_with(|| { - let scenarios: [(_, Box, _); 5] = [ - ("Eth fees too low", Box::new(|tx| tx.gas_price /= 2), InvalidTransaction::Payment), - ("Gas fees too high", Box::new(|tx| tx.gas *= 2), InvalidTransaction::Call), - ("Gas fees too low", Box::new(|tx| tx.gas *= 2), InvalidTransaction::Call), - ( - "Diff > 10%", - Box::new(|tx| tx.gas = tx.gas * 111 / 100), - InvalidTransaction::Call, - ), - ( - "Diff < 10%", - Box::new(|tx| { - tx.gas_price *= 2; - tx.gas = tx.gas * 89 / 100 - }), - InvalidTransaction::Call, - ), - ]; - - for (msg, update_tx, err) in scenarios { - let builder = - UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx); - - assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg); - } - }); + let scenarios: [(_, Box, _); 5] = [ + ( + "Eth fees too low", + Box::new(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() / 2); + }), + InvalidTransaction::Payment, + ), + ( + "Gas fees too high", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 2); + }), + InvalidTransaction::Call, + ), + ( + "Gas fees too low", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 2); + }), + InvalidTransaction::Call, + ), + ( + "Diff > 10%", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 111 / 100); + }), + InvalidTransaction::Call, + ), + ( + "Diff < 10%", + Box::new(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() * 2); + tx.gas = Some(tx.gas.unwrap() * 89 / 100); + }), + InvalidTransaction::Call, + ), + ]; + + for (msg, update_tx, err) in scenarios { + let builder = + UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx); + + assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg); + } 
} #[test] fn check_transaction_tip() { - ExtBuilder::default().build().execute_with(|| { - let (code, _) = compile_module("dummy").unwrap(); - let data = vec![]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) - .update(|tx| tx.gas_price = tx.gas_price * 103 / 100); - - let tx = &builder.tx; - let expected_tip = tx.gas_price * tx.gas - U256::from(GAS_PRICE) * tx.gas; - let (_, extra) = builder.check().unwrap(); - assert_eq!(U256::from(extra.1.tip()), expected_tip); - }); + let (code, _) = compile_module("dummy").unwrap(); + let data = vec![]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) + .update(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() * 103 / 100); + log::debug!(target: LOG_TARGET, "Gas price: {:?}", tx.gas_price); + }); + + let tx = &builder.tx; + let expected_tip = + tx.gas_price.unwrap() * tx.gas.unwrap() - U256::from(GAS_PRICE) * tx.gas.unwrap(); + let (_, extra) = builder.check().unwrap(); + assert_eq!(U256::from(extra.1.tip()), expected_tip); } } diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs index 49c08166483e..b23d7e4e60ef 100644 --- a/substrate/frame/revive/src/exec.rs +++ b/substrate/frame/revive/src/exec.rs @@ -562,6 +562,9 @@ pub struct Stack<'a, T: Config, E> { debug_message: Option<&'a mut DebugBuffer>, /// Transient storage used to store data, which is kept for the duration of a transaction. transient_storage: TransientStorage, + /// Whether or not actual transfer of funds should be performed. + /// This is set to `true` exclusively when we simulate a call through eth_transact. + skip_transfer: bool, /// No executable is held by the struct but influences its behaviour. _phantom: PhantomData, } @@ -777,6 +780,7 @@ where storage_meter: &'a mut storage::meter::Meter, value: U256, input_data: Vec, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> ExecResult { let dest = T::AddressMapper::to_account_id(&dest); @@ -786,6 +790,7 @@ where gas_meter, storage_meter, value, + skip_transfer, debug_message, )? { stack.run(executable, input_data).map(|_| stack.first_frame.last_frame_output) @@ -812,6 +817,7 @@ where value: U256, input_data: Vec, salt: Option<&[u8; 32]>, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> Result<(H160, ExecReturnValue), ExecError> { let (mut stack, executable) = Self::new( @@ -825,6 +831,7 @@ where gas_meter, storage_meter, value, + skip_transfer, debug_message, )? .expect(FRAME_ALWAYS_EXISTS_ON_INSTANTIATE); @@ -853,6 +860,7 @@ where gas_meter, storage_meter, value.into(), + false, debug_message, ) .unwrap() @@ -869,6 +877,7 @@ where gas_meter: &'a mut GasMeter, storage_meter: &'a mut storage::meter::Meter, value: U256, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> Result, ExecError> { origin.ensure_mapped()?; @@ -896,6 +905,7 @@ where frames: Default::default(), debug_message, transient_storage: TransientStorage::new(limits::TRANSIENT_STORAGE_BYTES), + skip_transfer, _phantom: Default::default(), }; @@ -1073,6 +1083,7 @@ where &frame.account_id, frame.contract_info.get(&frame.account_id), executable.code_info(), + self.skip_transfer, )?; // Needs to be incremented before calling into the code so that it is visible // in case of recursion. 
@@ -2101,6 +2112,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, ), Ok(_) @@ -2193,6 +2205,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, ) .unwrap(); @@ -2233,6 +2246,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, )); @@ -2269,6 +2283,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ), ExecError { @@ -2286,6 +2301,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -2314,6 +2330,7 @@ mod tests { &mut storage_meter, 55u64.into(), vec![], + false, None, ) .unwrap(); @@ -2363,6 +2380,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2392,6 +2410,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2421,6 +2440,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![1, 2, 3, 4], + false, None, ); assert_matches!(result, Ok(_)); @@ -2457,6 +2477,7 @@ mod tests { min_balance.into(), vec![1, 2, 3, 4], Some(&[0; 32]), + false, None, ); assert_matches!(result, Ok(_)); @@ -2511,6 +2532,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, ); @@ -2575,6 +2597,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2640,6 +2663,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2672,6 +2696,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); assert_matches!(result, Ok(_)); @@ -2709,6 +2734,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2735,6 +2761,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2779,6 +2806,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2805,6 +2833,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2831,6 +2860,7 @@ mod tests { &mut storage_meter, 1u64.into(), vec![0], + false, None, ); assert_matches!(result, Err(_)); @@ -2875,6 +2905,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2920,6 +2951,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2946,6 +2978,7 @@ mod tests { U256::zero(), // <- zero value vec![], Some(&[0; 32]), + false, None, ), Err(_) @@ -2981,6 +3014,7 @@ mod tests { min_balance.into(), vec![], Some(&[0 ;32]), + false, None, ), Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address @@ -3032,10 +3066,10 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - min_balance.into(), vec![], Some(&[0; 32]), + false, None, ), Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address @@ -3100,6 +3134,7 @@ mod tests { &mut storage_meter, (min_balance * 10).into(), vec![], + false, None, ), Ok(_) @@ -3180,6 +3215,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ), Ok(_) @@ -3223,6 +3259,7 @@ mod tests { 100u64.into(), vec![], Some(&[0; 32]), + false, None, ), Err(Error::::TerminatedInConstructor.into()) @@ -3287,6 +3324,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -3349,6 +3387,7 @@ mod tests { 10u64.into(), vec![], Some(&[0; 32]), + false, None, ); assert_matches!(result, Ok(_)); @@ -3395,6 +3434,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap(); @@ 
-3426,6 +3466,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, Some(&mut debug_buffer), ) .unwrap(); @@ -3459,6 +3500,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, Some(&mut debug_buffer), ); assert!(result.is_err()); @@ -3492,6 +3534,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, Some(&mut debug_buf_after), ) .unwrap(); @@ -3525,6 +3568,7 @@ mod tests { &mut storage_meter, U256::zero(), CHARLIE_ADDR.as_bytes().to_vec(), + false, None, )); @@ -3537,6 +3581,7 @@ mod tests { &mut storage_meter, U256::zero(), BOB_ADDR.as_bytes().to_vec(), + false, None, ) .map_err(|e| e.error), @@ -3587,6 +3632,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ) .map_err(|e| e.error), @@ -3621,6 +3667,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap(); @@ -3705,6 +3752,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap(); @@ -3831,6 +3879,7 @@ mod tests { (min_balance * 100).into(), vec![], Some(&[0; 32]), + false, None, ) .ok(); @@ -3844,6 +3893,7 @@ mod tests { (min_balance * 100).into(), vec![], Some(&[0; 32]), + false, None, )); assert_eq!(System::account_nonce(&ALICE), 1); @@ -3856,6 +3906,7 @@ mod tests { (min_balance * 200).into(), vec![], Some(&[0; 32]), + false, None, )); assert_eq!(System::account_nonce(&ALICE), 2); @@ -3868,6 +3919,7 @@ mod tests { (min_balance * 200).into(), vec![], Some(&[0; 32]), + false, None, )); assert_eq!(System::account_nonce(&ALICE), 3); @@ -3936,6 +3988,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4047,6 +4100,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4086,6 +4140,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4125,6 +4180,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4178,6 +4234,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4234,6 +4291,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4309,6 +4367,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4379,6 +4438,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -4417,6 +4477,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4479,6 +4540,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -4512,6 +4574,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); assert_matches!(result, Ok(_)); @@ -4595,6 +4658,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4663,6 +4727,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -4734,6 +4799,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); assert_matches!(result, Ok(_)); @@ -4785,6 +4851,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4854,6 +4921,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4900,6 +4968,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4944,6 +5013,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4999,6 +5069,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + 
false, None, ), Ok(_) diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index b55854e2eec5..1dee1da03bc4 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -41,13 +41,13 @@ pub mod test_utils; pub mod weights; use crate::{ - evm::{runtime::GAS_PRICE, TransactionLegacyUnsigned}, + evm::{runtime::GAS_PRICE, GenericTransaction}, exec::{AccountIdOf, ExecError, Executable, Ext, Key, Origin, Stack as ExecStack}, gas::GasMeter, storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager}, wasm::{CodeInfo, RuntimeCosts, WasmBlob}, }; -use alloc::boxed::Box; +use alloc::{boxed::Box, format, vec}; use codec::{Codec, Decode, Encode}; use environmental::*; use frame_support::{ @@ -74,7 +74,7 @@ use pallet_transaction_payment::OnChargeTransaction; use scale_info::TypeInfo; use sp_core::{H160, H256, U256}; use sp_runtime::{ - traits::{BadOrigin, Convert, Dispatchable, Saturating, Zero}, + traits::{BadOrigin, Bounded, Convert, Dispatchable, Saturating, Zero}, DispatchError, }; @@ -823,7 +823,7 @@ pub mod pallet { dest, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), data, DebugInfo::Skip, CollectEvents::Skip, @@ -859,7 +859,7 @@ pub mod pallet { origin, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), Code::Existing(code_hash), data, salt, @@ -925,7 +925,7 @@ pub mod pallet { origin, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), Code::Upload(code), data, salt, @@ -1083,7 +1083,7 @@ fn dispatch_result( impl Pallet where - BalanceOf: Into + TryFrom, + BalanceOf: Into + TryFrom + Bounded, MomentOf: Into, T::Hash: frame_support::traits::IsType, { @@ -1098,7 +1098,7 @@ where dest: H160, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, data: Vec, debug: DebugInfo, collect_events: CollectEvents, @@ -1112,7 +1112,10 @@ where }; let try_call = || { let origin = Origin::from_runtime_origin(origin)?; - let mut storage_meter = StorageMeter::new(&origin, storage_deposit_limit, value)?; + let mut storage_meter = match storage_deposit_limit { + DepositLimit::Balance(limit) => StorageMeter::new(&origin, limit, value)?, + DepositLimit::Unchecked => StorageMeter::new_unchecked(BalanceOf::::max_value()), + }; let result = ExecStack::>::run_call( origin.clone(), dest, @@ -1120,9 +1123,14 @@ where &mut storage_meter, Self::convert_native_to_evm(value), data, + storage_deposit_limit.is_unchecked(), debug_message.as_mut(), )?; - storage_deposit = storage_meter.try_into_deposit(&origin)?; + storage_deposit = storage_meter + .try_into_deposit(&origin, storage_deposit_limit.is_unchecked()) + .inspect_err(|err| { + log::error!(target: LOG_TARGET, "Failed to transfer deposit: {err:?}"); + })?; Ok(result) }; let result = Self::run_guarded(try_call); @@ -1151,7 +1159,7 @@ where origin: OriginFor, value: BalanceOf, gas_limit: Weight, - mut storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, code: Code, data: Vec, salt: Option<[u8; 32]>, @@ -1162,13 +1170,24 @@ where let mut storage_deposit = Default::default(); let mut debug_message = if debug == DebugInfo::UnsafeDebug { Some(DebugBuffer::default()) } else { None }; + + let unchecked_deposit_limit = storage_deposit_limit.is_unchecked(); + let mut storage_deposit_limit = match storage_deposit_limit { + DepositLimit::Balance(limit) => limit, + DepositLimit::Unchecked => 
BalanceOf::::max_value(), + }; + let try_instantiate = || { let instantiate_account = T::InstantiateOrigin::ensure_origin(origin.clone())?; let (executable, upload_deposit) = match code { Code::Upload(code) => { let upload_account = T::UploadOrigin::ensure_origin(origin)?; - let (executable, upload_deposit) = - Self::try_upload_code(upload_account, code, storage_deposit_limit)?; + let (executable, upload_deposit) = Self::try_upload_code( + upload_account, + code, + storage_deposit_limit, + unchecked_deposit_limit, + )?; storage_deposit_limit.saturating_reduce(upload_deposit); (executable, upload_deposit) }, @@ -1176,8 +1195,12 @@ where (WasmBlob::from_storage(code_hash, &mut gas_meter)?, Default::default()), }; let instantiate_origin = Origin::from_account_id(instantiate_account.clone()); - let mut storage_meter = - StorageMeter::new(&instantiate_origin, storage_deposit_limit, value)?; + let mut storage_meter = if unchecked_deposit_limit { + StorageMeter::new_unchecked(storage_deposit_limit) + } else { + StorageMeter::new(&instantiate_origin, storage_deposit_limit, value)? + }; + let result = ExecStack::>::run_instantiate( instantiate_account, executable, @@ -1186,10 +1209,11 @@ where Self::convert_native_to_evm(value), data, salt.as_ref(), + unchecked_deposit_limit, debug_message.as_mut(), ); storage_deposit = storage_meter - .try_into_deposit(&instantiate_origin)? + .try_into_deposit(&instantiate_origin, unchecked_deposit_limit)? .saturating_add(&StorageDeposit::Charge(upload_deposit)); result }; @@ -1215,28 +1239,15 @@ where /// /// # Parameters /// - /// - `origin`: The origin of the call. - /// - `dest`: The destination address of the call. - /// - `value`: The EVM value to transfer. - /// - `input`: The input data. + /// - `tx`: The Ethereum transaction to simulate. /// - `gas_limit`: The gas limit enforced during contract execution. - /// - `storage_deposit_limit`: The maximum balance that can be charged to the caller for storage - /// usage. /// - `utx_encoded_size`: A function that takes a call and returns the encoded size of the /// unchecked extrinsic. - /// - `debug`: Debugging configuration. - /// - `collect_events`: Event collection configuration. pub fn bare_eth_transact( - origin: T::AccountId, - dest: Option, - value: U256, - input: Vec, + mut tx: GenericTransaction, gas_limit: Weight, - storage_deposit_limit: BalanceOf, utx_encoded_size: impl Fn(Call) -> u32, - debug: DebugInfo, - collect_events: CollectEvents, - ) -> EthContractResult> + ) -> Result>, EthTransactError> where T: pallet_transaction_payment::Config, ::RuntimeCall: @@ -1247,26 +1258,58 @@ where T::Nonce: Into, T::Hash: frame_support::traits::IsType, { - log::debug!(target: LOG_TARGET, "bare_eth_transact: dest: {dest:?} value: {value:?} - gas_limit: {gas_limit:?} storage_deposit_limit: {storage_deposit_limit:?}"); + log::debug!(target: LOG_TARGET, "bare_eth_transact: tx: {tx:?} gas_limit: {gas_limit:?}"); + + let from = tx.from.unwrap_or_default(); + let origin = T::AddressMapper::to_account_id(&from); - // Get the nonce to encode in the tx. - let nonce: T::Nonce = >::account_nonce(&origin); + let storage_deposit_limit = if tx.gas.is_some() { + DepositLimit::Balance(BalanceOf::::max_value()) + } else { + DepositLimit::Unchecked + }; + + // TODO remove once we have revisited how we encode the gas limit. 
+ if tx.nonce.is_none() { + tx.nonce = Some(>::account_nonce(&origin).into()); + } + if tx.gas_price.is_none() { + tx.gas_price = Some(GAS_PRICE.into()); + } + if tx.chain_id.is_none() { + tx.chain_id = Some(T::ChainId::get().into()); + } // Convert the value to the native balance type. - let native_value = match Self::convert_evm_to_native(value) { + let evm_value = tx.value.unwrap_or_default(); + let native_value = match Self::convert_evm_to_native(evm_value) { Ok(v) => v, - Err(err) => - return EthContractResult { - gas_required: Default::default(), - storage_deposit: Default::default(), - fee: Default::default(), - result: Err(err.into()), - }, + Err(_) => return Err(EthTransactError::Message("Failed to convert value".into())), + }; + + let input = tx.input.clone().unwrap_or_default().0; + let debug = DebugInfo::Skip; + let collect_events = CollectEvents::Skip; + + let extract_error = |err| { + if err == Error::::TransferFailed.into() || + err == Error::::StorageDepositNotEnoughFunds.into() || + err == Error::::StorageDepositLimitExhausted.into() + { + let balance = Self::evm_balance(&from); + return Err(EthTransactError::Message( + format!("insufficient funds for gas * price + value: address {from:?} have {balance} (supplied gas {})", + tx.gas.unwrap_or_default())) + ); + } + + return Err(EthTransactError::Message(format!( + "Failed to instantiate contract: {err:?}" + ))); }; // Dry run the call - let (mut result, dispatch_info) = match dest { + let (mut result, dispatch_info) = match tx.to { // A contract call. Some(dest) => { // Dry run the call. @@ -1281,11 +1324,24 @@ where collect_events, ); - let result = EthContractResult { + let data = match result.result { + Ok(return_value) => { + if return_value.did_revert() { + return Err(EthTransactError::Data(return_value.data)); + } + return_value.data + }, + Err(err) => { + log::debug!(target: LOG_TARGET, "Failed to execute call: {err:?}"); + return extract_error(err) + }, + }; + + let result = EthTransactInfo { gas_required: result.gas_required, storage_deposit: result.storage_deposit.charge_or_zero(), - result: result.result, - fee: Default::default(), + data, + eth_gas: Default::default(), }; // Get the dispatch info of the call. let dispatch_call: ::RuntimeCall = crate::Call::::call { @@ -1326,11 +1382,24 @@ where collect_events, ); - let result = EthContractResult { + let returned_data = match result.result { + Ok(return_value) => { + if return_value.result.did_revert() { + return Err(EthTransactError::Data(return_value.result.data)); + } + return_value.result.data + }, + Err(err) => { + log::debug!(target: LOG_TARGET, "Failed to instantiate: {err:?}"); + return extract_error(err) + }, + }; + + let result = EthTransactInfo { gas_required: result.gas_required, storage_deposit: result.storage_deposit.charge_or_zero(), - result: result.result.map(|v| v.result), - fee: Default::default(), + data: returned_data, + eth_gas: Default::default(), }; // Get the dispatch info of the call. @@ -1348,23 +1417,18 @@ where }, }; - let mut tx = TransactionLegacyUnsigned { - value, - input: input.into(), - nonce: nonce.into(), - chain_id: Some(T::ChainId::get().into()), - gas_price: GAS_PRICE.into(), - to: dest, - ..Default::default() - }; - // The transaction fees depend on the extrinsic's length, which in turn is influenced by // the encoded length of the gas limit specified in the transaction (tx.gas). // We iteratively compute the fee by adjusting tx.gas until the fee stabilizes. // with a maximum of 3 iterations to avoid an infinite loop. 
for _ in 0..3 { + let Ok(unsigned_tx) = tx.clone().try_into_unsigned() else { + log::debug!(target: LOG_TARGET, "Failed to convert to unsigned"); + return Err(EthTransactError::Message("Invalid transaction".into())); + }; + let eth_dispatch_call = crate::Call::::eth_transact { - payload: tx.dummy_signed_payload(), + payload: unsigned_tx.dummy_signed_payload(), gas_limit: result.gas_required, storage_deposit_limit: result.storage_deposit, }; @@ -1375,17 +1439,18 @@ where 0u32.into(), ) .into(); + let eth_gas: U256 = (fee / GAS_PRICE.into()).into(); - if fee == result.fee { - log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} fee: {fee:?}"); + if eth_gas == result.eth_gas { + log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} eth_gas: {eth_gas:?}"); break; } - result.fee = fee; - tx.gas = (fee / GAS_PRICE.into()).into(); - log::debug!(target: LOG_TARGET, "Adjusting Eth gas to: {:?}", tx.gas); + result.eth_gas = eth_gas; + tx.gas = Some(eth_gas.into()); + log::debug!(target: LOG_TARGET, "Adjusting Eth gas to: {eth_gas:?}"); } - result + Ok(result) } /// Get the balance with EVM decimals of the given `address`. @@ -1403,7 +1468,7 @@ where storage_deposit_limit: BalanceOf, ) -> CodeUploadResult> { let origin = T::UploadOrigin::ensure_origin(origin)?; - let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit)?; + let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit, false)?; Ok(CodeUploadReturnValue { code_hash: *module.code_hash(), deposit }) } @@ -1421,9 +1486,10 @@ where origin: T::AccountId, code: Vec, storage_deposit_limit: BalanceOf, + skip_transfer: bool, ) -> Result<(WasmBlob, BalanceOf), DispatchError> { let mut module = WasmBlob::from_code(code, origin)?; - let deposit = module.store_code()?; + let deposit = module.store_code(skip_transfer)?; ensure!(storage_deposit_limit >= deposit, >::StorageDepositLimitExhausted); Ok((module, deposit)) } @@ -1527,14 +1593,7 @@ sp_api::decl_runtime_apis! { /// Perform an Ethereum call. /// /// See [`crate::Pallet::bare_eth_transact`] - fn eth_transact( - origin: H160, - dest: Option, - value: U256, - input: Vec, - gas_limit: Option, - storage_deposit_limit: Option, - ) -> EthContractResult; + fn eth_transact(tx: GenericTransaction) -> Result, EthTransactError>; /// Upload new code without instantiating a contract from it. /// diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs index 024b1f3448e1..a7127f812b4b 100644 --- a/substrate/frame/revive/src/primitives.rs +++ b/substrate/frame/revive/src/primitives.rs @@ -17,8 +17,8 @@ //! A crate that hosts a common definitions that are relevant for the pallet-revive. -use crate::H160; -use alloc::vec::Vec; +use crate::{H160, U256}; +use alloc::{string::String, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::weights::Weight; use pallet_revive_uapi::ReturnFlags; @@ -28,6 +28,30 @@ use sp_runtime::{ DispatchError, RuntimeDebug, }; +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum DepositLimit { + /// Allows bypassing all balance transfer checks. + Unchecked, + + /// Specifies a maximum allowable balance for a deposit. 
+ Balance(Balance), +} + +impl DepositLimit { + pub fn is_unchecked(&self) -> bool { + match self { + Self::Unchecked => true, + _ => false, + } + } +} + +impl From for DepositLimit { + fn from(value: T) -> Self { + Self::Balance(value) + } +} + /// Result type of a `bare_call` or `bare_instantiate` call as well as `ContractsApi::call` and /// `ContractsApi::instantiate`. /// @@ -84,15 +108,22 @@ pub struct ContractResult { /// The result of the execution of a `eth_transact` call. #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct EthContractResult> { - /// The fee charged for the execution. - pub fee: Balance, +pub struct EthTransactInfo { /// The amount of gas that was necessary to execute the transaction. pub gas_required: Weight, /// Storage deposit charged. pub storage_deposit: Balance, - /// The execution result. - pub result: R, + /// The weight and deposit equivalent in EVM Gas. + pub eth_gas: U256, + /// The execution return value. + pub data: Vec, +} + +/// Error type of a `eth_transact` call. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum EthTransactError { + Data(Vec), + Message(String), } /// Result type of a `bare_code_upload` call. diff --git a/substrate/frame/revive/src/storage/meter.rs b/substrate/frame/revive/src/storage/meter.rs index 712010bc8257..6eddf048be98 100644 --- a/substrate/frame/revive/src/storage/meter.rs +++ b/substrate/frame/revive/src/storage/meter.rs @@ -373,24 +373,36 @@ where } } + /// Create new storage meter without checking the limit. + pub fn new_unchecked(limit: BalanceOf) -> Self { + return Self { limit, ..Default::default() } + } + /// The total amount of deposit that should change hands as result of the execution /// that this meter was passed into. This will also perform all the charges accumulated /// in the whole contract stack. /// /// This drops the root meter in order to make sure it is only called when the whole /// execution did finish. - pub fn try_into_deposit(self, origin: &Origin) -> Result, DispatchError> { - // Only refund or charge deposit if the origin is not root. - let origin = match origin { - Origin::Root => return Ok(Deposit::Charge(Zero::zero())), - Origin::Signed(o) => o, - }; - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) { - E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; - } - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) { - E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + pub fn try_into_deposit( + self, + origin: &Origin, + skip_transfer: bool, + ) -> Result, DispatchError> { + if !skip_transfer { + // Only refund or charge deposit if the origin is not root. + let origin = match origin { + Origin::Root => return Ok(Deposit::Charge(Zero::zero())), + Origin::Signed(o) => o, + }; + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) { + E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + } + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) { + E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + } } + Ok(self.total_deposit) } } @@ -425,13 +437,18 @@ impl> RawMeter { contract: &T::AccountId, contract_info: &mut ContractInfo, code_info: &CodeInfo, + skip_transfer: bool, ) -> Result<(), DispatchError> { debug_assert!(matches!(self.contract_state(), ContractState::Alive)); // We need to make sure that the contract's account exists. 
let ed = Pallet::::min_balance(); self.total_deposit = Deposit::Charge(ed); - T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?; + if skip_transfer { + T::Currency::set_balance(contract, ed); + } else { + T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?; + } // A consumer is added at account creation and removed it on termination, otherwise the // runtime could remove the account. As long as a contract exists its account must exist. @@ -479,6 +496,7 @@ impl> RawMeter { } if let Deposit::Charge(amount) = total_deposit { if amount > self.limit { + log::debug!( target: LOG_TARGET, "Storage deposit limit exhausted: {:?} > {:?}", amount, self.limit); return Err(>::StorageDepositLimitExhausted.into()) } } @@ -811,7 +829,10 @@ mod tests { nested0.enforce_limit(Some(&mut nested0_info)).unwrap(); meter.absorb(nested0, &BOB, Some(&mut nested0_info)); - assert_eq!(meter.try_into_deposit(&test_case.origin).unwrap(), test_case.deposit); + assert_eq!( + meter.try_into_deposit(&test_case.origin, false).unwrap(), + test_case.deposit + ); assert_eq!(nested0_info.extra_deposit(), 112); assert_eq!(nested1_info.extra_deposit(), 110); @@ -882,7 +903,10 @@ mod tests { nested0.absorb(nested1, &CHARLIE, None); meter.absorb(nested0, &BOB, None); - assert_eq!(meter.try_into_deposit(&test_case.origin).unwrap(), test_case.deposit); + assert_eq!( + meter.try_into_deposit(&test_case.origin, false).unwrap(), + test_case.deposit + ); assert_eq!(TestExtTestValue::get(), test_case.expected) } } diff --git a/substrate/frame/revive/src/test_utils/builder.rs b/substrate/frame/revive/src/test_utils/builder.rs index e64f58894432..8ba5e7384070 100644 --- a/substrate/frame/revive/src/test_utils/builder.rs +++ b/substrate/frame/revive/src/test_utils/builder.rs @@ -18,7 +18,8 @@ use super::{deposit_limit, GAS_LIMIT}; use crate::{ address::AddressMapper, AccountIdOf, BalanceOf, Code, CollectEvents, Config, ContractResult, - DebugInfo, EventRecordOf, ExecReturnValue, InstantiateReturnValue, OriginFor, Pallet, Weight, + DebugInfo, DepositLimit, EventRecordOf, ExecReturnValue, InstantiateReturnValue, OriginFor, + Pallet, Weight, }; use frame_support::pallet_prelude::DispatchResultWithPostInfo; use paste::paste; @@ -133,7 +134,7 @@ builder!( origin: OriginFor, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, code: Code, data: Vec, salt: Option<[u8; 32]>, @@ -159,7 +160,7 @@ builder!( origin, value: 0u32.into(), gas_limit: GAS_LIMIT, - storage_deposit_limit: deposit_limit::(), + storage_deposit_limit: DepositLimit::Balance(deposit_limit::()), code, data: vec![], salt: Some([0; 32]), @@ -198,7 +199,7 @@ builder!( dest: H160, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, data: Vec, debug: DebugInfo, collect_events: CollectEvents, @@ -216,7 +217,7 @@ builder!( dest, value: 0u32.into(), gas_limit: GAS_LIMIT, - storage_deposit_limit: deposit_limit::(), + storage_deposit_limit: DepositLimit::Balance(deposit_limit::()), data: vec![], debug: DebugInfo::UnsafeDebug, collect_events: CollectEvents::Skip, diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index 34afe8aabfe6..1df300f031a7 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -1249,7 +1249,7 @@ fn transfer_expendable_cannot_kill_account() { test_utils::contract_info_storage_deposit(&addr) ); - // Some ot the total balance is held, so it can't 
be transferred. + // Some or the total balance is held, so it can't be transferred. assert_err!( <::Currency as Mutate>::transfer( &account, @@ -2290,7 +2290,7 @@ fn gas_estimation_for_subcalls() { // Make the same call using the estimated gas. Should succeed. let result = builder::bare_call(addr_caller) .gas_limit(result_orig.gas_required) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) .data(input.clone()) .build(); assert_ok!(&result.result); @@ -2298,7 +2298,7 @@ fn gas_estimation_for_subcalls() { // Check that it fails with too little ref_time let result = builder::bare_call(addr_caller) .gas_limit(result_orig.gas_required.sub_ref_time(1)) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) .data(input.clone()) .build(); assert_err!(result.result, error); @@ -2306,7 +2306,7 @@ fn gas_estimation_for_subcalls() { // Check that it fails with too little proof_size let result = builder::bare_call(addr_caller) .gas_limit(result_orig.gas_required.sub_proof_size(1)) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) .data(input.clone()) .build(); assert_err!(result.result, error); @@ -3592,7 +3592,7 @@ fn deposit_limit_in_nested_instantiate() { // Set enough deposit limit for the child instantiate. This should succeed. let result = builder::bare_call(addr_caller) .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(callee_info_len + 2 + ED + 4 + 2) + .storage_deposit_limit((callee_info_len + 2 + ED + 4 + 2).into()) .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 3 + 2)).encode()) .build(); @@ -3879,7 +3879,7 @@ fn locking_delegate_dependency_works() { // Locking a dependency with a storage limit too low should fail. assert_err!( builder::bare_call(addr_caller) - .storage_deposit_limit(dependency_deposit - 1) + .storage_deposit_limit((dependency_deposit - 1).into()) .data((1u32, hash2addr(&callee_hashes[0]), callee_hashes[0]).encode()) .build() .result, diff --git a/substrate/frame/revive/src/tests/test_debug.rs b/substrate/frame/revive/src/tests/test_debug.rs index 7c4fbba71f65..c9e19e52ace1 100644 --- a/substrate/frame/revive/src/tests/test_debug.rs +++ b/substrate/frame/revive/src/tests/test_debug.rs @@ -21,6 +21,7 @@ use crate::{ debug::{CallInterceptor, CallSpan, ExecResult, ExportedFunction, Tracing}, primitives::ExecReturnValue, test_utils::*, + DepositLimit, }; use frame_support::traits::Currency; use pretty_assertions::assert_eq; @@ -114,7 +115,7 @@ fn debugging_works() { RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, - deposit_limit::(), + DepositLimit::Balance(deposit_limit::()), Code::Upload(wasm), vec![], Some([0u8; 32]), @@ -198,7 +199,7 @@ fn call_interception_works() { RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, - deposit_limit::(), + deposit_limit::().into(), Code::Upload(wasm), vec![], // some salt to ensure that the address of this contract is unique among all tests diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index d87ec7112286..54fb02c866e1 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -183,7 +183,7 @@ where } /// Puts the module blob into storage, and returns the deposit collected for the storage. 
- pub fn store_code(&mut self) -> Result, Error> { + pub fn store_code(&mut self, skip_transfer: bool) -> Result, Error> { let code_hash = *self.code_hash(); >::mutate(code_hash, |stored_code_info| { match stored_code_info { @@ -195,15 +195,16 @@ where // the `owner` is always the origin of the current transaction. None => { let deposit = self.code_info.deposit; - T::Currency::hold( + + if !skip_transfer { + T::Currency::hold( &HoldReason::CodeUploadDepositReserve.into(), &self.code_info.owner, deposit, - ) - .map_err(|err| { - log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner); + ) .map_err(|err| { log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner); >::StorageDepositNotEnoughFunds })?; + } self.code_info.refcount = 0; >::insert(code_hash, &self.code); From c0921339f9d486981b3681760ee83ba9237f2eaa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 07:20:56 +0000 Subject: [PATCH 46/68] Bump the ci_dependencies group across 1 directory with 3 updates (#6516) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps the ci_dependencies group with 3 updates in the / directory: [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action), [actions/attest-build-provenance](https://github.com/actions/attest-build-provenance) and [codecov/codecov-action](https://github.com/codecov/codecov-action). Updates `lycheeverse/lychee-action` from 2.0.2 to 2.1.0
Updates `actions/attest-build-provenance` from 1.4.3 to 1.4.4
Updates `codecov/codecov-action` from 4 to 5
Release notes

Sourced from codecov/codecov-action's releases.

v5.0.0

v5 Release

v5 of the Codecov GitHub Action will use the Codecov Wrapper to encapsulate the CLI. This will help ensure that the Action gets updates quicker.

Migration Guide

The v5 release also coincides with the opt-out feature for tokens for public repositories. In the Global Upload Token section of the settings page of an organization in codecov.io, you can set the ability for Codecov to receive coverage reports from any source. This will allow contributors or other members of a repository to upload without needing access to the Codecov token. For more details see how to upload without a token.

[!WARNING]
The following arguments have been changed

  • file (this has been deprecated in favor of files)
  • plugin (this has been deprecated in favor of plugins)

The following arguments have been added:

  • binary
  • gcov_args
  • gcov_executable
  • gcov_ignore
  • gcov_include
  • report_type
  • skip_validation
  • swift_project

You can see their usage in the action.yml file.

What's Changed

... (truncated)
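For illustration only, a coverage upload step using the renamed v5 inputs could look roughly like the sketch below. This repository's workflow only sets `token` and `verbose` (see the diff further down); the `files` value here is a placeholder path, and the input names are assumed from codecov-action's documented action.yml:

    - name: Upload to Codecov
      uses: codecov/codecov-action@v5
      with:
        token: ${{ secrets.CODECOV_TOKEN }}
        # `files` replaces the deprecated `file` input; the path is a placeholder
        files: reports/coverage.xml
        verbose: true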

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/check-links.yml | 2 +- .github/workflows/release-reusable-rc-buid.yml | 6 +++--- .github/workflows/tests-linux-stable-coverage.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index dd9d3eaf824f..cea6b9a8636a 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -33,7 +33,7 @@ jobs: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.0 (22. Sep 2023) - name: Lychee link checker - uses: lycheeverse/lychee-action@7cd0af4c74a61395d455af97419279d86aafaede # for v1.9.1 (10. Jan 2024) + uses: lycheeverse/lychee-action@f81112d0d2814ded911bd23e3beaa9dda9093915 # for v1.9.1 (10. Jan 2024) with: args: >- --config .config/lychee.toml diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml index 7e31a4744b59..f5240878cba2 100644 --- a/.github/workflows/release-reusable-rc-buid.yml +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -104,7 +104,7 @@ jobs: ./.github/scripts/release/build-linux-release.sh ${{ matrix.binaries }} ${{ inputs.package }} - name: Generate artifact attestation - uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: /artifacts/${{ matrix.binaries }}/${{ matrix.binaries }} @@ -220,7 +220,7 @@ jobs: ./.github/scripts/release/build-macos-release.sh ${{ matrix.binaries }} ${{ inputs.package }} - name: Generate artifact attestation - uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: ${{ env.ARTIFACTS_PATH }}/${{ matrix.binaries }} @@ -278,7 +278,7 @@ jobs: . "${GITHUB_WORKSPACE}"/.github/scripts/release/build-deb.sh ${{ inputs.package }} ${VERSION} - name: Generate artifact attestation - uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: target/production/*.deb diff --git a/.github/workflows/tests-linux-stable-coverage.yml b/.github/workflows/tests-linux-stable-coverage.yml index c5af6bcae77f..61e01cda4428 100644 --- a/.github/workflows/tests-linux-stable-coverage.yml +++ b/.github/workflows/tests-linux-stable-coverage.yml @@ -102,7 +102,7 @@ jobs: merge-multiple: true - run: ls -al reports/ - name: Upload to Codecov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: token: ${{ secrets.CODECOV_TOKEN }} verbose: true From 0845044454c005b577eab7afaea18583bd7e3dd3 Mon Sep 17 00:00:00 2001 From: clangenb <37865735+clangenb@users.noreply.github.com> Date: Mon, 2 Dec 2024 17:07:21 +0100 Subject: [PATCH 47/68] migrate pallet-session-benchmarking to bench V2 syntax (#6294) Migrates pallet-session-benchmarking to bench V2 syntax. 
Part of: * #6202 --------- Co-authored-by: Shawn Tabrizi Co-authored-by: Giuseppe Re --- .../frame/session/benchmarking/src/inner.rs | 68 ++++++++++++------- .../frame/session/benchmarking/src/mock.rs | 6 +- 2 files changed, 48 insertions(+), 26 deletions(-) diff --git a/substrate/frame/session/benchmarking/src/inner.rs b/substrate/frame/session/benchmarking/src/inner.rs index 9ba47b34ed7a..9789b6bb593d 100644 --- a/substrate/frame/session/benchmarking/src/inner.rs +++ b/substrate/frame/session/benchmarking/src/inner.rs @@ -22,7 +22,7 @@ use alloc::{vec, vec::Vec}; use sp_runtime::traits::{One, StaticLookup, TrailingZeroInput}; use codec::Decode; -use frame_benchmarking::v1::benchmarks; +use frame_benchmarking::v2::*; use frame_support::traits::{Get, KeyOwnerProofSystem, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::{historical::Pallet as Historical, Pallet as Session, *}; @@ -45,8 +45,12 @@ impl OnInitialize> for Pallet { } } -benchmarks! { - set_keys { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn set_keys() -> Result<(), BenchmarkError> { let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, @@ -58,13 +62,19 @@ benchmarks! { let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; + let proof: Vec = vec![0, 1, 2, 3]; // Whitelist controller account from further DB operations. let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller), keys, proof) - purge_keys { + #[extrinsic_call] + _(RawOrigin::Signed(v_controller), keys, proof); + + Ok(()) + } + + #[benchmark] + fn purge_keys() -> Result<(), BenchmarkError> { let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, @@ -75,30 +85,33 @@ benchmarks! { )?; let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; + let proof: Vec = vec![0, 1, 2, 3]; Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; // Whitelist controller account from further DB operations. let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller)) - #[extra] - check_membership_proof_current_session { - let n in 2 .. MAX_VALIDATORS as u32; + #[extrinsic_call] + _(RawOrigin::Signed(v_controller)); + Ok(()) + } + + #[benchmark(extra)] + fn check_membership_proof_current_session(n: Linear<2, MAX_VALIDATORS>) { let (key, key_owner_proof1) = check_membership_proof_setup::(n); let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { + + #[block] + { + Historical::::check_proof(key, key_owner_proof1); + } + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } - #[extra] - check_membership_proof_historical_session { - let n in 2 .. 
MAX_VALIDATORS as u32; - + #[benchmark(extra)] + fn check_membership_proof_historical_session(n: Linear<2, MAX_VALIDATORS>) { let (key, key_owner_proof1) = check_membership_proof_setup::(n); // skip to the next session so that the session is historical @@ -106,14 +119,21 @@ benchmarks! { Session::::rotate_session(); let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { + + #[block] + { + Historical::::check_proof(key, key_owner_proof1); + } + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(), + crate::mock::Test, + extra = false + ); } /// Sets up the benchmark for checking a membership proof. It creates the given diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 2aec58cceded..346cd04c0fa9 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -27,7 +27,7 @@ use frame_support::{ derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; -use sp_runtime::{traits::IdentityLookup, BuildStorage}; +use sp_runtime::{traits::IdentityLookup, BuildStorage, KeyTypeId}; type AccountId = u64; type Nonce = u32; @@ -42,6 +42,7 @@ frame_support::construct_runtime!( Balances: pallet_balances, Staking: pallet_staking, Session: pallet_session, + Historical: pallet_session::historical } ); @@ -79,7 +80,8 @@ sp_runtime::impl_opaque_keys! { pub struct TestSessionHandler; impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; + // corresponds to the opaque key id above + const KEY_TYPE_IDS: &'static [KeyTypeId] = &[KeyTypeId([100u8, 117u8, 109u8, 121u8])]; fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} From 3d8da815ecd12b8f04daf87d6ffba5ec4a181806 Mon Sep 17 00:00:00 2001 From: clangenb <37865735+clangenb@users.noreply.github.com> Date: Mon, 2 Dec 2024 17:36:52 +0100 Subject: [PATCH 48/68] migrate pallet-offences-benchmarking to benchmark v2 syntax (#6300) Migrates pallet-offences-benchmarking to benchmark v2 syntax. Part of: * #6202 --------- Co-authored-by: Giuseppe Re --- .../frame/offences/benchmarking/src/inner.rs | 107 +++++++++++------- .../frame/offences/benchmarking/src/mock.rs | 5 +- 2 files changed, 66 insertions(+), 46 deletions(-) diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs index 573114de0742..75f3e9931e34 100644 --- a/substrate/frame/offences/benchmarking/src/inner.rs +++ b/substrate/frame/offences/benchmarking/src/inner.rs @@ -19,7 +19,7 @@ use alloc::{vec, vec::Vec}; -use frame_benchmarking::v1::{account, benchmarks}; +use frame_benchmarking::v2::*; use frame_support::traits::Get; use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; @@ -144,7 +144,7 @@ fn create_offender(n: u32, nominators: u32) -> Result, &' fn make_offenders( num_offenders: u32, num_nominators: u32, -) -> Result<(Vec>, Vec>), &'static str> { +) -> Result>, &'static str> { Staking::::new_session(0); let mut offenders = vec![]; @@ -167,21 +167,50 @@ fn make_offenders( .expect("failed to convert validator id to full identification") }) .collect::>>(); - Ok((id_tuples, offenders)) + Ok(id_tuples) } -benchmarks! 
{ - where_clause { - where +#[cfg(test)] +fn assert_all_slashes_applied(offender_count: usize) +where + T: Config, + ::RuntimeEvent: TryInto>, + ::RuntimeEvent: TryInto>, + ::RuntimeEvent: TryInto, + ::RuntimeEvent: TryInto>, +{ + // make sure that all slashes have been applied + // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + + // reporter account endowed + some funds rescinded from issuance. + assert_eq!( + System::::read_events_for_pallet::>().len(), + 2 * (offender_count + 1) + 3 + ); + // (n nominators + one validator) * slashed + Slash Reported + assert_eq!( + System::::read_events_for_pallet::>().len(), + 1 * (offender_count + 1) + 1 + ); + // offence + assert_eq!(System::::read_events_for_pallet::().len(), 1); + // reporter new account + assert_eq!(System::::read_events_for_pallet::>().len(), 1); +} + +#[benchmarks( + where ::RuntimeEvent: TryInto>, ::RuntimeEvent: TryInto>, ::RuntimeEvent: TryInto, ::RuntimeEvent: TryInto>, - } - - report_offence_grandpa { - let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); - +)] +mod benchmarks { + use super::*; + + #[benchmark] + pub fn report_offence_grandpa( + n: Linear<0, { MAX_NOMINATORS.min(MaxNominationsOf::::get()) }>, + ) -> Result<(), BenchmarkError> { // for grandpa equivocation reports the number of reporters // and offenders is always 1 let reporters = vec![account("reporter", 1, SEED)]; @@ -189,7 +218,7 @@ benchmarks! { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let mut offenders = make_offenders::(1, n)?; let validator_set_count = Session::::validators().len() as u32; let offence = GrandpaEquivocationOffence { @@ -199,28 +228,24 @@ benchmarks! { offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); - }: { - let _ = Offences::::report_offence(reporters, offence); - } - verify { + + #[block] + { + let _ = Offences::::report_offence(reporters, offence); + } + #[cfg(test)] { - // make sure that all slashes have been applied - // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + reporter - // account endowed + some funds rescinded from issuance. - assert_eq!(System::::read_events_for_pallet::>().len(), 2 * (n + 1) as usize + 3); - // (n nominators + one validator) * slashed + Slash Reported - assert_eq!(System::::read_events_for_pallet::>().len(), 1 * (n + 1) as usize + 1); - // offence - assert_eq!(System::::read_events_for_pallet::().len(), 1); - // reporter new account - assert_eq!(System::::read_events_for_pallet::>().len(), 1); + assert_all_slashes_applied::(n as usize); } - } - report_offence_babe { - let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); + Ok(()) + } + #[benchmark] + fn report_offence_babe( + n: Linear<0, { MAX_NOMINATORS.min(MaxNominationsOf::::get()) }>, + ) -> Result<(), BenchmarkError> { // for babe equivocation reports the number of reporters // and offenders is always 1 let reporters = vec![account("reporter", 1, SEED)]; @@ -228,7 +253,7 @@ benchmarks! { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let mut offenders = make_offenders::(1, n)?; let validator_set_count = Session::::validators().len() as u32; let offence = BabeEquivocationOffence { @@ -238,23 +263,17 @@ benchmarks! 
{ offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); - }: { - let _ = Offences::::report_offence(reporters, offence); - } - verify { + + #[block] + { + let _ = Offences::::report_offence(reporters, offence); + } #[cfg(test)] { - // make sure that all slashes have been applied - // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + reporter - // account endowed + some funds rescinded from issuance. - assert_eq!(System::::read_events_for_pallet::>().len(), 2 * (n + 1) as usize + 3); - // (n nominators + one validator) * slashed + Slash Reported - assert_eq!(System::::read_events_for_pallet::>().len(), 1 * (n + 1) as usize + 1); - // offence - assert_eq!(System::::read_events_for_pallet::().len(), 1); - // reporter new account - assert_eq!(System::::read_events_for_pallet::>().len(), 1); + assert_all_slashes_applied::(n as usize); } + + Ok(()) } impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index efaec49a65b3..c5c178aa4443 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -29,7 +29,7 @@ use frame_system as system; use pallet_session::historical as pallet_session_historical; use sp_runtime::{ testing::{Header, UintAuthorityId}, - BuildStorage, Perbill, + BuildStorage, KeyTypeId, Perbill, }; type AccountId = u64; @@ -66,7 +66,8 @@ sp_runtime::impl_opaque_keys! { pub struct TestSessionHandler; impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; + // corresponds to the opaque key id above + const KEY_TYPE_IDS: &'static [KeyTypeId] = &[KeyTypeId([100u8, 117u8, 109u8, 121u8])]; fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} From 8f1606e9f9bd6269a4c2631a161dcc73e969a302 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 3 Dec 2024 12:55:50 +0200 Subject: [PATCH 49/68] Rococo People <> Bulletin bridge fixes (#6708) --- .../chains/chain-polkadot-bulletin/src/lib.rs | 2 +- bridges/relays/utils/src/initialize.rs | 7 ++-- .../src/bridge_to_bulletin_config.rs | 41 ++++--------------- .../src/genesis_config_presets.rs | 10 +++++ .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 1 - .../bridge-hub-rococo/tests/tests.rs | 12 +++--- 6 files changed, 28 insertions(+), 45 deletions(-) diff --git a/bridges/chains/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs index c5c18beb2cad..070bc7b0ba3d 100644 --- a/bridges/chains/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/chains/chain-polkadot-bulletin/src/lib.rs @@ -225,4 +225,4 @@ impl ChainWithMessages for PolkadotBulletin { } decl_bridge_finality_runtime_apis!(polkadot_bulletin, grandpa); -decl_bridge_messages_runtime_apis!(polkadot_bulletin, bp_messages::HashedLaneId); +decl_bridge_messages_runtime_apis!(polkadot_bulletin, bp_messages::LegacyLaneId); diff --git a/bridges/relays/utils/src/initialize.rs b/bridges/relays/utils/src/initialize.rs index 564ed1f0e5cc..deb9b9d059d5 100644 --- a/bridges/relays/utils/src/initialize.rs +++ b/bridges/relays/utils/src/initialize.rs @@ -52,9 +52,10 @@ pub fn initialize_logger(with_timestamp: bool) { format, ); - let env_filter = EnvFilter::from_default_env() - .add_directive(Level::WARN.into()) - .add_directive("bridge=info".parse().expect("static filter string is valid")); + let env_filter = EnvFilter::builder() + 
.with_default_directive(Level::WARN.into()) + .with_default_directive("bridge=info".parse().expect("static filter string is valid")) + .from_env_lossy(); let builder = SubscriberBuilder::default().with_env_filter(env_filter); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs index b284fa9e7af7..1e733503f43b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -22,14 +22,13 @@ use crate::{ bridge_common_config::RelayersForPermissionlessLanesInstance, weights, xcm_config::UniversalLocation, AccountId, Balance, Balances, BridgeRococoBulletinGrandpa, - BridgeRococoBulletinMessages, PolkadotXcm, Runtime, RuntimeEvent, RuntimeHoldReason, - XcmOverRococoBulletin, XcmRouter, + BridgeRococoBulletinMessages, Runtime, RuntimeEvent, RuntimeHoldReason, XcmOverRococoBulletin, + XcmRouter, }; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, - target_chain::FromBridgedChainMessagesProof, HashedLaneId, + target_chain::FromBridgedChainMessagesProof, LegacyLaneId, }; -use bridge_hub_common::xcm_version::XcmVersionOfDestAndRemoteBridge; use frame_support::{ parameter_types, @@ -46,6 +45,7 @@ use testnet_parachains_constants::rococo::currency::UNITS as ROC; use xcm::{ latest::prelude::*, prelude::{InteriorLocation, NetworkId}, + AlwaysV5, }; use xcm_builder::{BridgeBlobDispatcher, ParentIsPreset, SiblingParachainConvertsVia}; @@ -120,7 +120,7 @@ impl pallet_bridge_messages::Config for Runt type OutboundPayload = XcmAsPlainPayload; type InboundPayload = XcmAsPlainPayload; - type LaneId = HashedLaneId; + type LaneId = LegacyLaneId; type DeliveryPayments = (); type DeliveryConfirmationPayments = (); @@ -139,8 +139,7 @@ impl pallet_xcm_bridge_hub::Config for Runtime type BridgeMessagesPalletInstance = WithRococoBulletinMessagesInstance; type MessageExportPrice = (); - type DestinationVersion = - XcmVersionOfDestAndRemoteBridge; + type DestinationVersion = AlwaysV5; type ForceOrigin = EnsureRoot; // We don't want to allow creating bridges for this instance. @@ -253,7 +252,7 @@ where let universal_source = [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(sibling_para_id)].into(); let universal_destination = - [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get()), Parachain(2075)].into(); + [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into(); let bridge_id = BridgeId::new(&universal_source, &universal_destination); // insert only bridge metadata, because the benchmarks create lanes @@ -279,29 +278,3 @@ where universal_source } - -/// Contains the migration for the PeopleRococo<>RococoBulletin bridge. -pub mod migration { - use super::*; - use frame_support::traits::ConstBool; - - parameter_types! { - pub BulletinRococoLocation: InteriorLocation = [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into(); - pub RococoPeopleToRococoBulletinMessagesLane: HashedLaneId = pallet_xcm_bridge_hub::Pallet::< Runtime, XcmOverPolkadotBulletinInstance >::bridge_locations( - PeopleRococoLocation::get(), - BulletinRococoLocation::get() - ) - .unwrap() - .calculate_lane_id(xcm::latest::VERSION).expect("Valid locations"); - } - - /// Ensure that the existing lanes for the People<>Bulletin bridge are correctly configured. 
- pub type StaticToDynamicLanes = pallet_xcm_bridge_hub::migration::OpenBridgeForLane< - Runtime, - XcmOverPolkadotBulletinInstance, - RococoPeopleToRococoBulletinMessagesLane, - ConstBool, - PeopleRococoLocation, - BulletinRococoLocation, - >; -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs index 98e2450ee832..55fd499c2f54 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs @@ -61,10 +61,20 @@ fn bridge_hub_rococo_genesis( .collect(), }, polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, + bridge_polkadot_bulletin_grandpa: BridgePolkadotBulletinGrandpaConfig { + owner: bridges_pallet_owner.clone(), + }, bridge_westend_grandpa: BridgeWestendGrandpaConfig { owner: bridges_pallet_owner.clone() }, bridge_westend_messages: BridgeWestendMessagesConfig { owner: bridges_pallet_owner.clone(), }, + xcm_over_polkadot_bulletin: XcmOverPolkadotBulletinConfig { + opened_bridges: vec![( + Location::new(1, [Parachain(1004)]), + Junctions::from([GlobalConsensus(NetworkId::PolkadotBulletin).into()]), + Some(bp_messages::LegacyLaneId([0, 0, 0, 0])), + )], + }, xcm_over_bridge_hub_westend: XcmOverBridgeHubWestendConfig { opened_bridges }, ethereum_system: EthereumSystemConfig { para_id: id, asset_hub_para_id }, }) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 598afeddb984..d87ff9b43fef 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -169,7 +169,6 @@ pub type Migrations = ( bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, >, bridge_to_westend_config::migration::StaticToDynamicLanes, - bridge_to_bulletin_config::migration::StaticToDynamicLanes, frame_support::migrations::RemoveStorage< BridgeWestendMessagesPalletName, OutboundLanesCongestedSignalsKey, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 29f9615bff6a..44e69c31a560 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -501,10 +501,10 @@ mod bridge_hub_westend_tests { mod bridge_hub_bulletin_tests { use super::*; - use bp_messages::{HashedLaneId, LaneIdType}; + use bp_messages::LegacyLaneId; use bridge_common_config::BridgeGrandpaRococoBulletinInstance; use bridge_hub_rococo_runtime::{ - bridge_common_config::RelayersForPermissionlessLanesInstance, + bridge_common_config::RelayersForLegacyLaneIdsMessagesInstance, xcm_config::LocationToAccountId, }; use bridge_hub_test_utils::test_cases::from_grandpa_chain; @@ -528,7 +528,7 @@ mod bridge_hub_bulletin_tests { AllPalletsWithoutSystem, BridgeGrandpaRococoBulletinInstance, WithRococoBulletinMessagesInstance, - RelayersForPermissionlessLanesInstance, + RelayersForLegacyLaneIdsMessagesInstance, >; #[test] @@ -599,7 +599,7 @@ mod bridge_hub_bulletin_tests { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance - >(locations, HashedLaneId::try_new(1, 2).unwrap()) + >(locations, LegacyLaneId([0, 0, 
0, 0])) } ).1 }, @@ -663,7 +663,7 @@ mod bridge_hub_bulletin_tests { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance, - >(locations, HashedLaneId::try_new(1, 2).unwrap()) + >(locations, LegacyLaneId([0, 0, 0, 0])) }, ) .1 @@ -697,7 +697,7 @@ mod bridge_hub_bulletin_tests { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance, - >(locations, HashedLaneId::try_new(1, 2).unwrap()) + >(locations, LegacyLaneId([0, 0, 0, 0])) }, ) .1 From 592bb3205be7569cf2d705b31a272340038bbed7 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Tue, 3 Dec 2024 13:06:43 +0100 Subject: [PATCH 50/68] [Release/CICD] Re-worked Create Release Draft flow (#6734) This PR contains the following changes to the release pipelines: - re-built the Create Release Draft workflow - binary builds are moved completely to the `Release - Build node release candidate` flow - added upload of all the release artifacts to S3 - adjusted the `Release - Publish Docker Image` workflow so that it now matches the new release flow. --- .github/scripts/common/lib.sh | 45 +++- .github/scripts/release/release_lib.sh | 22 ++ ...le.yml => release-10_branchoff-stable.yml} | 0 ...ation.yml => release-11_rc-automation.yml} | 0 ...e-build-rc.yml => release-20_build-rc.yml} | 96 +++++++- .../release-30_publish_release_draft.yml | 206 +++++++++++------- .../workflows/release-50_publish-docker.yml | 97 +++------ .../workflows/release-reusable-rc-buid.yml | 53 ++++- .github/workflows/release-srtool.yml | 18 +- 9 files changed, 373 insertions(+), 164 deletions(-) rename .github/workflows/{release-branchoff-stable.yml => release-10_branchoff-stable.yml} (100%) rename .github/workflows/{release-10_rc-automation.yml => release-11_rc-automation.yml} (100%) rename .github/workflows/{release-build-rc.yml => release-20_build-rc.yml} (62%) diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index 6b8f70a26d7e..41dc0ba06dd2 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -270,20 +270,19 @@ fetch_debian_package_from_s3() { } # Fetch the release artifacts like binary and signatures from S3. 
Assumes the ENV are set: -# - RELEASE_ID -# - GITHUB_TOKEN -# - REPO in the form paritytech/polkadot +# inputs: binary (polkadot), target (aarch64-apple-darwin) fetch_release_artifacts_from_s3() { BINARY=$1 - OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"} + TARGET=$2 + OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${TARGET}/${BINARY}"} echo "OUTPUT_DIR : $OUTPUT_DIR" URL_BASE=$(get_s3_url_base $BINARY) echo "URL_BASE=$URL_BASE" - URL_BINARY=$URL_BASE/$VERSION/$BINARY - URL_SHA=$URL_BASE/$VERSION/$BINARY.sha256 - URL_ASC=$URL_BASE/$VERSION/$BINARY.asc + URL_BINARY=$URL_BASE/$VERSION/$TARGET/$BINARY + URL_SHA=$URL_BASE/$VERSION/$TARGET/$BINARY.sha256 + URL_ASC=$URL_BASE/$VERSION/$TARGET/$BINARY.asc # Fetch artifacts mkdir -p "$OUTPUT_DIR" @@ -306,15 +305,26 @@ fetch_release_artifacts_from_s3() { function get_s3_url_base() { name=$1 case $name in - polkadot | polkadot-execute-worker | polkadot-prepare-worker | staking-miner) + polkadot | polkadot-execute-worker | polkadot-prepare-worker ) printf "https://releases.parity.io/polkadot" ;; - polkadot-parachain) - printf "https://releases.parity.io/cumulus" + polkadot-parachain) + printf "https://releases.parity.io/polkadot-parachain" + ;; + + polkadot-omni-node) + printf "https://releases.parity.io/polkadot-omni-node" + ;; + + chain-spec-builder) + printf "https://releases.parity.io/chain-spec-builder" ;; - *) + frame-omni-bencher) + printf "https://releases.parity.io/frame-omni-bencher" + ;; + *) printf "UNSUPPORTED BINARY $name" exit 1 ;; @@ -497,3 +507,16 @@ validate_stable_tag() { exit 1 fi } + +# Prepare the docker stable tag from the polkadot stable tag +# input: tag (polkadot-stableYYMM(-X) or polkadot-stableYYMM(-X)-rcX) +# output: stableYYMM(-X) or stableYYMM(-X)-rcX +prepare_docker_stable_tag() { + tag="$1" + if [[ "$tag" =~ stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)? ]]; then + echo "${BASH_REMATCH[0]}" + else + echo "Tag is invalid: $tag" + exit 1 + fi +} diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh index 8b9254ec3f29..43227180cb7c 100644 --- a/.github/scripts/release/release_lib.sh +++ b/.github/scripts/release/release_lib.sh @@ -139,3 +139,25 @@ upload_s3_release() { aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize echo "✅ The release should be at https://releases.parity.io/${product}/${version}/${target}" } + +# Upload runtime artifacts to the s3 release bucket +# +# input: version (stable release tag, e.g. 
polkadot-stable2412 or polkadot-stable2412-rc1) +# output: none +upload_s3_runtimes_release_artifacts() { + alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws' + + version=$1 + + echo "Working on version: $version " + + echo "Current content, should be empty on new uploads:" + aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize || true + echo "Content to be uploaded:" + artifacts="artifacts/runtimes/" + ls "$artifacts" + aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/polkadot/runtimes/${version}/" + echo "Uploaded files:" + aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize + echo "✅ The release should be at https://releases.parity.io/polkadot/runtimes/${version}" +} diff --git a/.github/workflows/release-branchoff-stable.yml b/.github/workflows/release-10_branchoff-stable.yml similarity index 100% rename from .github/workflows/release-branchoff-stable.yml rename to .github/workflows/release-10_branchoff-stable.yml diff --git a/.github/workflows/release-10_rc-automation.yml b/.github/workflows/release-11_rc-automation.yml similarity index 100% rename from .github/workflows/release-10_rc-automation.yml rename to .github/workflows/release-11_rc-automation.yml diff --git a/.github/workflows/release-build-rc.yml b/.github/workflows/release-20_build-rc.yml similarity index 62% rename from .github/workflows/release-build-rc.yml rename to .github/workflows/release-20_build-rc.yml index a43c2b282a8d..d4c7055c37c5 100644 --- a/.github/workflows/release-build-rc.yml +++ b/.github/workflows/release-20_build-rc.yml @@ -11,10 +11,12 @@ on: - polkadot - polkadot-parachain - polkadot-omni-node + - frame-omni-bencher + - chain-spec-builder - all release_tag: - description: Tag matching the actual release candidate with the format stableYYMM-rcX or stableYYMM + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X) type: string jobs: @@ -106,6 +108,50 @@ jobs: attestations: write contents: read + build-frame-omni-bencher-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["frame-omni-bencher"]' + package: "frame-omni-bencher" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-chain-spec-builder-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["chain-spec-builder"]' + package: staging-chain-spec-builder + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ 
secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + build-polkadot-macos-binary: needs: [validate-inputs] if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} @@ -134,7 +180,7 @@ jobs: uses: "./.github/workflows/release-reusable-rc-buid.yml" with: binary: '["polkadot-parachain"]' - package: "polkadot-parachain-bin" + package: polkadot-parachain-bin release_tag: ${{ needs.validate-inputs.outputs.release_tag }} target: aarch64-apple-darwin secrets: @@ -156,7 +202,51 @@ jobs: uses: "./.github/workflows/release-reusable-rc-buid.yml" with: binary: '["polkadot-omni-node"]' - package: "polkadot-omni-node" + package: polkadot-omni-node + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-frame-omni-bencher-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["frame-omni-bencher"]' + package: frame-omni-bencher + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-chain-spec-builder-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["chain-spec-builder"]' + package: staging-chain-spec-builder release_tag: ${{ needs.validate-inputs.outputs.release_tag }} target: aarch64-apple-darwin secrets: diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 4364b4f80457..78ceea91f100 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -1,19 +1,46 @@ name: Release - Publish draft -on: - push: - tags: - # Catches v1.2.3 and v1.2.3-rc1 - - v[0-9]+.[0-9]+.[0-9]+* - # - polkadot-stable[0-9]+* Activate when the release process from release org is setteled +# This workflow runs in paritytech-release and creates full release draft with: +# - release notes +# - info about the runtimes +# - attached artifacts: +# - runtimes +# - binaries +# - signatures +on: workflow_dispatch: inputs: - version: - 
description: Current release/rc version + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X) + required: true + type: string jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [ check-synchronization ] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Validate inputs + id: validate_inputs + run: | + . ./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + get-rust-versions: + needs: [ validate-inputs ] runs-on: ubuntu-latest outputs: rustc-stable: ${{ steps.get-rust-versions.outputs.stable }} @@ -24,47 +51,28 @@ jobs: echo "stable=$RUST_STABLE_VERSION" >> $GITHUB_OUTPUT build-runtimes: + needs: [ validate-inputs ] uses: "./.github/workflows/release-srtool.yml" with: excluded_runtimes: "asset-hub-rococo bridge-hub-rococo contracts-rococo coretime-rococo people-rococo rococo rococo-parachain substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template polkadot-sdk-docs-first" build_opts: "--features on-chain-release-build" - - build-binaries: - runs-on: ubuntu-latest - strategy: - matrix: - # Tuples of [package, binary-name] - binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder] ] - steps: - - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 - - - name: Install protobuf-compiler - run: | - sudo apt update - sudo apt install -y protobuf-compiler - - - name: Build ${{ matrix.binary[1] }} binary - run: | - cargo build --locked --profile=production -p ${{ matrix.binary[0] }} --bin ${{ matrix.binary[1] }} - target/production/${{ matrix.binary[1] }} --version - - - name: Upload ${{ matrix.binary[1] }} binary - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 - with: - name: ${{ matrix.binary[1] }} - path: target/production/${{ matrix.binary[1] }} - + profile: production + permissions: + id-token: write + attestations: write + contents: read publish-release-draft: runs-on: ubuntu-latest - needs: [ get-rust-versions, build-runtimes ] + environment: release + needs: [ validate-inputs, get-rust-versions, build-runtimes ] outputs: release_url: ${{ steps.create-release.outputs.html_url }} asset_upload_url: ${{ steps.create-release.outputs.upload_url }} + steps: - name: Checkout - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -87,20 +95,21 @@ jobs: GLUTTON_WESTEND_DIGEST: ${{ github.workspace}}/glutton-westend-runtime/glutton-westend-srtool-digest.json PEOPLE_WESTEND_DIGEST: ${{ github.workspace}}/people-westend-runtime/people-westend-srtool-digest.json WESTEND_DIGEST: ${{ github.workspace}}/westend-runtime/westend-srtool-digest.json + RELEASE_TAG: ${{ needs.validate-inputs.outputs.release_tag }} shell: bash run: | . 
./.github/scripts/common/lib.sh export REF1=$(get_latest_release_tag) - if [[ -z "${{ inputs.version }}" ]]; then + if [[ -z "$RELEASE_TAG" ]]; then export REF2="${{ github.ref_name }}" echo "REF2: ${REF2}" else - export REF2="${{ inputs.version }}" + export REF2="$RELEASE_TAG" echo "REF2: ${REF2}" fi echo "REL_TAG=$REF2" >> $GITHUB_ENV - export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]+).*$/\1/') + export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]{4}(-[0-9]+)?).*$/\1/') ./scripts/release/build-changelogs.sh @@ -112,19 +121,29 @@ jobs: scripts/release/context.json **/*-srtool-digest.json + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk + - name: Create draft release id: create-release - uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4 env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ env.REL_TAG }} - release_name: Polkadot ${{ env.REL_TAG }} - body_path: ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md - draft: true + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + run: | + gh release create ${{ env.REL_TAG }} \ + --repo paritytech/polkadot-sdk \ + --draft \ + --title "Polkadot ${{ env.REL_TAG }}" \ + --notes-file ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md publish-runtimes: - needs: [ build-runtimes, publish-release-draft ] + needs: [ validate-inputs, build-runtimes, publish-release-draft ] + environment: release continue-on-error: true runs-on: ubuntu-latest strategy: @@ -132,7 +151,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -144,44 +163,83 @@ jobs: >>$GITHUB_ENV echo ASSET=$(find ${{ matrix.chain }}-runtime -name '*.compact.compressed.wasm') >>$GITHUB_ENV echo SPEC=$(<${JSON} jq -r .runtimes.compact.subwasm.core_version.specVersion) + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk + - name: Upload compressed ${{ matrix.chain }} v${{ env.SPEC }} wasm - if: ${{ matrix.chain != 'rococo-parachain' }} - uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2 env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }} - asset_path: ${{ env.ASSET }} - asset_name: ${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm - asset_content_type: application/wasm + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + run: | + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + '${{ env.ASSET }}#${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm' - publish-binaries: - needs: [ publish-release-draft, build-binaries ] + publish-release-artifacts: + needs: [ validate-inputs, publish-release-draft ] + environment: release continue-on-error: true 
runs-on: ubuntu-latest strategy: matrix: - binary: [frame-omni-bencher, chain-spec-builder] + binary: [ polkadot, polkadot-execute-worker, polkadot-prepare-worker, polkadot-parachain, polkadot-omni-node, frame-omni-bencher, chain-spec-builder ] + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] steps: - - name: Download artifacts - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Fetch binaries from s3 based on version + run: | + . ./.github/scripts/common/lib.sh + + VERSION="${{ needs.validate-inputs.outputs.release_tag }}" + fetch_release_artifacts_from_s3 ${{ matrix.binary }} ${{ matrix.target }} + + - name: Rename aarch64-apple-darwin binaries + if: ${{ matrix.target == 'aarch64-apple-darwin' }} + working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }} + run: | + mv ${{ matrix.binary }} ${{ matrix.binary }}-aarch64-apple-darwin + mv ${{ matrix.binary }}.asc ${{ matrix.binary }}-aarch64-apple-darwin.asc + mv ${{ matrix.binary }}.sha256 ${{ matrix.binary }}-aarch64-apple-darwin.sha256 + + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 with: - name: ${{ matrix.binary }} + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk - - name: Upload ${{ matrix.binary }} binary - uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2 + - name: Upload ${{ matrix.binary }} binary to release draft env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }} - asset_path: ${{ github.workspace}}/${{ matrix.binary }} - asset_name: ${{ matrix.binary }} - asset_content_type: application/octet-stream + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }} + run: | + if [[ ${{ matrix.target }} == "aarch64-apple-darwin" ]]; then + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + ${{ matrix.binary }}-aarch64-apple-darwin \ + ${{ matrix.binary }}-aarch64-apple-darwin.asc \ + ${{ matrix.binary }}-aarch64-apple-darwin.sha256 + else + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + ${{ matrix.binary }} \ + ${{ matrix.binary }}.asc \ + ${{ matrix.binary }}.sha256 + fi post_to_matrix: runs-on: ubuntu-latest - needs: publish-release-draft + needs: [ validate-inputs, publish-release-draft ] environment: release strategy: matrix: @@ -197,5 +255,5 @@ jobs: access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} server: m.parity.io message: | - **New version of polkadot tagged**: ${{ github.ref_name }}
- Draft release created: ${{ needs.publish-release-draft.outputs.release_url }} + **New version of polkadot tagged**: ${{ needs.validate-inputs.outputs.release_tag }}
+ And the release draft is created in the [polkadot-sdk repo](https://github.com/paritytech/polkadot-sdk/releases) diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 627e53bacd88..5c3c3a6e854d 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -4,10 +4,6 @@ name: Release - Publish Docker Image # It builds and published releases and rc candidates. on: - #TODO: activate automated run later - # release: - # types: - # - published workflow_dispatch: inputs: image_type: @@ -30,16 +26,6 @@ on: - polkadot-parachain - chain-spec-builder - release_id: - description: | - Release ID. - You can find it using the command: - curl -s \ - -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \ - jq '.[] | { name: .name, id: .id }' - required: true - type: number - registry: description: Container registry required: true @@ -55,7 +41,7 @@ on: default: parity version: - description: version to build/release + description: Version of the polkadot node release in format v1.16.0 or v1.16.0-rc1 default: v0.9.18 required: true @@ -78,11 +64,15 @@ env: IMAGE_TYPE: ${{ inputs.image_type }} jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + validate-inputs: + needs: [check-synchronization] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' runs-on: ubuntu-latest outputs: version: ${{ steps.validate_inputs.outputs.VERSION }} - release_id: ${{ steps.validate_inputs.outputs.RELEASE_ID }} stable_tag: ${{ steps.validate_inputs.outputs.stable_tag }} steps: @@ -97,11 +87,6 @@ jobs: VERSION=$(filter_version_from_input "${{ inputs.version }}") echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT - RELEASE_ID=$(check_release_id "${{ inputs.release_id }}") - echo "RELEASE_ID=${RELEASE_ID}" >> $GITHUB_OUTPUT - - echo "Release ID: $RELEASE_ID" - STABLE_TAG=$(validate_stable_tag ${{ inputs.stable_tag }}) echo "stable_tag=${STABLE_TAG}" >> $GITHUB_OUTPUT @@ -114,50 +99,26 @@ jobs: - name: Checkout sources uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - #TODO: this step will be needed when automated triggering will work - #this step runs only if the workflow is triggered automatically when new release is published - # if: ${{ env.EVENT_NAME == 'release' && env.EVENT_ACTION != '' && env.EVENT_ACTION == 'published' }} - # run: | - # mkdir -p release-artifacts && cd release-artifacts - - # for f in $BINARY $BINARY.asc $BINARY.sha256; do - # URL="https://github.com/${{ github.event.repository.full_name }}/releases/download/${{ github.event.release.tag_name }}/$f" - # echo " - Fetching $f from $URL" - # wget "$URL" -O "$f" - # done - # chmod a+x $BINARY - # ls -al - name: Fetch rc artifacts or release artifacts from s3 based on version - #this step runs only if the workflow is triggered manually - if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'polkadot-omni-node' && inputs.binary != 'chain-spec-builder'}} + # if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'polkadot-omni-node' && inputs.binary != 'chain-spec-builder'}} run: | . 
./.github/scripts/common/lib.sh - VERSION="${{ needs.validate-inputs.outputs.VERSION }}" + VERSION="${{ needs.validate-inputs.outputs.stable_tag }}" if [[ ${{ inputs.binary }} == 'polkadot' ]]; then bins=(polkadot polkadot-prepare-worker polkadot-execute-worker) for bin in "${bins[@]}"; do - fetch_release_artifacts_from_s3 $bin + fetch_release_artifacts_from_s3 $bin x86_64-unknown-linux-gnu done else - fetch_release_artifacts_from_s3 $BINARY + fetch_release_artifacts_from_s3 $BINARY x86_64-unknown-linux-gnu fi - - name: Fetch polkadot-omni-node/chain-spec-builder rc artifacts or release artifacts based on release id - #this step runs only if the workflow is triggered manually and only for chain-spec-builder - if: ${{ env.EVENT_NAME == 'workflow_dispatch' && (inputs.binary == 'polkadot-omni-node' || inputs.binary == 'chain-spec-builder') }} - run: | - . ./.github/scripts/common/lib.sh - - RELEASE_ID="${{ needs.validate-inputs.outputs.RELEASE_ID }}" - fetch_release_artifacts - - name: Upload artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: release-artifacts - path: release-artifacts/${{ env.BINARY }}/**/* + path: release-artifacts/x86_64-unknown-linux-gnu/${{ env.BINARY }}/**/* build-container: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} @@ -173,7 +134,7 @@ jobs: uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - name: Check sha256 ${{ env.BINARY }} - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} working-directory: release-artifacts run: | . ../.github/scripts/common/lib.sh @@ -182,7 +143,7 @@ jobs: check_sha256 $BINARY && echo "OK" || echo "ERR" - name: Check GPG ${{ env.BINARY }} - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} working-directory: release-artifacts run: | . ../.github/scripts/common/lib.sh @@ -190,35 +151,29 @@ jobs: check_gpg $BINARY - name: Fetch rc commit and tag + working-directory: release-artifacts if: ${{ env.IMAGE_TYPE == 'rc' }} id: fetch_rc_refs + shell: bash run: | - . ./.github/scripts/common/lib.sh - - echo "release=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT + . ../.github/scripts/common/lib.sh commit=$(git rev-parse --short HEAD) && \ echo "commit=${commit}" >> $GITHUB_OUTPUT - - echo "tag=${{ needs.validate-inputs.outputs.version }}" >> $GITHUB_OUTPUT + echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT + echo "tag=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Fetch release tags working-directory: release-artifacts if: ${{ env.IMAGE_TYPE == 'release'}} id: fetch_release_refs + shell: bash run: | - chmod a+rx $BINARY - - if [[ $BINARY != 'chain-spec-builder' ]]; then - VERSION=$(./$BINARY --version | awk '{ print $2 }' ) - release=$( echo $VERSION | cut -f1 -d- ) - else - release=$(echo ${{ needs.validate-inputs.outputs.VERSION }} | sed 's/^v//') - fi + . 
../.github/scripts/common/lib.sh echo "tag=latest" >> $GITHUB_OUTPUT - echo "release=${release}" >> $GITHUB_OUTPUT - echo "stable=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT + echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT + echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Build Injected Container image for polkadot rc if: ${{ env.BINARY == 'polkadot' }} @@ -342,8 +297,10 @@ jobs: - name: Fetch values id: fetch-data run: | + . ./.github/scripts/common/lib.sh date=$(date -u '+%Y-%m-%dT%H:%M:%SZ') echo "date=$date" >> $GITHUB_OUTPUT + echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Build and push id: docker_build @@ -354,9 +311,9 @@ jobs: # TODO: The owner should be used below but buildx does not resolve the VARs # TODO: It would be good to get rid of this GHA that we don't really need. tags: | - parity/polkadot:${{ needs.validate-inputs.outputs.stable_tag }} - parity/polkadot:latest - parity/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }} + egorpop/polkadot:${{ steps.fetch-data.outputs.stable }} + egorpop/polkadot:latest + egorpop/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }} build-args: | VCS_REF=${{ github.ref }} POLKADOT_VERSION=${{ needs.fetch-latest-debian-package-version.outputs.polkadot_apt_version }} diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml index f5240878cba2..dc1b4553eb9b 100644 --- a/.github/workflows/release-reusable-rc-buid.yml +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -302,7 +302,6 @@ jobs: AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - upload-polkadot-parachain-artifacts-to-s3: if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [build-rc] @@ -329,6 +328,32 @@ jobs: AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + upload-frame-omni-bencher-artifacts-to-s3: + if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-chain-spec-builder-artifacts-to-s3: + if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: chain-spec-builder + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + upload-polkadot-macos-artifacts-to-s3: if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} # TODO: add and use a 
`build-polkadot-homebrew-package` which packs all `polkadot` binaries: @@ -395,3 +420,29 @@ jobs: AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-frame-omni-bencher-macos-artifacts-to-s3: + if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-chain-spec-builder-macos-artifacts-to-s3: + if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: chain-spec-builder + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml index 9a29b46d2fc3..fc10496d481b 100644 --- a/.github/workflows/release-srtool.yml +++ b/.github/workflows/release-srtool.yml @@ -1,7 +1,7 @@ name: Srtool build env: - SUBWASM_VERSION: 0.20.0 + SUBWASM_VERSION: 0.21.0 TOML_CLI_VERSION: 0.2.4 on: @@ -11,14 +11,16 @@ on: type: string build_opts: type: string + profile: + type: string outputs: published_runtimes: value: ${{ jobs.find-runtimes.outputs.runtime }} - schedule: - - cron: "00 02 * * 1" # 2AM weekly on monday - - workflow_dispatch: +permissions: + id-token: write + attestations: write + contents: read jobs: find-runtimes: @@ -75,6 +77,7 @@ jobs: with: chain: ${{ matrix.chain }} runtime_dir: ${{ matrix.runtime_dir }} + profile: ${{ inputs.profile }} - name: Summary run: | @@ -83,6 +86,11 @@ jobs: echo "Compact Runtime: ${{ steps.srtool_build.outputs.wasm }}" echo "Compressed Runtime: ${{ steps.srtool_build.outputs.wasm_compressed }}" + - name: Generate artifact attestation + uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + with: + subject-path: ${{ steps.srtool_build.outputs.wasm }} + # We now get extra information thanks to subwasm - name: Install subwasm run: | From 76a292b23bf6f35156fd3dd832e9c4ec31b24b2c Mon Sep 17 00:00:00 2001 From: Lulu Date: Tue, 3 Dec 2024 13:22:45 +0100 Subject: [PATCH 51/68] Update parity-publish (#6549) --- .github/workflows/check-semver.yml | 4 +- .github/workflows/publish-check-crates.yml | 2 +- .github/workflows/publish-claim-crates.yml | 2 +- .../snowbridge/runtime/test-common/Cargo.toml | 2 + cumulus/client/cli/Cargo.toml | 2 + cumulus/client/collator/Cargo.toml | 2 + cumulus/client/consensus/aura/Cargo.toml | 2 + cumulus/client/consensus/common/Cargo.toml | 2 + cumulus/client/consensus/proposer/Cargo.toml | 2 + .../client/consensus/relay-chain/Cargo.toml | 2 + cumulus/client/network/Cargo.toml | 2 + cumulus/client/parachain-inherent/Cargo.toml | 2 + cumulus/client/pov-recovery/Cargo.toml | 2 + .../Cargo.toml | 2 + .../client/relay-chain-interface/Cargo.toml | 2 + 
.../relay-chain-minimal-node/Cargo.toml | 2 + .../relay-chain-rpc-interface/Cargo.toml | 2 + cumulus/client/service/Cargo.toml | 2 + cumulus/pallets/aura-ext/Cargo.toml | 2 + cumulus/pallets/parachain-system/Cargo.toml | 2 + .../parachain-system/proc-macro/Cargo.toml | 2 + cumulus/pallets/solo-to-para/Cargo.toml | 2 + cumulus/pallets/xcm/Cargo.toml | 2 + cumulus/pallets/xcmp-queue/Cargo.toml | 2 + cumulus/parachains/common/Cargo.toml | 2 + .../emulated/common/Cargo.toml | 2 + .../pallets/collective-content/Cargo.toml | 2 + .../pallets/parachain-info/Cargo.toml | 2 + cumulus/parachains/pallets/ping/Cargo.toml | 2 + .../assets/asset-hub-rococo/Cargo.toml | 2 + .../assets/asset-hub-westend/Cargo.toml | 2 + .../runtimes/assets/common/Cargo.toml | 2 + .../runtimes/assets/test-utils/Cargo.toml | 2 + .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 + .../bridge-hubs/bridge-hub-westend/Cargo.toml | 2 + .../runtimes/bridge-hubs/common/Cargo.toml | 2 + .../bridge-hubs/test-utils/Cargo.toml | 2 + .../collectives-westend/Cargo.toml | 2 + .../parachains/runtimes/constants/Cargo.toml | 2 + .../contracts/contracts-rococo/Cargo.toml | 2 + .../coretime/coretime-rococo/Cargo.toml | 2 + .../coretime/coretime-westend/Cargo.toml | 2 + .../glutton/glutton-westend/Cargo.toml | 2 + .../runtimes/people/people-rococo/Cargo.toml | 2 + .../runtimes/people/people-westend/Cargo.toml | 2 + .../parachains/runtimes/test-utils/Cargo.toml | 2 + .../testing/rococo-parachain/Cargo.toml | 2 + cumulus/polkadot-omni-node/Cargo.toml | 2 + cumulus/polkadot-omni-node/lib/Cargo.toml | 2 + cumulus/polkadot-parachain/Cargo.toml | 2 + cumulus/primitives/aura/Cargo.toml | 2 + cumulus/primitives/core/Cargo.toml | 2 + .../primitives/parachain-inherent/Cargo.toml | 2 + .../proof-size-hostfunction/Cargo.toml | 2 + .../storage-weight-reclaim/Cargo.toml | 2 + cumulus/primitives/timestamp/Cargo.toml | 2 + cumulus/primitives/utility/Cargo.toml | 2 + cumulus/test/relay-sproof-builder/Cargo.toml | 2 + cumulus/xcm/xcm-emulator/Cargo.toml | 2 + polkadot/Cargo.toml | 2 + polkadot/cli/Cargo.toml | 2 + polkadot/core-primitives/Cargo.toml | 2 + polkadot/erasure-coding/Cargo.toml | 2 + polkadot/node/collation-generation/Cargo.toml | 2 + .../core/approval-voting-parallel/Cargo.toml | 2 + polkadot/node/core/approval-voting/Cargo.toml | 2 + polkadot/node/core/av-store/Cargo.toml | 2 + polkadot/node/core/backing/Cargo.toml | 2 + .../node/core/bitfield-signing/Cargo.toml | 2 + .../node/core/candidate-validation/Cargo.toml | 2 + polkadot/node/core/chain-api/Cargo.toml | 2 + polkadot/node/core/chain-selection/Cargo.toml | 2 + .../node/core/dispute-coordinator/Cargo.toml | 2 + .../node/core/parachains-inherent/Cargo.toml | 2 + .../core/prospective-parachains/Cargo.toml | 2 + polkadot/node/core/provisioner/Cargo.toml | 2 + polkadot/node/core/pvf-checker/Cargo.toml | 2 + polkadot/node/core/pvf/Cargo.toml | 2 + polkadot/node/core/pvf/common/Cargo.toml | 2 + .../node/core/pvf/execute-worker/Cargo.toml | 2 + .../node/core/pvf/prepare-worker/Cargo.toml | 2 + polkadot/node/core/runtime-api/Cargo.toml | 2 + polkadot/node/gum/Cargo.toml | 2 + polkadot/node/gum/proc-macro/Cargo.toml | 2 + polkadot/node/metrics/Cargo.toml | 2 + .../network/approval-distribution/Cargo.toml | 2 + .../availability-distribution/Cargo.toml | 2 + .../network/availability-recovery/Cargo.toml | 2 + .../network/bitfield-distribution/Cargo.toml | 2 + polkadot/node/network/bridge/Cargo.toml | 2 + .../node/network/collator-protocol/Cargo.toml | 2 + .../network/dispute-distribution/Cargo.toml | 2 + 
.../node/network/gossip-support/Cargo.toml | 2 + polkadot/node/network/protocol/Cargo.toml | 2 + .../network/statement-distribution/Cargo.toml | 2 + polkadot/node/overseer/Cargo.toml | 2 + polkadot/node/primitives/Cargo.toml | 2 + polkadot/node/service/Cargo.toml | 2 + polkadot/node/subsystem-types/Cargo.toml | 2 + polkadot/node/subsystem-util/Cargo.toml | 2 + polkadot/node/subsystem/Cargo.toml | 2 + polkadot/node/tracking-allocator/Cargo.toml | 2 + polkadot/parachain/Cargo.toml | 2 + polkadot/primitives/Cargo.toml | 2 + polkadot/rpc/Cargo.toml | 2 + polkadot/runtime/common/Cargo.toml | 2 + .../common/slot_range_helper/Cargo.toml | 2 + polkadot/runtime/metrics/Cargo.toml | 2 + polkadot/runtime/parachains/Cargo.toml | 2 + polkadot/runtime/rococo/Cargo.toml | 2 + polkadot/runtime/rococo/constants/Cargo.toml | 2 + polkadot/runtime/westend/Cargo.toml | 2 + polkadot/runtime/westend/constants/Cargo.toml | 2 + polkadot/statement-table/Cargo.toml | 2 + polkadot/utils/generate-bags/Cargo.toml | 2 + polkadot/xcm/Cargo.toml | 2 + polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml | 2 + polkadot/xcm/pallet-xcm/Cargo.toml | 2 + polkadot/xcm/procedural/Cargo.toml | 2 + polkadot/xcm/xcm-builder/Cargo.toml | 2 + polkadot/xcm/xcm-executor/Cargo.toml | 2 + polkadot/xcm/xcm-simulator/Cargo.toml | 2 + polkadot/xcm/xcm-simulator/example/Cargo.toml | 2 + prdoc/pr_6549.prdoc | 247 ++++++++++++++++++ scripts/generate-umbrella.py | 2 + substrate/frame/revive/fixtures/Cargo.toml | 2 + umbrella/Cargo.toml | 6 + 127 files changed, 501 insertions(+), 4 deletions(-) create mode 100644 prdoc/pr_6549.prdoc diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index 8d77b6a31b75..e9bedd16e6d1 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -11,7 +11,7 @@ concurrency: cancel-in-progress: true env: - TOOLCHAIN: nightly-2024-10-19 + TOOLCHAIN: nightly-2024-11-19 jobs: preflight: @@ -74,7 +74,7 @@ jobs: - name: install parity-publish # Set the target dir to cache the build. 
- run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.1 --locked -q + run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.2 --locked -q - name: check semver run: | diff --git a/.github/workflows/publish-check-crates.yml b/.github/workflows/publish-check-crates.yml index 3fad3b641474..1e5a8054e2c7 100644 --- a/.github/workflows/publish-check-crates.yml +++ b/.github/workflows/publish-check-crates.yml @@ -24,7 +24,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.8.0 --locked -q + run: cargo install parity-publish@0.10.2 --locked -q - name: parity-publish check run: parity-publish --color always check --allow-unpublished diff --git a/.github/workflows/publish-claim-crates.yml b/.github/workflows/publish-claim-crates.yml index 37bf06bb82d8..845b57a61b96 100644 --- a/.github/workflows/publish-claim-crates.yml +++ b/.github/workflows/publish-claim-crates.yml @@ -18,7 +18,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.8.0 --locked -q + run: cargo install parity-publish@0.10.2 --locked -q - name: parity-publish claim env: diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 6f8e586bf5ff..9f47f158ed4a 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -6,6 +6,8 @@ authors = ["Snowfork "] edition.workspace = true license = "Apache-2.0" categories = ["cryptography::cryptocurrencies"] +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 9b6f6b73960b..198f9428f1dd 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Parachain node CLI utilities." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 6ebde0c2c653..83a3f2661e7a 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Common node-side functionality and glue code to collate parachain blocks." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 0bb2de6bb9b8..6e0c124591cb 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 4bc2f1d1e600..0f532a2101c4 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index bb760ae03f4d..e391481bc445 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index f3ee6fc2f7d2..7f0f4333c961 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index bc67678eedeb..b78df8d73eae 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true description = "Cumulus-specific networking protocol" edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml index 0d82cf648743..4f53e2bc1bc2 100644 --- a/cumulus/client/parachain-inherent/Cargo.toml +++ b/cumulus/client/parachain-inherent/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof." 
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [dependencies] async-trait = { workspace = true } diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 3127dd26fcaa..762837e0bb11 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true description = "Parachain PoV recovery" edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index 6f1b74191be7..9e6e8da929bb 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" edition.workspace = true description = "Implementation of the RelayChainInterface trait for Polkadot full-nodes." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index a496fab050dd..2b9e72bbeca6 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" edition.workspace = true description = "Common interface for different relay chain datasources." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 95ecadc8bd06..0fad188bb1ab 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" edition.workspace = true description = "Minimal node implementation to be used in tandem with RPC or light-client mode." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index fb4cb4ceed4e..162f5ad0e9e8 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" edition.workspace = true description = "Implementation of the RelayChainInterface trait that connects to a remote RPC-node." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 0a77b465d96a..193283648f19 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Common functions used to assemble the components of a parachain node." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index c08148928b7c..fcda79f1d5c1 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "AURA consensus extension pallet for parachains" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 3cb0394c4b95..05498a474e42 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Base pallet for cumulus-based parachains" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index da6f0fd03efb..629818f9c4cc 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Proc macros provided by the parachain-system pallet" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index 5fd1939e93a0..2088361bf11a 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Adds functionality to migrate from a Solo to a Parachain" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 35d7a083b061..ff9be866d48f 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -5,6 +5,8 @@ name = "cumulus-pallet-xcm" version = "0.7.0" license = "Apache-2.0" description = "Pallet for stuff specific to parachains' usage of XCM" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 9c7470eda6da..af70a3169d8e 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Pallet to queue outbound and inbound XCMP messages." 
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 6d436bdf799a..641693a6a01b 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Logic which is common to all parachain runtimes" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index 23edaf6bfe65..8282d12d317f 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Common resources for integration testing with xcm-emulator" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index c52021f67e36..09301bd738f3 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -5,6 +5,8 @@ authors = ["Parity Technologies "] edition.workspace = true description = "Managed content" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index e0bed23c4f8c..604441c65f29 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -5,6 +5,8 @@ name = "staging-parachain-info" version = "0.7.0" license = "Apache-2.0" description = "Pallet to store the parachain ID" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 51fc384a4f14..ceb38f39fd80 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -5,6 +5,8 @@ name = "cumulus-ping" version = "0.7.0" license = "Apache-2.0" description = "Ping Pallet for Cumulus XCM/UMP testing." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index bfe8ed869758..949640dd4be6 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo variant of Asset Hub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index a3eaebb59153..8e47146a06c3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend variant of Asset Hub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index fb66f0de2322..fa9efbca7a39 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Assets common utilities" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index f6b3c13e8102..393d06f95b15 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Test utils for Asset Hub runtimes." 
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 3eb06e3a18c1..a7710783a1e0 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo's BridgeHub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 871bf44ec5b2..91900c830ba6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend's BridgeHub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml index 9cb24a2b2820..76a89bcb2e72 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Bridge hub common utilities" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [dependencies] codec = { features = ["derive"], workspace = true } diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 915b3090092f..16fef951f328 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Utils for BridgeHub testing" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 810abcf572d4..dc4b73db69e3 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Westend Collectives Parachain Runtime" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/constants/Cargo.toml b/cumulus/parachains/runtimes/constants/Cargo.toml index d54f1e7db6c1..01b023e0fb89 100644 --- a/cumulus/parachains/runtimes/constants/Cargo.toml +++ b/cumulus/parachains/runtimes/constants/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Common constants for Testnet Parachains runtimes" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index 
c98ca7ba3e74..1aeff5eb2e48 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -5,6 +5,8 @@ description = "Parachain testnet runtime for FRAME Contracts pallet." authors.workspace = true edition.workspace = true license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index 02807827cf92..ab621134b252 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo's Coretime parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 34353d312b1f..44dfbf93c30e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend's Coretime parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index 09b4ef679d24..9bbdb8d2ee08 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Glutton parachain runtime." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index a55143b62071..893133bf3c1a 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo's People parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index 4d66332e96dd..66b324b51af4 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend's People parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index e9d666617ee2..17c81ae4921a 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Utils for Runtimes testing" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index b0581c8d43ff..4713f4398eaa 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Simple runtime used by the rococo parachain(s)" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/polkadot-omni-node/Cargo.toml b/cumulus/polkadot-omni-node/Cargo.toml index a736e1ef80c5..8b46bc882868 100644 --- a/cumulus/polkadot-omni-node/Cargo.toml +++ b/cumulus/polkadot-omni-node/Cargo.toml @@ -6,6 +6,8 @@ edition.workspace = true build = "build.rs" description = "Generic binary that can run a parachain node with u32 block number and Aura consensus" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/polkadot-omni-node/lib/Cargo.toml b/cumulus/polkadot-omni-node/lib/Cargo.toml index a690229f1695..cca4ac3b2b69 100644 --- a/cumulus/polkadot-omni-node/lib/Cargo.toml +++ b/cumulus/polkadot-omni-node/lib/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Helper library that can be used to build a parachain node" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 5520126d0742..f5ce040bb530 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -6,6 +6,8 @@ 
edition.workspace = true build = "build.rs" description = "Runs a polkadot parachain node" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 185b2d40833f..715ce3e1a03e 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Core primitives for Aura in Cumulus" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 533d368d3b00..b5bfe4fbc889 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Cumulus related core primitive types and traits" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index a4271d3fd9cc..2ff990b8d514 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml index e61c865d05fb..6e8168091892 100644 --- a/cumulus/primitives/proof-size-hostfunction/Cargo.toml +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Hostfunction exposing storage proof size to the runtime." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index e1ae6743335a..3c358bc25edb 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Utilities to reclaim storage weight." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml index cb328e2f2cc6..70cb3e607b98 100644 --- a/cumulus/primitives/timestamp/Cargo.toml +++ b/cumulus/primitives/timestamp/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Provides timestamp related functionality for parachains." 
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 2ca8b82001d5..1444571edbe0 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Helper datatypes for Cumulus" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index e266b5807081..c1efa141a45d 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Mocked relay state proof builder for testing Cumulus." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index 8598481fae76..d0c637d64d01 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -5,6 +5,8 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index 3a939464868f..101caac0e313 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -20,6 +20,8 @@ authors.workspace = true edition.workspace = true version = "6.0.0" default-run = "polkadot" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index da37f6062c57..3eff525b7b1e 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index 42ca27953738..33869f216f78 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -5,6 +5,8 @@ description = "Core Polkadot types used by Relay Chains and parachains." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index 969742c5bb0a..528b955c4db3 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -5,6 +5,8 @@ description = "Erasure coding used for Polkadot's availability system" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index 777458673f5b..c1716e2e6eb8 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Collator-side subsystem that handles incoming candidate submissions from the parachain." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/approval-voting-parallel/Cargo.toml b/polkadot/node/core/approval-voting-parallel/Cargo.toml index 3a98cce80e92..995687fb4c11 100644 --- a/polkadot/node/core/approval-voting-parallel/Cargo.toml +++ b/polkadot/node/core/approval-voting-parallel/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Approval Voting Subsystem running approval work in parallel" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index f9754d2babc9..80f5dcb7f318 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Approval Voting Subsystem of the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 1d14e4cfba37..9f6864269cef 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index cd1acf9daa93..a81fe9486c63 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Candidate Backing Subsystem. Tracks parachain candidates that can be backed, as well as the issuance of statements about candidates." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index 126a18a14166..f00ba5712661 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Bitfield signing subsystem for the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index 87855dbce415..fea16b1c7604 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index a8e911e0c5c9..0f443868dada 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Chain API subsystem provides access to chain related utility functions like block number to hash conversions." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 755d5cadeaaf..d2cc425a4816 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 344b66af1933..11b4ac645c23 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -5,6 +5,8 @@ description = "The node-side components that participate in disputes" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 1e4953f40d0b..b1cd5e971b00 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Parachains inherent data provider for Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 5629e4ef7fbe..ced6c30c64b6 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Prospective Parachains subsystem. Tracks and handles prospective parachain fragments." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 64a598b420f7..26dca1adbc79 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -5,6 +5,8 @@ description = "Responsible for assembling a relay chain block from a set of avai authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml index 73ef17a2843a..cb7e3eadcf0a 100644 --- a/polkadot/node/core/pvf-checker/Cargo.toml +++ b/polkadot/node/core/pvf-checker/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 37d5878ea597..1b2a16ae8b55 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 903c8dd1af29..d058d582fc26 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 6ad340d25612..8327cf8058cd 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 56235bd82192..9dc800a8ef56 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index 834e4b300b9e..15cbf4665d06 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -5,6 +5,8 @@ description = "Wrapper around the parachain-related runtime APIs" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/gum/Cargo.toml b/polkadot/node/gum/Cargo.toml index 9b2df435a06a..84875ea121b6 100644 --- a/polkadot/node/gum/Cargo.toml +++ b/polkadot/node/gum/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Stick logs together with the TraceID as provided by tempo" +homepage.workspace 
= true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index da6364977cae..b4a3401b15e4 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Generate an overseer including builder pattern and message wrapper from a single annotated struct definition." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index 41b08b66e9b4..05344993a75e 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index 8d674a733470..abf345552f89 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Approval Distribution subsystem for the distribution of authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 8c5574f244e4..e87103d99f72 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 41f09b1f7044..be4323e74f02 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 6d007255c574..2ff30489b6c1 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Bitfiled Distribution subsystem, which gossips signed av authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index b4b5743853cd..c4b46c1dc001 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -5,6 +5,8 @@ description = "The Network Bridge Subsystem — protocol multiplexer for Polkado authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git 
a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index 304cb23bb6aa..a51d24c70807 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Collator Protocol subsystem. Allows collators and valida authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index b4dcafe09eb6..4f2f9ccadf8b 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Dispute Distribution subsystem, which ensures all concer authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index c8c19e5de070..7d17ea45eab9 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Gossip Support subsystem. Responsible for keeping track authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index 3d51d3c0a565..0bcf224332bc 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Primitives types for the Node-side" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index de07937ffb0a..d737c7bf8968 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index 2253a5ae0c66..62634c1da090 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "System overseer of the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index 7185205f905b..50ee3a80ddb8 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 6e8eade21a43..7f58a56d5d16 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -6,6 +6,8 @@ 
authors.workspace = true edition.workspace = true license.workspace = true description = "Utils to tie different Polkadot components together and allow instantiation of a node." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index b5686ec96be1..44bb7036d63d 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index d12daa572055..9c21fede1c47 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/subsystem/Cargo.toml b/polkadot/node/subsystem/Cargo.toml index ce4bceec7336..4f30d3ce9c09 100644 --- a/polkadot/node/subsystem/Cargo.toml +++ b/polkadot/node/subsystem/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/tracking-allocator/Cargo.toml b/polkadot/node/tracking-allocator/Cargo.toml index d98377e53759..0fbf526ccb8b 100644 --- a/polkadot/node/tracking-allocator/Cargo.toml +++ b/polkadot/node/tracking-allocator/Cargo.toml @@ -5,6 +5,8 @@ version = "2.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 9d0518fd46ad..ea6c4423dc19 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "6.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index dd269caa2d60..150aaf153fa7 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Shared primitives used by Polkadot runtime" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index d01528d4dee0..48980dde4bbc 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Polkadot specific RPC functionality." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 01b56b31cf20..1646db54455a 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -5,6 +5,8 @@ description = "Pallets and constants used in Relay Chain networks." 
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index 02810b75283f..3f110bdd76c6 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Helper crate for generating slot ranges for the Polkadot runtime." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index 3709e1eb697e..0415e4754009 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Runtime metric interface for the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index a3eec3f9d961..b01778eeb424 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -5,6 +5,8 @@ description = "Relay Chain runtime code responsible for Parachains." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 3b11c977edf3..764c53abbfcb 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -6,6 +6,8 @@ description = "Rococo testnet Relay Chain runtime." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/rococo/constants/Cargo.toml b/polkadot/runtime/rococo/constants/Cargo.toml index 1d0adac44af4..921bc8f5fe92 100644 --- a/polkadot/runtime/rococo/constants/Cargo.toml +++ b/polkadot/runtime/rococo/constants/Cargo.toml @@ -5,6 +5,8 @@ description = "Constants used throughout the Rococo network." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [package.metadata.polkadot-sdk] exclude-from-umbrella = true diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index f94301baab09..584f5855b7a4 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -6,6 +6,8 @@ description = "Westend testnet Relay Chain runtime." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/westend/constants/Cargo.toml b/polkadot/runtime/westend/constants/Cargo.toml index 27d5b19b8e77..a50e2f9cc639 100644 --- a/polkadot/runtime/westend/constants/Cargo.toml +++ b/polkadot/runtime/westend/constants/Cargo.toml @@ -5,6 +5,8 @@ description = "Constants used throughout the Westend network." 
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [package.metadata.polkadot-sdk] exclude-from-umbrella = true diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 53ea0b74463b..d9519dafe12d 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Stores messages other authorities issue about candidates in Polkadot." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index 16205b0f51f5..3006d8325ef9 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "CLI to generate voter bags for Polkadot runtimes" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 86c7067ad6fa..113e72c27ae1 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index b07bdfdca3d1..fe2b78163223 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -5,6 +5,8 @@ edition.workspace = true license.workspace = true version = "7.0.0" description = "Benchmarks for the XCM pallet" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 4d44d75e34dd..e8cdd3b4931b 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -5,6 +5,8 @@ description = "A pallet for handling XCM programs." 
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 83b35d19cf7e..3167766158ff 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -6,6 +6,8 @@ edition.workspace = true license.workspace = true version = "7.0.0" publish = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index eaa115740f3e..2819a0b0a555 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index cc966f91fe4d..20ca40de5faa 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/xcm-simulator/Cargo.toml b/polkadot/xcm/xcm-simulator/Cargo.toml index c7caa49393ed..47900e226d48 100644 --- a/polkadot/xcm/xcm-simulator/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml index e0aff9b7782a..6fbe9243944a 100644 --- a/polkadot/xcm/xcm-simulator/example/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/prdoc/pr_6549.prdoc b/prdoc/pr_6549.prdoc new file mode 100644 index 000000000000..61a64c724185 --- /dev/null +++ b/prdoc/pr_6549.prdoc @@ -0,0 +1,247 @@ +doc: [] + +crates: + - name: polkadot-sdk + bump: none + - name: asset-test-utils + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: cumulus-pallet-parachain-system-proc-macro + bump: none + - name: cumulus-primitives-core + bump: none + - name: polkadot-core-primitives + bump: none + - name: polkadot-parachain-primitives + bump: none + - name: polkadot-primitives + bump: none + - name: staging-xcm + bump: none + - name: xcm-procedural + bump: none + - name: cumulus-primitives-parachain-inherent + bump: none + - name: cumulus-primitives-proof-size-hostfunction + bump: none + - name: polkadot-runtime-common + bump: none + - name: polkadot-runtime-parachains + bump: none + - name: polkadot-runtime-metrics + bump: none + - name: staging-xcm-executor + bump: none + - name: slot-range-helper + bump: none + - name: staging-xcm-builder + bump: none + - name: pallet-xcm + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + - name: cumulus-pallet-aura-ext + bump: none + - name: cumulus-primitives-aura + bump: none + - name: staging-parachain-info + bump: none + - name: cumulus-test-relay-sproof-builder + bump: none + - name: 
cumulus-client-cli + bump: none + - name: cumulus-client-collator + bump: none + - name: cumulus-client-consensus-common + bump: none + - name: cumulus-client-pov-recovery + bump: none + - name: cumulus-relay-chain-interface + bump: none + - name: polkadot-overseer + bump: none + - name: tracing-gum + bump: none + - name: tracing-gum-proc-macro + bump: none + - name: polkadot-node-metrics + bump: none + - name: polkadot-node-primitives + bump: none + - name: polkadot-erasure-coding + bump: none + - name: polkadot-node-subsystem + bump: none + - name: polkadot-node-subsystem-types + bump: none + - name: polkadot-node-network-protocol + bump: none + - name: polkadot-statement-table + bump: none + - name: polkadot-rpc + bump: none + - name: polkadot-service + bump: none + - name: cumulus-client-parachain-inherent + bump: none + - name: westend-runtime + bump: none + - name: pallet-xcm-benchmarks + bump: none + - name: westend-runtime-constants + bump: none + - name: polkadot-approval-distribution + bump: none + - name: polkadot-node-subsystem-util + bump: none + - name: polkadot-availability-bitfield-distribution + bump: none + - name: polkadot-availability-distribution + bump: none + - name: polkadot-availability-recovery + bump: none + - name: polkadot-node-core-approval-voting + bump: none + - name: polkadot-node-core-approval-voting-parallel + bump: none + - name: polkadot-node-core-av-store + bump: none + - name: polkadot-node-core-chain-api + bump: none + - name: polkadot-statement-distribution + bump: none + - name: polkadot-collator-protocol + bump: none + - name: polkadot-dispute-distribution + bump: none + - name: polkadot-gossip-support + bump: none + - name: polkadot-network-bridge + bump: none + - name: polkadot-node-collation-generation + bump: none + - name: polkadot-node-core-backing + bump: none + - name: polkadot-node-core-bitfield-signing + bump: none + - name: polkadot-node-core-candidate-validation + bump: none + - name: polkadot-node-core-pvf + bump: none + - name: polkadot-node-core-pvf-common + bump: none + - name: polkadot-node-core-pvf-execute-worker + bump: none + - name: polkadot-node-core-pvf-prepare-worker + bump: none + - name: staging-tracking-allocator + bump: none + - name: rococo-runtime + bump: none + - name: rococo-runtime-constants + bump: none + - name: polkadot-node-core-chain-selection + bump: none + - name: polkadot-node-core-dispute-coordinator + bump: none + - name: polkadot-node-core-parachains-inherent + bump: none + - name: polkadot-node-core-prospective-parachains + bump: none + - name: polkadot-node-core-provisioner + bump: none + - name: polkadot-node-core-pvf-checker + bump: none + - name: polkadot-node-core-runtime-api + bump: none + - name: cumulus-client-network + bump: none + - name: cumulus-relay-chain-inprocess-interface + bump: none + - name: polkadot-cli + bump: none + - name: cumulus-client-consensus-aura + bump: none + - name: cumulus-client-consensus-proposer + bump: none + - name: cumulus-client-consensus-relay-chain + bump: none + - name: cumulus-client-service + bump: none + - name: cumulus-relay-chain-minimal-node + bump: none + - name: cumulus-relay-chain-rpc-interface + bump: none + - name: parachains-common + bump: none + - name: cumulus-primitives-utility + bump: none + - name: cumulus-pallet-xcmp-queue + bump: none + - name: parachains-runtimes-test-utils + bump: none + - name: assets-common + bump: none + - name: bridge-hub-common + bump: none + - name: bridge-hub-test-utils + bump: none + - name: 
cumulus-pallet-solo-to-para + bump: none + - name: cumulus-pallet-xcm + bump: none + - name: cumulus-ping + bump: none + - name: cumulus-primitives-timestamp + bump: none + - name: emulated-integration-tests-common + bump: none + - name: xcm-emulator + bump: none + - name: pallet-collective-content + bump: none + - name: xcm-simulator + bump: none + - name: pallet-revive-fixtures + bump: none + - name: polkadot-omni-node-lib + bump: none + - name: snowbridge-runtime-test-common + bump: none + - name: testnet-parachains-constants + bump: none + - name: asset-hub-rococo-runtime + bump: none + - name: asset-hub-westend-runtime + bump: none + - name: bridge-hub-rococo-runtime + bump: none + - name: bridge-hub-westend-runtime + bump: none + - name: collectives-westend-runtime + bump: none + - name: coretime-rococo-runtime + bump: none + - name: coretime-westend-runtime + bump: none + - name: people-rococo-runtime + bump: none + - name: people-westend-runtime + bump: none + - name: contracts-rococo-runtime + bump: none + - name: glutton-westend-runtime + bump: none + - name: rococo-parachain-runtime + bump: none + - name: polkadot-omni-node + bump: none + - name: polkadot-parachain-bin + bump: none + - name: polkadot + bump: none + - name: polkadot-voter-bags + bump: none + - name: xcm-simulator-example + bump: none diff --git a/scripts/generate-umbrella.py b/scripts/generate-umbrella.py index 8326909c3449..ae3873180553 100644 --- a/scripts/generate-umbrella.py +++ b/scripts/generate-umbrella.py @@ -120,6 +120,8 @@ def main(path, version): "edition": { "workspace": True }, "authors": { "workspace": True }, "description": "Polkadot SDK umbrella crate.", + "homepage": { "workspace": True }, + "repository": { "workspace": True }, "license": "Apache-2.0", "metadata": { "docs": { "rs": { "features": ["runtime-full", "node"], diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index 798ed8c75a5a..9fd434db6179 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Fixtures for testing and benchmarking" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 7f50658c4e16..9affcffd2ade 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -617,6 +617,12 @@ workspace = true [package.authors] workspace = true +[package.homepage] +workspace = true + +[package.repository] +workspace = true + [dependencies.assets-common] path = "../cumulus/parachains/runtimes/assets/common" default-features = false From c56a98b991e2cdce7419813886a74d5280b66d2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Tue, 3 Dec 2024 13:44:52 +0100 Subject: [PATCH 52/68] pallet-revive-fixtures: Try not to re-create fixture dir (#6735) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On some systems trying to re-create the output directory will lead to an error. 
Fixes https://github.com/paritytech/subxt/issues/1876 --------- Co-authored-by: Bastian Köcher --- substrate/frame/revive/fixtures/build.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/substrate/frame/revive/fixtures/build.rs b/substrate/frame/revive/fixtures/build.rs index 46cd5760ca4e..eca547bc6ddd 100644 --- a/substrate/frame/revive/fixtures/build.rs +++ b/substrate/frame/revive/fixtures/build.rs @@ -204,10 +204,15 @@ fn create_out_dir() -> Result { .join("pallet-revive-fixtures"); // clean up some leftover symlink from previous versions of this script - if out_dir.exists() && !out_dir.is_dir() { + let mut out_exists = out_dir.exists(); + if out_exists && !out_dir.is_dir() { fs::remove_file(&out_dir)?; + out_exists = false; + } + + if !out_exists { + fs::create_dir(&out_dir).context("Failed to create output directory")?; } - fs::create_dir_all(&out_dir).context("Failed to create output directory")?; // write the location of the out dir so it can be found later let mut file = fs::File::create(temp_dir.join("fixture_location.rs")) From d1d92ab76004ce349a97fc5d325eaf9a4a7101b7 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Tue, 3 Dec 2024 13:45:35 +0100 Subject: [PATCH 53/68] Bump Westend AH (#6583) Bump Asset-Hub westend spec version --------- Co-authored-by: GitHub Action --- .../runtimes/assets/asset-hub-westend/src/lib.rs | 2 +- prdoc/pr_6583.prdoc | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 prdoc/pr_6583.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 98d647d868db..21368e9c2b4b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -124,7 +124,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("westmint"), impl_name: alloc::borrow::Cow::Borrowed("westmint"), authoring_version: 1, - spec_version: 1_016_008, + spec_version: 1_017_002, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, diff --git a/prdoc/pr_6583.prdoc b/prdoc/pr_6583.prdoc new file mode 100644 index 000000000000..0e67ed33e27c --- /dev/null +++ b/prdoc/pr_6583.prdoc @@ -0,0 +1,7 @@ +title: Bump Westend AH +doc: +- audience: Runtime Dev + description: Bump Asset-Hub westend spec version +crates: +- name: asset-hub-westend-runtime + bump: minor From 896c81440c1dd169bd2f5e65aba46eca228609f8 Mon Sep 17 00:00:00 2001 From: Lulu Date: Tue, 3 Dec 2024 14:18:05 +0100 Subject: [PATCH 54/68] Add publish-check-compile workflow (#6556) Add publish-check-compile workflow This applies staged prdocs, then configures crate deps to pull from crates.io for our already published crates and local paths for the crates to be published. Then it runs cargo check on the result. This results in a build state consistent with that of publish time and should catch compile errors that we would otherwise have run into mid-publish. This acts as a supplement to the check-semver job. check-semver works on a high level and judges which changes are incorrect and why. This job just runs the change, sees if it compiles, and if not spits out a compile error.
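For reference, the same check can be reproduced locally by running the workflow's steps by hand. The sketch below is not part of the workflow itself; it simply mirrors the commands from the YAML added in this commit and assumes a polkadot-sdk checkout with staged prdocs and a Rust toolchain matching CI:

```bash
# Install the parity-publish version pinned by the workflow.
cargo install parity-publish@0.10.2 --locked -q

# Apply staged prdocs and rewrite crate dependencies: crates.io for already
# published crates, local paths for the crates that are about to be published.
parity-publish --color always plan --skip-check --prdoc prdoc/
parity-publish --color always apply --registry

# Compile-check only the crates that would actually be published.
packages="$(parity-publish apply --print)"
if [ -n "$packages" ]; then
  cargo --color always check $(printf -- '-p %s ' $packages)
fi
```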
--- .github/workflows/publish-check-compile.yml | 48 +++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 .github/workflows/publish-check-compile.yml diff --git a/.github/workflows/publish-check-compile.yml b/.github/workflows/publish-check-compile.yml new file mode 100644 index 000000000000..83cd3ff8fa90 --- /dev/null +++ b/.github/workflows/publish-check-compile.yml @@ -0,0 +1,48 @@ +name: Check publish build + +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + merge_group: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + preflight: + uses: ./.github/workflows/reusable-preflight.yml + + check-publish: + timeout-minutes: 90 + needs: [preflight] + runs-on: ${{ needs.preflight.outputs.RUNNER }} + container: + image: ${{ needs.preflight.outputs.IMAGE }} + steps: + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 + + - name: Rust Cache + uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + with: + cache-on-failure: true + + - name: install parity-publish + run: cargo install parity-publish@0.10.2 --locked -q + + - name: parity-publish update plan + run: parity-publish --color always plan --skip-check --prdoc prdoc/ + + - name: parity-publish apply plan + run: parity-publish --color always apply --registry + + - name: parity-publish check compile + run: | + packages="$(parity-publish apply --print)" + + if [ -n "$packages" ]; then + cargo --color always check $(printf -- '-p %s ' $packages) + fi From 41a5d8ec5f3d3d0ff82899be66113b223395ade5 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 3 Dec 2024 18:02:03 +0100 Subject: [PATCH 55/68] `fatxpool`: handling limits and priorities improvements (#6405) This PR provides a number of improvements around handling limits and priorities in the fork-aware transaction pool. #### Notes to reviewers. #### Following are the notable changes: 1. #### [Better support](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/414ec3ccad154c9a2aab0586bfa2d2c884fd140f) for `Usurped` transactions When any view reports a `Usurped` transaction (replaced by another one with a higher priority), it is removed from all the views (also inactive ones). Removal is implemented by simply submitting the usurper transaction to all the views. It is also ensured that the usurped tx will not sneak into the `view_store` in a newly created view (this is why `ViewStore::pending_txs_replacements` was added). 1. #### [`TimedTransactionSource`](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/f10590f3bde69b31250761a5b10802fb139ab2b2) introduced: Every view now has information about when the transaction entered the pool. Limit enforcement (now only for future txs) uses this timestamp to find the worst transactions. Having a common timestamp ensures a coherent assessment of the transaction's importance across different views. This could also later be used to select which ready transaction shall be dropped. 1. #### `DroppedWatcher`: [improved logic](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/560db28c987dd1e634119788ebc8318967df206b) for future transactions For future transactions - if the last referencing view is removed, the transaction will be dropped from the pool. This prevents future unincluded and un-promoted transactions from staying in the pool for a long time. #### And some minor changes: 1.
[simplified](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/2d0bbf83e2df2b4c641ef84c1188907c4bfad3c6) the flow in `update_view_with_mempool` (code duplication + minor bug fix). 2. `graph::BasePool`: [handling priorities](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/c9f2d39355853d034fdbc6ea31e4e0e5bf34cb6a) for future transaction improved (previously transaction with lower prio was reported as failed), 3. `graph::listener`: dedicated `limit_enforced`/`usurped`/`dropped` [calls added](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/7b58a68cccfcf372321ea41826fbe9d4222829cf), 4. flaky test [fixed](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/e0a7bc6c048245943796839b166505e2aecdbd7d) 5. new tests added, related to: #5809 --------- Co-authored-by: GitHub Action Co-authored-by: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com> --- prdoc/pr_6405.prdoc | 9 + .../client/transaction-pool/benches/basics.rs | 4 +- .../src/fork_aware_txpool/dropped_watcher.rs | 291 +++++++++++++----- .../fork_aware_txpool/fork_aware_txpool.rs | 199 +++++++----- .../import_notification_sink.rs | 19 +- .../fork_aware_txpool/multi_view_listener.rs | 38 ++- .../fork_aware_txpool/revalidation_worker.rs | 9 +- .../src/fork_aware_txpool/tx_mem_pool.rs | 88 ++++-- .../src/fork_aware_txpool/view.rs | 31 +- .../src/fork_aware_txpool/view_store.rs | 262 ++++++++++++++-- .../transaction-pool/src/graph/base_pool.rs | 159 +++++++++- .../transaction-pool/src/graph/listener.rs | 47 ++- .../client/transaction-pool/src/graph/pool.rs | 30 +- .../transaction-pool/src/graph/ready.rs | 5 +- .../transaction-pool/src/graph/rotator.rs | 5 +- .../src/graph/validated_pool.rs | 27 +- .../transaction-pool/src/graph/watcher.rs | 6 + substrate/client/transaction-pool/src/lib.rs | 5 +- .../src/single_state_txpool/revalidation.rs | 25 +- .../single_state_txpool.rs | 46 ++- .../client/transaction-pool/tests/fatp.rs | 14 +- .../transaction-pool/tests/fatp_common/mod.rs | 14 + .../transaction-pool/tests/fatp_limits.rs | 189 ++++++++++++ .../transaction-pool/tests/fatp_prios.rs | 249 +++++++++++++++ .../client/transaction-pool/tests/pool.rs | 28 +- 25 files changed, 1420 insertions(+), 379 deletions(-) create mode 100644 prdoc/pr_6405.prdoc create mode 100644 substrate/client/transaction-pool/tests/fatp_prios.rs diff --git a/prdoc/pr_6405.prdoc b/prdoc/pr_6405.prdoc new file mode 100644 index 000000000000..9e4e0b3c6c20 --- /dev/null +++ b/prdoc/pr_6405.prdoc @@ -0,0 +1,9 @@ +title: '`fatxpool`: handling limits and priorities improvements' +doc: +- audience: Node Dev + description: |- + This PR provides a number of improvements and fixes around handling limits and priorities in the fork-aware transaction pool. 
+ +crates: +- name: sc-transaction-pool + bump: major diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index 0d8c1cbba9b4..5e40b0fb72d6 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -152,7 +152,7 @@ fn uxt(transfer: TransferData) -> Extrinsic { } fn bench_configured(pool: Pool, number: u64, api: Arc) { - let source = TransactionSource::External; + let source = TimedTransactionSource::new_external(false); let mut futures = Vec::new(); let mut tags = Vec::new(); let at = HashAndNumber { @@ -171,7 +171,7 @@ fn bench_configured(pool: Pool, number: u64, api: Arc) { tags.push(to_tag(nonce, AccountId::from_h256(H256::from_low_u64_be(1)))); - futures.push(pool.submit_one(&at, source, xt)); + futures.push(pool.submit_one(&at, source.clone(), xt)); } let res = block_on(futures::future::join_all(futures.into_iter())); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index ecae21395c91..7679e3b169d2 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -24,7 +24,7 @@ use crate::{ common::log_xt::log_xt_trace, fork_aware_txpool::stream_map_util::next_event, - graph::{BlockHash, ChainApi, ExtrinsicHash}, + graph::{self, BlockHash, ExtrinsicHash}, LOG_TARGET, }; use futures::stream::StreamExt; @@ -33,12 +33,44 @@ use sc_transaction_pool_api::TransactionStatus; use sc_utils::mpsc; use sp_runtime::traits::Block as BlockT; use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{ + hash_map::{Entry, OccupiedEntry}, + HashMap, HashSet, + }, fmt::{self, Debug, Formatter}, pin::Pin, }; use tokio_stream::StreamMap; +/// Represents a transaction that was removed from the transaction pool, including the reason of its +/// removal. +#[derive(Debug, PartialEq)] +pub struct DroppedTransaction { + /// Hash of the dropped extrinsic. + pub tx_hash: Hash, + /// Reason of the transaction being dropped. + pub reason: DroppedReason, +} + +impl DroppedTransaction { + fn new_usurped(tx_hash: Hash, by: Hash) -> Self { + Self { reason: DroppedReason::Usurped(by), tx_hash } + } + + fn new_enforced_by_limts(tx_hash: Hash) -> Self { + Self { reason: DroppedReason::LimitsEnforced, tx_hash } + } +} + +/// Provides reason of why transactions was dropped. +#[derive(Debug, PartialEq)] +pub enum DroppedReason { + /// Transaction was replaced by other transaction (e.g. because of higher priority). + Usurped(Hash), + /// Transaction was dropped because of internal pool limits being enforced. + LimitsEnforced, +} + /// Dropped-logic related event from the single view. pub type ViewStreamEvent = crate::graph::DroppedByLimitsEvent, BlockHash>; @@ -47,7 +79,8 @@ type ViewStream = Pin> + Se /// Stream of extrinsic hashes that were dropped by the views and have no references by existing /// views. -pub(crate) type StreamOfDropped = Pin> + Send>>; +pub(crate) type StreamOfDropped = + Pin>> + Send>>; /// A type alias for a sender used as the controller of the [`MultiViewDropWatcherContext`]. 
/// Used to send control commands from the [`MultiViewDroppedWatcherController`] to @@ -59,24 +92,24 @@ type Controller = mpsc::TracingUnboundedSender; type CommandReceiver = mpsc::TracingUnboundedReceiver; /// Commands to control the instance of dropped transactions stream [`StreamOfDropped`]. -enum Command +enum Command where - C: ChainApi, + ChainApi: graph::ChainApi, { /// Adds a new stream of dropped-related events originating in a view with a specific block /// hash - AddView(BlockHash, ViewStream), + AddView(BlockHash, ViewStream), /// Removes an existing view's stream associated with a specific block hash. - RemoveView(BlockHash), - /// Removes internal states for given extrinsic hashes. + RemoveView(BlockHash), + /// Removes referencing views for given extrinsic hashes. /// /// Intended to ba called on finalization. - RemoveFinalizedTxs(Vec>), + RemoveFinalizedTxs(Vec>), } -impl Debug for Command +impl Debug for Command where - C: ChainApi, + ChainApi: graph::ChainApi, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { @@ -92,30 +125,114 @@ where /// /// This struct maintains a mapping of active views and their corresponding streams, as well as the /// state of each transaction with respect to these views. -struct MultiViewDropWatcherContext +struct MultiViewDropWatcherContext where - C: ChainApi, + ChainApi: graph::ChainApi, { /// A map that associates the views identified by corresponding block hashes with their streams /// of dropped-related events. This map is used to keep track of active views and their event /// streams. - stream_map: StreamMap, ViewStream>, + stream_map: StreamMap, ViewStream>, /// A receiver for commands to control the state of the stream, allowing the addition and /// removal of views. This is used to dynamically update which views are being tracked. - command_receiver: CommandReceiver>, - + command_receiver: CommandReceiver>, /// For each transaction hash we keep the set of hashes representing the views that see this - /// transaction as ready or future. + /// transaction as ready or in_block. + /// + /// Even if all views referencing a ready transactions are removed, we still want to keep + /// transaction, there can be a fork which sees the transaction as ready. /// /// Once transaction is dropped, dropping view is removed from the set. - transaction_states: HashMap, HashSet>>, + ready_transaction_views: HashMap, HashSet>>, + /// For each transaction hash we keep the set of hashes representing the views that see this + /// transaction as future. + /// + /// Once all views referencing a future transactions are removed, the future can be dropped. + /// + /// Once transaction is dropped, dropping view is removed from the set. + future_transaction_views: HashMap, HashSet>>, + + /// Transactions that need to be notified as dropped. + pending_dropped_transactions: Vec>, } impl MultiViewDropWatcherContext where - C: ChainApi + 'static, - <::Block as BlockT>::Hash: Unpin, + C: graph::ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, { + /// Provides the ready or future `HashSet` containing views referencing given transaction. 
+ fn transaction_views( + &mut self, + tx_hash: ExtrinsicHash, + ) -> Option, HashSet>>> { + if let Entry::Occupied(views_keeping_tx_valid) = self.ready_transaction_views.entry(tx_hash) + { + return Some(views_keeping_tx_valid) + } + if let Entry::Occupied(views_keeping_tx_valid) = + self.future_transaction_views.entry(tx_hash) + { + return Some(views_keeping_tx_valid) + } + None + } + + /// Processes the command and updates internal state accordingly. + fn handle_command(&mut self, cmd: Command) { + match cmd { + Command::AddView(key, stream) => { + trace!( + target: LOG_TARGET, + "dropped_watcher: Command::AddView {key:?} views:{:?}", + self.stream_map.keys().collect::>() + ); + self.stream_map.insert(key, stream); + }, + Command::RemoveView(key) => { + trace!( + target: LOG_TARGET, + "dropped_watcher: Command::RemoveView {key:?} views:{:?}", + self.stream_map.keys().collect::>() + ); + self.stream_map.remove(&key); + self.ready_transaction_views.iter_mut().for_each(|(tx_hash, views)| { + trace!( + target: LOG_TARGET, + "[{:?}] dropped_watcher: Command::RemoveView ready views: {:?}", + tx_hash, + views + ); + views.remove(&key); + }); + + self.future_transaction_views.iter_mut().for_each(|(tx_hash, views)| { + trace!( + target: LOG_TARGET, + "[{:?}] dropped_watcher: Command::RemoveView future views: {:?}", + tx_hash, + views + ); + views.remove(&key); + if views.is_empty() { + self.pending_dropped_transactions.push(*tx_hash); + } + }); + }, + Command::RemoveFinalizedTxs(xts) => { + log_xt_trace!( + target: LOG_TARGET, + xts.clone(), + "[{:?}] dropped_watcher: finalized xt removed" + ); + xts.iter().for_each(|xt| { + self.ready_transaction_views.remove(xt); + self.future_transaction_views.remove(xt); + }); + }, + } + } + /// Processes a `ViewStreamEvent` from a specific view and updates the internal state /// accordingly. /// @@ -125,41 +242,69 @@ where &mut self, block_hash: BlockHash, event: ViewStreamEvent, - ) -> Option> { + ) -> Option>> { trace!( target: LOG_TARGET, - "dropped_watcher: handle_event: event:{:?} views:{:?}, ", - event, + "dropped_watcher: handle_event: event:{event:?} from:{block_hash:?} future_views:{:?} ready_views:{:?} stream_map views:{:?}, ", + self.future_transaction_views.get(&event.0), + self.ready_transaction_views.get(&event.0), self.stream_map.keys().collect::>(), ); let (tx_hash, status) = event; match status { - TransactionStatus::Ready | TransactionStatus::Future => { - self.transaction_states.entry(tx_hash).or_default().insert(block_hash); + TransactionStatus::Future => { + self.future_transaction_views.entry(tx_hash).or_default().insert(block_hash); + }, + TransactionStatus::Ready | TransactionStatus::InBlock(..) => { + // note: if future transaction was once seens as the ready we may want to treat it + // as ready transactions. Unreferenced future transactions are more likely to be + // removed when the last referencing view is removed then ready transactions. + // Transcaction seen as ready is likely quite close to be included in some + // future fork. 
+ if let Some(mut views) = self.future_transaction_views.remove(&tx_hash) { + views.insert(block_hash); + self.ready_transaction_views.insert(tx_hash, views); + } else { + self.ready_transaction_views.entry(tx_hash).or_default().insert(block_hash); + } }, - TransactionStatus::Dropped | TransactionStatus::Usurped(_) => { - if let Entry::Occupied(mut views_keeping_tx_valid) = - self.transaction_states.entry(tx_hash) - { + TransactionStatus::Dropped => { + if let Some(mut views_keeping_tx_valid) = self.transaction_views(tx_hash) { views_keeping_tx_valid.get_mut().remove(&block_hash); - if views_keeping_tx_valid.get().is_empty() || - views_keeping_tx_valid - .get() - .iter() - .all(|h| !self.stream_map.contains_key(h)) - { - return Some(tx_hash) + if views_keeping_tx_valid.get().is_empty() { + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) } } else { debug!("[{:?}] dropped_watcher: removing (non-tracked) tx", tx_hash); - return Some(tx_hash) + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) } }, + TransactionStatus::Usurped(by) => + return Some(DroppedTransaction::new_usurped(tx_hash, by)), _ => {}, }; None } + /// Gets pending dropped transactions if any. + fn get_pending_dropped_transaction(&mut self) -> Option>> { + while let Some(tx_hash) = self.pending_dropped_transactions.pop() { + // never drop transaction that was seen as ready. It may not have a referencing + // view now, but such fork can appear. + if self.ready_transaction_views.get(&tx_hash).is_some() { + continue + } + + if let Some(views) = self.future_transaction_views.get(&tx_hash) { + if views.is_empty() { + self.future_transaction_views.remove(&tx_hash); + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) + } + } + } + None + } + /// Creates a new `StreamOfDropped` and its associated event stream controller. /// /// This method initializes the internal structures and unfolds the stream of dropped @@ -176,42 +321,29 @@ where let ctx = Self { stream_map: StreamMap::new(), command_receiver, - transaction_states: Default::default(), + ready_transaction_views: Default::default(), + future_transaction_views: Default::default(), + pending_dropped_transactions: Default::default(), }; let stream_map = futures::stream::unfold(ctx, |mut ctx| async move { loop { + if let Some(dropped) = ctx.get_pending_dropped_transaction() { + debug!("dropped_watcher: sending out (pending): {dropped:?}"); + return Some((dropped, ctx)); + } tokio::select! { biased; - cmd = ctx.command_receiver.next() => { - match cmd? 
{ - Command::AddView(key,stream) => { - trace!(target: LOG_TARGET,"dropped_watcher: Command::AddView {key:?} views:{:?}",ctx.stream_map.keys().collect::>()); - ctx.stream_map.insert(key,stream); - }, - Command::RemoveView(key) => { - trace!(target: LOG_TARGET,"dropped_watcher: Command::RemoveView {key:?} views:{:?}",ctx.stream_map.keys().collect::>()); - ctx.stream_map.remove(&key); - ctx.transaction_states.iter_mut().for_each(|(_,state)| { - state.remove(&key); - }); - }, - Command::RemoveFinalizedTxs(xts) => { - log_xt_trace!(target: LOG_TARGET, xts.clone(), "[{:?}] dropped_watcher: finalized xt removed"); - xts.iter().for_each(|xt| { - ctx.transaction_states.remove(xt); - }); - - }, - } - }, - Some(event) = next_event(&mut ctx.stream_map) => { if let Some(dropped) = ctx.handle_event(event.0, event.1) { debug!("dropped_watcher: sending out: {dropped:?}"); return Some((dropped, ctx)); } + }, + cmd = ctx.command_receiver.next() => { + ctx.handle_command(cmd?); } + } } }) @@ -225,30 +357,30 @@ where /// /// This struct provides methods to add and remove streams associated with views to and from the /// stream. -pub struct MultiViewDroppedWatcherController { +pub struct MultiViewDroppedWatcherController { /// A controller allowing to update the state of the associated [`StreamOfDropped`]. - controller: Controller>, + controller: Controller>, } -impl Clone for MultiViewDroppedWatcherController { +impl Clone for MultiViewDroppedWatcherController { fn clone(&self) -> Self { Self { controller: self.controller.clone() } } } -impl MultiViewDroppedWatcherController +impl MultiViewDroppedWatcherController where - C: ChainApi + 'static, - <::Block as BlockT>::Hash: Unpin, + ChainApi: graph::ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, { /// Creates new [`StreamOfDropped`] and its controller. - pub fn new() -> (MultiViewDroppedWatcherController, StreamOfDropped) { - let (stream_map, ctrl) = MultiViewDropWatcherContext::::event_stream(); + pub fn new() -> (MultiViewDroppedWatcherController, StreamOfDropped) { + let (stream_map, ctrl) = MultiViewDropWatcherContext::::event_stream(); (Self { controller: ctrl }, stream_map.boxed()) } /// Notifies the [`StreamOfDropped`] that new view was created. - pub fn add_view(&self, key: BlockHash, view: ViewStream) { + pub fn add_view(&self, key: BlockHash, view: ViewStream) { let _ = self.controller.unbounded_send(Command::AddView(key, view)).map_err(|e| { trace!(target: LOG_TARGET, "dropped_watcher: add_view {key:?} send message failed: {e}"); }); @@ -256,14 +388,17 @@ where /// Notifies the [`StreamOfDropped`] that the view was destroyed and shall be removed the /// stream map. - pub fn remove_view(&self, key: BlockHash) { + pub fn remove_view(&self, key: BlockHash) { let _ = self.controller.unbounded_send(Command::RemoveView(key)).map_err(|e| { trace!(target: LOG_TARGET, "dropped_watcher: remove_view {key:?} send message failed: {e}"); }); } /// Removes status info for finalized transactions. 
- pub fn remove_finalized_txs(&self, xts: impl IntoIterator> + Clone) { + pub fn remove_finalized_txs( + &self, + xts: impl IntoIterator> + Clone, + ) { let _ = self .controller .unbounded_send(Command::RemoveFinalizedTxs(xts.into_iter().collect())) @@ -298,7 +433,7 @@ mod dropped_watcher_tests { watcher.add_view(block_hash, view_stream); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } #[tokio::test] @@ -348,7 +483,10 @@ mod dropped_watcher_tests { watcher.add_view(block_hash0, view_stream0); watcher.add_view(block_hash1, view_stream1); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash1]); + assert_eq!( + handle.await.unwrap(), + vec![DroppedTransaction::new_enforced_by_limts(tx_hash1)] + ); } #[tokio::test] @@ -373,10 +511,11 @@ mod dropped_watcher_tests { watcher.add_view(block_hash0, view_stream0); assert!(output_stream.next().now_or_never().is_none()); + watcher.remove_view(block_hash0); watcher.add_view(block_hash1, view_stream1); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } #[tokio::test] @@ -419,6 +558,6 @@ mod dropped_watcher_tests { let block_hash2 = H256::repeat_byte(0x03); watcher.add_view(block_hash2, view_stream2); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 065d0cb3a274..4ec87f1fefa4 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -23,7 +23,7 @@ use super::{ import_notification_sink::MultiViewImportNotificationSink, metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, - tx_mem_pool::{TxInMemPool, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, + tx_mem_pool::{InsertionInfo, TxInMemPool, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, view::View, view_store::ViewStore, }; @@ -31,8 +31,12 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::revalidation_worker, - graph::{self, base_pool::Transaction, ExtrinsicFor, ExtrinsicHash, IsValidator, Options}, + fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, + graph::{ + self, + base_pool::{TimedTransactionSource, Transaction}, + ExtrinsicFor, ExtrinsicHash, IsValidator, Options, + }, ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; @@ -197,9 +201,14 @@ where let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); + + let view_store = + Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); + let dropped_monitor_task = Self::dropped_monitor_task( dropped_stream, mempool.clone(), + view_store.clone(), import_notification_sink.clone(), ); @@ -216,8 +225,8 @@ where ( Self { mempool, - 
api: pool_api.clone(), - view_store: Arc::new(ViewStore::new(pool_api, listener, dropped_stream_controller)), + api: pool_api, + view_store, ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), enactment_state: Arc::new(Mutex::new(EnactmentState::new( best_block_hash, @@ -233,14 +242,17 @@ where ) } - /// Monitors the stream of dropped transactions and removes them from the mempool. + /// Monitors the stream of dropped transactions and removes them from the mempool and + /// view_store. /// /// This asynchronous task continuously listens for dropped transaction notifications provided /// within `dropped_stream` and ensures that these transactions are removed from the `mempool` - /// and `import_notification_sink` instances. + /// and `import_notification_sink` instances. For Usurped events, the transaction is also + /// removed from the view_store. async fn dropped_monitor_task( mut dropped_stream: StreamOfDropped, mempool: Arc>, + view_store: Arc>, import_notification_sink: MultiViewImportNotificationSink< Block::Hash, ExtrinsicHash, @@ -251,9 +263,33 @@ where log::debug!(target: LOG_TARGET, "fatp::dropped_monitor_task: terminated..."); break; }; - log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification, removing", dropped); - mempool.remove_dropped_transactions(&[dropped]).await; - import_notification_sink.clean_notified_items(&[dropped]); + let dropped_tx_hash = dropped.tx_hash; + log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification {:?}, removing", dropped_tx_hash,dropped.reason); + match dropped.reason { + DroppedReason::Usurped(new_tx_hash) => { + if let Some(new_tx) = mempool.get_by_hash(new_tx_hash) { + view_store + .replace_transaction( + new_tx.source(), + new_tx.tx(), + dropped_tx_hash, + new_tx.is_watched(), + ) + .await; + } else { + log::trace!( + target:LOG_TARGET, + "error: dropped_monitor_task: no entry in mempool for new transaction {:?}", + new_tx_hash, + ); + } + }, + DroppedReason::LimitsEnforced => {}, + }; + + mempool.remove_dropped_transaction(&dropped_tx_hash).await; + view_store.listener.transaction_dropped(dropped); + import_notification_sink.clean_notified_items(&[dropped_tx_hash]); } } @@ -288,9 +324,13 @@ where let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); + + let view_store = + Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); let dropped_monitor_task = Self::dropped_monitor_task( dropped_stream, mempool.clone(), + view_store.clone(), import_notification_sink.clone(), ); @@ -306,8 +346,8 @@ where Self { mempool, - api: pool_api.clone(), - view_store: Arc::new(ViewStore::new(pool_api, listener, dropped_stream_controller)), + api: pool_api, + view_store, ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), enactment_state: Arc::new(Mutex::new(EnactmentState::new( best_block_hash, @@ -366,6 +406,16 @@ where self.mempool.unwatched_and_watched_count() } + /// Returns a set of future transactions for given block hash. + /// + /// Intended for logging / tests. + pub fn futures_at( + &self, + at: Block::Hash, + ) -> Option, ExtrinsicFor>>> { + self.view_store.futures_at(at) + } + /// Returns a best-effort set of ready transactions for a given block, without executing full /// maintain process. 
/// @@ -600,31 +650,33 @@ where let mempool_results = self.mempool.extend_unwatched(source, &xts); if view_store.is_empty() { - return Ok(mempool_results) + return Ok(mempool_results.into_iter().map(|r| r.map(|r| r.hash)).collect::>()) } let to_be_submitted = mempool_results .iter() .zip(xts) - .filter_map(|(result, xt)| result.as_ref().ok().map(|_| xt)) + .filter_map(|(result, xt)| { + result.as_ref().ok().map(|insertion| (insertion.source.clone(), xt)) + }) .collect::>(); self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _)); let mempool = self.mempool.clone(); - let results_map = view_store.submit(source, to_be_submitted.into_iter()).await; + let results_map = view_store.submit(to_be_submitted.into_iter()).await; let mut submission_results = reduce_multiview_result(results_map).into_iter(); Ok(mempool_results .into_iter() .map(|result| { - result.and_then(|xt_hash| { + result.and_then(|insertion| { submission_results .next() .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed.") .inspect_err(|_| - mempool.remove(xt_hash) + mempool.remove(insertion.hash) ) }) }) @@ -660,19 +712,18 @@ where ) -> Result>>, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); let xt = Arc::from(xt); - let xt_hash = match self.mempool.push_watched(source, xt.clone()) { - Ok(xt_hash) => xt_hash, - Err(e) => return Err(e), - }; + let InsertionInfo { hash: xt_hash, source: timed_source } = + match self.mempool.push_watched(source, xt.clone()) { + Ok(result) => result, + Err(e) => return Err(e), + }; self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - let view_store = self.view_store.clone(); - let mempool = self.mempool.clone(); - view_store - .submit_and_watch(at, source, xt) + self.view_store + .submit_and_watch(at, timed_source, xt) .await - .inspect_err(|_| mempool.remove(xt_hash)) + .inspect_err(|_| self.mempool.remove(xt_hash)) } /// Intended to remove transactions identified by the given hashes, and any dependent @@ -801,12 +852,12 @@ where ) -> Result { log::debug!(target: LOG_TARGET, "fatp::submit_local views:{}", self.active_views_count()); let xt = Arc::from(xt); - let result = self + let InsertionInfo { hash: xt_hash, .. } = self .mempool .extend_unwatched(TransactionSource::Local, &[xt.clone()]) .remove(0)?; - self.view_store.submit_local(xt).or_else(|_| Ok(result)) + self.view_store.submit_local(xt).or_else(|_| Ok(xt_hash)) } } @@ -914,6 +965,9 @@ where let start = Instant::now(); let watched_xts = self.register_listeners(&mut view).await; let duration = start.elapsed(); + // sync the transactions statuses and referencing views in all the listeners with newly + // cloned view. + view.pool.validated_pool().retrigger_notifications(); log::debug!(target: LOG_TARGET, "register_listeners: at {at:?} took {duration:?}"); // 2. Handle transactions from the tree route. 
Pruning transactions from the view first @@ -1041,58 +1095,35 @@ where self.active_views_count() ); let included_xts = self.extrinsics_included_since_finalized(view.at.hash).await; - let xts = self.mempool.clone_unwatched(); - - let mut all_submitted_count = 0; - if !xts.is_empty() { - let unwatched_count = xts.len(); - let mut buckets = HashMap::>>::default(); - xts.into_iter() - .filter(|(hash, _)| !view.pool.validated_pool().pool.read().is_imported(hash)) - .filter(|(hash, _)| !included_xts.contains(&hash)) - .map(|(_, tx)| (tx.source(), tx.tx())) - .for_each(|(source, tx)| buckets.entry(source).or_default().push(tx)); - - for (source, xts) in buckets { - all_submitted_count += xts.len(); - let _ = view.submit_many(source, xts).await; - } - log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} unwatched {}/{}", view.at.hash, all_submitted_count, unwatched_count); - } - - let watched_submitted_count = watched_xts.len(); - let mut buckets = HashMap::< - TransactionSource, - Vec<(ExtrinsicHash, ExtrinsicFor)>, - >::default(); - watched_xts + let (hashes, xts_filtered): (Vec<_>, Vec<_>) = watched_xts .into_iter() + .chain(self.mempool.clone_unwatched().into_iter()) + .filter(|(hash, _)| !view.is_imported(hash)) .filter(|(hash, _)| !included_xts.contains(&hash)) - .map(|(tx_hash, tx)| (tx.source(), tx_hash, tx.tx())) - .for_each(|(source, tx_hash, tx)| { - buckets.entry(source).or_default().push((tx_hash, tx)) - }); + .map(|(tx_hash, tx)| (tx_hash, (tx.source(), tx.tx()))) + .unzip(); - let mut watched_results = Vec::default(); - for (source, watched_xts) in buckets { - let hashes = watched_xts.iter().map(|i| i.0).collect::>(); - let results = view - .submit_many(source, watched_xts.into_iter().map(|i| i.1)) - .await - .into_iter() - .zip(hashes) - .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) - .collect::>(); - watched_results.extend(results); - } + let watched_results = view + .submit_many(xts_filtered) + .await + .into_iter() + .zip(hashes) + .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) + .collect::>(); + + let submitted_count = watched_results.len(); - log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} watched {}/{}", view.at.hash, watched_submitted_count, self.mempool_len().1); + log::debug!( + target: LOG_TARGET, + "update_view_with_mempool: at {:?} submitted {}/{}", + view.at.hash, + submitted_count, + self.mempool.len() + ); - all_submitted_count += watched_submitted_count; - let _ = all_submitted_count - .try_into() - .map(|v| self.metrics.report(|metrics| metrics.submitted_from_mempool_txs.inc_by(v))); + self.metrics + .report(|metrics| metrics.submitted_from_mempool_txs.inc_by(submitted_count as _)); // if there are no views yet, and a single newly created view is reporting error, just send // out the invalid event, and remove transaction. @@ -1176,7 +1207,14 @@ where }) .map(|(tx_hash, tx)| { //find arc if tx is known - self.mempool.get_by_hash(tx_hash).unwrap_or_else(|| Arc::from(tx)) + self.mempool + .get_by_hash(tx_hash) + .map(|tx| (tx.source(), tx.tx())) + .unwrap_or_else(|| { + // These transactions are coming from retracted blocks, we + // should simply consider them external. + (TimedTransactionSource::new_external(true), Arc::from(tx)) + }) }), ); @@ -1185,16 +1223,7 @@ where }); } - let _ = view - .pool - .resubmit_at( - &hash_and_number, - // These transactions are coming from retracted blocks, we should - // simply consider them external. 
- TransactionSource::External, - resubmit_transactions, - ) - .await; + let _ = view.pool.resubmit_at(&hash_and_number, resubmit_transactions).await; } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs index 7fbdcade63b8..f9a41673bb8f 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs @@ -326,6 +326,7 @@ mod tests { let j0 = tokio::spawn(runnable); let stream = ctrl.event_stream(); + let stream2 = ctrl.event_stream(); let mut v1 = View::new(vec![(10, 1), (10, 2), (10, 3)]); let mut v2 = View::new(vec![(20, 1), (20, 2), (20, 6)]); @@ -342,20 +343,16 @@ mod tests { ctrl.add_view(1000, o1); ctrl.add_view(2000, o2); - let j4 = { - let ctrl = ctrl.clone(); - tokio::spawn(async move { - tokio::time::sleep(Duration::from_millis(70)).await; - ctrl.clean_notified_items(&vec![1, 3]); - ctrl.add_view(3000, o3.boxed()); - }) - }; + let out = stream.take(4).collect::>().await; + assert_eq!(out, vec![1, 2, 3, 6]); - let out = stream.take(6).collect::>().await; + ctrl.clean_notified_items(&vec![1, 3]); + ctrl.add_view(3000, o3.boxed()); + let out = stream2.take(6).collect::>().await; assert_eq!(out, vec![1, 2, 3, 6, 1, 3]); - drop(ctrl); - futures::future::join_all(vec![j0, j1, j2, j3, j4]).await; + drop(ctrl); + futures::future::join_all(vec![j0, j1, j2, j3]).await; } #[tokio::test] diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs index 8d0e69db2e9a..a00234a99808 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs @@ -36,6 +36,8 @@ use std::{ }; use tokio_stream::StreamMap; +use super::dropped_watcher::{DroppedReason, DroppedTransaction}; + /// A side channel allowing to control the external stream instance (one per transaction) with /// [`ControllerCommand`]. /// @@ -79,7 +81,7 @@ enum ControllerCommand { /// Notifies that a transaction was dropped from the pool. /// /// If all preconditions are met, an external dropped event will be sent out. - TransactionDropped, + TransactionDropped(DroppedReason>), } impl std::fmt::Debug for ControllerCommand @@ -99,8 +101,8 @@ where ControllerCommand::TransactionBroadcasted(_) => { write!(f, "ListenerAction::TransactionBroadcasted(...)") }, - ControllerCommand::TransactionDropped => { - write!(f, "ListenerAction::TransactionDropped") + ControllerCommand::TransactionDropped(r) => { + write!(f, "ListenerAction::TransactionDropped {r:?}") }, } } @@ -268,6 +270,7 @@ where /// stream map. fn remove_view(&mut self, block_hash: BlockHash) { self.status_stream_map.remove(&block_hash); + self.views_keeping_tx_valid.remove(&block_hash); trace!(target: LOG_TARGET, "[{:?}] RemoveView view: {:?} views:{:?}", self.tx_hash, block_hash, self.status_stream_map.keys().collect::>()); } } @@ -282,6 +285,11 @@ where Self { controllers: Default::default() } } + /// Returns `true` if the listener contains a stream controller for the specified hash. + pub fn contains_tx(&self, tx_hash: &ExtrinsicHash) -> bool { + self.controllers.read().contains_key(tx_hash) + } + /// Creates an external aggregated stream of events for given transaction. 
/// /// This method initializes an `ExternalWatcherContext` for the provided transaction hash, sets @@ -346,11 +354,16 @@ where log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Broadcasted", ctx.tx_hash); return Some((TransactionStatus::Broadcast(peers), ctx)) }, - ControllerCommand::TransactionDropped => { + ControllerCommand::TransactionDropped(DroppedReason::LimitsEnforced) => { log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Dropped", ctx.tx_hash); ctx.terminate = true; return Some((TransactionStatus::Dropped, ctx)) }, + ControllerCommand::TransactionDropped(DroppedReason::Usurped(by)) => { + log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Usurped({:?})", ctx.tx_hash, by); + ctx.terminate = true; + return Some((TransactionStatus::Usurped(by), ctx)) + }, } }, }; @@ -445,16 +458,15 @@ where /// /// This method sends a `TransactionDropped` command to the controller of each requested /// transaction prompting and external `Broadcasted` event. - pub(crate) fn transactions_dropped(&self, dropped: &[ExtrinsicHash]) { + pub(crate) fn transaction_dropped(&self, dropped: DroppedTransaction>) { let mut controllers = self.controllers.write(); - debug!(target: LOG_TARGET, "mvl::transactions_dropped: {:?}", dropped); - for tx_hash in dropped { - if let Some(tx) = controllers.remove(&tx_hash) { - debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); - if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped) { - trace!(target: LOG_TARGET, "[{:?}] transactions_dropped: send message failed: {:?}", tx_hash, e); - }; - } + debug!(target: LOG_TARGET, "mvl::transaction_dropped: {:?}", dropped); + if let Some(tx) = controllers.remove(&dropped.tx_hash) { + let DroppedTransaction { tx_hash, reason } = dropped; + debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); + if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped(reason)) { + trace!(target: LOG_TARGET, "[{:?}] transaction_dropped: send message failed: {:?}", tx_hash, e); + }; } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs index 9464ab3f5766..eb898c35a134 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs @@ -186,9 +186,9 @@ mod tests { use crate::{ common::tests::{uxt, TestApi}, fork_aware_txpool::view::FinishRevalidationLocalChannels, + TimedTransactionSource, }; use futures::executor::block_on; - use sc_transaction_pool_api::TransactionSource; use substrate_test_runtime::{AccountId, Transfer, H256}; use substrate_test_runtime_client::AccountKeyring::Alice; #[test] @@ -212,9 +212,10 @@ mod tests { nonce: 0, }); - let _ = block_on( - view.submit_many(TransactionSource::External, std::iter::once(uxt.clone().into())), - ); + let _ = block_on(view.submit_many(std::iter::once(( + TimedTransactionSource::new_external(false), + uxt.clone().into(), + )))); assert_eq!(api.validation_requests().len(), 1); let (finish_revalidation_request_tx, finish_revalidation_request_rx) = diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 86c07008c3f3..7b824d4653c2 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ 
-30,7 +30,7 @@ use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::Mult use crate::{ common::log_xt::log_xt_trace, graph, - graph::{tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, + graph::{base_pool::TimedTransactionSource, tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, LOG_TARGET, }; use futures::FutureExt; @@ -74,7 +74,7 @@ where /// Size of the extrinsics actual body. bytes: usize, /// Transaction source. - source: TransactionSource, + source: TimedTransactionSource, /// When the transaction was revalidated, used to periodically revalidate the mem pool buffer. validated_at: AtomicU64, //todo: we need to add future / ready status at finalized block. @@ -95,18 +95,30 @@ where /// Shall the progress of transaction be watched. /// /// Was transaction sent with `submit_and_watch`. - fn is_watched(&self) -> bool { + pub(crate) fn is_watched(&self) -> bool { self.watched } /// Creates a new instance of wrapper for unwatched transaction. fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { watched: false, tx, source, validated_at: AtomicU64::new(0), bytes } + Self { + watched: false, + tx, + source: TimedTransactionSource::from_transaction_source(source, true), + validated_at: AtomicU64::new(0), + bytes, + } } /// Creates a new instance of wrapper for watched transaction. fn new_watched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { watched: true, tx, source, validated_at: AtomicU64::new(0), bytes } + Self { + watched: true, + tx, + source: TimedTransactionSource::from_transaction_source(source, true), + validated_at: AtomicU64::new(0), + bytes, + } } /// Provides a clone of actual transaction body. @@ -117,8 +129,8 @@ where } /// Returns the source of the transaction. - pub(crate) fn source(&self) -> TransactionSource { - self.source + pub(crate) fn source(&self) -> TimedTransactionSource { + self.source.clone() } } @@ -174,6 +186,19 @@ where max_transactions_total_bytes: usize, } +/// Helper structure to encapsulate a result of [`TxMemPool::try_insert`]. +#[derive(Debug)] +pub(super) struct InsertionInfo { + pub(super) hash: Hash, + pub(super) source: TimedTransactionSource, +} + +impl InsertionInfo { + fn new(hash: Hash, source: TimedTransactionSource) -> Self { + Self { hash, source } + } +} + impl TxMemPool where Block: BlockT, @@ -220,8 +245,8 @@ where pub(super) fn get_by_hash( &self, hash: ExtrinsicHash, - ) -> Option> { - self.transactions.read().get(&hash).map(|t| t.tx()) + ) -> Option>> { + self.transactions.read().get(&hash).map(Clone::clone) } /// Returns a tuple with the count of unwatched and watched transactions in the memory pool. @@ -231,6 +256,11 @@ where (transactions.len() - watched_count, watched_count) } + /// Returns a total number of transactions kept within mempool. + pub fn len(&self) -> usize { + self.transactions.read().len() + } + /// Returns the number of bytes used by all extrinsics in the the pool. 
#[cfg(test)] pub fn bytes(&self) -> usize { @@ -249,7 +279,7 @@ where &self, hash: ExtrinsicHash, tx: TxInMemPool, - ) -> Result, ChainApi::Error> { + ) -> Result>, ChainApi::Error> { let bytes = self.transactions.bytes(); let mut transactions = self.transactions.write(); let result = match ( @@ -257,14 +287,15 @@ where transactions.contains_key(&hash), ) { (true, false) => { + let source = tx.source(); transactions.insert(hash, Arc::from(tx)); - Ok(hash) + Ok(InsertionInfo::new(hash, source)) }, (_, true) => Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)).into()), (false, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()), }; - log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result); + log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash)); result } @@ -277,7 +308,7 @@ where &self, source: TransactionSource, xts: &[ExtrinsicFor], - ) -> Vec, ChainApi::Error>> { + ) -> Vec>, ChainApi::Error>> { let result = xts .iter() .map(|xt| { @@ -294,25 +325,18 @@ where &self, source: TransactionSource, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result>, ChainApi::Error> { let (hash, length) = self.api.hash_and_length(&xt); self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) } - /// Removes transactions from the memory pool which are specified by the given list of hashes - /// and send the `Dropped` event to the listeners of these transactions. - pub(super) async fn remove_dropped_transactions( + /// Removes transaction from the memory pool which are specified by the given list of hashes. + pub(super) async fn remove_dropped_transaction( &self, - to_be_removed: &[ExtrinsicHash], - ) { - log::debug!(target: LOG_TARGET, "remove_dropped_transactions count:{:?}", to_be_removed.len()); - log_xt_trace!(target: LOG_TARGET, to_be_removed, "[{:?}] mempool::remove_dropped_transactions"); - let mut transactions = self.transactions.write(); - to_be_removed.iter().for_each(|t| { - transactions.remove(t); - }); - - self.listener.transactions_dropped(to_be_removed); + dropped: &ExtrinsicHash, + ) -> Option>> { + log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped); + self.transactions.write().remove(dropped) } /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory @@ -369,13 +393,13 @@ where }; let validations_futures = input.into_iter().map(|(xt_hash, xt)| { - self.api.validate_transaction(finalized_block.hash, xt.source, xt.tx()).map( - move |validation_result| { + self.api + .validate_transaction(finalized_block.hash, xt.source.clone().into(), xt.tx()) + .map(move |validation_result| { xt.validated_at .store(finalized_block.number.into().as_u64(), atomic::Ordering::Relaxed); (xt_hash, validation_result) - }, - ) + }) }); let validation_results = futures::future::join_all(validations_futures).await; let input_len = validation_results.len(); @@ -403,7 +427,7 @@ where log::debug!( target: LOG_TARGET, - "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} purged:{} took {duration:?}", invalid_hashes.len(), + "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} invalid_hashes:{} took {duration:?}", invalid_hashes.len(), ); invalid_hashes diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 99095d88cb0a..3cbb8fa4871d 100644 --- 
a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -27,13 +27,13 @@ use super::metrics::MetricsLink as PrometheusMetrics; use crate::{ common::log_xt::log_xt_trace, graph::{ - self, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, IsValidator, ValidatedTransaction, - ValidatedTransactionFor, + self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, + IsValidator, ValidatedTransaction, ValidatedTransactionFor, }, LOG_TARGET, }; use parking_lot::Mutex; -use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus, TransactionSource}; +use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus}; use sp_blockchain::HashAndNumber; use sp_runtime::{ generic::BlockId, traits::Block as BlockT, transaction_validity::TransactionValidityError, @@ -157,22 +157,21 @@ where /// Imports many unvalidated extrinsics into the view. pub(super) async fn submit_many( &self, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, ChainApi::Error>> { if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) { let xts = xts.into_iter().collect::>(); - log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); - self.pool.submit_at(&self.at, source, xts).await + log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); + self.pool.submit_at(&self.at, xts).await } else { - self.pool.submit_at(&self.at, source, xts).await + self.pool.submit_at(&self.at, xts).await } } /// Import a single extrinsic and starts to watch its progress in the view. pub(super) async fn submit_and_watch( &self, - source: TransactionSource, + source: TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, ChainApi::Error> { log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash); @@ -193,7 +192,7 @@ where .api() .validate_transaction_blocking( self.at.hash, - TransactionSource::Local, + sc_transaction_pool_api::TransactionSource::Local, Arc::from(xt.clone()), )? .map_err(|e| { @@ -214,7 +213,7 @@ where let validated = ValidatedTransaction::valid_at( block_number.saturated_into::(), hash, - TransactionSource::Local, + TimedTransactionSource::new_local(true), Arc::from(xt), length, validity, @@ -285,7 +284,7 @@ where } _ = async { if let Some(tx) = batch_iter.next() { - let validation_result = (api.validate_transaction(self.at.hash, tx.source, tx.data.clone()).await, tx.hash, tx); + let validation_result = (api.validate_transaction(self.at.hash, tx.source.clone().into(), tx.data.clone()).await, tx.hash, tx); validation_results.push(validation_result); } else { self.revalidation_worker_channels.lock().as_mut().map(|ch| ch.remove_sender()); @@ -324,7 +323,7 @@ where ValidatedTransaction::valid_at( self.at.number.saturated_into::(), tx_hash, - tx.source, + tx.source.clone(), tx.data.clone(), api.hash_and_length(&tx.data).1, validity, @@ -455,4 +454,10 @@ where ); } } + + /// Returns true if the transaction with given hash is already imported into the view. 
+ pub(super) fn is_imported(&self, tx_hash: &ExtrinsicHash) -> bool { + const IGNORE_BANNED: bool = false; + self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_err() + } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index f23dcedd5bfd..a06c051f0a7e 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -24,17 +24,51 @@ use super::{ }; use crate::{ fork_aware_txpool::dropped_watcher::MultiViewDroppedWatcherController, - graph, - graph::{base_pool::Transaction, ExtrinsicFor, ExtrinsicHash, TransactionFor}, + graph::{ + self, + base_pool::{TimedTransactionSource, Transaction}, + ExtrinsicFor, ExtrinsicHash, TransactionFor, + }, ReadyIteratorFor, LOG_TARGET, }; use futures::prelude::*; use itertools::Itertools; use parking_lot::RwLock; -use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus, TransactionSource}; +use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus}; use sp_blockchain::TreeRoute; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use std::{collections::HashMap, sync::Arc, time::Instant}; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, + time::Instant, +}; + +/// Helper struct to keep the context for transaction replacements. +#[derive(Clone)] +struct PendingTxReplacement +where + ChainApi: graph::ChainApi, +{ + /// Indicates if the new transaction was already submitted to all the views in the view_store. + /// If true, it can be removed after inserting any new view. + processed: bool, + /// New transaction replacing the old one. + xt: ExtrinsicFor, + /// Source of the transaction. + source: TimedTransactionSource, + /// Inidicates if transaction is watched. + watched: bool, +} + +impl PendingTxReplacement +where + ChainApi: graph::ChainApi, +{ + /// Creates new unprocessed instance of pending transaction replacement. + fn new(xt: ExtrinsicFor, source: TimedTransactionSource, watched: bool) -> Self { + Self { processed: false, xt, source, watched } + } +} /// The helper structure encapsulates all the views. pub(super) struct ViewStore @@ -62,6 +96,13 @@ where pub(super) most_recent_view: RwLock>, /// The controller of multi view dropped stream. pub(super) dropped_stream_controller: MultiViewDroppedWatcherController, + /// The map used to synchronize replacement of transactions between maintain and dropped + /// notifcication threads. It is meant to assure that replaced transaction is also removed from + /// newly built views in maintain process. + /// + /// The map's key is hash of replaced extrinsic. + pending_txs_replacements: + RwLock, PendingTxReplacement>>, } impl ViewStore @@ -83,14 +124,14 @@ where listener, most_recent_view: RwLock::from(None), dropped_stream_controller, + pending_txs_replacements: Default::default(), } } /// Imports a bunch of unverified extrinsics to every active view. 
pub(super) async fn submit( &self, - source: TransactionSource, - xts: impl IntoIterator> + Clone, + xts: impl IntoIterator)> + Clone, ) -> HashMap, ChainApi::Error>>> { let submit_futures = { let active_views = self.active_views.read(); @@ -99,7 +140,7 @@ where .map(|(_, view)| { let view = view.clone(); let xts = xts.clone(); - async move { (view.at.hash, view.submit_many(source, xts).await) } + async move { (view.at.hash, view.submit_many(xts).await) } }) .collect::>() }; @@ -145,7 +186,7 @@ where pub(super) async fn submit_and_watch( &self, _at: Block::Hash, - source: TransactionSource, + source: TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ChainApi::Error> { let tx_hash = self.api.hash_and_length(&xt).0; @@ -159,6 +200,7 @@ where .map(|(_, view)| { let view = view.clone(); let xt = xt.clone(); + let source = source.clone(); async move { match view.submit_and_watch(source, xt).await { Ok(watcher) => { @@ -261,12 +303,20 @@ where ) -> Vec, ExtrinsicFor>> { self.most_recent_view .read() - .map(|at| self.get_view_at(at, true)) + .map(|at| self.futures_at(at)) .flatten() - .map(|(v, _)| v.pool.validated_pool().pool.read().futures().cloned().collect()) .unwrap_or_default() } + /// Returns a list of future transactions in the view at given block hash. + pub(super) fn futures_at( + &self, + at: Block::Hash, + ) -> Option, ExtrinsicFor>>> { + self.get_view_at(at, true) + .map(|(v, _)| v.pool.validated_pool().pool.read().futures().cloned().collect()) + } + /// Collects all the transactions included in the blocks on the provided `tree_route` and /// triggers finalization event for them. /// @@ -329,12 +379,16 @@ where /// - moved to the inactive views set (`inactive_views`), /// - removed from the multi view listeners. /// - /// The `most_recent_view` is update with the reference to the newly inserted view. + /// The `most_recent_view` is updated with the reference to the newly inserted view. + /// + /// If there are any pending tx replacments, they are applied to the new view. pub(super) async fn insert_new_view( &self, view: Arc>, tree_route: &TreeRoute, ) { + self.apply_pending_tx_replacements(view.clone()).await; + //note: most_recent_view must be synced with changes in in/active_views. 
{ let mut most_recent_view_lock = self.most_recent_view.write(); @@ -386,8 +440,10 @@ where let mut removed_views = vec![]; { - self.active_views - .read() + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + + active_views .iter() .filter(|(hash, v)| !match finalized_number { Err(_) | Ok(None) => **hash == finalized_hash, @@ -396,11 +452,8 @@ where }) .map(|(_, v)| removed_views.push(v.at.hash)) .for_each(drop); - } - { - self.inactive_views - .read() + inactive_views .iter() .filter(|(_, v)| !match finalized_number { Err(_) | Ok(None) => false, @@ -438,30 +491,48 @@ where let finalized_xts = self.finalize_route(finalized_hash, tree_route).await; let finalized_number = self.api.block_id_to_number(&BlockId::Hash(finalized_hash)); + let mut dropped_views = vec![]; //clean up older then finalized { let mut active_views = self.active_views.write(); - active_views.retain(|hash, v| match finalized_number { - Err(_) | Ok(None) => *hash == finalized_hash, - Ok(Some(n)) if v.at.number == n => *hash == finalized_hash, - Ok(Some(n)) => v.at.number > n, + let mut inactive_views = self.inactive_views.write(); + active_views.retain(|hash, v| { + let retain = match finalized_number { + Err(_) | Ok(None) => *hash == finalized_hash, + Ok(Some(n)) if v.at.number == n => *hash == finalized_hash, + Ok(Some(n)) => v.at.number > n, + }; + if !retain { + dropped_views.push(*hash); + } + retain }); - } - { - let mut inactive_views = self.inactive_views.write(); - inactive_views.retain(|_, v| match finalized_number { - Err(_) | Ok(None) => false, - Ok(Some(n)) => v.at.number >= n, + inactive_views.retain(|hash, v| { + let retain = match finalized_number { + Err(_) | Ok(None) => false, + Ok(Some(n)) => v.at.number >= n, + }; + if !retain { + dropped_views.push(*hash); + } + retain }); log::trace!(target:LOG_TARGET,"handle_finalized: inactive_views: {:?}", inactive_views.keys()); } - self.listener.remove_view(finalized_hash); + log::trace!(target:LOG_TARGET,"handle_finalized: dropped_views: {:?}", dropped_views); + self.listener.remove_stale_controllers(); self.dropped_stream_controller.remove_finalized_txs(finalized_xts.clone()); + self.listener.remove_view(finalized_hash); + for view in dropped_views { + self.listener.remove_view(view); + self.dropped_stream_controller.remove_view(view); + } + finalized_xts } @@ -484,4 +555,139 @@ where futures::future::join_all(finish_revalidation_futures).await; log::trace!(target:LOG_TARGET,"finish_background_revalidations took {:?}", start.elapsed()); } + + /// Replaces an existing transaction in the view_store with a new one. + /// + /// Attempts to replace a transaction identified by `replaced` with a new transaction `xt`. + /// + /// Before submitting a transaction to the views, the new *unprocessed* transaction replacement + /// record will be inserted into a pending replacement map. Once the submission to all the views + /// is accomplished, the record is marked as *processed*. + /// + /// This map is later applied in `insert_new_view` method executed from different thread. + /// + /// If the transaction is already being replaced, it will simply return without making + /// changes. 
+ pub(super) async fn replace_transaction( + &self, + source: TimedTransactionSource, + xt: ExtrinsicFor, + replaced: ExtrinsicHash, + watched: bool, + ) { + if let Entry::Vacant(entry) = self.pending_txs_replacements.write().entry(replaced) { + entry.insert(PendingTxReplacement::new(xt.clone(), source.clone(), watched)); + } else { + return + }; + + let xt_hash = self.api.hash_and_length(&xt).0; + log::trace!(target:LOG_TARGET,"[{replaced:?}] replace_transaction wtih {xt_hash:?}, w:{watched}"); + + self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await; + + if let Some(replacement) = self.pending_txs_replacements.write().get_mut(&replaced) { + replacement.processed = true; + } + } + + /// Applies pending transaction replacements to the specified view. + /// + /// After application, all already processed replacements are removed. + async fn apply_pending_tx_replacements(&self, view: Arc>) { + let mut futures = vec![]; + for replacement in self.pending_txs_replacements.read().values() { + let xt_hash = self.api.hash_and_length(&replacement.xt).0; + futures.push(self.replace_transaction_in_view( + view.clone(), + replacement.source.clone(), + replacement.xt.clone(), + xt_hash, + replacement.watched, + )); + } + let _results = futures::future::join_all(futures).await; + self.pending_txs_replacements.write().retain(|_, r| r.processed); + } + + /// Submits `xt` to the given view. + /// + /// For watched transaction stream is added to the listener. + async fn replace_transaction_in_view( + &self, + view: Arc>, + source: TimedTransactionSource, + xt: ExtrinsicFor, + xt_hash: ExtrinsicHash, + watched: bool, + ) { + if watched { + match view.submit_and_watch(source, xt).await { + Ok(watcher) => { + self.listener.add_view_watcher_for_tx( + xt_hash, + view.at.hash, + watcher.into_stream().boxed(), + ); + }, + Err(e) => { + log::trace!( + target:LOG_TARGET, + "[{:?}] replace_transaction: submit_and_watch to {} failed {}", + xt_hash, view.at.hash, e + ); + }, + } + } else { + if let Some(Err(e)) = view.submit_many(std::iter::once((source, xt))).await.pop() { + log::trace!( + target:LOG_TARGET, + "[{:?}] replace_transaction: submit to {} failed {}", + xt_hash, view.at.hash, e + ); + } + } + } + + /// Sends `xt` to every view (both active and inactive) containing `replaced` extrinsics. + /// + /// It is assumed that transaction is already known by the pool. Intended to ba called when `xt` + /// is replacing `replaced` extrinsic. 
+ async fn replace_transaction_in_views( + &self, + source: TimedTransactionSource, + xt: ExtrinsicFor, + xt_hash: ExtrinsicHash, + replaced: ExtrinsicHash, + watched: bool, + ) { + if watched && !self.listener.contains_tx(&xt_hash) { + log::trace!( + target:LOG_TARGET, + "error: replace_transaction_in_views: no listener for watched transaction {:?}", + xt_hash, + ); + return; + } + + let submit_futures = { + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + active_views + .iter() + .chain(inactive_views.iter()) + .filter(|(_, view)| view.is_imported(&replaced)) + .map(|(_, view)| { + self.replace_transaction_in_view( + view.clone(), + source.clone(), + xt.clone(), + xt_hash, + watched, + ) + }) + .collect::>() + }; + let _results = futures::future::join_all(submit_futures).await; + } } diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index e4c3a6c425a9..04eaa998f42e 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -20,7 +20,7 @@ //! //! For a more full-featured pool, have a look at the `pool` module. -use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc}; +use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc, time::Instant}; use crate::LOG_TARGET; use log::{trace, warn}; @@ -30,8 +30,8 @@ use sp_core::hexdisplay::HexDisplay; use sp_runtime::{ traits::Member, transaction_validity::{ - TransactionLongevity as Longevity, TransactionPriority as Priority, - TransactionSource as Source, TransactionTag as Tag, + TransactionLongevity as Longevity, TransactionPriority as Priority, TransactionSource, + TransactionTag as Tag, }, }; @@ -83,6 +83,44 @@ pub struct PruneStatus { pub pruned: Vec>>, } +/// A transaction source that includes a timestamp indicating when the transaction was submitted. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TimedTransactionSource { + /// The original source of the transaction. + pub source: TransactionSource, + + /// The time at which the transaction was submitted. + pub timestamp: Option, +} + +impl From for TransactionSource { + fn from(value: TimedTransactionSource) -> Self { + value.source + } +} + +impl TimedTransactionSource { + /// Creates a new instance with an internal `TransactionSource::InBlock` source and an optional + /// timestamp. + pub fn new_in_block(with_timestamp: bool) -> Self { + Self { source: TransactionSource::InBlock, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an internal `TransactionSource::External` source and an optional + /// timestamp. + pub fn new_external(with_timestamp: bool) -> Self { + Self { source: TransactionSource::External, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an internal `TransactionSource::Local` source and an optional + /// timestamp. + pub fn new_local(with_timestamp: bool) -> Self { + Self { source: TransactionSource::Local, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an given source and an optional timestamp. 
+ pub fn from_transaction_source(source: TransactionSource, with_timestamp: bool) -> Self { + Self { source, timestamp: with_timestamp.then(Instant::now) } + } +} + /// Immutable transaction #[derive(PartialEq, Eq, Clone)] pub struct Transaction { @@ -102,8 +140,8 @@ pub struct Transaction { pub provides: Vec, /// Should that transaction be propagated. pub propagate: bool, - /// Source of that transaction. - pub source: Source, + /// Timed source of that transaction. + pub source: TimedTransactionSource, } impl AsRef for Transaction { @@ -157,7 +195,7 @@ impl Transaction { bytes: self.bytes, hash: self.hash.clone(), priority: self.priority, - source: self.source, + source: self.source.clone(), valid_till: self.valid_till, requires: self.requires.clone(), provides: self.provides.clone(), @@ -322,22 +360,36 @@ impl BasePool { if !first { - promoted.push(current_hash); + promoted.push(current_hash.clone()); } + // If there were conflicting future transactions promoted, removed them from + // promoted set. + promoted.retain(|hash| replaced.iter().all(|tx| *hash != tx.hash)); // The transactions were removed from the ready pool. We might attempt to // re-import them. removed.append(&mut replaced); }, + Err(e @ error::Error::TooLowPriority { .. }) => + if first { + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + return Err(e) + } else { + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + removed.push(current_tx); + promoted.retain(|hash| *hash != current_hash); + }, // transaction failed to be imported. Err(e) => if first { - trace!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_hash, e); + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); return Err(e) } else { - failed.push(current_hash); + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + failed.push(current_tx.hash.clone()); }, } first = false; @@ -434,8 +486,24 @@ impl BasePool Some(current.clone()), - Some(ref tx) if tx.imported_at > current.imported_at => Some(current.clone()), - other => other, + Some(worst) => Some( + match (worst.transaction.source.timestamp, current.transaction.source.timestamp) + { + (Some(worst_timestamp), Some(current_timestamp)) => { + if worst_timestamp > current_timestamp { + current.clone() + } else { + worst + } + }, + _ => + if worst.imported_at > current.imported_at { + current.clone() + } else { + worst + }, + }, + ), }); if let Some(worst) = worst { @@ -562,7 +630,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: Source::External, + source: TimedTransactionSource::new_external(false), } } @@ -760,6 +828,58 @@ mod tests { ); } + #[test] + fn should_remove_conflicting_future() { + let mut pool = pool(); + pool.import(Transaction { + data: vec![3u8].into(), + hash: 3, + requires: vec![vec![1]], + priority: 50u64, + provides: vec![vec![3]], + ..default_tx().clone() + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + + let tx2 = Transaction { + data: vec![2u8].into(), + hash: 2, + requires: vec![vec![1]], + provides: vec![vec![3]], + ..default_tx().clone() + }; + pool.import(tx2.clone()).unwrap(); + assert_eq!(pool.future.len(), 2); + + let res = pool + .import(Transaction { + data: vec![1u8].into(), + hash: 1, + provides: vec![vec![1]], + ..default_tx().clone() + }) + .unwrap(); + + assert_eq!( + res, + Imported::Ready { + hash: 1, + promoted: vec![3], + failed: vec![], 
+ removed: vec![tx2.into()] + } + ); + + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(3)); + assert_eq!(it.next(), None); + + assert_eq!(pool.future.len(), 0); + } + #[test] fn should_handle_a_cycle() { // given @@ -783,14 +903,14 @@ mod tests { assert_eq!(pool.ready.len(), 0); // when - pool.import(Transaction { + let tx2 = Transaction { data: vec![2u8].into(), hash: 2, requires: vec![vec![2]], provides: vec![vec![0]], ..default_tx().clone() - }) - .unwrap(); + }; + pool.import(tx2.clone()).unwrap(); // then { @@ -817,7 +937,12 @@ mod tests { assert_eq!(it.next(), None); assert_eq!( res, - Imported::Ready { hash: 4, promoted: vec![1, 3], failed: vec![2], removed: vec![] } + Imported::Ready { + hash: 4, + promoted: vec![1, 3], + failed: vec![], + removed: vec![tx2.into()] + } ); assert_eq!(pool.future.len(), 0); } @@ -1024,7 +1149,7 @@ mod tests { ), "Transaction { \ hash: 4, priority: 1000, valid_till: 64, bytes: 1, propagate: true, \ -source: TransactionSource::External, requires: [03, 02], provides: [04], data: [4]}" +source: TimedTransactionSource { source: TransactionSource::External, timestamp: None }, requires: [03, 02], provides: [04], data: [4]}" .to_owned() ); } diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index a5593920eec4..41daf5491f70 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -36,6 +36,7 @@ pub type DroppedByLimitsStream = TracingUnboundedReceiver { + /// Map containing per-transaction sinks for emitting transaction status events. watchers: HashMap>>, finality_watchers: LinkedHashMap, Vec>, @@ -119,32 +120,44 @@ impl Listener, limits_enforced: bool) { + /// Transaction was dropped from the pool because of enforcing the limit. + pub fn limit_enforced(&mut self, tx: &H) { + trace!(target: LOG_TARGET, "[{:?}] Dropped (limit enforced)", tx); + self.fire(tx, |watcher| watcher.limit_enforced()); + + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Dropped)) { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); + } + } + } + + /// Transaction was replaced with other extrinsic. + pub fn usurped(&mut self, tx: &H, by: &H) { trace!(target: LOG_TARGET, "[{:?}] Dropped (replaced with {:?})", tx, by); - self.fire(tx, |watcher| match by { - Some(t) => watcher.usurped(t.clone()), - None => watcher.dropped(), - }); - - //note: LimitEnforced could be introduced as new status to get rid of this flag. - if limits_enforced { - if let Some(ref sink) = self.dropped_by_limits_sink { - if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Dropped)) { - trace!(target: LOG_TARGET, "[{:?}] dropped_sink/future: send message failed: {:?}", tx, e); - } + self.fire(tx, |watcher| watcher.usurped(by.clone())); + + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = + sink.unbounded_send((tx.clone(), TransactionStatus::Usurped(by.clone()))) + { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); } } } + /// Transaction was dropped from the pool because of the failure during the resubmission of + /// revalidate transactions or failure during pruning tags. 
+ pub fn dropped(&mut self, tx: &H) { + trace!(target: LOG_TARGET, "[{:?}] Dropped", tx); + self.fire(tx, |watcher| watcher.dropped()); + } + /// Transaction was removed as invalid. pub fn invalid(&mut self, tx: &H) { trace!(target: LOG_TARGET, "[{:?}] Extrinsic invalid", tx); diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 2dd8de352c6b..23b71ce437b3 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -181,10 +181,8 @@ impl Pool { pub async fn submit_at( &self, at: &HashAndNumber, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, B::Error>> { - let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -195,10 +193,8 @@ impl Pool { pub async fn resubmit_at( &self, at: &HashAndNumber, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, B::Error>> { - let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -207,10 +203,10 @@ impl Pool { pub async fn submit_one( &self, at: &HashAndNumber, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, B::Error> { - let res = self.submit_at(at, source, std::iter::once(xt)).await.pop(); + let res = self.submit_at(at, std::iter::once((source, xt))).await.pop(); res.expect("One extrinsic passed; one result returned; qed") } @@ -218,7 +214,7 @@ impl Pool { pub async fn submit_and_watch( &self, at: &HashAndNumber, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, B::Error> { let (_, tx) = self @@ -368,7 +364,7 @@ impl Pool { // Try to re-validate pruned transactions since some of them might be still valid. // note that `known_imported_hashes` will be rejected here due to temporary ban. 
let pruned_transactions = - prune_status.pruned.into_iter().map(|tx| (tx.source, tx.data.clone())); + prune_status.pruned.into_iter().map(|tx| (tx.source.clone(), tx.data.clone())); let reverified_transactions = self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await; @@ -396,7 +392,7 @@ impl Pool { async fn verify( &self, at: &HashAndNumber, - xts: impl IntoIterator)>, + xts: impl IntoIterator)>, check: CheckBannedBeforeVerify, ) -> IndexMap, ValidatedTransactionFor> { let HashAndNumber { number, hash } = *at; @@ -417,7 +413,7 @@ impl Pool { &self, block_hash: ::Hash, block_number: NumberFor, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, check: CheckBannedBeforeVerify, ) -> (ExtrinsicHash, ValidatedTransactionFor) { @@ -431,7 +427,7 @@ impl Pool { let validation_result = self .validated_pool .api() - .validate_transaction(block_hash, source, xt.clone()) + .validate_transaction(block_hash, source.clone().into(), xt.clone()) .await; let status = match validation_result { @@ -488,6 +484,7 @@ mod tests { use super::{super::base_pool::Limit, *}; use crate::common::tests::{pool, uxt, TestApi, INVALID_NONCE}; use assert_matches::assert_matches; + use base::TimedTransactionSource; use codec::Encode; use futures::executor::block_on; use parking_lot::Mutex; @@ -497,7 +494,8 @@ mod tests { use substrate_test_runtime::{AccountId, ExtrinsicBuilder, Transfer, H256}; use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; - const SOURCE: TransactionSource = TransactionSource::External; + const SOURCE: TimedTransactionSource = + TimedTransactionSource { source: TransactionSource::External, timestamp: None }; #[test] fn should_validate_and_import_transaction() { @@ -545,8 +543,8 @@ mod tests { let initial_hashes = txs.iter().map(|t| api.hash_and_length(t).0).collect::>(); // when - let txs = txs.into_iter().map(|x| Arc::from(x)).collect::>(); - let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), SOURCE, txs)); + let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::>(); + let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)); log::debug!("--> {hashes:#?}"); // then diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index 860bcff0bace..9061d0e25581 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -589,7 +589,6 @@ fn remove_item(vec: &mut Vec, item: &T) { #[cfg(test)] mod tests { use super::*; - use sp_runtime::transaction_validity::TransactionSource as Source; fn tx(id: u8) -> Transaction> { Transaction { @@ -601,7 +600,7 @@ mod tests { requires: vec![vec![1], vec![2]], provides: vec![vec![3], vec![4]], propagate: true, - source: Source::External, + source: crate::TimedTransactionSource::new_external(false), } } @@ -711,7 +710,7 @@ mod tests { requires: vec![tx1.provides[0].clone()], provides: vec![], propagate: true, - source: Source::External, + source: crate::TimedTransactionSource::new_external(false), }; // when diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs index 61a26fb4138c..9a2e269b5eed 100644 --- a/substrate/client/transaction-pool/src/graph/rotator.rs +++ b/substrate/client/transaction-pool/src/graph/rotator.rs @@ -106,7 +106,6 @@ impl PoolRotator { #[cfg(test)] mod tests { use super::*; - use sp_runtime::transaction_validity::TransactionSource; type Hash = 
u64; type Ex = (); @@ -126,7 +125,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: TransactionSource::External, + source: crate::TimedTransactionSource::new_external(false), }; (hash, tx) @@ -192,7 +191,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: TransactionSource::External, + source: crate::TimedTransactionSource::new_external(false), } } diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index d7f55198a40a..14df63d9673e 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -30,7 +30,7 @@ use serde::Serialize; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::{self, SaturatedConversion}, - transaction_validity::{TransactionSource, TransactionTag as Tag, ValidTransaction}, + transaction_validity::{TransactionTag as Tag, ValidTransaction}, }; use std::time::Instant; @@ -62,7 +62,7 @@ impl ValidatedTransaction { pub fn valid_at( at: u64, hash: Hash, - source: TransactionSource, + source: base::TimedTransactionSource, data: Ex, bytes: usize, validity: ValidTransaction, @@ -280,7 +280,7 @@ impl ValidatedPool { // run notifications let mut listener = self.listener.write(); for h in &removed { - listener.dropped(h, None, true); + listener.limit_enforced(h); } removed @@ -453,7 +453,7 @@ impl ValidatedPool { match final_status { Status::Future => listener.future(&hash), Status::Ready => listener.ready(&hash, None), - Status::Dropped => listener.dropped(&hash, None, false), + Status::Dropped => listener.dropped(&hash), Status::Failed => listener.invalid(&hash), } } @@ -492,7 +492,7 @@ impl ValidatedPool { fire_events(&mut *listener, promoted); } for f in &status.failed { - listener.dropped(f, None, false); + listener.dropped(f); } } @@ -671,6 +671,21 @@ impl ValidatedPool { ) -> super::listener::DroppedByLimitsStream, BlockHash> { self.listener.write().create_dropped_by_limits_stream() } + + /// Resends ready and future events for all the ready and future transactions that are already + /// in the pool. + /// + /// Intended to be called after cloning the instance of `ValidatedPool`. + pub fn retrigger_notifications(&self) { + let pool = self.pool.read(); + let mut listener = self.listener.write(); + pool.ready().for_each(|r| { + listener.ready(&r.hash, None); + }); + pool.futures().for_each(|f| { + listener.future(&f.hash); + }); + } } fn fire_events(listener: &mut Listener, imported: &base::Imported) @@ -682,7 +697,7 @@ where base::Imported::Ready { ref promoted, ref failed, ref removed, ref hash } => { listener.ready(hash, None); failed.iter().for_each(|f| listener.invalid(f)); - removed.iter().for_each(|r| listener.dropped(&r.hash, Some(hash), false)); + removed.iter().for_each(|r| listener.usurped(&r.hash, hash)); promoted.iter().for_each(|p| listener.ready(p, None)); }, base::Imported::Future { ref hash } => listener.future(hash), diff --git a/substrate/client/transaction-pool/src/graph/watcher.rs b/substrate/client/transaction-pool/src/graph/watcher.rs index fb7cf99d4dc6..2fd31e772fd8 100644 --- a/substrate/client/transaction-pool/src/graph/watcher.rs +++ b/substrate/client/transaction-pool/src/graph/watcher.rs @@ -113,6 +113,12 @@ impl Sender { } /// Transaction has been dropped from the pool because of the limit. 
+ pub fn limit_enforced(&mut self) { + self.send(TransactionStatus::Dropped); + self.is_finalized = true; + } + + /// Transaction has been dropped from the pool. pub fn dropped(&mut self) { self.send(TransactionStatus::Dropped); self.is_finalized = true; diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index 3d3d596c291f..366d91a973d2 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -36,7 +36,10 @@ pub use api::FullChainApi; pub use builder::{Builder, TransactionPoolHandle, TransactionPoolOptions, TransactionPoolType}; pub use common::notification_future; pub use fork_aware_txpool::{ForkAwareTxPool, ForkAwareTxPoolTask}; -pub use graph::{base_pool::Limit as PoolLimit, ChainApi, Options, Pool}; +pub use graph::{ + base_pool::{Limit as PoolLimit, TimedTransactionSource}, + ChainApi, Options, Pool, +}; use single_state_txpool::prune_known_txs_for_block; pub use single_state_txpool::{BasicPool, RevalidationType}; pub use transaction_pool_wrapper::TransactionPoolWrapper; diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index 5ef726c9f7d3..74031b1e1c72 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -88,7 +88,7 @@ async fn batch_revalidate( let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| { pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { - api.validate_transaction(at, ext.source, ext.data.clone()) + api.validate_transaction(at, ext.source.clone().into(), ext.data.clone()) .map(move |validation_result| (validation_result, ext_hash, ext)) }) })) @@ -121,7 +121,7 @@ async fn batch_revalidate( ValidatedTransaction::valid_at( block_number.saturated_into::(), ext_hash, - ext.source, + ext.source.clone(), ext.data.clone(), api.hash_and_length(&ext.data).1, validity, @@ -375,9 +375,9 @@ mod tests { use crate::{ common::tests::{uxt, TestApi}, graph::Pool, + TimedTransactionSource, }; use futures::executor::block_on; - use sc_transaction_pool_api::TransactionSource; use substrate_test_runtime::{AccountId, Transfer, H256}; use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; @@ -398,7 +398,7 @@ mod tests { let uxt_hash = block_on(pool.submit_one( &han_of_block0, - TransactionSource::External, + TimedTransactionSource::new_external(false), uxt.clone().into(), )) .expect("Should be valid"); @@ -433,14 +433,15 @@ mod tests { let han_of_block0 = api.expect_hash_and_number(0); let unknown_block = H256::repeat_byte(0x13); - let uxt_hashes = block_on(pool.submit_at( - &han_of_block0, - TransactionSource::External, - vec![uxt0.into(), uxt1.into()], - )) - .into_iter() - .map(|r| r.expect("Should be valid")) - .collect::>(); + let source = TimedTransactionSource::new_external(false); + let uxt_hashes = + block_on(pool.submit_at( + &han_of_block0, + vec![(source.clone(), uxt0.into()), (source, uxt1.into())], + )) + .into_iter() + .map(|r| r.expect("Should be valid")) + .collect::>(); assert_eq!(api.validation_requests().len(), 2); assert_eq!(pool.validated_pool().status().ready, 2); diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index b29630b563bb..e7504012ca67 100644 --- 
a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -29,7 +29,7 @@ use crate::{ error, log_xt::log_xt_trace, }, - graph::{self, ExtrinsicHash, IsValidator}, + graph::{self, base_pool::TimedTransactionSource, ExtrinsicHash, IsValidator}, ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; @@ -254,14 +254,19 @@ where xts: Vec>, ) -> Result, Self::Error>>, Self::Error> { let pool = self.pool.clone(); - let xts = xts.into_iter().map(Arc::from).collect::>(); + let xts = xts + .into_iter() + .map(|xt| { + (TimedTransactionSource::from_transaction_source(source, false), Arc::from(xt)) + }) + .collect::>(); self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - Ok(pool.submit_at(&at, source, xts).await) + Ok(pool.submit_at(&at, xts).await) } async fn submit_one( @@ -277,7 +282,8 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - pool.submit_one(&at, source, xt).await + pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) + .await } async fn submit_and_watch( @@ -294,7 +300,13 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - let watcher = pool.submit_and_watch(&at, source, xt).await?; + let watcher = pool + .submit_and_watch( + &at, + TimedTransactionSource::from_transaction_source(source, false), + xt, + ) + .await?; Ok(watcher.into_stream().boxed()) } @@ -458,7 +470,7 @@ where let validated = ValidatedTransaction::valid_at( block_number.saturated_into::(), hash, - TransactionSource::Local, + TimedTransactionSource::new_local(false), Arc::from(xt), bytes, validity, @@ -662,8 +674,8 @@ where resubmit_transactions.extend( //todo: arctx - we need to get ref from somewhere - block_transactions.into_iter().map(Arc::from).filter(|tx| { - let tx_hash = pool.hash_of(tx); + block_transactions.into_iter().map(Arc::from).filter_map(|tx| { + let tx_hash = pool.hash_of(&tx); let contains = pruned_log.contains(&tx_hash); // need to count all transactions, not just filtered, here @@ -676,8 +688,15 @@ where tx_hash, hash, ); + Some(( + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TimedTransactionSource::new_external(false), + tx, + )) + } else { + None } - !contains }), ); @@ -686,14 +705,7 @@ where }); } - pool.resubmit_at( - &hash_and_number, - // These transactions are coming from retracted blocks, we should - // simply consider them external. 
- TransactionSource::External, - resubmit_transactions, - ) - .await; + pool.resubmit_at(&hash_and_number, resubmit_transactions).await; } let extra_pool = pool.clone(); diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index 9f343a9bd029..c51ca6e17663 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ -2267,19 +2267,13 @@ fn fatp_avoid_stuck_transaction() { assert_pool_status!(header06.hash(), &pool, 0, 0); - // Import enough blocks to make xt4i revalidated - let mut prev_header = header03; - // wait 10 blocks for revalidation - for n in 7..=11 { - let header = api.push_block(n, vec![], true); - let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); - block_on(pool.maintain(event)); - prev_header = header; - } + let header07 = api.push_block(7, vec![], true); + let event = finalized_block_event(&pool, header03.hash(), header07.hash()); + block_on(pool.maintain(event)); let xt4i_events = futures::executor::block_on_stream(xt4i_watcher).collect::>(); log::debug!("xt4i_events: {:#?}", xt4i_events); - assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Invalid]); + assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Dropped]); assert_eq!(pool.mempool_len(), (0, 0)); } diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs index 15f2b7f79c14..aecd83360f1e 100644 --- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -201,6 +201,20 @@ macro_rules! assert_ready_iterator { }}; } +#[macro_export] +macro_rules! assert_future_iterator { + ($hash:expr, $pool:expr, [$( $xt:expr ),*]) => {{ + let futures = $pool.futures_at($hash).unwrap(); + let expected = vec![ $($pool.api().hash_and_length(&$xt).0),*]; + log::debug!(target:LOG_TARGET, "expected: {:#?}", futures); + log::debug!(target:LOG_TARGET, "output: {:#?}", expected); + assert_eq!(expected.len(), futures.len()); + let hsf = futures.iter().map(|a| a.hash).collect::>(); + let hse = expected.into_iter().collect::>(); + assert_eq!(hse,hsf); + }}; +} + pub const SOURCE: TransactionSource = TransactionSource::External; #[cfg(test)] diff --git a/substrate/client/transaction-pool/tests/fatp_limits.rs b/substrate/client/transaction-pool/tests/fatp_limits.rs index 03792fd89dfa..afd8183957a8 100644 --- a/substrate/client/transaction-pool/tests/fatp_limits.rs +++ b/substrate/client/transaction-pool/tests/fatp_limits.rs @@ -641,3 +641,192 @@ fn fatp_limits_future_size_works() { assert_pool_status!(header01.hash(), &pool, 0, 3); assert_eq!(pool.mempool_len().0, 3); } + +#[test] +fn fatp_limits_watcher_ready_transactions_are_not_droped_when_view_is_dropped() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(6).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = 
uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + let _xt0_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let _xt1_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let _xt2_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let _xt3_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let _xt4_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let _xt5_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 6); + + let header04 = + api.push_block_with_parent(header03.hash(), vec![xt4.clone(), xt5.clone()], true); + api.set_nonce(header04.hash(), Alice.into(), 201); + api.set_nonce(header04.hash(), Bob.into(), 301); + api.set_nonce(header04.hash(), Charlie.into(), 401); + api.set_nonce(header04.hash(), Dave.into(), 501); + api.set_nonce(header04.hash(), Eve.into(), 601); + api.set_nonce(header04.hash(), Ferdie.into(), 701); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt2, xt3]); + assert_ready_iterator!(header03.hash(), pool, [xt4, xt5]); + assert_ready_iterator!(header04.hash(), pool, []); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header01.hash()))); + assert!(!pool.status_all().contains_key(&header01.hash())); + + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash()))); + assert!(!pool.status_all().contains_key(&header02.hash())); + + //view 01 was dropped + assert!(pool.ready_at(header01.hash()).now_or_never().is_none()); + assert_eq!(pool.mempool_len().1, 6); + + block_on(pool.maintain(finalized_block_event(&pool, header02.hash(), header03.hash()))); + + //no revalidation has happened yet, all txs are kept + assert_eq!(pool.mempool_len().1, 6); + + //view 03 is still there + assert!(!pool.status_all().contains_key(&header03.hash())); + + //view 02 was dropped + assert!(pool.ready_at(header02.hash()).now_or_never().is_none()); + + let mut prev_header = header03; + for n in 5..=11 { + let header = api.push_block(n, vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + //now revalidation has happened, all txs are dropped + assert_eq!(pool.mempool_len().1, 0); +} + +#[test] +fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(6).with_future_count(2).build(); + 
api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Bob, 301); + let xt2 = uxt(Charlie, 401); + + let xt3 = uxt(Dave, 501); + let xt4 = uxt(Eve, 601); + let xt5 = uxt(Ferdie, 701); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 2); + assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 4); + assert_future_iterator!(header02.hash(), pool, [xt2, xt3]); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 6); + assert_future_iterator!(header03.hash(), pool, [xt4, xt5]); + + let header04 = api.push_block_with_parent(header03.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_pool_status!(header04.hash(), &pool, 0, 2); + assert_eq!(pool.futures().len(), 2); + assert_future_iterator!(header04.hash(), pool, [xt4, xt5]); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header04.hash()))); + assert_eq!(pool.active_views_count(), 1); + assert_eq!(pool.inactive_views_count(), 0); + //todo: can we do better? We don't have API to check if event was processed internally. 
+ let mut counter = 0; + while pool.mempool_len().1 != 2 { + sleep(std::time::Duration::from_millis(1)); + counter = counter + 1; + if counter > 20 { + assert!(false, "timeout {}", pool.mempool_len().1); + } + } + assert_eq!(pool.mempool_len().1, 2); + assert_pool_status!(header04.hash(), &pool, 0, 2); + assert_eq!(pool.futures().len(), 2); + + let to_be_checked = vec![xt0_watcher, xt1_watcher, xt2_watcher, xt3_watcher]; + for x in to_be_checked { + let x_status = futures::executor::block_on_stream(x).take(2).collect::>(); + assert_eq!(x_status, vec![TransactionStatus::Future, TransactionStatus::Dropped]); + } + + let to_be_checked = vec![xt4_watcher, xt5_watcher]; + for x in to_be_checked { + let x_status = futures::executor::block_on_stream(x).take(1).collect::>(); + assert_eq!(x_status, vec![TransactionStatus::Future]); + } +} diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs new file mode 100644 index 000000000000..41bc374b38f4 --- /dev/null +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -0,0 +1,249 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests of priorities for fork-aware transaction pool. 
+ +pub mod fatp_common; + +use fatp_common::{new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; +use futures::{executor::block_on, FutureExt}; +use sc_transaction_pool::ChainApi; +use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus}; +use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_transaction_pool::uxt; + +#[test] +fn fatp_prio_ready_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let result0 = block_on(pool.submit_one(header01.hash(), SOURCE, xt0.clone())); + let result1 = block_on(pool.submit_one(header01.hash(), SOURCE, xt1.clone())); + + log::info!("r0 => {:?}", result0); + log::info!("r1 => {:?}", result1); + log::info!("len: {:?}", pool.mempool_len()); + log::info!("len: {:?}", pool.status_all()[&header01.hash()]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_pool_status!(header01.hash(), &pool, 1, 0); +} + +#[test] +fn fatp_prio_watcher_ready_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] + ); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + + log::info!("len: {:?}", pool.mempool_len()); + log::info!("len: {:?}", pool.status_all()[&header01.hash()]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_pool_status!(header01.hash(), &pool, 1, 0); +} + +#[test] +fn fatp_prio_watcher_future_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(3).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); + + let xt0_status = 
futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + + assert_eq!( + xt0_status, + vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] + ); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Future, TransactionStatus::Ready]); + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); + + assert_eq!(pool.mempool_len().1, 2); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); + assert_pool_status!(header01.hash(), &pool, 2, 0); +} + +#[test] +fn fatp_prio_watcher_ready_lower_prio_gets_dropped_from_all_views() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let header03a = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); + + let header03b = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); + + assert_pool_status!(header03a.hash(), &pool, 1, 0); + assert_ready_iterator!(header03a.hash(), pool, [xt0]); + assert_pool_status!(header03b.hash(), &pool, 1, 0); + assert_ready_iterator!(header03b.hash(), pool, [xt0]); + assert_ready_iterator!(header01.hash(), pool, [xt0]); + assert_ready_iterator!(header02.hash(), pool, [xt0]); + + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] + ); + assert_ready_iterator!(header03a.hash(), pool, [xt1]); + assert_ready_iterator!(header03b.hash(), pool, [xt1]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt1]); +} + +#[test] +fn fatp_prio_watcher_future_lower_prio_gets_dropped_from_all_views() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, 
xt1.clone())).unwrap(); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let header03a = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); + + let header03b = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); + + assert_pool_status!(header03a.hash(), &pool, 0, 2); + assert_future_iterator!(header03a.hash(), pool, [xt0, xt1]); + assert_pool_status!(header03b.hash(), &pool, 0, 2); + assert_future_iterator!(header03b.hash(), pool, [xt0, xt1]); + assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_future_iterator!(header02.hash(), pool, [xt0, xt1]); + + let xt2_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Future]); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] + ); + assert_future_iterator!(header03a.hash(), pool, []); + assert_future_iterator!(header03b.hash(), pool, []); + assert_future_iterator!(header01.hash(), pool, []); + assert_future_iterator!(header02.hash(), pool, []); + + assert_ready_iterator!(header03a.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header03b.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt2, xt1]); +} diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index ed0fd7d4e655..e556ba9875f1 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -80,12 +80,14 @@ fn create_basic_pool(test_api: TestApi) -> BasicPool { create_basic_pool_with_genesis(Arc::from(test_api)).0 } +const TSOURCE: TimedTransactionSource = + TimedTransactionSource { source: TransactionSource::External, timestamp: None }; const SOURCE: TransactionSource = TransactionSource::External; #[test] fn submission_should_work() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); let pending: Vec<_> = pool @@ -99,9 +101,9 @@ fn submission_should_work() { #[test] fn multiple_submission_should_work() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool @@ -116,7 +118,7 @@ fn multiple_submission_should_work() { fn early_nonce_should_be_culled() { sp_tracing::try_init_simple(); let (pool, api) = pool(); - 
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 208).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 208).into())) .unwrap(); log::debug!("-> {:?}", pool.validated_pool().status()); @@ -132,7 +134,7 @@ fn early_nonce_should_be_culled() { fn late_nonce_should_be_queued() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -141,7 +143,7 @@ fn late_nonce_should_be_queued() { .collect(); assert_eq!(pending, Vec::::new()); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -155,9 +157,9 @@ fn late_nonce_should_be_queued() { fn prune_tags_should_work() { let (pool, api) = pool(); let hash209 = - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool @@ -183,9 +185,9 @@ fn should_ban_invalid_transactions() { let (pool, api) = pool(); let uxt = Arc::from(uxt(Alice, 209)); let hash = - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); // when let pending: Vec<_> = pool @@ -196,7 +198,7 @@ fn should_ban_invalid_transactions() { assert_eq!(pending, Vec::::new()); // then - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); } #[test] @@ -224,7 +226,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { })); let pool = Pool::new(Default::default(), true.into(), api.clone()); let xt0 = Arc::from(uxt(Alice, 209)); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt0.clone())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone())) .expect("1. Imported"); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(api.validation_requests().len(), 1); @@ -242,7 +244,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { api.increment_nonce(Alice.into()); api.push_block(2, Vec::new(), true); let xt1 = uxt(Alice, 211); - block_on(pool.submit_one(&api.expect_hash_and_number(2), SOURCE, xt1.clone().into())) + block_on(pool.submit_one(&api.expect_hash_and_number(2), TSOURCE, xt1.clone().into())) .expect("2. 
Imported"); assert_eq!(api.validation_requests().len(), 3); assert_eq!(pool.validated_pool().status().ready, 1); From 8dfa3c5e1d21e980b97aa15f431f03655afc053b Mon Sep 17 00:00:00 2001 From: ron Date: Wed, 4 Dec 2024 11:17:52 +0800 Subject: [PATCH 56/68] Add force_create_agent --- bridges/snowbridge/pallets/system/src/lib.rs | 34 +++++++ .../src/tests/snowbridge_v2_outbound.rs | 95 +++++-------------- 2 files changed, 58 insertions(+), 71 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 24575a75b14c..c324459a8656 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -637,6 +637,40 @@ pub mod pallet { pays_fee: Pays::No, }) } + + #[pallet::call_index(11)] + #[pallet::weight(T::WeightInfo::create_agent())] + pub fn force_create_agent( + origin: OriginFor, + location: Box, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + + let location: Location = + (*location).try_into().map_err(|_| Error::::UnsupportedLocationVersion)?; + + let ethereum_location = T::EthereumLocation::get(); + let location = location + .clone() + .reanchored(ðereum_location, &T::UniversalLocation::get()) + .map_err(|_| Error::::LocationConversionFailed)?; + + let agent_id = agent_id_of::(&location)?; + + // Record the agent id or fail if it has already been created + ensure!(!Agents::::contains_key(agent_id), Error::::AgentAlreadyCreated); + Agents::::insert(agent_id, ()); + + let command = Command::CreateAgent { agent_id }; + let pays_fee = PaysFee::::No; + Self::send(SECONDARY_GOVERNANCE_CHANNEL, command, pays_fee)?; + + Self::deposit_event(Event::::CreateAgent { location: Box::new(location), agent_id }); + Ok(PostDispatchInfo { + actual_weight: Some(T::WeightInfo::register_token()), + pays_fee: Pays::No, + }) + } } impl Pallet { diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs index 21e752a981a2..658612ed2ddc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs @@ -18,7 +18,6 @@ use hex_literal::hex; use snowbridge_core::AssetMetadata; use snowbridge_outbound_primitives::TransactInfo; use snowbridge_router_primitives::inbound::EthereumLocationsConverterFor; -use sp_runtime::MultiAddress; use testnet_parachains_constants::westend::snowbridge::EthereumNetwork; use xcm::v5::AssetTransferFilter; use xcm_executor::traits::ConvertLocation; @@ -95,11 +94,6 @@ pub fn register_relay_token() { type RuntimeOrigin = ::RuntimeOrigin; // Register WND on BH - assert_ok!(::Balances::force_set_balance( - RuntimeOrigin::root(), - MultiAddress::Id(BridgeHubWestendSender::get()), - INITIAL_FUND, - )); assert_ok!(::EthereumSystem::register_token( RuntimeOrigin::root(), Box::new(VersionedLocation::from(Location::parent())), @@ -325,71 +319,30 @@ fn send_weth_and_dot_from_asset_hub_to_ethereum() { }); } -// #[test] -// fn create_agent() { -// fund_sovereign(); -// -// register_weth(); -// -// BridgeHubWestend::execute_with(|| {}); -// -// AssetHubWestend::execute_with(|| { -// type RuntimeOrigin = ::RuntimeOrigin; -// -// let local_fee_asset = -// Asset { id: AssetId(Location::parent()), fun: 
Fungible(LOCAL_FEE_AMOUNT_IN_DOT) }; -// -// // All WETH as fee and reserve_asset is zero, so there is no transfer in this case -// let remote_fee_asset = Asset { id: AssetId(weth_location()), fun: Fungible(TOKEN_AMOUNT) }; -// let reserve_asset = Asset { id: AssetId(weth_location()), fun: Fungible(0) }; -// -// let assets = vec![ -// Asset { id: weth_location().into(), fun: Fungible(TOKEN_AMOUNT) }, -// local_fee_asset.clone(), -// ]; -// -// let transact_info = TransactInfo { kind: TransactKind::RegisterAgent, params: vec![] }; -// -// let xcms = VersionedXcm::from(Xcm(vec![ -// WithdrawAsset(assets.clone().into()), -// PayFees { asset: local_fee_asset.clone() }, -// InitiateTransfer { -// destination: destination(), -// remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( -// remote_fee_asset.clone().into(), -// ))), -// preserve_origin: true, -// assets: vec![AssetTransferFilter::ReserveWithdraw(Definite( -// reserve_asset.clone().into(), -// ))], -// remote_xcm: Xcm(vec![ -// DepositAsset { assets: Wild(AllCounted(2)), beneficiary: beneficiary() }, -// Transact { -// origin_kind: OriginKind::SovereignAccount, -// call: transact_info.encode().into(), -// }, -// ]), -// }, -// ])); -// -// // Send the Weth back to Ethereum -// ::PolkadotXcm::execute( -// RuntimeOrigin::signed(AssetHubWestendReceiver::get()), -// bx!(xcms), -// Weight::from(8_000_000_000), -// ) -// .unwrap(); -// }); -// -// BridgeHubWestend::execute_with(|| { -// type RuntimeEvent = ::RuntimeEvent; -// // Check that Ethereum message was queue in the Outbound Queue -// assert_expected_events!( -// BridgeHubWestend, -// vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageAccepted{ .. }) => {},] -// ); -// }); -// } +#[test] +fn create_agent() { + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + let location = Location::new( + 1, + [ + Parachain(AssetHubWestend::para_id().into()), + AccountId32 { network: None, id: AssetHubWestendSender::get().into() }, + ], + ); + + ::EthereumSystem::force_create_agent( + RuntimeOrigin::root(), + bx!(location.into()), + ); + assert_expected_events!( + BridgeHubWestend, + vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::CreateAgent{ .. }) => {},] + ); + }); +} #[test] fn transact_with_agent() { From a2ffae303e8b6c52cff720fb2f3e2019d65d91b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 4 Dec 2024 10:06:57 +0100 Subject: [PATCH 57/68] umbrella: Remove `pallet-revive-fixtures` (#6743) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to have them in the umbrella crate also by having them in the umbrella crate they are bleeding into the normal build. 
--------- Co-authored-by: GitHub Action Co-authored-by: Alexander Theißen --- Cargo.lock | 1 - prdoc/pr_6743.prdoc | 10 ++++++++++ substrate/frame/revive/fixtures/Cargo.toml | 3 +++ umbrella/Cargo.toml | 8 +------- umbrella/src/lib.rs | 4 ---- 5 files changed, 14 insertions(+), 12 deletions(-) create mode 100644 prdoc/pr_6743.prdoc diff --git a/Cargo.lock b/Cargo.lock index bc2ebb2a057d..863822f4ffd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18577,7 +18577,6 @@ dependencies = [ "pallet-remark 28.0.0", "pallet-revive 0.1.0", "pallet-revive-eth-rpc", - "pallet-revive-fixtures 0.1.0", "pallet-revive-mock-network 0.1.0", "pallet-revive-proc-macro 0.1.0", "pallet-revive-uapi 0.1.0", diff --git a/prdoc/pr_6743.prdoc b/prdoc/pr_6743.prdoc new file mode 100644 index 000000000000..4c35ff46ca67 --- /dev/null +++ b/prdoc/pr_6743.prdoc @@ -0,0 +1,10 @@ +title: 'umbrella: Remove `pallet-revive-fixtures`' +doc: +- audience: Runtime Dev + description: |- + No need to have them in the umbrella crate also by having them in the umbrella crate they are bleeding into the normal build. +crates: +- name: pallet-revive-fixtures + bump: major +- name: polkadot-sdk + bump: major diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index 9fd434db6179..88921cca08ec 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -8,6 +8,9 @@ description = "Fixtures for testing and benchmarking" homepage.workspace = true repository.workspace = true +[package.metadata.polkadot-sdk] +exclude-from-umbrella = true + [lints] workspace = true diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 9affcffd2ade..8ed9c3dcb02c 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -120,7 +120,6 @@ std = [ "pallet-recovery?/std", "pallet-referenda?/std", "pallet-remark?/std", - "pallet-revive-fixtures?/std", "pallet-revive-mock-network?/std", "pallet-revive?/std", "pallet-root-offences?/std", @@ -541,7 +540,7 @@ with-tracing = [ "sp-tracing?/with-tracing", "sp-tracing?/with-tracing", ] -runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", 
"pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-fixtures", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", 
"sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] +runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", 
"pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] runtime = [ "frame-benchmarking", "frame-benchmarking-pallet-pov", @@ -1193,11 +1192,6 @@ path = "../substrate/frame/revive" default-features = false optional = true -[dependencies.pallet-revive-fixtures] -path = "../substrate/frame/revive/fixtures" -default-features = false -optional = true - [dependencies.pallet-revive-proc-macro] path = "../substrate/frame/revive/proc-macro" default-features = false diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 2216864fad0f..3712fb3343cf 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -584,10 +584,6 @@ pub use pallet_revive; #[cfg(feature = "pallet-revive-eth-rpc")] pub use pallet_revive_eth_rpc; -/// Fixtures for testing and benchmarking. -#[cfg(feature = "pallet-revive-fixtures")] -pub use pallet_revive_fixtures; - /// A mock network for testing pallet-revive. 
#[cfg(feature = "pallet-revive-mock-network")] pub use pallet_revive_mock_network; From 377bc3f830a63e277dd94fed490670d1be6d840f Mon Sep 17 00:00:00 2001 From: Egor_P Date: Wed, 4 Dec 2024 11:06:55 +0100 Subject: [PATCH 58/68] [Release|CI/CD] Add pipeline to promote release candidate from rcX to final in S3 (#6748) This PR adds the pipeline, that moves release candidate artefacts from `polkadot-stableYYMM-rcX` bucket to the one that is going to be the final `polkadot-stableYYMM` (bucket name matches the tag name). So that it could be used for publishing later without a need to re-build it again. --- .github/scripts/common/lib.sh | 2 +- .github/scripts/release/release_lib.sh | 44 +++++- .../release-31_promote-rc-to-final.yml | 125 ++++++++++++++++++ .../release-reusable-promote-to-final.yml | 83 ++++++++++++ .../workflows/release-reusable-rc-buid.yml | 4 +- .../workflows/release-reusable-s3-upload.yml | 14 +- 6 files changed, 253 insertions(+), 19 deletions(-) create mode 100644 .github/workflows/release-31_promote-rc-to-final.yml create mode 100644 .github/workflows/release-reusable-promote-to-final.yml diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index 41dc0ba06dd2..00f8c089831e 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -297,7 +297,7 @@ fetch_release_artifacts_from_s3() { pwd ls -al --color popd > /dev/null - + unset OUTPUT_DIR } # Pass the name of the binary as input, it will diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh index 43227180cb7c..984709f2ea03 100644 --- a/.github/scripts/release/release_lib.sh +++ b/.github/scripts/release/release_lib.sh @@ -129,15 +129,17 @@ upload_s3_release() { echo "Working on version: $version " echo "Working on platform: $target " + URL_BASE=$(get_s3_url_base $product) + echo "Current content, should be empty on new uploads:" - aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize || true + aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize || true echo "Content to be uploaded:" - artifacts="artifacts/$product/" + artifacts="release-artifacts/$target/$product/" ls "$artifacts" - aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/${product}/${version}/${target}" + aws s3 sync --acl public-read "$artifacts" "s3://${URL_BASE}/${version}/${target}" echo "Uploaded files:" - aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize - echo "✅ The release should be at https://releases.parity.io/${product}/${version}/${target}" + aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize + echo "✅ The release should be at https://${URL_BASE}/${version}/${target}" } # Upload runtimes artifacts to s3 release bucket @@ -161,3 +163,35 @@ upload_s3_runtimes_release_artifacts() { aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize echo "✅ The release should be at https://releases.parity.io/polkadot/runtimes/${version}" } + + +# Pass the name of the binary as input, it will +# return the s3 base url +function get_s3_url_base() { + name=$1 + case $name in + polkadot | polkadot-execute-worker | polkadot-prepare-worker ) + printf "releases.parity.io/polkadot" + ;; + + polkadot-parachain) + printf "releases.parity.io/polkadot-parachain" + ;; + + polkadot-omni-node) + printf "releases.parity.io/polkadot-omni-node" 
+ ;; + + chain-spec-builder) + printf "releases.parity.io/chain-spec-builder" + ;; + + frame-omni-bencher) + printf "releases.parity.io/frame-omni-bencher" + ;; + *) + printf "UNSUPPORTED BINARY $name" + exit 1 + ;; + esac +} diff --git a/.github/workflows/release-31_promote-rc-to-final.yml b/.github/workflows/release-31_promote-rc-to-final.yml new file mode 100644 index 000000000000..6aa9d4bddd1d --- /dev/null +++ b/.github/workflows/release-31_promote-rc-to-final.yml @@ -0,0 +1,125 @@ +name: Release - Promote RC to final candidate on S3 + +on: + workflow_dispatch: + inputs: + binary: + description: Binary to be build for the release + default: all + type: choice + options: + - polkadot + - polkadot-parachain + - polkadot-omni-node + - frame-omni-bencher + - chain-spec-builder + - all + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX + type: string + + +jobs: + + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [ check-synchronization ] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + final_tag: ${{ steps.validate_inputs.outputs.final_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Validate inputs + id: validate_inputs + run: | + . ./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + + promote-polkadot-rc-to-final: + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-polkadot-parachain-rc-to-final: + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot-parachain + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-polkadot-omni-node-rc-to-final: + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot-omni-node + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ 
secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-frame-omni-bencher-rc-to-final: + if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: frame-omni-bencher + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-chain-spec-builder-rc-to-final: + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: chain-spec-builder + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/release-reusable-promote-to-final.yml b/.github/workflows/release-reusable-promote-to-final.yml new file mode 100644 index 000000000000..ed4a80a01e82 --- /dev/null +++ b/.github/workflows/release-reusable-promote-to-final.yml @@ -0,0 +1,83 @@ +name: Promote rc to final + +on: + workflow_call: + inputs: + package: + description: Package to be promoted + required: true + type: string + + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX that will be changed to final in the form of polkadot-stableYYMM(-X) + required: true + type: string + + target: + description: Target triple for which the artifacts are being uploaded (e.g. aarch64-apple-darwin) + required: true + type: string + + secrets: + AWS_DEFAULT_REGION: + required: true + AWS_RELEASE_ACCESS_KEY_ID: + required: true + AWS_RELEASE_SECRET_ACCESS_KEY: + required: true + +jobs: + + promote-release-artifacts: + environment: release + runs-on: ubuntu-latest + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + AWS_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Prepare final tag + id: prepare_final_tag + shell: bash + run: | + tag="$(echo ${{ inputs.release_tag }} | sed 's/-rc[0-9]*$//')" + echo $tag + echo "FINAL_TAG=${tag}" >> $GITHUB_OUTPUT + + - name: Fetch binaries from s3 based on version + run: | + . 
./.github/scripts/common/lib.sh + + VERSION="${{ inputs.release_tag }}" + if [[ ${{ inputs.package }} == 'polkadot' ]]; then + packages=(polkadot polkadot-prepare-worker polkadot-execute-worker) + for package in "${packages[@]}"; do + fetch_release_artifacts_from_s3 $package ${{ inputs.target }} + done + else + fetch_release_artifacts_from_s3 ${{ inputs.package }} ${{ inputs.target }} + fi + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ env.AWS_REGION }} + + - name: Upload ${{ inputs.package }} ${{ inputs.target }} artifacts to s3 + run: | + . ./.github/scripts/release/release_lib.sh + + if [[ ${{ inputs.package }} == 'polkadot' ]]; then + packages=(polkadot polkadot-prepare-worker polkadot-execute-worker) + for package in "${packages[@]}"; do + upload_s3_release $package ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} + done + else + upload_s3_release ${{ inputs.package }} ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} + fi diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml index dc1b4553eb9b..0222b2aa91e2 100644 --- a/.github/workflows/release-reusable-rc-buid.yml +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -133,7 +133,7 @@ jobs: - name: Upload ${{ matrix.binaries }} artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ matrix.binaries }} + name: ${{ matrix.binaries }}_${{ inputs.target }} path: /artifacts/${{ matrix.binaries }} build-macos-rc: @@ -285,7 +285,7 @@ jobs: - name: Upload ${{inputs.package }} artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ inputs.package }} + name: ${{ inputs.package }}_${{ inputs.target }} path: target/production overwrite: true diff --git a/.github/workflows/release-reusable-s3-upload.yml b/.github/workflows/release-reusable-s3-upload.yml index f85466bc8c07..48c7e53c6c8f 100644 --- a/.github/workflows/release-reusable-s3-upload.yml +++ b/.github/workflows/release-reusable-s3-upload.yml @@ -9,7 +9,7 @@ on: type: string release_tag: - description: Tag matching the actual release candidate with the format stableYYMM-rcX or stableYYMM-rcX + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM-rcX required: true type: string @@ -40,18 +40,10 @@ jobs: uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Download amd64 artifacts - if: ${{ inputs.target == 'x86_64-unknown-linux-gnu' }} uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: ${{ inputs.package }} - path: artifacts/${{ inputs.package }} - - - name: Download arm artifacts - if: ${{ inputs.target == 'aarch64-apple-darwin' }} - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - with: - name: ${{ inputs.package }}_aarch64-apple-darwin - path: artifacts/${{ inputs.package }} + name: ${{ inputs.package }}_${{ inputs.target }} + path: release-artifacts/${{ inputs.target }}/${{ inputs.package }} - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 From 34632ed68272ce2c250cb085c5ab9e53f0a2ced6 Mon Sep 17 00:00:00 2001 From: 
Javier Viola <363911+pepoviola@users.noreply.github.com> Date: Wed, 4 Dec 2024 13:29:13 +0100 Subject: [PATCH 59/68] Disable flaky tests reported in #6574/#6644 (#6749) Reference issues #6574 #6644 --- .gitlab/pipeline/zombienet/polkadot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 3dab49a118e5..ac4bdac7ad15 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -179,7 +179,7 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: --local-dir="${LOCAL_DIR}/elastic_scaling" --test="0001-basic-3cores-6s-blocks.zndsl" -zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: +.zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: extends: - .zombienet-polkadot-common before_script: @@ -233,7 +233,7 @@ zombienet-polkadot-functional-0015-coretime-shared-core: --local-dir="${LOCAL_DIR}/functional" --test="0016-approval-voting-parallel.zndsl" -zombienet-polkadot-functional-0017-sync-backing: +.zombienet-polkadot-functional-0017-sync-backing: extends: - .zombienet-polkadot-common script: From 5ca726750da563c46449f9aa915296e6c6967e61 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Wed, 4 Dec 2024 14:47:47 +0100 Subject: [PATCH 60/68] chore: Update litep2p to v0.8.3 (#6742) ## [0.8.3] - 2024-12-03 This release includes two fixes for small memory leaks on edge-cases in the notification and request-response protocols. ### Fixed - req-resp: Fix memory leak of pending substreams ([#297](https://github.com/paritytech/litep2p/pull/297)) - notification: Fix memory leak of pending substreams ([#296](https://github.com/paritytech/litep2p/pull/296)) cc @paritytech/networking --------- Signed-off-by: Alexandru Vasile --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- prdoc/pr_6742.prdoc | 11 +++++++++++ substrate/client/network/src/litep2p/mod.rs | 2 +- 4 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 prdoc/pr_6742.prdoc diff --git a/Cargo.lock b/Cargo.lock index 863822f4ffd5..eee12dc5bc40 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10225,9 +10225,9 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "569e7dbec8a0d4b08d30f4942cd579cfe8db5d3f83f8604abe61697c38d17e73" +checksum = "14e490b5a6d486711fd0284bd30e607a287343f2935a59a9192bd7109e85f443" dependencies = [ "async-trait", "bs58", diff --git a/Cargo.toml b/Cargo.toml index ecc385504181..49fdc198fe33 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -848,7 +848,7 @@ linked-hash-map = { version = "0.5.4" } linked_hash_set = { version = "0.1.4" } linregress = { version = "0.5.1" } lite-json = { version = "0.2.0", default-features = false } -litep2p = { version = "0.8.2", features = ["websocket"] } +litep2p = { version = "0.8.3", features = ["websocket"] } log = { version = "0.4.22", default-features = false } macro_magic = { version = "0.5.1" } maplit = { version = "1.0.2" } diff --git a/prdoc/pr_6742.prdoc b/prdoc/pr_6742.prdoc new file mode 100644 index 000000000000..92c3755a3c28 --- /dev/null +++ b/prdoc/pr_6742.prdoc @@ -0,0 +1,11 @@ +title: Update litep2p backend to v0.8.3 +doc: +- audience: Node Dev + description: |- + This release includes two fixes for small memory leaks on edge-cases in the notification and request-response protocols. 
+ While at it, have downgraded a log message from litep2p. + +crates: +- name: sc-network + bump: patch + diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index 6d3575fc2b6b..b6d64b34d64a 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -753,7 +753,7 @@ impl NetworkBackend for Litep2pNetworkBac } if self.litep2p.add_known_address(peer.into(), iter::once(address.clone())) == 0usize { - log::warn!( + log::debug!( target: LOG_TARGET, "couldn't add known address ({address}) for {peer:?}, unsupported transport" ); From 2779043b0f667b75062cdc085e8052190b78cb20 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Wed, 4 Dec 2024 17:43:51 +0100 Subject: [PATCH 61/68] [CI/CD] Fix permissions issue in the backport to stable flow (#6754) This PR has changes to the `command-backport.yml`: - swapped action that creates backports PRs from master to the stable branches and added another app with more permissions --- .github/workflows/command-backport.yml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/command-backport.yml b/.github/workflows/command-backport.yml index eecf0ac72d2c..53dcea2f1d6d 100644 --- a/.github/workflows/command-backport.yml +++ b/.github/workflows/command-backport.yml @@ -29,12 +29,13 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Generate token - id: generate_token - uses: tibdex/github-app-token@v2.1.0 + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 with: - app_id: ${{ secrets.CMD_BOT_APP_ID }} - private_key: ${{ secrets.CMD_BOT_APP_KEY }} + app-id: ${{ vars.RELEASE_AUTOMATION_APP_ID }} + private-key: ${{ secrets.RELEASE_AUTOMATION_APP_PRIVATE_KEY }} + owner: paritytech - name: Create backport pull requests uses: korthout/backport-action@v3 @@ -42,7 +43,7 @@ jobs: with: target_branches: stable2407 stable2409 stable2412 merge_commits: skip - github_token: ${{ steps.generate_token.outputs.token }} + github_token: ${{ steps.generate_write_token.outputs.token }} pull_description: | Backport #${pull_number} into `${target_branch}` from ${pull_author}. 
@@ -86,7 +87,7 @@ jobs: const reviewer = '${{ github.event.pull_request.user.login }}'; for (const pullNumber of pullNumbers) { - await github.pulls.createReviewRequest({ + await github.pulls.requestReviewers({ owner: context.repo.owner, repo: context.repo.repo, pull_number: parseInt(pullNumber), From f540b5265d3ef62fa58ba75a5d6c7e10ed3d7fec Mon Sep 17 00:00:00 2001 From: ron Date: Thu, 5 Dec 2024 01:37:44 +0800 Subject: [PATCH 62/68] Smoke test for multiple-hop --- .../outbound-router/src/v2/convert.rs | 4 +- .../src/tests/snowbridge.rs | 2 +- .../src/tests/snowbridge_v2_outbound.rs | 357 ++++++++++++++++-- 3 files changed, 331 insertions(+), 32 deletions(-) diff --git a/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs b/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs index 25ecdcee3bc6..96eeea06bc8b 100644 --- a/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs +++ b/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs @@ -5,7 +5,7 @@ use codec::DecodeAll; use core::slice::Iter; use frame_support::{ensure, traits::Get, BoundedVec}; -use snowbridge_core::{TokenId, TokenIdOf, TokenIdOf as LocationIdOf}; +use snowbridge_core::{AgentIdOf, TokenId, TokenIdOf}; use snowbridge_outbound_primitives::{ v2::{Command, Message}, TransactInfo, @@ -148,7 +148,7 @@ where // Check AliasOrigin. let origin_location = match_expression!(self.next()?, AliasOrigin(origin), origin) .ok_or(AliasOriginExpected)?; - let origin = LocationIdOf::convert_location(origin_location).ok_or(InvalidOrigin)?; + let origin = AgentIdOf::convert_location(origin_location).ok_or(InvalidOrigin)?; let (deposit_assets, beneficiary) = match_expression!( self.next()?, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs index bee5665d56ce..206005c9890c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs @@ -31,7 +31,7 @@ use xcm_executor::traits::ConvertLocation; const INITIAL_FUND: u128 = 5_000_000_000_000; pub const CHAIN_ID: u64 = 11155111; -pub const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); +pub const WETH: [u8; 20] = hex!("c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); const XCM_FEE: u128 = 100_000_000_000; const TOKEN_AMOUNT: u128 = 100_000_000_000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs index 658612ed2ddc..3be500e98953 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs @@ -12,9 +12,23 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::imports::*; +use crate::{ + create_pool_with_native_on, + imports::*, + tests::snowbridge::{CHAIN_ID, WETH}, +}; +use emulated_integration_tests_common::PenpalBTeleportableAssetLocation; use frame_support::traits::fungibles::Mutate; use hex_literal::hex; +use rococo_westend_system_emulated_network::{ + bridge_hub_rococo_emulated_chain::genesis::ASSETHUB_PARA_ID, + penpal_emulated_chain::{ + penpal_runtime::xcm_config::{ + CheckingAccount, LocalTeleportableToAssetHub, TELEPORTABLE_ASSET_ID, + }, + PenpalAssetOwner, + }, +}; use snowbridge_core::AssetMetadata; use snowbridge_outbound_primitives::TransactInfo; use snowbridge_router_primitives::inbound::EthereumLocationsConverterFor; @@ -23,13 +37,11 @@ use xcm::v5::AssetTransferFilter; use xcm_executor::traits::ConvertLocation; const INITIAL_FUND: u128 = 50_000_000_000_000; -pub const CHAIN_ID: u64 = 11155111; -pub const WETH: [u8; 20] = hex!("c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); const AGENT_ADDRESS: [u8; 20] = hex!("90A987B944Cb1dCcE5564e5FDeCD7a54D3de27Fe"); -const TOKEN_AMOUNT: u128 = 100_000_000_000; -const REMOTE_FEE_AMOUNT_IN_WETH: u128 = 4_000_000_000; -const LOCAL_FEE_AMOUNT_IN_DOT: u128 = 200_000_000_000; +const TOKEN_AMOUNT: u128 = 1_000_000_000_000; +const REMOTE_FEE_AMOUNT_IN_WETH: u128 = 400_000_000_000; +const LOCAL_FEE_AMOUNT_IN_DOT: u128 = 800_000_000_000; const EXECUTION_WEIGHT: u64 = 8_000_000_000; @@ -43,7 +55,7 @@ pub fn weth_location() -> Location { ) } -pub fn destination() -> Location { +pub fn ethereum() -> Location { Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]) } @@ -51,23 +63,33 @@ pub fn beneficiary() -> Location { Location::new(0, [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }]) } -pub fn fund_sovereign() { +pub fn asset_hub() -> Location { + Location::new(1, Parachain(ASSETHUB_PARA_ID)) +} + +pub fn fund_on_bh() { let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); } -pub fn register_weth() { - let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); - let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); +pub fn register_weth_on_ah() { + let ethereum_sovereign: AccountId = + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&Location::new( + 2, + [GlobalConsensus(EthereumNetwork::get())], + )) + .unwrap() + .into(); + AssetHubWestend::execute_with(|| { type RuntimeOrigin = ::RuntimeOrigin; assert_ok!(::ForeignAssets::force_create( RuntimeOrigin::root(), weth_location().try_into().unwrap(), - assethub_sovereign.clone().into(), - false, + ethereum_sovereign.clone().into(), + true, 1, )); @@ -110,11 +132,176 @@ pub fn register_relay_token() { }); } +pub fn register_weth_on_penpal() { + PenpalB::execute_with(|| { + let ethereum_sovereign: AccountId = + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&Location::new( + 2, + [GlobalConsensus(EthereumNetwork::get())], + )) + .unwrap() + .into(); + assert_ok!(::ForeignAssets::force_create( + ::RuntimeOrigin::root(), + weth_location().try_into().unwrap(), + ethereum_sovereign.into(), + true, + 1, + )); + assert_ok!(::ForeignAssets::mint_into( + weth_location().try_into().unwrap(), + &PenpalBReceiver::get(), + 
TOKEN_AMOUNT, + )); + assert_ok!(::ForeignAssets::mint_into( + weth_location().try_into().unwrap(), + &PenpalBSender::get(), + TOKEN_AMOUNT, + )); + }); +} + +pub fn register_pal_on_ah() { + // Create PAL(i.e. native asset for penpal) on AH. + AssetHubWestend::execute_with(|| { + type RuntimeOrigin = ::RuntimeOrigin; + let penpal_asset_id = Location::new(1, Parachain(PenpalB::para_id().into())); + + assert_ok!(::ForeignAssets::force_create( + RuntimeOrigin::root(), + penpal_asset_id.clone(), + PenpalAssetOwner::get().into(), + false, + 1_000_000, + )); + + assert!(::ForeignAssets::asset_exists( + penpal_asset_id.clone(), + )); + + assert_ok!(::ForeignAssets::mint_into( + penpal_asset_id.clone(), + &AssetHubWestendReceiver::get(), + TOKEN_AMOUNT, + )); + + assert_ok!(::ForeignAssets::mint_into( + penpal_asset_id.clone(), + &AssetHubWestendSender::get(), + TOKEN_AMOUNT, + )); + }); +} + +pub fn fund_on_penpal() { + PenpalB::fund_accounts(vec![ + (PenpalBReceiver::get(), INITIAL_FUND), + (PenpalBSender::get(), INITIAL_FUND), + (CheckingAccount::get(), INITIAL_FUND), + ]); + PenpalB::execute_with(|| { + assert_ok!(::ForeignAssets::mint_into( + Location::parent(), + &PenpalBReceiver::get(), + TOKEN_AMOUNT, + )); + assert_ok!(::ForeignAssets::mint_into( + Location::parent(), + &PenpalBSender::get(), + TOKEN_AMOUNT, + )); + }); + PenpalB::execute_with(|| { + assert_ok!(::Assets::mint_into( + TELEPORTABLE_ASSET_ID, + &PenpalBReceiver::get(), + TOKEN_AMOUNT, + )); + assert_ok!(::Assets::mint_into( + TELEPORTABLE_ASSET_ID, + &PenpalBSender::get(), + TOKEN_AMOUNT, + )); + }); +} + +pub fn set_trust_reserve_on_penpal() { + PenpalB::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), + Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]).encode(), + )], + )); + }); +} + +pub fn fund_on_ah() { + let penpal_sovereign = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalB::para_id()), + ); + + AssetHubWestend::execute_with(|| { + assert_ok!(::ForeignAssets::mint_into( + weth_location().try_into().unwrap(), + &penpal_sovereign, + TOKEN_AMOUNT, + )); + }); + + let ethereum_sovereign: AccountId = + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&Location::new( + 2, + [GlobalConsensus(EthereumNetwork::get())], + )) + .unwrap() + .into(); + AssetHubWestend::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]); +} + +pub fn create_pools() { + // We create a pool between WND and WETH in AssetHub to support paying for fees with WETH. + let ethereum_sovereign: AccountId = + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&Location::new( + 2, + [GlobalConsensus(EthereumNetwork::get())], + )) + .unwrap() + .into(); + AssetHubWestend::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]); + PenpalB::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]); + create_pool_with_native_on!(AssetHubWestend, weth_location(), true, ethereum_sovereign.clone()); + // We also need a pool between WND and WETH on PenpalB to support paying for fees with WETH. 
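+ // Without this liquidity the WETH set aside for fees could not be converted into the local
+ // fee asset; the conversion is done by pallet-asset-conversion, which is why the test below
+ // asserts a `SwapCreditExecuted` event on AssetHub.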
+ create_pool_with_native_on!(PenpalB, weth_location(), true, ethereum_sovereign.clone()); +} + +pub fn register_pal_on_bh() { + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + assert_ok!(::EthereumSystem::register_token( + RuntimeOrigin::root(), + Box::new(VersionedLocation::from(PenpalBTeleportableAssetLocation::get())), + AssetMetadata { + name: "pal".as_bytes().to_vec().try_into().unwrap(), + symbol: "pal".as_bytes().to_vec().try_into().unwrap(), + decimals: 12, + }, + )); + assert_expected_events!( + BridgeHubWestend, + vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::RegisterToken { .. }) => {},] + ); + }); +} + #[test] fn send_weth_from_asset_hub_to_ethereum() { - fund_sovereign(); + fund_on_bh(); - register_weth(); + register_weth_on_ah(); AssetHubWestend::execute_with(|| { type RuntimeOrigin = ::RuntimeOrigin; @@ -139,7 +326,7 @@ fn send_weth_from_asset_hub_to_ethereum() { WithdrawAsset(assets.clone().into()), PayFees { asset: local_fee_asset.clone() }, InitiateTransfer { - destination: destination(), + destination: ethereum(), remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( remote_fee_asset.clone().into(), ))), @@ -176,13 +363,13 @@ fn send_weth_from_asset_hub_to_ethereum() { #[test] fn transfer_relay_token() { let ethereum_sovereign: AccountId = - EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&destination()) + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(ðereum()) .unwrap() .into(); - fund_sovereign(); + fund_on_bh(); - register_weth(); + register_weth_on_ah(); register_relay_token(); @@ -208,7 +395,7 @@ fn transfer_relay_token() { WithdrawAsset(assets.clone().into()), PayFees { asset: local_fee_asset.clone() }, InitiateTransfer { - destination: destination(), + destination: ethereum(), remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( remote_fee_asset.clone().into(), ))), @@ -256,9 +443,9 @@ fn transfer_relay_token() { #[test] fn send_weth_and_dot_from_asset_hub_to_ethereum() { - fund_sovereign(); + fund_on_bh(); - register_weth(); + register_weth_on_ah(); register_relay_token(); @@ -285,7 +472,7 @@ fn send_weth_and_dot_from_asset_hub_to_ethereum() { WithdrawAsset(assets.clone().into()), PayFees { asset: local_fee_asset.clone() }, InitiateTransfer { - destination: destination(), + destination: ethereum(), remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( remote_fee_asset.clone().into(), ))), @@ -333,9 +520,11 @@ fn create_agent() { ], ); - ::EthereumSystem::force_create_agent( - RuntimeOrigin::root(), - bx!(location.into()), + assert_ok!( + ::EthereumSystem::force_create_agent( + RuntimeOrigin::root(), + bx!(location.into()), + ) ); assert_expected_events!( BridgeHubWestend, @@ -349,9 +538,9 @@ fn transact_with_agent() { let weth_asset_location: Location = (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); - fund_sovereign(); + fund_on_bh(); - register_weth(); + register_weth_on_ah(); BridgeHubWestend::execute_with(|| {}); @@ -390,7 +579,7 @@ fn transact_with_agent() { WithdrawAsset(assets.clone().into()), PayFees { asset: local_fee_asset.clone() }, InitiateTransfer { - destination: destination(), + destination: ethereum(), remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( remote_fee_asset.clone().into(), ))), @@ -425,3 +614,113 @@ fn transact_with_agent() { ); }); } + +#[test] +fn send_penpal_native_asset_to_ethereum() { + fund_on_bh(); + register_weth_on_ah(); 
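+ // The rest of the setup registers PAL (Penpal's native asset) on AssetHub and BridgeHub,
+ // registers WETH on Penpal, funds the accounts involved on both chains, lets Penpal treat
+ // Ethereum as a trusted reserve, and seeds the WND/WETH fee pools.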
+ register_pal_on_ah(); + register_pal_on_bh(); + fund_on_ah(); + fund_on_penpal(); + register_weth_on_penpal(); + set_trust_reserve_on_penpal(); + create_pools(); + + PenpalB::execute_with(|| { + type RuntimeOrigin = ::RuntimeOrigin; + + let local_fee_asset_on_penpal = + Asset { id: AssetId(Location::parent()), fun: Fungible(TOKEN_AMOUNT) }; + + let remote_fee_asset_on_ah = + Asset { id: AssetId(weth_location()), fun: Fungible(REMOTE_FEE_AMOUNT_IN_WETH) }; + + let remote_fee_asset_on_ethereum = + Asset { id: AssetId(weth_location()), fun: Fungible(REMOTE_FEE_AMOUNT_IN_WETH) }; + + let transfer_asset = + Asset { id: AssetId(LocalTeleportableToAssetHub::get()), fun: Fungible(TOKEN_AMOUNT) }; + + let transfer_asset_reanchor_on_ah = Asset { + id: AssetId(PenpalBTeleportableAssetLocation::get()), + fun: Fungible(TOKEN_AMOUNT), + }; + + let assets = vec![ + local_fee_asset_on_penpal.clone(), + remote_fee_asset_on_ah.clone(), + remote_fee_asset_on_ethereum.clone(), + transfer_asset.clone(), + ]; + + let transact_info = + TransactInfo { target: Default::default(), data: vec![], gas_limit: 40000, value: 0 }; + + let xcms = VersionedXcm::from(Xcm(vec![ + WithdrawAsset(assets.clone().into()), + PayFees { asset: local_fee_asset_on_penpal.clone() }, + InitiateTransfer { + destination: asset_hub(), + remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( + remote_fee_asset_on_ah.clone().into(), + ))), + preserve_origin: true, + assets: vec![ + AssetTransferFilter::ReserveWithdraw(Definite( + remote_fee_asset_on_ethereum.clone().into(), + )), + // Should use Teleport here because: + // a. Penpal is configured to allow teleport specific asset to AH + // b. AH is configured to trust asset teleport from sibling chain + AssetTransferFilter::Teleport(Definite(transfer_asset.clone().into())), + ], + remote_xcm: Xcm(vec![InitiateTransfer { + destination: ethereum(), + remote_fees: Some(AssetTransferFilter::ReserveWithdraw(Definite( + remote_fee_asset_on_ethereum.clone().into(), + ))), + preserve_origin: true, + // should use ReserveDeposit because Ethereum does not trust asset from penpal. + // transfer_asset should be reachored first on AH + assets: vec![AssetTransferFilter::ReserveDeposit(Definite( + transfer_asset_reanchor_on_ah.clone().into(), + ))], + remote_xcm: Xcm(vec![ + DepositAsset { assets: Wild(All), beneficiary: beneficiary() }, + Transact { + origin_kind: OriginKind::SovereignAccount, + call: transact_info.encode().into(), + }, + ]), + }]), + }, + ])); + + assert_ok!(::PolkadotXcm::execute( + RuntimeOrigin::signed(PenpalBSender::get()), + bx!(xcms), + Weight::from(EXECUTION_WEIGHT), + )); + }); + + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::SwapCreditExecuted { .. }) => {},] + ); + assert_expected_events!( + AssetHubWestend, + vec![RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},] + ); + }); + + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubWestend, + vec![RuntimeEvent::EthereumOutboundQueueV2(snowbridge_pallet_outbound_queue_v2::Event::MessageQueued{ .. 
}) => {},] + ); + }); +} From 82117ad53fc68e8097183e759926b62265ffff0a Mon Sep 17 00:00:00 2001 From: Jarkko Sakkinen Date: Wed, 4 Dec 2024 18:55:33 +0100 Subject: [PATCH 63/68] wasm-builder: Use riscv32emac-unknown-none-polkavm.json target (#6419) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Closes #6335. ## Integration N/A ## Review Notes `RuntimeTarget` is converted to return path to the custom target JSON file --------- Signed-off-by: Jarkko Sakkinen Co-authored-by: Alexander Theißen Co-authored-by: Koute --- Cargo.lock | 12 +- Cargo.toml | 4 +- prdoc/pr_6419.prdoc | 12 ++ substrate/utils/wasm-builder/src/builder.rs | 3 +- substrate/utils/wasm-builder/src/lib.rs | 105 +++++++----------- .../utils/wasm-builder/src/prerequisites.rs | 7 +- .../utils/wasm-builder/src/wasm_project.rs | 28 +++-- 7 files changed, 87 insertions(+), 84 deletions(-) create mode 100644 prdoc/pr_6419.prdoc diff --git a/Cargo.lock b/Cargo.lock index eee12dc5bc40..dad578ba0c1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14752,7 +14752,7 @@ dependencies = [ "anyhow", "frame-system 28.0.0", "log", - "polkavm-linker 0.17.0", + "polkavm-linker 0.17.1", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", @@ -19936,9 +19936,9 @@ dependencies = [ [[package]] name = "polkavm-linker" -version = "0.17.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d359dc721d2cc9b555ebb3558c305112ddc5bdac09d26f95f2f7b49c1f2db7e9" +checksum = "0422ead3030d5cde69e2206dbc7d65da872b121876507cd5363f6c6e6aa45157" dependencies = [ "dirs", "gimli 0.31.1", @@ -26495,7 +26495,7 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", - "polkavm-derive 0.9.1", + "polkavm-derive 0.17.0", "rustversion", "secp256k1 0.28.2", "sp-core 28.0.0", @@ -26979,7 +26979,7 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", - "polkavm-derive 0.9.1", + "polkavm-derive 0.17.0", "primitive-types 0.13.1", "rustversion", "sp-core 28.0.0", @@ -28623,7 +28623,7 @@ dependencies = [ "merkleized-metadata", "parity-scale-codec", "parity-wasm", - "polkavm-linker 0.9.2", + "polkavm-linker 0.17.1", "sc-executor 0.32.0", "shlex", "sp-core 28.0.0", diff --git a/Cargo.toml b/Cargo.toml index 49fdc198fe33..383fc46c4e76 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1090,8 +1090,8 @@ polkadot-test-client = { path = "polkadot/node/test/client" } polkadot-test-runtime = { path = "polkadot/runtime/test-runtime" } polkadot-test-service = { path = "polkadot/node/test/service" } polkavm = { version = "0.9.3", default-features = false } -polkavm-derive = "0.9.1" -polkavm-linker = "0.9.2" +polkavm-derive = "0.17.0" +polkavm-linker = "0.17.1" portpicker = { version = "0.1.1" } pretty_assertions = { version = "1.3.0" } primitive-types = { version = "0.13.1", default-features = false, features = [ diff --git a/prdoc/pr_6419.prdoc b/prdoc/pr_6419.prdoc new file mode 100644 index 000000000000..6cc155d64b91 --- /dev/null +++ b/prdoc/pr_6419.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Use the custom target riscv32emac-unknown-none-polkavm +doc: + - audience: Runtime Dev + description: | + Closes: https://github.com/paritytech/polkadot-sdk/issues/6335 + +crates: +- name: substrate-wasm-builder + bump: patch diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs index 
a40aafe1d812..5bdc743eac31 100644 --- a/substrate/utils/wasm-builder/src/builder.rs +++ b/substrate/utils/wasm-builder/src/builder.rs @@ -235,7 +235,8 @@ impl WasmBuilder { /// Build the WASM binary. pub fn build(mut self) { - let target = crate::runtime_target(); + let target = RuntimeTarget::new(); + if target == RuntimeTarget::Wasm { if self.export_heap_base { self.rust_flags.push("-Clink-arg=--export=__heap_base".into()); diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 420ecd63e1dc..ce90f492e08f 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -112,7 +112,6 @@ //! wasm32-unknown-unknown --toolchain nightly-2020-02-20`. use std::{ - collections::BTreeSet, env, fs, io::BufRead, path::{Path, PathBuf}, @@ -254,26 +253,22 @@ struct CargoCommand { program: String, args: Vec, version: Option, - target_list: Option>, } impl CargoCommand { fn new(program: &str) -> Self { let version = Self::extract_version(program, &[]); - let target_list = Self::extract_target_list(program, &[]); - CargoCommand { program: program.into(), args: Vec::new(), version, target_list } + CargoCommand { program: program.into(), args: Vec::new(), version } } fn new_with_args(program: &str, args: &[&str]) -> Self { let version = Self::extract_version(program, args); - let target_list = Self::extract_target_list(program, args); CargoCommand { program: program.into(), args: args.iter().map(ToString::to_string).collect(), version, - target_list, } } @@ -294,23 +289,6 @@ impl CargoCommand { Version::extract(&version) } - fn extract_target_list(program: &str, args: &[&str]) -> Option> { - // This is technically an unstable option, but we don't care because we only need this - // to build RISC-V runtimes, and those currently require a specific nightly toolchain - // anyway, so it's totally fine for this to fail in other cases. - let list = Command::new(program) - .args(args) - .args(&["rustc", "-Z", "unstable-options", "--print", "target-list"]) - // Make sure if we're called from within a `build.rs` the host toolchain won't override - // a rustup toolchain we've picked. - .env_remove("RUSTC") - .output() - .ok() - .and_then(|o| String::from_utf8(o.stdout).ok())?; - - Some(list.trim().split("\n").map(ToString::to_string).collect()) - } - /// Returns the version of this cargo command or `None` if it failed to extract the version. fn version(&self) -> Option { self.version @@ -326,19 +304,10 @@ impl CargoCommand { fn supports_substrate_runtime_env(&self, target: RuntimeTarget) -> bool { match target { RuntimeTarget::Wasm => self.supports_substrate_runtime_env_wasm(), - RuntimeTarget::Riscv => self.supports_substrate_runtime_env_riscv(), + RuntimeTarget::Riscv => true, } } - /// Check if the supplied cargo command supports our RISC-V runtime environment. - fn supports_substrate_runtime_env_riscv(&self) -> bool { - let Some(target_list) = self.target_list.as_ref() else { return false }; - // This is our custom target which currently doesn't exist on any upstream toolchain, - // so if it exists it's guaranteed to be our custom toolchain and have have everything - // we need, so any further version checks are unnecessary at this point. - target_list.contains("riscv32ema-unknown-none-elf") - } - /// Check if the supplied cargo command supports our Substrate wasm environment. /// /// This means that either the cargo version is at minimum 1.68.0 or this is a nightly cargo. 
@@ -409,13 +378,6 @@ fn get_bool_environment_variable(name: &str) -> Option { } } -/// Returns whether we need to also compile the standard library when compiling the runtime. -fn build_std_required() -> bool { - let default = runtime_target() == RuntimeTarget::Wasm; - - crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(default) -} - #[derive(Copy, Clone, PartialEq, Eq)] enum RuntimeTarget { Wasm, @@ -423,36 +385,55 @@ enum RuntimeTarget { } impl RuntimeTarget { - fn rustc_target(self) -> &'static str { + /// Creates a new instance. + fn new() -> Self { + let Some(value) = env::var_os(RUNTIME_TARGET) else { + return Self::Wasm; + }; + + if value == "wasm" { + Self::Wasm + } else if value == "riscv" { + Self::Riscv + } else { + build_helper::warning!( + "RUNTIME_TARGET environment variable must be set to either \"wasm\" or \"riscv\"" + ); + std::process::exit(1); + } + } + + /// Figures out the target parameter value for rustc. + fn rustc_target(self) -> String { match self { - RuntimeTarget::Wasm => "wasm32-unknown-unknown", - RuntimeTarget::Riscv => "riscv32ema-unknown-none-elf", + RuntimeTarget::Wasm => "wasm32-unknown-unknown".to_string(), + RuntimeTarget::Riscv => { + let path = polkavm_linker::target_json_32_path().expect("riscv not found"); + path.into_os_string().into_string().unwrap() + }, } } - fn build_subdirectory(self) -> &'static str { - // Keep the build directories separate so that when switching between - // the targets we won't trigger unnecessary rebuilds. + /// Figures out the target directory name used by cargo. + fn rustc_target_dir(self) -> &'static str { match self { - RuntimeTarget::Wasm => "wbuild", - RuntimeTarget::Riscv => "rbuild", + RuntimeTarget::Wasm => "wasm32-unknown-unknown", + RuntimeTarget::Riscv => "riscv32emac-unknown-none-polkavm", } } -} -fn runtime_target() -> RuntimeTarget { - let Some(value) = env::var_os(RUNTIME_TARGET) else { - return RuntimeTarget::Wasm; - }; + /// Figures out the build-std argument. + fn rustc_target_build_std(self) -> Option<&'static str> { + if !crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(true) { + return None; + } - if value == "wasm" { - RuntimeTarget::Wasm - } else if value == "riscv" { - RuntimeTarget::Riscv - } else { - build_helper::warning!( - "the '{RUNTIME_TARGET}' environment variable has an invalid value; it must be either 'wasm' or 'riscv'" - ); - std::process::exit(1); + // This is a nightly-only flag. 
+ let arg = match self { + RuntimeTarget::Wasm => "build-std", + RuntimeTarget::Riscv => "build-std=core,alloc", + }; + + Some(arg) } } diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index 4de6b87f618d..9abfd1725237 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -196,11 +196,14 @@ fn check_wasm_toolchain_installed( error, colorize_aux_message(&"-".repeat(60)), )) - } + }; } let version = dummy_crate.get_rustc_version(); - if crate::build_std_required() { + + let target = RuntimeTarget::new(); + assert!(target == RuntimeTarget::Wasm); + if target.rustc_target_build_std().is_some() { if let Some(sysroot) = dummy_crate.get_sysroot() { let src_path = Path::new(sysroot.trim()).join("lib").join("rustlib").join("src").join("rust"); diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 26edd2ea1f22..6530e4c22fb9 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -109,6 +109,15 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { crate_metadata } +/// Keep the build directories separate so that when switching between the +/// targets we won't trigger unnecessary rebuilds. +fn build_subdirectory(target: RuntimeTarget) -> &'static str { + match target { + RuntimeTarget::Wasm => "wbuild", + RuntimeTarget::Riscv => "rbuild", + } +} + /// Creates the WASM project, compiles the WASM binary and compacts the WASM binary. /// /// # Returns @@ -125,7 +134,7 @@ pub(crate) fn create_and_compile( #[cfg(feature = "metadata-hash")] enable_metadata_hash: Option, ) -> (Option, WasmBinaryBloaty) { let runtime_workspace_root = get_wasm_workspace_root(); - let runtime_workspace = runtime_workspace_root.join(target.build_subdirectory()); + let runtime_workspace = runtime_workspace_root.join(build_subdirectory(target)); let crate_metadata = crate_metadata(orig_project_cargo_toml); @@ -770,7 +779,7 @@ impl BuildConfiguration { .collect::>() .iter() .rev() - .take_while(|c| c.as_os_str() != target.build_subdirectory()) + .take_while(|c| c.as_os_str() != build_subdirectory(target)) .last() .expect("We put the runtime project within a `target/.../[rw]build` path; qed") .as_os_str() @@ -841,9 +850,7 @@ fn build_bloaty_blob( "-C target-cpu=mvp -C target-feature=-sign-ext -C link-arg=--export-table ", ); }, - RuntimeTarget::Riscv => { - rustflags.push_str("-C target-feature=+lui-addi-fusion -C relocation-model=pie -C link-arg=--emit-relocs -C link-arg=--unique "); - }, + RuntimeTarget::Riscv => (), } rustflags.push_str(default_rustflags); @@ -907,10 +914,9 @@ fn build_bloaty_blob( // // So here we force the compiler to also compile the standard library crates for us // to make sure that they also only use the MVP features. - if crate::build_std_required() { - // Unfortunately this is still a nightly-only flag, but FWIW it is pretty widely used - // so it's unlikely to break without a replacement. 
- build_cmd.arg("-Z").arg("build-std"); + if let Some(arg) = target.rustc_target_build_std() { + build_cmd.arg("-Z").arg(arg); + if !cargo_cmd.supports_nightly_features() { build_cmd.env("RUSTC_BOOTSTRAP", "1"); } @@ -934,7 +940,7 @@ fn build_bloaty_blob( let blob_name = get_blob_name(target, &manifest_path); let target_directory = project .join("target") - .join(target.rustc_target()) + .join(target.rustc_target_dir()) .join(blob_build_profile.directory()); match target { RuntimeTarget::Riscv => { @@ -968,7 +974,7 @@ fn build_bloaty_blob( }, }; - std::fs::write(&polkavm_path, program.as_bytes()) + std::fs::write(&polkavm_path, program) .expect("writing the blob to a file always works"); } From 654d60c3a373cb5268f50e6cd9274580e12dce87 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 5 Dec 2024 09:42:32 +0100 Subject: [PATCH 64/68] ci: skip check-semver in master and merge queue (#6762) tbd --- .github/workflows/check-semver.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index e9bedd16e6d1..11b386da21e9 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -78,6 +78,11 @@ jobs: - name: check semver run: | + if [ -z "$PR" ]; then + echo "Skipping master/merge queue" + exit 0 + fi + export CARGO_TARGET_DIR=target export RUSTFLAGS='-A warnings -A missing_docs' export SKIP_WASM_BUILD=1 From f4a196ab1473856c9c5992239fcc2f14c2c42914 Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Thu, 5 Dec 2024 09:54:31 +0100 Subject: [PATCH 65/68] Optimize initialization of networking protocol benchmarks (#6636) # Description These changes should enhance the quality of benchmark results by excluding worker initialization time from the measurements and reducing the overall duration of the benchmarks. ### Integration It should not affect any downstream projects. ### Review Notes - Workers initialize once per benchmark to avoid side effects. - The listen address is assigned when a worker starts. - Benchmarks are divided into two groups by size to create better charts for comparison. --------- Co-authored-by: GitHub Action --- prdoc/pr_6636.prdoc | 9 + .../network/benches/notifications_protocol.rs | 386 ++++++++---------- .../benches/request_response_protocol.rs | 318 ++++++++------- 3 files changed, 353 insertions(+), 360 deletions(-) create mode 100644 prdoc/pr_6636.prdoc diff --git a/prdoc/pr_6636.prdoc b/prdoc/pr_6636.prdoc new file mode 100644 index 000000000000..1db5fd54d971 --- /dev/null +++ b/prdoc/pr_6636.prdoc @@ -0,0 +1,9 @@ +title: Optimize initialization of networking protocol benchmarks +doc: +- audience: Node Dev + description: |- + These changes should enhance the quality of benchmark results by excluding worker initialization time from the measurements and reducing the overall duration of the benchmarks. 
+ +crates: +- name: sc-network + validate: false diff --git a/substrate/client/network/benches/notifications_protocol.rs b/substrate/client/network/benches/notifications_protocol.rs index c1e18c7b7f47..40a810d616b5 100644 --- a/substrate/client/network/benches/notifications_protocol.rs +++ b/substrate/client/network/benches/notifications_protocol.rs @@ -25,55 +25,42 @@ use sc_network::{ FullNetworkConfiguration, MultiaddrWithPeerId, NetworkConfiguration, NonReservedPeerMode, NotificationHandshake, Params, ProtocolId, Role, SetConfig, }, - service::traits::NotificationEvent, + service::traits::{NetworkService, NotificationEvent}, Litep2pNetworkBackend, NetworkBackend, NetworkWorker, NotificationMetrics, NotificationService, - Roles, + PeerId, Roles, }; use sc_network_common::{sync::message::BlockAnnouncesHandshake, ExHashT}; -use sc_network_types::build_multiaddr; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Zero}; -use std::{ - net::{IpAddr, Ipv4Addr, TcpListener}, - str::FromStr, -}; +use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::runtime; +use tokio::{sync::Mutex, task::JoinHandle}; -const MAX_SIZE: u64 = 2u64.pow(30); -const SAMPLE_SIZE: usize = 50; -const NOTIFICATIONS: usize = 50; -const EXPONENTS: &[(u32, &'static str)] = &[ - (6, "64B"), - (9, "512B"), - (12, "4KB"), - (15, "64KB"), - (18, "256KB"), - (21, "2MB"), - (24, "16MB"), - (27, "128MB"), +const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of notifications, label) + (6, 100, "64B"), + (9, 100, "512B"), + (12, 100, "4KB"), + (15, 100, "64KB"), ]; - -// TODO: It's be better to bind system-provided port when initializing the worker -fn get_listen_address() -> sc_network::Multiaddr { - let ip = Ipv4Addr::from_str("127.0.0.1").unwrap(); - let listener = TcpListener::bind((IpAddr::V4(ip), 0)).unwrap(); // Bind to a random port - let local_addr = listener.local_addr().unwrap(); - let port = local_addr.port(); - - build_multiaddr!(Ip4(ip), Tcp(port)) -} +const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of notifications, label) + (18, 10, "256KB"), + (21, 10, "2MB"), + (24, 10, "16MB"), + (27, 10, "128MB"), +]; +const MAX_SIZE: u64 = 2u64.pow(30); fn create_network_worker( - listen_addr: sc_network::Multiaddr, -) -> (N, Box) +) -> (N, Arc, Arc>>) where B: BlockT + 'static, H: ExHashT, N: NetworkBackend, { let role = Role::Full; - let mut net_conf = NetworkConfiguration::new_local(); - net_conf.listen_addresses = vec![listen_addr]; + let net_conf = NetworkConfiguration::new_local(); let network_config = FullNetworkConfiguration::::new(&net_conf, None); let genesis_hash = runtime::Hash::zero(); let (block_announce_config, notification_service) = N::notification_config( @@ -110,96 +97,122 @@ where notification_metrics: NotificationMetrics::new(None), }) .unwrap(); + let network_service = worker.network_service(); + let notification_service = Arc::new(Mutex::new(notification_service)); - (worker, notification_service) + (worker, network_service, notification_service) } -async fn run_serially(size: usize, limit: usize) +struct BenchSetup { + notification_service1: Arc>>, + notification_service2: Arc>>, + peer_id2: PeerId, + handle1: JoinHandle<()>, + handle2: JoinHandle<()>, +} + +impl Drop for BenchSetup { + fn drop(&mut self) { + self.handle1.abort(); + self.handle2.abort(); + } +} + +fn setup_workers(rt: &tokio::runtime::Runtime) -> Arc where B: BlockT + 'static, H: ExHashT, N: NetworkBackend, { - let listen_address1 = 
get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, mut notification_service1) = create_network_worker::(listen_address1); - let (worker2, mut notification_service2) = - create_network_worker::(listen_address2.clone()); - let peer_id2: sc_network::PeerId = worker2.network_service().local_peer_id().into(); + let _guard = rt.enter(); - worker1 - .network_service() - .add_reserved_peer(MultiaddrWithPeerId { multiaddr: listen_address2, peer_id: peer_id2 }) - .unwrap(); + let (worker1, network_service1, notification_service1) = create_network_worker::(); + let (worker2, network_service2, notification_service2) = create_network_worker::(); + let peer_id2: sc_network::PeerId = network_service2.local_peer_id().into(); + let handle1 = tokio::spawn(worker1.run()); + let handle2 = tokio::spawn(worker2.run()); - let network1_run = worker1.run(); - let network2_run = worker2.run(); - let (tx, rx) = async_channel::bounded(10); + let ready = tokio::spawn({ + let notification_service1 = Arc::clone(¬ification_service1); + let notification_service2 = Arc::clone(¬ification_service2); - let network1 = tokio::spawn(async move { - let mut sent_counter = 0; - tokio::pin!(network1_run); - loop { - tokio::select! { - _ = &mut network1_run => {}, - event = notification_service1.next_event() => { - match event { - Some(NotificationEvent::NotificationStreamOpened { .. }) => { - sent_counter += 1; - notification_service1 - .send_async_notification(&peer_id2, vec![0; size]) - .await - .unwrap(); - }, - Some(NotificationEvent::NotificationStreamClosed { .. }) => { - if sent_counter >= limit { - break; - } - panic!("Unexpected stream closure {:?}", event); - } - event => panic!("Unexpected event {:?}", event), - }; - }, - message = rx.recv() => { - match message { - Ok(Some(_)) => { - sent_counter += 1; - notification_service1 - .send_async_notification(&peer_id2, vec![0; size]) - .await - .unwrap(); - }, - Ok(None) => break, - Err(err) => panic!("Unexpected error {:?}", err), + async move { + let listen_address2 = { + while network_service2.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + network_service2.listen_addresses()[0].clone() + }; + network_service1 + .add_reserved_peer(MultiaddrWithPeerId { + multiaddr: listen_address2, + peer_id: peer_id2, + }) + .unwrap(); - } + let mut notification_service1 = notification_service1.lock().await; + let mut notification_service2 = notification_service2.lock().await; + loop { + tokio::select! { + Some(event) = notification_service1.next_event() => { + if let NotificationEvent::NotificationStreamOpened { .. } = event { + break; + } + }, + Some(event) = notification_service2.next_event() => { + if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event { + result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); + } + }, } } } }); - let network2 = tokio::spawn(async move { - let mut received_counter = 0; - tokio::pin!(network2_run); - loop { - tokio::select! { - _ = &mut network2_run => {}, - event = notification_service2.next_event() => { - match event { - Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) => { - result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); - }, - Some(NotificationEvent::NotificationStreamOpened { .. }) => {}, - Some(NotificationEvent::NotificationReceived { .. 
}) => { - received_counter += 1; - if received_counter >= limit { - let _ = tx.send(None).await; - break - } - let _ = tx.send(Some(())).await; - }, - event => panic!("Unexpected event {:?}", event), - }; - }, + + tokio::task::block_in_place(|| { + let _ = tokio::runtime::Handle::current().block_on(ready); + }); + + Arc::new(BenchSetup { + notification_service1, + notification_service2, + peer_id2, + handle1, + handle2, + }) +} + +async fn run_serially(setup: Arc, size: usize, limit: usize) { + let (tx, rx) = async_channel::bounded(1); + let _ = tx.send(Some(())).await; + let network1 = tokio::spawn({ + let notification_service1 = Arc::clone(&setup.notification_service1); + let peer_id2 = setup.peer_id2; + async move { + let mut notification_service1 = notification_service1.lock().await; + while let Ok(message) = rx.recv().await { + let Some(_) = message else { break }; + notification_service1 + .send_async_notification(&peer_id2, vec![0; size]) + .await + .unwrap(); + } + } + }); + let network2 = tokio::spawn({ + let notification_service2 = Arc::clone(&setup.notification_service2); + async move { + let mut notification_service2 = notification_service2.lock().await; + let mut received_counter = 0; + while let Some(event) = notification_service2.next_event().await { + if let NotificationEvent::NotificationReceived { .. } = event { + received_counter += 1; + if received_counter >= limit { + let _ = tx.send(None).await; + break; + } + let _ = tx.send(Some(())).await; + } } } }); @@ -207,77 +220,34 @@ where let _ = tokio::join!(network1, network2); } -async fn run_with_backpressure(size: usize, limit: usize) -where - B: BlockT + 'static, - H: ExHashT, - N: NetworkBackend, -{ - let listen_address1 = get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, mut notification_service1) = create_network_worker::(listen_address1); - let (worker2, mut notification_service2) = - create_network_worker::(listen_address2.clone()); - let peer_id2: sc_network::PeerId = worker2.network_service().local_peer_id().into(); - - worker1 - .network_service() - .add_reserved_peer(MultiaddrWithPeerId { multiaddr: listen_address2, peer_id: peer_id2 }) - .unwrap(); - - let network1_run = worker1.run(); - let network2_run = worker2.run(); - - let network1 = tokio::spawn(async move { - let mut sent_counter = 0; - tokio::pin!(network1_run); - loop { - tokio::select! { - _ = &mut network1_run => {}, - event = notification_service1.next_event() => { - match event { - Some(NotificationEvent::NotificationStreamOpened { .. }) => { - while sent_counter < limit { - sent_counter += 1; - notification_service1 - .send_async_notification(&peer_id2, vec![0; size]) - .await - .unwrap(); - } - }, - Some(NotificationEvent::NotificationStreamClosed { .. }) => { - if sent_counter != limit { panic!("Stream closed unexpectedly") } - break - }, - event => panic!("Unexpected event {:?}", event), - }; - }, +async fn run_with_backpressure(setup: Arc, size: usize, limit: usize) { + let (tx, rx) = async_channel::bounded(1); + let network1 = tokio::spawn({ + let setup = Arc::clone(&setup); + async move { + let mut notification_service1 = setup.notification_service1.lock().await; + for _ in 0..limit { + notification_service1 + .send_async_notification(&setup.peer_id2, vec![0; size]) + .await + .unwrap(); } + let _ = rx.recv().await; } }); - let network2 = tokio::spawn(async move { - let mut received_counter = 0; - tokio::pin!(network2_run); - loop { - tokio::select! 
{ - _ = &mut network2_run => {}, - event = notification_service2.next_event() => { - match event { - Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) => { - result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); - }, - Some(NotificationEvent::NotificationStreamOpened { .. }) => {}, - Some(NotificationEvent::NotificationStreamClosed { .. }) => { - if received_counter != limit { panic!("Stream closed unexpectedly") } - break - }, - Some(NotificationEvent::NotificationReceived { .. }) => { - received_counter += 1; - if received_counter >= limit { break } - }, - event => panic!("Unexpected event {:?}", event), - }; - }, + let network2 = tokio::spawn({ + let setup = Arc::clone(&setup); + async move { + let mut notification_service2 = setup.notification_service2.lock().await; + let mut received_counter = 0; + while let Some(event) = notification_service2.next_event().await { + if let NotificationEvent::NotificationReceived { .. } = event { + received_counter += 1; + if received_counter >= limit { + let _ = tx.send(()).await; + break; + } + } } } }); @@ -285,64 +255,64 @@ where let _ = tokio::join!(network1, network2); } -fn run_benchmark(c: &mut Criterion) { +fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { let rt = tokio::runtime::Runtime::new().unwrap(); let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group("notifications_benchmark"); + let mut group = c.benchmark_group(group); group.plot_config(plot_config); - for &(exponent, label) in EXPONENTS.iter() { + let libp2p_setup = setup_workers::>(&rt); + for &(exponent, limit, label) in payload.iter() { let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(NOTIFICATIONS as u64 * size as u64)); - + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); group.bench_with_input( BenchmarkId::new("libp2p/serially", label), - &(size, NOTIFICATIONS), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::>(size, limit) - }); + b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); }, ); group.bench_with_input( - BenchmarkId::new("litep2p/serially", label), - &(size, NOTIFICATIONS), + BenchmarkId::new("libp2p/with_backpressure", label), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::( - size, limit, - ) - }); + b.to_async(&rt) + .iter(|| run_with_backpressure(Arc::clone(&libp2p_setup), size, limit)); }, ); + } + drop(libp2p_setup); + + let litep2p_setup = setup_workers::(&rt); + for &(exponent, limit, label) in payload.iter() { + let size = 2usize.pow(exponent); + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); group.bench_with_input( - BenchmarkId::new("libp2p/with_backpressure", label), - &(size, NOTIFICATIONS), + BenchmarkId::new("litep2p/serially", label), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_with_backpressure::>( - size, limit, - ) - }); + b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); }, ); group.bench_with_input( BenchmarkId::new("litep2p/with_backpressure", label), - &(size, NOTIFICATIONS), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_with_backpressure::( - size, limit, - ) - }); + b.to_async(&rt) + .iter(|| run_with_backpressure(Arc::clone(&litep2p_setup), size, limit)); }, ); } + drop(litep2p_setup); } -criterion_group! 
{ - name = benches; - config = Criterion::default().sample_size(SAMPLE_SIZE); - targets = run_benchmark +fn run_benchmark_with_small_payload(c: &mut Criterion) { + run_benchmark(c, SMALL_PAYLOAD, "notifications_protocol/small_payload"); } + +fn run_benchmark_with_large_payload(c: &mut Criterion) { + run_benchmark(c, LARGE_PAYLOAD, "notifications_protocol/large_payload"); +} + +criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); criterion_main!(benches); diff --git a/substrate/client/network/benches/request_response_protocol.rs b/substrate/client/network/benches/request_response_protocol.rs index b428d0d75ac5..85381112b753 100644 --- a/substrate/client/network/benches/request_response_protocol.rs +++ b/substrate/client/network/benches/request_response_protocol.rs @@ -25,46 +25,39 @@ use sc_network::{ FullNetworkConfiguration, IncomingRequest, NetworkConfiguration, NonReservedPeerMode, NotificationHandshake, OutgoingResponse, Params, ProtocolId, Role, SetConfig, }, + service::traits::NetworkService, IfDisconnected, Litep2pNetworkBackend, NetworkBackend, NetworkRequest, NetworkWorker, - NotificationMetrics, NotificationService, Roles, + NotificationMetrics, NotificationService, PeerId, Roles, }; use sc_network_common::{sync::message::BlockAnnouncesHandshake, ExHashT}; -use sc_network_types::build_multiaddr; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Zero}; -use std::{ - net::{IpAddr, Ipv4Addr, TcpListener}, - str::FromStr, - time::Duration, -}; +use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::runtime; +use tokio::{sync::Mutex, task::JoinHandle}; const MAX_SIZE: u64 = 2u64.pow(30); -const SAMPLE_SIZE: usize = 50; -const REQUESTS: usize = 50; -const EXPONENTS: &[(u32, &'static str)] = &[ - (6, "64B"), - (9, "512B"), - (12, "4KB"), - (15, "64KB"), - (18, "256KB"), - (21, "2MB"), - (24, "16MB"), - (27, "128MB"), +const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of requests, label) + (6, 100, "64B"), + (9, 100, "512B"), + (12, 100, "4KB"), + (15, 100, "64KB"), +]; +const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of requests, label) + (18, 10, "256KB"), + (21, 10, "2MB"), + (24, 10, "16MB"), + (27, 10, "128MB"), ]; -fn get_listen_address() -> sc_network::Multiaddr { - let ip = Ipv4Addr::from_str("127.0.0.1").unwrap(); - let listener = TcpListener::bind((IpAddr::V4(ip), 0)).unwrap(); // Bind to a random port - let local_addr = listener.local_addr().unwrap(); - let port = local_addr.port(); - - build_multiaddr!(Ip4(ip), Tcp(port)) -} - -pub fn create_network_worker( - listen_addr: sc_network::Multiaddr, -) -> (N, async_channel::Receiver, Box) +pub fn create_network_worker() -> ( + N, + Arc, + async_channel::Receiver, + Arc>>, +) where B: BlockT + 'static, H: ExHashT, @@ -80,8 +73,7 @@ where Some(tx), ); let role = Role::Full; - let mut net_conf = NetworkConfiguration::new_local(); - net_conf.listen_addresses = vec![listen_addr]; + let net_conf = NetworkConfiguration::new_local(); let mut network_config = FullNetworkConfiguration::new(&net_conf, None); network_config.add_request_response_protocol(request_response_config); let genesis_hash = runtime::Hash::zero(); @@ -119,71 +111,115 @@ where notification_metrics: NotificationMetrics::new(None), }) .unwrap(); + let notification_service = Arc::new(Mutex::new(notification_service)); + let network_service = worker.network_service(); - (worker, rx, notification_service) + (worker, 
network_service, rx, notification_service) } -async fn run_serially(size: usize, limit: usize) +struct BenchSetup { + #[allow(dead_code)] + notification_service1: Arc>>, + #[allow(dead_code)] + notification_service2: Arc>>, + network_service1: Arc, + peer_id2: PeerId, + handle1: JoinHandle<()>, + handle2: JoinHandle<()>, + #[allow(dead_code)] + rx1: async_channel::Receiver, + rx2: async_channel::Receiver, +} + +impl Drop for BenchSetup { + fn drop(&mut self) { + self.handle1.abort(); + self.handle2.abort(); + } +} + +fn setup_workers(rt: &tokio::runtime::Runtime) -> Arc where B: BlockT + 'static, H: ExHashT, N: NetworkBackend, { - let listen_address1 = get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, _rx1, _notification_service1) = create_network_worker::(listen_address1); - let service1 = worker1.network_service().clone(); - let (worker2, rx2, _notification_service2) = - create_network_worker::(listen_address2.clone()); + let _guard = rt.enter(); + + let (worker1, network_service1, rx1, notification_service1) = + create_network_worker::(); + let (worker2, network_service2, rx2, notification_service2) = + create_network_worker::(); let peer_id2 = worker2.network_service().local_peer_id(); + let handle1 = tokio::spawn(worker1.run()); + let handle2 = tokio::spawn(worker2.run()); - worker1.network_service().add_known_address(peer_id2, listen_address2.into()); + let ready = tokio::spawn({ + let network_service1 = Arc::clone(&network_service1); - let network1_run = worker1.run(); - let network2_run = worker2.run(); - let (break_tx, break_rx) = async_channel::bounded(10); - let requests = async move { - let mut sent_counter = 0; - while sent_counter < limit { - let _ = service1 - .request( - peer_id2.into(), - "/request-response/1".into(), - vec![0; 2], - None, - IfDisconnected::TryConnect, - ) - .await - .unwrap(); - sent_counter += 1; + async move { + let listen_address2 = { + while network_service2.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + network_service2.listen_addresses()[0].clone() + }; + network_service1.add_known_address(peer_id2, listen_address2.into()); } - let _ = break_tx.send(()).await; - }; + }); - let network1 = tokio::spawn(async move { - tokio::pin!(requests); - tokio::pin!(network1_run); - loop { - tokio::select! { - _ = &mut network1_run => {}, - _ = &mut requests => break, + tokio::task::block_in_place(|| { + let _ = tokio::runtime::Handle::current().block_on(ready); + }); + + Arc::new(BenchSetup { + notification_service1, + notification_service2, + network_service1, + peer_id2, + handle1, + handle2, + rx1, + rx2, + }) +} + +async fn run_serially(setup: Arc, size: usize, limit: usize) { + let (break_tx, break_rx) = async_channel::bounded(1); + let network1 = tokio::spawn({ + let network_service1 = Arc::clone(&setup.network_service1); + let peer_id2 = setup.peer_id2; + async move { + for _ in 0..limit { + let _ = network_service1 + .request( + peer_id2.into(), + "/request-response/1".into(), + vec![0; 2], + None, + IfDisconnected::TryConnect, + ) + .await + .unwrap(); } + let _ = break_tx.send(()).await; } }); - let network2 = tokio::spawn(async move { - tokio::pin!(network2_run); - loop { - tokio::select! { - _ = &mut network2_run => {}, - res = rx2.recv() => { - let IncomingRequest { pending_response, .. 
} = res.unwrap(); - pending_response.send(OutgoingResponse { - result: Ok(vec![0; size]), - reputation_changes: vec![], - sent_feedback: None, - }).unwrap(); - }, - _ = break_rx.recv() => break, + let network2 = tokio::spawn({ + let rx2 = setup.rx2.clone(); + async move { + loop { + tokio::select! { + res = rx2.recv() => { + let IncomingRequest { pending_response, .. } = res.unwrap(); + pending_response.send(OutgoingResponse { + result: Ok(vec![0; size]), + reputation_changes: vec![], + sent_feedback: None, + }).unwrap(); + }, + _ = break_rx.recv() => break, + } } } }); @@ -194,29 +230,12 @@ where // The libp2p request-response implementation does not provide any backpressure feedback. // So this benchmark is useless until we implement it for litep2p. #[allow(dead_code)] -async fn run_with_backpressure(size: usize, limit: usize) -where - B: BlockT + 'static, - H: ExHashT, - N: NetworkBackend, -{ - let listen_address1 = get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, _rx1, _notification_service1) = create_network_worker::(listen_address1); - let service1 = worker1.network_service().clone(); - let (worker2, rx2, _notification_service2) = - create_network_worker::(listen_address2.clone()); - let peer_id2 = worker2.network_service().local_peer_id(); - - worker1.network_service().add_known_address(peer_id2, listen_address2.into()); - - let network1_run = worker1.run(); - let network2_run = worker2.run(); - let (break_tx, break_rx) = async_channel::bounded(10); +async fn run_with_backpressure(setup: Arc, size: usize, limit: usize) { + let (break_tx, break_rx) = async_channel::bounded(1); let requests = futures::future::join_all((0..limit).into_iter().map(|_| { let (tx, rx) = futures::channel::oneshot::channel(); - service1.start_request( - peer_id2.into(), + setup.network_service1.start_request( + setup.peer_id2.into(), "/request-response/1".into(), vec![0; 8], None, @@ -227,77 +246,72 @@ where })); let network1 = tokio::spawn(async move { - tokio::pin!(requests); - tokio::pin!(network1_run); - loop { - tokio::select! { - _ = &mut network1_run => {}, - responses = &mut requests => { - for res in responses { - res.unwrap().unwrap(); - } - let _ = break_tx.send(()).await; - break; - }, - } + let responses = requests.await; + for res in responses { + res.unwrap().unwrap(); } + let _ = break_tx.send(()).await; }); let network2 = tokio::spawn(async move { - tokio::pin!(network2_run); - loop { - tokio::select! { - _ = &mut network2_run => {}, - res = rx2.recv() => { - let IncomingRequest { pending_response, .. } = res.unwrap(); - pending_response.send(OutgoingResponse { - result: Ok(vec![0; size]), - reputation_changes: vec![], - sent_feedback: None, - }).unwrap(); - }, - _ = break_rx.recv() => break, - } + for _ in 0..limit { + let IncomingRequest { pending_response, .. 
} = setup.rx2.recv().await.unwrap(); + pending_response + .send(OutgoingResponse { + result: Ok(vec![0; size]), + reputation_changes: vec![], + sent_feedback: None, + }) + .unwrap(); } + break_rx.recv().await }); let _ = tokio::join!(network1, network2); } -fn run_benchmark(c: &mut Criterion) { +fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { let rt = tokio::runtime::Runtime::new().unwrap(); let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group("request_response_benchmark"); + let mut group = c.benchmark_group(group); group.plot_config(plot_config); - for &(exponent, label) in EXPONENTS.iter() { + let libp2p_setup = setup_workers::>(&rt); + for &(exponent, limit, label) in payload.iter() { let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(REQUESTS as u64 * size as u64)); + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); group.bench_with_input( BenchmarkId::new("libp2p/serially", label), - &(size, REQUESTS), - |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::>(size, limit) - }); - }, - ); - group.bench_with_input( - BenchmarkId::new("litep2p/serially", label), - &(size, REQUESTS), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::( - size, limit, - ) - }); + b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); }, ); } + drop(libp2p_setup); + + // TODO: NetworkRequest::request should be implemented for Litep2pNetworkService + let litep2p_setup = setup_workers::(&rt); + // for &(exponent, limit, label) in payload.iter() { + // let size = 2usize.pow(exponent); + // group.throughput(Throughput::Bytes(limit as u64 * size as u64)); + // group.bench_with_input( + // BenchmarkId::new("litep2p/serially", label), + // &(size, limit), + // |b, &(size, limit)| { + // b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); + // }, + // ); + // } + drop(litep2p_setup); } -criterion_group! { - name = benches; - config = Criterion::default().sample_size(SAMPLE_SIZE); - targets = run_benchmark +fn run_benchmark_with_small_payload(c: &mut Criterion) { + run_benchmark(c, SMALL_PAYLOAD, "request_response_benchmark/small_payload"); } + +fn run_benchmark_with_large_payload(c: &mut Criterion) { + run_benchmark(c, LARGE_PAYLOAD, "request_response_benchmark/large_payload"); +} + +criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); criterion_main!(benches); From 1befeed826bbf76cc1e6bbe62451506e1f11cb60 Mon Sep 17 00:00:00 2001 From: ron Date: Thu, 5 Dec 2024 19:12:48 +0800 Subject: [PATCH 66/68] Fix with more tests --- .../outbound-router/src/v2/convert.rs | 11 +- .../src/tests/snowbridge_v2_outbound.rs | 276 ++++++++++++------ .../runtimes/testing/penpal/src/xcm_config.rs | 34 ++- 3 files changed, 229 insertions(+), 92 deletions(-) diff --git a/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs b/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs index 96eeea06bc8b..1e5a84ba0e81 100644 --- a/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs +++ b/bridges/snowbridge/primitives/outbound-router/src/v2/convert.rs @@ -130,7 +130,7 @@ where let fee_amount = self.extract_remote_fee()?; // Get ENA reserve asset from WithdrawAsset. 
- let enas = + let mut enas = match_expression!(self.peek(), Ok(WithdrawAsset(reserve_assets)), reserve_assets); if enas.is_some() { let _ = self.next(); @@ -145,6 +145,15 @@ where if pnas.is_some() { let _ = self.next(); } + + // Try to get ENA again if it is after PNA + if enas.is_none() { + enas = + match_expression!(self.peek(), Ok(WithdrawAsset(reserve_assets)), reserve_assets); + if enas.is_some() { + let _ = self.next(); + } + } // Check AliasOrigin. let origin_location = match_expression!(self.next()?, AliasOrigin(origin), origin) .ok_or(AliasOriginExpected)?; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs index 3be500e98953..c58b6b9b5659 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge_v2_outbound.rs @@ -20,14 +20,12 @@ use crate::{ use emulated_integration_tests_common::PenpalBTeleportableAssetLocation; use frame_support::traits::fungibles::Mutate; use hex_literal::hex; -use rococo_westend_system_emulated_network::{ - bridge_hub_rococo_emulated_chain::genesis::ASSETHUB_PARA_ID, - penpal_emulated_chain::{ - penpal_runtime::xcm_config::{ - CheckingAccount, LocalTeleportableToAssetHub, TELEPORTABLE_ASSET_ID, - }, - PenpalAssetOwner, +use rococo_westend_system_emulated_network::penpal_emulated_chain::{ + penpal_runtime::xcm_config::{ + derived_from_here, AccountIdOf, CheckingAccount, LocalTeleportableToAssetHub, + TELEPORTABLE_ASSET_ID, }, + PenpalAssetOwner, }; use snowbridge_core::AssetMetadata; use snowbridge_outbound_primitives::TransactInfo; @@ -39,7 +37,7 @@ use xcm_executor::traits::ConvertLocation; const INITIAL_FUND: u128 = 50_000_000_000_000; const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); const AGENT_ADDRESS: [u8; 20] = hex!("90A987B944Cb1dCcE5564e5FDeCD7a54D3de27Fe"); -const TOKEN_AMOUNT: u128 = 1_000_000_000_000; +const TOKEN_AMOUNT: u128 = 10_000_000_000_000; const REMOTE_FEE_AMOUNT_IN_WETH: u128 = 400_000_000_000; const LOCAL_FEE_AMOUNT_IN_DOT: u128 = 800_000_000_000; @@ -64,12 +62,11 @@ pub fn beneficiary() -> Location { } pub fn asset_hub() -> Location { - Location::new(1, Parachain(ASSETHUB_PARA_ID)) + Location::new(1, Parachain(AssetHubWestend::para_id().into())) } pub fn fund_on_bh() { - let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); - let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); + let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(asset_hub()); BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); } @@ -96,21 +93,9 @@ pub fn register_weth_on_ah() { assert!(::ForeignAssets::asset_exists( weth_location().try_into().unwrap(), )); - - assert_ok!(::ForeignAssets::mint_into( - weth_location().try_into().unwrap(), - &AssetHubWestendReceiver::get(), - TOKEN_AMOUNT, - )); - - assert_ok!(::ForeignAssets::mint_into( - weth_location().try_into().unwrap(), - &AssetHubWestendSender::get(), - TOKEN_AMOUNT, - )); }); } -pub fn register_relay_token() { +pub fn register_relay_token_on_bh() { BridgeHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; type RuntimeOrigin = ::RuntimeOrigin; @@ -148,16 +133,6 @@ pub fn 
register_weth_on_penpal() { true, 1, )); - assert_ok!(::ForeignAssets::mint_into( - weth_location().try_into().unwrap(), - &PenpalBReceiver::get(), - TOKEN_AMOUNT, - )); - assert_ok!(::ForeignAssets::mint_into( - weth_location().try_into().unwrap(), - &PenpalBSender::get(), - TOKEN_AMOUNT, - )); }); } @@ -194,10 +169,16 @@ pub fn register_pal_on_ah() { } pub fn fund_on_penpal() { + let sudo_account = derived_from_here::< + AccountIdOf< + rococo_westend_system_emulated_network::penpal_emulated_chain::penpal_runtime::Runtime, + >, + >(); PenpalB::fund_accounts(vec![ (PenpalBReceiver::get(), INITIAL_FUND), (PenpalBSender::get(), INITIAL_FUND), (CheckingAccount::get(), INITIAL_FUND), + (sudo_account.clone(), INITIAL_FUND), ]); PenpalB::execute_with(|| { assert_ok!(::ForeignAssets::mint_into( @@ -210,6 +191,11 @@ pub fn fund_on_penpal() { &PenpalBSender::get(), TOKEN_AMOUNT, )); + assert_ok!(::ForeignAssets::mint_into( + Location::parent(), + &sudo_account, + TOKEN_AMOUNT, + )); }); PenpalB::execute_with(|| { assert_ok!(::Assets::mint_into( @@ -222,6 +208,28 @@ pub fn fund_on_penpal() { &PenpalBSender::get(), TOKEN_AMOUNT, )); + assert_ok!(::Assets::mint_into( + TELEPORTABLE_ASSET_ID, + &sudo_account, + TOKEN_AMOUNT, + )); + }); + PenpalB::execute_with(|| { + assert_ok!(::ForeignAssets::mint_into( + weth_location().try_into().unwrap(), + &PenpalBReceiver::get(), + TOKEN_AMOUNT, + )); + assert_ok!(::ForeignAssets::mint_into( + weth_location().try_into().unwrap(), + &PenpalBSender::get(), + TOKEN_AMOUNT, + )); + assert_ok!(::ForeignAssets::mint_into( + weth_location().try_into().unwrap(), + &sudo_account, + TOKEN_AMOUNT, + )); }); } @@ -238,6 +246,9 @@ pub fn set_trust_reserve_on_penpal() { } pub fn fund_on_ah() { + AssetHubWestend::fund_accounts(vec![(AssetHubWestendSender::get(), INITIAL_FUND)]); + AssetHubWestend::fund_accounts(vec![(AssetHubWestendReceiver::get(), INITIAL_FUND)]); + let penpal_sovereign = AssetHubWestend::sovereign_account_id_of( AssetHubWestend::sibling_location_of(PenpalB::para_id()), ); @@ -248,6 +259,16 @@ pub fn fund_on_ah() { &penpal_sovereign, TOKEN_AMOUNT, )); + assert_ok!(::ForeignAssets::mint_into( + weth_location().try_into().unwrap(), + &AssetHubWestendReceiver::get(), + TOKEN_AMOUNT, + )); + assert_ok!(::ForeignAssets::mint_into( + weth_location().try_into().unwrap(), + &AssetHubWestendSender::get(), + TOKEN_AMOUNT, + )); }); let ethereum_sovereign: AccountId = @@ -260,7 +281,7 @@ pub fn fund_on_ah() { AssetHubWestend::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]); } -pub fn create_pools() { +pub fn create_pools_on_ah() { // We create a pool between WND and WETH in AssetHub to support paying for fees with WETH. let ethereum_sovereign: AccountId = EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&Location::new( @@ -272,8 +293,6 @@ pub fn create_pools() { AssetHubWestend::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]); PenpalB::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]); create_pool_with_native_on!(AssetHubWestend, weth_location(), true, ethereum_sovereign.clone()); - // We also need a pool between WND and WETH on PenpalB to support paying for fees with WETH. 
- create_pool_with_native_on!(PenpalB, weth_location(), true, ethereum_sovereign.clone()); } pub fn register_pal_on_bh() { @@ -297,12 +316,84 @@ pub fn register_pal_on_bh() { }); } +fn register_ah_user_agent_on_ethereum() { + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + let location = Location::new( + 1, + [ + Parachain(AssetHubWestend::para_id().into()), + AccountId32 { network: None, id: AssetHubWestendSender::get().into() }, + ], + ); + + assert_ok!( + ::EthereumSystem::force_create_agent( + RuntimeOrigin::root(), + bx!(location.into()), + ) + ); + assert_expected_events!( + BridgeHubWestend, + vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::CreateAgent{ .. }) => {},] + ); + }); +} + +pub fn register_penpal_agent_on_ethereum() { + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + let location = Location::new(1, [Parachain(PenpalB::para_id().into())]); + + assert_ok!( + ::EthereumSystem::force_create_agent( + RuntimeOrigin::root(), + bx!(location.into()), + ) + ); + assert_expected_events!( + BridgeHubWestend, + vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::CreateAgent{ .. }) => {},] + ); + }); + + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeOrigin = ::RuntimeOrigin; + + let location = Location::new( + 1, + [ + Parachain(PenpalB::para_id().into()), + AccountId32 { network: None, id: PenpalBSender::get().into() }, + ], + ); + + assert_ok!( + ::EthereumSystem::force_create_agent( + RuntimeOrigin::root(), + bx!(location.into()), + ) + ); + assert_expected_events!( + BridgeHubWestend, + vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::CreateAgent{ .. }) => {},] + ); + }); +} + #[test] fn send_weth_from_asset_hub_to_ethereum() { fund_on_bh(); register_weth_on_ah(); + fund_on_ah(); + AssetHubWestend::execute_with(|| { type RuntimeOrigin = ::RuntimeOrigin; @@ -361,7 +452,7 @@ fn send_weth_from_asset_hub_to_ethereum() { } #[test] -fn transfer_relay_token() { +fn transfer_relay_token_from_ah() { let ethereum_sovereign: AccountId = EthereumLocationsConverterFor::<[u8; 32]>::convert_location(ðereum()) .unwrap() @@ -369,9 +460,11 @@ fn transfer_relay_token() { fund_on_bh(); + register_relay_token_on_bh(); + register_weth_on_ah(); - register_relay_token(); + fund_on_ah(); // Send token to Ethereum AssetHubWestend::execute_with(|| { @@ -445,9 +538,11 @@ fn transfer_relay_token() { fn send_weth_and_dot_from_asset_hub_to_ethereum() { fund_on_bh(); + register_relay_token_on_bh(); + register_weth_on_ah(); - register_relay_token(); + fund_on_ah(); AssetHubWestend::execute_with(|| { type RuntimeOrigin = ::RuntimeOrigin; @@ -506,33 +601,6 @@ fn send_weth_and_dot_from_asset_hub_to_ethereum() { }); } -#[test] -fn create_agent() { - BridgeHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - type RuntimeOrigin = ::RuntimeOrigin; - - let location = Location::new( - 1, - [ - Parachain(AssetHubWestend::para_id().into()), - AccountId32 { network: None, id: AssetHubWestendSender::get().into() }, - ], - ); - - assert_ok!( - ::EthereumSystem::force_create_agent( - RuntimeOrigin::root(), - bx!(location.into()), - ) - ); - assert_expected_events!( - BridgeHubWestend, - vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::CreateAgent{ .. 
}) => {},] - ); - }); -} - #[test] fn transact_with_agent() { let weth_asset_location: Location = @@ -540,9 +608,11 @@ fn transact_with_agent() { fund_on_bh(); + register_ah_user_agent_on_ethereum(); + register_weth_on_ah(); - BridgeHubWestend::execute_with(|| {}); + fund_on_ah(); AssetHubWestend::execute_with(|| { type RuntimeOrigin = ::RuntimeOrigin; @@ -598,7 +668,7 @@ fn transact_with_agent() { ])); ::PolkadotXcm::execute( - RuntimeOrigin::signed(AssetHubWestendReceiver::get()), + RuntimeOrigin::signed(AssetHubWestendSender::get()), bx!(xcms), Weight::from(EXECUTION_WEIGHT), ) @@ -615,23 +685,26 @@ fn transact_with_agent() { }); } -#[test] -fn send_penpal_native_asset_to_ethereum() { +fn send_message_from_penpal_to_ethereum(sudo: bool) { + // bh fund_on_bh(); + register_penpal_agent_on_ethereum(); + // ah register_weth_on_ah(); register_pal_on_ah(); register_pal_on_bh(); fund_on_ah(); - fund_on_penpal(); - register_weth_on_penpal(); + create_pools_on_ah(); + // penpal set_trust_reserve_on_penpal(); - create_pools(); + register_weth_on_penpal(); + fund_on_penpal(); PenpalB::execute_with(|| { type RuntimeOrigin = ::RuntimeOrigin; let local_fee_asset_on_penpal = - Asset { id: AssetId(Location::parent()), fun: Fungible(TOKEN_AMOUNT) }; + Asset { id: AssetId(Location::parent()), fun: Fungible(LOCAL_FEE_AMOUNT_IN_DOT) }; let remote_fee_asset_on_ah = Asset { id: AssetId(weth_location()), fun: Fungible(REMOTE_FEE_AMOUNT_IN_WETH) }; @@ -639,9 +712,11 @@ fn send_penpal_native_asset_to_ethereum() { let remote_fee_asset_on_ethereum = Asset { id: AssetId(weth_location()), fun: Fungible(REMOTE_FEE_AMOUNT_IN_WETH) }; - let transfer_asset = + let pna = Asset { id: AssetId(LocalTeleportableToAssetHub::get()), fun: Fungible(TOKEN_AMOUNT) }; + let ena = Asset { id: AssetId(weth_location()), fun: Fungible(TOKEN_AMOUNT / 2) }; + let transfer_asset_reanchor_on_ah = Asset { id: AssetId(PenpalBTeleportableAssetLocation::get()), fun: Fungible(TOKEN_AMOUNT), @@ -651,13 +726,14 @@ fn send_penpal_native_asset_to_ethereum() { local_fee_asset_on_penpal.clone(), remote_fee_asset_on_ah.clone(), remote_fee_asset_on_ethereum.clone(), - transfer_asset.clone(), + pna.clone(), + ena.clone(), ]; let transact_info = TransactInfo { target: Default::default(), data: vec![], gas_limit: 40000, value: 0 }; - let xcms = VersionedXcm::from(Xcm(vec![ + let xcm = VersionedXcm::from(Xcm(vec![ WithdrawAsset(assets.clone().into()), PayFees { asset: local_fee_asset_on_penpal.clone() }, InitiateTransfer { @@ -670,10 +746,11 @@ fn send_penpal_native_asset_to_ethereum() { AssetTransferFilter::ReserveWithdraw(Definite( remote_fee_asset_on_ethereum.clone().into(), )), + AssetTransferFilter::ReserveWithdraw(Definite(ena.clone().into())), // Should use Teleport here because: // a. Penpal is configured to allow teleport specific asset to AH // b. AH is configured to trust asset teleport from sibling chain - AssetTransferFilter::Teleport(Definite(transfer_asset.clone().into())), + AssetTransferFilter::Teleport(Definite(pna.clone().into())), ], remote_xcm: Xcm(vec![InitiateTransfer { destination: ethereum(), @@ -681,11 +758,14 @@ fn send_penpal_native_asset_to_ethereum() { remote_fee_asset_on_ethereum.clone().into(), ))), preserve_origin: true, - // should use ReserveDeposit because Ethereum does not trust asset from penpal. 
- // transfer_asset should be reachored first on AH - assets: vec![AssetTransferFilter::ReserveDeposit(Definite( - transfer_asset_reanchor_on_ah.clone().into(), - ))], + assets: vec![ + // should use ReserveDeposit because Ethereum does not trust asset from + // penpal. transfer_asset should be reachored first on AH + AssetTransferFilter::ReserveDeposit(Definite( + transfer_asset_reanchor_on_ah.clone().into(), + )), + AssetTransferFilter::ReserveWithdraw(Definite(ena.clone().into())), + ], remote_xcm: Xcm(vec![ DepositAsset { assets: Wild(All), beneficiary: beneficiary() }, Transact { @@ -697,11 +777,19 @@ fn send_penpal_native_asset_to_ethereum() { }, ])); - assert_ok!(::PolkadotXcm::execute( - RuntimeOrigin::signed(PenpalBSender::get()), - bx!(xcms), - Weight::from(EXECUTION_WEIGHT), - )); + if sudo { + assert_ok!(::PolkadotXcm::execute( + RuntimeOrigin::root(), + bx!(xcm.clone()), + Weight::from(EXECUTION_WEIGHT), + )); + } else { + assert_ok!(::PolkadotXcm::execute( + RuntimeOrigin::signed(PenpalBSender::get()), + bx!(xcm.clone()), + Weight::from(EXECUTION_WEIGHT), + )); + } }); AssetHubWestend::execute_with(|| { @@ -724,3 +812,13 @@ fn send_penpal_native_asset_to_ethereum() { ); }); } + +#[test] +fn send_message_from_penpal_to_ethereum_with_sudo() { + send_message_from_penpal_to_ethereum(true) +} + +#[test] +fn send_message_from_penpal_to_ethereum_with_user_origin() { + send_message_from_penpal_to_ethereum(false) +} diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 10481d5d2ebc..cb83994c0161 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -30,6 +30,7 @@ use super::{ }; use crate::{BaseDeliveryFee, FeeAssetId, TransactionByteFee}; use assets_common::TrustBackedAssetsAsLocation; +use codec::{Decode, Encode}; use core::marker::PhantomData; use frame_support::{ parameter_types, @@ -45,7 +46,9 @@ use parachains_common::{xcm_config::AssetFeeAsExistentialDepositMultiplier, TREA use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::{impls::ToAuthor, xcm_sender::ExponentialPrice}; use snowbridge_router_primitives::inbound::EthereumLocationsConverterFor; -use sp_runtime::traits::{AccountIdConversion, ConvertInto, Identity, TryConvertInto}; +use sp_runtime::traits::{ + AccountIdConversion, ConvertInto, Identity, TrailingZeroInput, TryConvertInto, +}; use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ AccountId32Aliases, AliasOriginRootUsingFilter, AllowHrmpNotificationsFromRelayChain, @@ -59,7 +62,10 @@ use xcm_builder::{ SovereignSignedViaLocation, StartsWith, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, }; -use xcm_executor::{traits::JustTry, XcmExecutor}; +use xcm_executor::{ + traits::{ConvertLocation, JustTry}, + XcmExecutor, +}; parameter_types! { pub const RelayLocation: Location = Location::parent(); @@ -82,10 +88,34 @@ parameter_types! { PalletInstance(TrustBackedAssetsPalletIndex::get()).into(); } +pub fn derived_from_here() -> AccountId +where + AccountId: Decode + Eq + Clone, +{ + b"Here" + .using_encoded(|b| AccountId::decode(&mut TrailingZeroInput::new(b))) + .expect("infinite length input; no invalid inputs for type; qed") +} + +/// A [`Location`] consisting of a single `Here` [`Junction`] will be converted to the +/// here `AccountId`. 
+pub struct HereIsPreset(PhantomData); +impl ConvertLocation for HereIsPreset { + fn convert_location(location: &Location) -> Option { + if location.contains_parents_only(0) { + Some(derived_from_here::()) + } else { + None + } + } +} + /// Type for specifying how a `Location` can be converted into an `AccountId`. This is used /// when determining ownership of accounts for asset transacting and when attempting to use XCM /// `Transact` in order to determine the dispatch Origin. pub type LocationToAccountId = ( + // Here converts to `AccountId`. + HereIsPreset, // The parent (Relay-chain) origin converts to the parent `AccountId`. ParentIsPreset, // Sibling parachain origins convert to AccountId via the `ParaId::into`. From 27403eb01a67e324314e17b63f2783eabbff733a Mon Sep 17 00:00:00 2001 From: ron Date: Thu, 5 Dec 2024 21:11:53 +0800 Subject: [PATCH 67/68] Add BridgeHubDualMessageRouter --- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 1 - .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../runtimes/bridge-hubs/common/src/lib.rs | 4 ++- .../bridge-hubs/common/src/message_queue.rs | 33 +++++++++++++++++-- 4 files changed, 35 insertions(+), 5 deletions(-) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index a090d1e9799c..1809d78cf0b6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -419,7 +419,6 @@ impl pallet_message_queue::Config for Runtime { RuntimeCall, >, EthereumOutboundQueue, - EthereumOutboundQueue, >; type Size = u32; // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index bf91526ab079..1d837efa7c13 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -392,7 +392,7 @@ impl pallet_message_queue::Config for Runtime { type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor; #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = bridge_hub_common::BridgeHubMessageRouter< + type MessageProcessor = bridge_hub_common::BridgeHubDualMessageRouter< xcm_builder::ProcessXcmMessage< AggregateMessageOrigin, xcm_executor::XcmExecutor, diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/common/src/lib.rs index b806b8cdb22d..13585ddf0840 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/common/src/lib.rs @@ -19,4 +19,6 @@ pub mod message_queue; pub mod xcm_version; pub use digest_item::CustomDigestItem; -pub use message_queue::{AggregateMessageOrigin, BridgeHubMessageRouter}; +pub use message_queue::{ + AggregateMessageOrigin, BridgeHubDualMessageRouter, BridgeHubMessageRouter, +}; diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs index cdc4c741d863..1e6404fcd008 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs @@ -83,8 +83,37 @@ impl From for AggregateMessageOrigin { } } +pub struct BridgeHubMessageRouter( + 
PhantomData<(XcmpProcessor, SnowbridgeProcessor)>, +) +where + XcmpProcessor: ProcessMessage, + SnowbridgeProcessor: ProcessMessage; +impl ProcessMessage + for BridgeHubMessageRouter +where + XcmpProcessor: ProcessMessage, + SnowbridgeProcessor: ProcessMessage, +{ + type Origin = AggregateMessageOrigin; + fn process_message( + message: &[u8], + origin: Self::Origin, + meter: &mut WeightMeter, + id: &mut [u8; 32], + ) -> Result { + use AggregateMessageOrigin::*; + match origin { + Here | Parent | Sibling(_) => + XcmpProcessor::process_message(message, origin, meter, id), + Snowbridge(_) => SnowbridgeProcessor::process_message(message, origin, meter, id), + SnowbridgeV2(_) => Err(ProcessMessageError::Unsupported), + } + } +} + /// Routes messages to either the XCMP or Snowbridge processor. -pub struct BridgeHubMessageRouter( +pub struct BridgeHubDualMessageRouter( PhantomData<(XcmpProcessor, SnowbridgeProcessor, SnowbridgeProcessorV2)>, ) where @@ -92,7 +121,7 @@ where SnowbridgeProcessor: ProcessMessage; impl ProcessMessage - for BridgeHubMessageRouter + for BridgeHubDualMessageRouter where XcmpProcessor: ProcessMessage, SnowbridgeProcessor: ProcessMessage, From a0fff215c46a53e2d8015e8f08e65ae9d744c122 Mon Sep 17 00:00:00 2001 From: ron Date: Thu, 5 Dec 2024 21:42:00 +0800 Subject: [PATCH 68/68] PR doc --- prdoc/pr_6706.prdoc | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 prdoc/pr_6706.prdoc diff --git a/prdoc/pr_6706.prdoc b/prdoc/pr_6706.prdoc new file mode 100644 index 000000000000..d04a2dceeea4 --- /dev/null +++ b/prdoc/pr_6706.prdoc @@ -0,0 +1,26 @@ +title: 'Snowbridge Unordered Message Delivery - Outbound Queue' +doc: +- audience: Node Dev + description: |- + New pallets for unordered message delivery for Snowbridge, specifically the Outbound Queue part. No breaking changes + are made in this PR, only new functionality added. + +crates: +- name: snowbridge-pallet-outbound-queue-v2 + bump: minor +- name: snowbridge-outbound-queue-runtime-api-v2 + bump: minor +- name: snowbridge-core + bump: major +- name: snowbridge-outbound-primitives + bump: major +- name: snowbridge-router-primitives + bump: major +- name: snowbridge-outbound-router-primitives + bump: major +- name: bridge-hub-westend-integration-tests + bump: major +- name: bridge-hub-westend-runtime + bump: major +- name: bridge-hub-rococo-runtime + bump: minor \ No newline at end of file
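
A note on how the routing in the final two patches fits together: `pallet-message-queue` hands every queued message to a single `ProcessMessage` implementation, and the `BridgeHubDualMessageRouter` introduced above fans that call out by the message's `AggregateMessageOrigin` — XCMP origins (`Here`, `Parent`, `Sibling(_)`) go to the XCM processor, `Snowbridge(_)` origins to the existing V1 outbound queue, and (by implication of the third generic parameter; its match arm is not shown in the hunk) `SnowbridgeV2(_)` origins to the new V2 outbound queue. The sketch below is a minimal standalone model of that dispatch shape only: the trait, origin payloads and processor names are simplified stand-ins chosen for illustration, not the `frame_support` or runtime types.

```rust
// Standalone sketch of origin-based message routing (illustrative, not runtime code).

#[derive(Clone, Copy, Debug)]
enum Origin {
    Here,
    Parent,
    Sibling(u32),
    // Stand-in payloads; the real variants carry channel identifiers.
    Snowbridge([u8; 32]),
    SnowbridgeV2([u8; 32]),
}

// Simplified stand-in for `frame_support::traits::ProcessMessage`.
trait Processor {
    fn process(message: &[u8], origin: Origin) -> Result<(), &'static str>;
}

struct XcmpProcessor;
struct SnowbridgeV1Processor;
struct SnowbridgeV2Processor;

impl Processor for XcmpProcessor {
    fn process(message: &[u8], origin: Origin) -> Result<(), &'static str> {
        println!("xcmp: {} bytes from {:?}", message.len(), origin);
        Ok(())
    }
}

impl Processor for SnowbridgeV1Processor {
    fn process(message: &[u8], origin: Origin) -> Result<(), &'static str> {
        println!("snowbridge v1: {} bytes from {:?}", message.len(), origin);
        Ok(())
    }
}

impl Processor for SnowbridgeV2Processor {
    fn process(message: &[u8], origin: Origin) -> Result<(), &'static str> {
        println!("snowbridge v2: {} bytes from {:?}", message.len(), origin);
        Ok(())
    }
}

/// Route a message to one of three processors based on its origin, mirroring the
/// dispatch shape of `BridgeHubDualMessageRouter::process_message`.
fn route<X: Processor, V1: Processor, V2: Processor>(
    message: &[u8],
    origin: Origin,
) -> Result<(), &'static str> {
    match origin {
        Origin::Here | Origin::Parent | Origin::Sibling(_) => X::process(message, origin),
        Origin::Snowbridge(_) => V1::process(message, origin),
        Origin::SnowbridgeV2(_) => V2::process(message, origin),
    }
}

fn main() {
    route::<XcmpProcessor, SnowbridgeV1Processor, SnowbridgeV2Processor>(
        b"hello",
        Origin::SnowbridgeV2([0u8; 32]),
    )
    .unwrap();
}
```

Seen this way, the queue pallet stays unaware of Snowbridge versioning: supporting the V2 queue only touches the router's generic parameters and the runtime's `MessageProcessor` wiring, which is exactly what the `bridge-hub-westend` `lib.rs` hunk above changes.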