From 16d232019991482c5b8a2a31033fe2e1712c8110 Mon Sep 17 00:00:00 2001 From: wd30130 Date: Thu, 7 Mar 2024 21:12:46 +0800 Subject: [PATCH] feat(*): upgrade to Polkadot SDK V1.7.0 --- Cargo.toml | 273 +- client/consensus/aura/Cargo.toml | 1 + client/consensus/aura/src/collator.rs | 16 +- node/Cargo.toml | 7 +- node/src/chain_spec.rs | 345 +- node/src/cli.rs | 7 +- node/src/command.rs | 4 +- node/src/on_demand_order.rs | 9 +- node/src/service.rs | 10 +- pallets/assets-bridge/src/mock.rs | 9 +- pallets/assurance/src/mock.rs | 7 +- pallets/evm-utils/src/mock.rs | 8 +- .../precompile/substrate-utils/src/mock.rs | 8 +- pallets/liquidation/src/lib.rs | 2 +- pallets/liquidation/src/mock.rs | 5 +- pallets/order/src/mock.rs | 5 +- pallets/pallet-xcm/Cargo.toml | 78 - pallets/pallet-xcm/src/asset_transfer.rs | 106 - pallets/pallet-xcm/src/benchmarking.rs | 304 -- pallets/pallet-xcm/src/controller.rs | 191 -- pallets/pallet-xcm/src/fee_handling.rs | 121 - pallets/pallet-xcm/src/lib.rs | 2761 ----------------- pallets/pallet-xcm/src/migration.rs | 75 - pallets/pallet-xcm/src/migrations.rs | 349 --- pallets/pallet-xcm/src/mock.rs | 644 ---- .../pallet-xcm/src/tests/assets_transfer.rs | 1381 --------- pallets/pallet-xcm/src/tests/mod.rs | 951 ------ pallets/pot/rpc/src/lib.rs | 7 +- pallets/pot/runtime-api/src/lib.rs | 1 - pallets/pot/src/mock.rs | 4 +- pallets/preimage/Cargo.toml | 56 - pallets/preimage/src/benchmarking.rs | 248 -- pallets/preimage/src/lib.rs | 591 ---- pallets/preimage/src/migration.rs | 273 -- pallets/preimage/src/mock.rs | 197 -- pallets/preimage/src/preimages.rs | 341 -- pallets/preimage/src/storage.rs | 306 -- pallets/preimage/src/tests.rs | 525 ---- pallets/preimage/src/weights.rs | 470 --- primitives/system/src/lib.rs | 1 - runtime/Cargo.toml | 15 +- runtime/src/contracts_config.rs | 1 + runtime/src/governance/fellowship.rs | 8 + runtime/src/lib.rs | 158 +- .../src/weights/pallet_ranked_collective.rs | 91 +- runtime/src/xcm_config.rs | 28 +- runtime/src/xcms/matches_token_ex.rs | 88 +- runtime/src/xcms/xcm_weight.rs | 80 +- 48 files changed, 562 insertions(+), 10604 deletions(-) delete mode 100644 pallets/pallet-xcm/Cargo.toml delete mode 100644 pallets/pallet-xcm/src/asset_transfer.rs delete mode 100644 pallets/pallet-xcm/src/benchmarking.rs delete mode 100644 pallets/pallet-xcm/src/controller.rs delete mode 100644 pallets/pallet-xcm/src/fee_handling.rs delete mode 100644 pallets/pallet-xcm/src/lib.rs delete mode 100644 pallets/pallet-xcm/src/migration.rs delete mode 100644 pallets/pallet-xcm/src/migrations.rs delete mode 100644 pallets/pallet-xcm/src/mock.rs delete mode 100644 pallets/pallet-xcm/src/tests/assets_transfer.rs delete mode 100644 pallets/pallet-xcm/src/tests/mod.rs delete mode 100644 pallets/preimage/Cargo.toml delete mode 100644 pallets/preimage/src/benchmarking.rs delete mode 100644 pallets/preimage/src/lib.rs delete mode 100644 pallets/preimage/src/migration.rs delete mode 100644 pallets/preimage/src/mock.rs delete mode 100644 pallets/preimage/src/preimages.rs delete mode 100644 pallets/preimage/src/storage.rs delete mode 100644 pallets/preimage/src/tests.rs delete mode 100644 pallets/preimage/src/weights.rs diff --git a/Cargo.toml b/Cargo.toml index aba57ac..e2a769c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,9 +25,7 @@ members = [ "pallets/pot/rpc", "pallets/pot", "pallets/assurance", - "pallets/pallet-xcm", "pallets/liquidation", - "pallets/preimage", ] resolver = "2" @@ -51,7 +49,7 @@ futures = "0.3.30" hex = { version = "0.4.3", default-features = false 
} hex-literal = "0.4.1" impl-trait-for-tuples = "0.2.1" -jsonrpsee ="0.16.2" +jsonrpsee ="0.20.3" log = { version = "0.4.20", default-features = false } parity-scale-codec = { version = "3.6.4", default-features = false } scale-codec = { package = "parity-scale-codec", version = "3.6.4", default-features = false, features = ["derive"] } @@ -66,158 +64,159 @@ tracing = "0.1.37" url = "2.4.0" # Substrate Client -sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-consensus-babe = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-consensus-manual-seal = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-consensus-slots = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-executor = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-network = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-network-sync = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sc-sysinfo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-tracing = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-consensus-babe = { git = "https://github.com/paritytech/polkadot-sdk", branch = 
"release-polkadot-v1.7.0" } +sc-consensus-manual-seal = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-consensus-slots = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-executor = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-network = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-network-sync = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sc-sysinfo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-tracing = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } # Substrate Primitive -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-application-crypto = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-arithmetic = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-keystore = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-state-machine = { git = 
"https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-trie = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-version = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-application-crypto = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sp-arithmetic = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-keystore = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-state-machine = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-trie = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +sp-version = { git = "https://github.com/paritytech/polkadot-sdk", branch = 
"release-polkadot-v1.7.0", default-features = false } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } # Substrate FRAME -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -frame-executive = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-assets = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -pallet-collective = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-contracts = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-contracts-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-conviction-voting = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-ranked-collective = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-referenda = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-society ={ git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} 
-pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -pallet-whitelist = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} +frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +frame-executive = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-assets = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-contracts = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-conviction-voting = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-message-queue = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-ranked-collective = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-referenda = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-scheduler = { git = 
"https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-society ={ git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +pallet-whitelist = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} # Substrate Utility -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } # XCM -xcm = { package = "staging-xcm", git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -xcm-builder = { package = "staging-xcm-builder", git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -xcm-executor = { package = "staging-xcm-executor", git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} +xcm = { package = "staging-xcm", git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +xcm-builder = { package = "staging-xcm-builder", git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +xcm-executor = { package = "staging-xcm-executor", git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} # Cumulus Client -cumulus-client-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-client-collator = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } 
-cumulus-client-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-client-consensus-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-client-consensus-proposer = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-client-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0"} +cumulus-client-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +cumulus-client-collator = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +cumulus-client-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +cumulus-client-consensus-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +cumulus-client-consensus-proposer = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +cumulus-client-parachain-inherent = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +cumulus-client-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0"} # Cumulus Primitive -cumulus-primitives-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-primitives-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-primitives-timestamp ={ git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -cumulus-primitives-utility = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +cumulus-primitives-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +cumulus-primitives-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +cumulus-primitives-timestamp ={ git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +cumulus-primitives-utility = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } # Cumulus Pallet -cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -cumulus-pallet-session-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false} -cumulus-pallet-xcm = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -cumulus-pallet-xcmp-queue = { git = 
"https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -cumulus-relay-chain-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -pallet-collator-selection = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -parachain-info = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -parachains-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +cumulus-pallet-session-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false} +cumulus-pallet-xcm = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +cumulus-relay-chain-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +pallet-collator-selection = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +parachain-info = { package = "staging-parachain-info", git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +parachains-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } # Polkadot -pallet-xcm = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -polkadot-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -polkadot-parachain-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -polkadot-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -polkadot-node-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -polkadot-node-subsystem = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -polkadot-overseer = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -primitives = { package = "polkadot-primitives", git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" , default-features = false } -runtime-parachains = { package = "polkadot-runtime-parachains", git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } -polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } 
+pallet-xcm = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +polkadot-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +polkadot-parachain-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +polkadot-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +polkadot-node-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +polkadot-node-subsystem = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +polkadot-overseer = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" } +polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +primitives = { package = "polkadot-primitives", git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0" , default-features = false } +runtime-parachains = { package = "polkadot-runtime-parachains", git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } +polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.7.0", default-features = false } # Frontier Primitive -fp-account = { version = "1.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -fp-evm = { version = "3.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -fp-dynamic-fee = { version = "1.0.0", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -fp-rpc = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -fp-self-contained = { version = "1.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false, features = ["serde", "try-runtime"] } +fp-account = { version = "1.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +fp-evm = { version = "3.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +fp-dynamic-fee = { version = "1.0.0", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +fp-rpc = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +fp-self-contained = { version = "1.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false, features = ["serde", "try-runtime"] } # Frontier Client -fc-api = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -fc-cli = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -fc-consensus = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -fc-db = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -fc-mapping-sync = { git = "https://github.com/paritytech/frontier", branch = 
"polkadot-v1.1.0", default-features = false } -fc-rpc = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -fc-rpc-core = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -fc-storage = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } +fc-api = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +fc-cli = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +fc-consensus = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +fc-db = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +fc-mapping-sync = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +fc-rpc = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +fc-rpc-core = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +fc-storage = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } #Frontier FRAME -pallet-base-fee = { version = "1.0.0", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -pallet-dynamic-fee = { version = "4.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -pallet-ethereum = { version = "4.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -pallet-evm = { version = "6.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -pallet-evm-chain-id = { version = "1.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -pallet-evm-precompile-modexp = { version = "2.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -pallet-evm-precompile-sha3fips = { version = "2.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -pallet-evm-precompile-simple = { version = "2.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -pallet-hotfix-sufficients = { version = "1.0.0", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } -precompile-utils = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.1.0", default-features = false } +pallet-base-fee = { version = "1.0.0", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +pallet-dynamic-fee = { version = "4.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +pallet-ethereum = { version = "4.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +pallet-evm = { version = "6.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +pallet-evm-chain-id = { version = "1.0.0-dev", git = "https://github.com/paritytech/frontier", branch = 
"polkadot-v1.7.0", default-features = false } +pallet-evm-precompile-modexp = { version = "2.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +pallet-evm-precompile-sha3fips = { version = "2.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +pallet-evm-precompile-simple = { version = "2.0.0-dev", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +pallet-hotfix-sufficients = { version = "1.0.0", git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } +precompile-utils = { git = "https://github.com/paritytech/frontier", branch = "polkadot-v1.7.0", default-features = false } diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 204c078..ddc6edb 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -42,6 +42,7 @@ substrate-prometheus-endpoint = { workspace = true } # Cumulus cumulus-client-consensus-common = { workspace = true } +cumulus-client-parachain-inherent = { workspace = true } cumulus-relay-chain-interface = { workspace = true } cumulus-client-consensus-proposer = { workspace = true } cumulus-primitives-aura = { workspace = true } diff --git a/client/consensus/aura/src/collator.rs b/client/consensus/aura/src/collator.rs index 1b346a7..29ce01a 100644 --- a/client/consensus/aura/src/collator.rs +++ b/client/consensus/aura/src/collator.rs @@ -23,7 +23,8 @@ use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_core::{ relay_chain::Hash as PHash, DigestItem, ParachainBlockData, PersistedValidationData, }; -use cumulus_primitives_parachain_inherent::ParachainInherentData; + +use cumulus_client_parachain_inherent::{ParachainInherentData, ParachainInherentDataProvider}; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::{Collation, MaybeCompressedPoV}; @@ -120,7 +121,7 @@ where timestamp: impl Into>, order_record: Arc>>, ) -> Result<(ParachainInherentData, InherentData), Box> { - let paras_inherent_data = ParachainInherentData::create_at( + let paras_inherent_data = ParachainInherentDataProvider::create_at( relay_parent, &self.relay_client, validation_data, @@ -198,7 +199,7 @@ where let mut digest = additional_pre_digest.into().unwrap_or_default(); digest.push(slot_claim.pre_digest.clone()); - let proposal = self + let maybe_proposal = self .proposer .propose( &parent_header, @@ -211,6 +212,15 @@ where .await .map_err(|e| Box::new(e) as Box)?; + let proposal = match maybe_proposal { + None => { + return Err( + Box::::from("None proposal") as Box + ) + }, //Ok(None), + Some(p) => p, + }; + let sealed_importable = seal::<_, P>( proposal.block, proposal.storage_changes, diff --git a/node/Cargo.toml b/node/Cargo.toml index 8ca51f8..eb21402 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parachain-magnet-node" -version = "0.5.1" +version = "0.5.6" authors = ["Magnet"] description = "A scalable evm smart contract platform node, utilizing DOT as the gas fee." 
license = "Apache License 2.0" @@ -155,7 +155,4 @@ try-runtime = [ "polkadot-cli/try-runtime", "sp-runtime/try-runtime", ] -network-protocol-staging = [ - "cumulus-client-service/network-protocol-staging", - "polkadot-cli/network-protocol-staging", -] + diff --git a/node/src/chain_spec.rs b/node/src/chain_spec.rs index 6deea15..6d220cd 100644 --- a/node/src/chain_spec.rs +++ b/node/src/chain_spec.rs @@ -70,53 +70,47 @@ pub fn development_config() -> ChainSpec { properties.insert("tokenDecimals".into(), 18.into()); properties.insert("ss58Format".into(), 42.into()); - ChainSpec::from_genesis( - // Name - "Development", - // ID - "dev", - ChainType::Development, - move || { - testnet_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - get_account_id_from_seed::("Alice"), - 1000.into(), - ) - }, - Vec::new(), - None, - None, - None, - None, + ChainSpec::builder( + parachain_magnet_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { - relay_chain: "rococo-local".into(), // You MUST set this to the correct network! + relay_chain: "rococo-local".into(), + // You MUST set this to the correct network! para_id: 1000, }, ) + .with_name("Development") + .with_id("dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(testnet_genesis( + // initial collators. + vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + get_account_id_from_seed::("Alice"), + 1000.into(), + )) + .build() } pub fn local_testnet_config() -> ChainSpec { @@ -126,59 +120,50 @@ pub fn local_testnet_config() -> ChainSpec { properties.insert("tokenDecimals".into(), 18.into()); properties.insert("ss58Format".into(), 42.into()); - ChainSpec::from_genesis( - // Name - "Local Testnet", - // ID - "local_testnet", - ChainType::Local, - move || { - testnet_genesis( - // initial collators. 
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - get_account_id_from_seed::("Alice"), - 1000.into(), - ) - }, - // Bootnodes - Vec::new(), - // Telemetry - None, - // Protocol ID - Some("magnet-local"), - // Fork ID - None, - // Properties - Some(properties), - // Extensions + #[allow(deprecated)] + ChainSpec::builder( + parachain_magnet_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { - relay_chain: "rococo-local".into(), // You MUST set this to the correct network! + relay_chain: "rococo-local".into(), + // You MUST set this to the correct network! para_id: 1000, }, ) + .with_name("Local Testnet") + .with_id("local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(testnet_genesis( + // initial collators. + vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + get_account_id_from_seed::("Alice"), + 1000.into(), + )) + .with_protocol_id("magnet-local") + .with_properties(properties) + .build() } fn testnet_genesis( @@ -186,58 +171,94 @@ fn testnet_genesis( endowed_accounts: Vec, root: AccountId, id: ParaId, -) -> parachain_magnet_runtime::RuntimeGenesisConfig { +) -> serde_json::Value { let alice = get_from_seed::("Alice"); let bob = get_from_seed::("Bob"); - parachain_magnet_runtime::RuntimeGenesisConfig { - system: parachain_magnet_runtime::SystemConfig { - code: parachain_magnet_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - ..Default::default() - }, - balances: parachain_magnet_runtime::BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 81)).collect(), + let evm_accounts = { + let mut map = BTreeMap::new(); + map.insert( + // H160 address of Alice dev account + // Derived from SS58 (42 prefix) address + // SS58: 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY + // hex: 0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d + // Using the full hex key, truncating to the first 20 bytes (the first 40 hex + // chars) + H160::from_str("d43593c715fdd31c61141abd04a99fd6822c8558") + .expect("internal H160 is valid; qed"), + fp_evm::GenesisAccount { + balance: U256::from_str("0xffffffffffffffffffffffffffffffff") + .expect("internal U256 is valid; qed"), + code: 
Default::default(), + nonce: Default::default(), + storage: Default::default(), + }, + ); + map.insert( + // H160 address of CI test runner account + H160::from_str("6be02d1d3665660d22ff9624b7be0551ee1ac91b") + .expect("internal H160 is valid; qed"), + fp_evm::GenesisAccount { + balance: U256::from_str("0xffffffffffffffffffffffffffffffff") + .expect("internal U256 is valid; qed"), + code: Default::default(), + nonce: Default::default(), + storage: Default::default(), + }, + ); + map.insert( + // H160 address for benchmark usage + H160::from_str("1000000000000000000000000000000000000001") + .expect("internal H160 is valid; qed"), + fp_evm::GenesisAccount { + nonce: U256::from(1), + balance: U256::from(1_000_000_000_000_000_000_000_000u128), + storage: Default::default(), + code: vec![0x00], + }, + ); + map + }; + + serde_json::json!({ + "balances": { + "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u128 << 81)).collect::>(), }, - assets: parachain_magnet_runtime::AssetsConfig { - assets: vec![ - (1, alice.into(), true, 1_000_000_0000_0000_0000), - (2, bob.into(), true, 2_000_000_0000_0000_0000), + "assets": { + "assets": vec![ + (1, alice, true, 1_000_000_0000_0000_0000u128), + (2, bob, true, 2_000_000_0000_0000_0000u128), ], // Genesis metadata: Vec<(id, name, symbol, decimals)> - metadata: vec![ - (1, "asset-1".into(), "ALT1".into(), 18), - (2, "asset-2".into(), "ALT2".into(), 18), + "metadata": vec![ + (1, "asset-1", "ALT1", 18), + (2, "asset-2", "ALT2", 18), ], // Genesis accounts: Vec<(id, account_id, balance)> - accounts: vec![ - (1, alice.into(), 500_000_000_0000_0000_0000), - (2, bob.into(), 500_000_000_0000_0000_0000), + "accounts": vec![ + (1, alice, 500_000_000_0000_0000_0000u128), + (2, bob, 500_000_000_0000_0000_0000u128), ], }, - assets_bridge: parachain_magnet_runtime::AssetsBridgeConfig { - admin_key: Some(root.clone()), + "assets_bridge": { + "admin_key": Some(root.clone()), }, - council: parachain_magnet_runtime::CouncilConfig { - phantom: PhantomData, - members: endowed_accounts + "council": { + "members": endowed_accounts .iter() .enumerate() .filter_map(|(idx, acc)| if idx % 2 == 0 { Some(acc.clone()) } else { None }) .collect::>(), }, - parachain_info: parachain_magnet_runtime::ParachainInfoConfig { - parachain_id: id, - ..Default::default() + "parachain_info": { + "parachain_id": id, }, - collator_selection: parachain_magnet_runtime::CollatorSelectionConfig { - invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), - candidacy_bond: EXISTENTIAL_DEPOSIT * 16, - ..Default::default() + "collator_selection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacy_bond": EXISTENTIAL_DEPOSIT * 16, }, - session: parachain_magnet_runtime::SessionConfig { - keys: invulnerables + "session": { + "keys": invulnerables .into_iter() .map(|(acc, aura)| { ( @@ -246,76 +267,20 @@ fn testnet_genesis( template_session_keys(aura), // session keys ) }) - .collect(), + .collect::>(), }, // no need to pass anything to aura, in fact it will panic if we do. Session will take care // of this. 
- aura: Default::default(), - aura_ext: Default::default(), - parachain_system: Default::default(), - polkadot_xcm: parachain_magnet_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() + "polkadot_xcm": { + "safe_xcm_version": Some(SAFE_XCM_VERSION), }, - transaction_payment: Default::default(), - sudo: parachain_magnet_runtime::SudoConfig { key: Some(root.clone()) }, + "sudo": { "key": Some(root.clone()) }, // EVM compatibility - evm_chain_id: parachain_magnet_runtime::EVMChainIdConfig { - chain_id: u64::from(u32::from(id)), - ..Default::default() + "evm_chain_id": { + "chain_id": u64::from(u32::from(id)), }, - evm: parachain_magnet_runtime::EVMConfig { - accounts: { - let mut map = BTreeMap::new(); - map.insert( - // H160 address of Alice dev account - // Derived from SS58 (42 prefix) address - // SS58: 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY - // hex: 0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d - // Using the full hex key, truncating to the first 20 bytes (the first 40 hex - // chars) - H160::from_str("d43593c715fdd31c61141abd04a99fd6822c8558") - .expect("internal H160 is valid; qed"), - fp_evm::GenesisAccount { - balance: U256::from_str("0xffffffffffffffffffffffffffffffff") - .expect("internal U256 is valid; qed"), - code: Default::default(), - nonce: Default::default(), - storage: Default::default(), - }, - ); - map.insert( - // H160 address of CI test runner account - H160::from_str("6be02d1d3665660d22ff9624b7be0551ee1ac91b") - .expect("internal H160 is valid; qed"), - fp_evm::GenesisAccount { - balance: U256::from_str("0xffffffffffffffffffffffffffffffff") - .expect("internal U256 is valid; qed"), - code: Default::default(), - nonce: Default::default(), - storage: Default::default(), - }, - ); - map.insert( - // H160 address for benchmark usage - H160::from_str("1000000000000000000000000000000000000001") - .expect("internal H160 is valid; qed"), - fp_evm::GenesisAccount { - nonce: U256::from(1), - balance: U256::from(1_000_000_000_000_000_000_000_000u128), - storage: Default::default(), - code: vec![0x00], - }, - ); - map - }, - ..Default::default() - }, - ethereum: Default::default(), - dynamic_fee: Default::default(), - base_fee: Default::default(), - assurance: Default::default(), - } + "evm": { "accounts": evm_accounts }, + }) } diff --git a/node/src/cli.rs b/node/src/cli.rs index b4dd648..e40f550 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -25,8 +25,11 @@ pub enum Subcommand { /// Remove the whole chain. PurgeChain(cumulus_client_cli::PurgeChainCmd), - /// Export the genesis state of the parachain. - ExportGenesisState(cumulus_client_cli::ExportGenesisStateCommand), + /// Export the genesis head data of the parachain. + /// + /// Head data is the encoded block header. + #[command(alias = "export-genesis-state")] + ExportGenesisHead(cumulus_client_cli::ExportGenesisHeadCommand), /// Export the genesis wasm of the parachain. 
ExportGenesisWasm(cumulus_client_cli::ExportGenesisWasmCommand), diff --git a/node/src/command.rs b/node/src/command.rs index ef67a0a..d6655f1 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -163,12 +163,12 @@ pub fn run() -> Result<()> { cmd.run(config, polkadot_config) }) }, - Some(Subcommand::ExportGenesisState(cmd)) => { + Some(Subcommand::ExportGenesisHead(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| { let partials = new_partial(&config, ð_cfg)?; - cmd.run(&*config.chain_spec, &*partials.client) + cmd.run(partials.client) }) }, Some(Subcommand::ExportGenesisWasm(cmd)) => { diff --git a/node/src/on_demand_order.rs b/node/src/on_demand_order.rs index c8ef467..f9e25f1 100644 --- a/node/src/on_demand_order.rs +++ b/node/src/on_demand_order.rs @@ -16,7 +16,10 @@ use crate::{submit_order::build_rpc_for_submit_order, submit_order::SubmitOrderError}; use codec::{Codec, Decode}; -use cumulus_primitives_core::relay_chain::vstaging::Assignment; + +//use cumulus_primitives_core::relay_chain::vstaging::Assignment; +use runtime_parachains::scheduler::common::Assignment; + use cumulus_primitives_core::{ relay_chain::BlockNumber as RelayBlockNumber, ParaId, PersistedValidationData, }; @@ -97,7 +100,7 @@ async fn start_on_demand( .transpose() .ok()?; if p_active_config.is_some() { - let result = p_active_config.unwrap().on_demand_cores > 0; + let result = p_active_config.unwrap().coretime_cores > 0; //on_demand_cores Some(result) } else { None @@ -363,7 +366,7 @@ where .transpose()?; if let Some(vvs) = on_demand_queue.clone() { for vv in vvs.into_iter() { - if vv.para_id == para_id { + if vv.para_id() == para_id { exist_order = true; break; } diff --git a/node/src/service.rs b/node/src/service.rs index fab36f3..2c83ba5 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -407,10 +407,14 @@ async fn start_node_impl( // Here you can check whether the hardware meets your chains' requirements. Putting a link // in there and swapping out the requirements for your own are probably a good idea. The // requirements for a para-chain are dictated by its relay-chain. - if !SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) && validator { - log::warn!( - "⚠️ The hardware does not meet the minimal requirements for role 'Authority'." + match SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) { + Err(err) if validator => { + log::warn!( + "⚠️ The hardware does not meet the minimal requirements {} for role 'Authority'.", + err ); + }, + _ => {}, } if let Some(ref mut telemetry) = telemetry { diff --git a/pallets/assets-bridge/src/mock.rs b/pallets/assets-bridge/src/mock.rs index 42ec5d2..fda35cf 100644 --- a/pallets/assets-bridge/src/mock.rs +++ b/pallets/assets-bridge/src/mock.rs @@ -15,6 +15,7 @@ pub use crate as assets_bridge; pub use assets_bridge::{Config, Error, Event as AssetsBridgeEvent}; use frame_support::{ + derive_impl, pallet_prelude::Weight, parameter_types, traits::{AsEnsureOriginWithArg, ConstU32}, @@ -55,6 +56,7 @@ parameter_types! 
{ pub const SS58Prefix: u8 = 44; } +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -108,7 +110,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = (); type FreezeIdentifier = (); - type MaxHolds = (); + type RuntimeFreezeReason = (); type MaxFreezes = (); } @@ -152,6 +154,10 @@ impl pallet_assets::Config for Test { type CreateOrigin = AsEnsureOriginWithArg>; } +parameter_types! { + pub SuicideQuickClearLimit: u32 = 0; +} + impl pallet_evm::Config for Test { type FeeCalculator = (); type GasWeightMapping = pallet_evm::FixedGasWeightMapping; @@ -173,6 +179,7 @@ impl pallet_evm::Config for Test { type GasLimitPovSizeRatio = GasLimitPovSizeRatio; type Timestamp = Timestamp; type WeightInfo = (); + type SuicideQuickClearLimit = SuicideQuickClearLimit; } impl assets_bridge::Config for Test { diff --git a/pallets/assurance/src/mock.rs b/pallets/assurance/src/mock.rs index 3feb119..2640979 100644 --- a/pallets/assurance/src/mock.rs +++ b/pallets/assurance/src/mock.rs @@ -22,6 +22,7 @@ use cumulus_pallet_parachain_system::{ }; use cumulus_primitives_core::ParaId; use frame_support::{ + derive_impl, dispatch::DispatchClass, parameter_types, traits::{ConstU32, ConstU64, Get}, @@ -74,6 +75,7 @@ impl Get for BlockWeights { pub type AccountId = AccountId32; +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; @@ -113,7 +115,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); - type MaxHolds = (); + type RuntimeFreezeReason = (); } parameter_types! { @@ -144,12 +146,13 @@ impl cumulus_pallet_parachain_system::Config for Test { type OnSystemEvent = (); type SelfParaId = ParachainId; type OutboundXcmpMessageSource = (); - type DmpMessageHandler = (); type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = (); type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = cumulus_pallet_parachain_system::AnyRelayNumber; type ConsensusHook = TestConsensusHook; + type DmpQueue = frame_support::traits::EnqueueWithOrigin<(), sp_core::ConstU8<0>>; + type WeightInfo = (); } std::thread_local! { diff --git a/pallets/evm-utils/src/mock.rs b/pallets/evm-utils/src/mock.rs index 0a26c47..265e58b 100644 --- a/pallets/evm-utils/src/mock.rs +++ b/pallets/evm-utils/src/mock.rs @@ -17,6 +17,7 @@ use super::*; use frame_support::{ + derive_impl, dispatch::DispatchClass, parameter_types, traits::{ConstU32, ConstU64, FindAuthor, Get}, @@ -72,6 +73,7 @@ impl Get for BlockWeights { pub type AccountId = AccountId32; +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; @@ -111,7 +113,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); - type MaxHolds = (); + type RuntimeFreezeReason = (); } parameter_types! { @@ -172,7 +174,7 @@ parameter_types! 
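
Two changes repeated across these mock runtimes come straight from SDK 1.7: `#[derive_impl(...)]` fills any unspecified `frame_system::Config` items from a prelude, and `pallet_balances::Config` replaces `MaxHolds` with a `RuntimeFreezeReason` associated type. The mocks in this patch keep their full explicit item lists, which still works; a trimmed-down sketch of what the attribute allows, using the generic test prelude (`Test` and `Block` stand for the mock's own types):

use frame_support::derive_impl;

#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Test {
    // Only items without a usable default need to be spelled out; everything
    // else (hashing, weights, lengths, ...) comes from the prelude.
    type Block = Block;
    type AccountData = pallet_balances::AccountData<u64>;
}

// In `pallet_balances::Config`, the former `type MaxHolds = ...` item is simply
// dropped and a `type RuntimeFreezeReason = ...` item is provided instead, as
// the hunks above show.
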
{ pub const GasLimitPovSizeRatio: u64 = BLOCK_GAS_LIMIT.saturating_div(MAX_POV_SIZE); pub WeightPerGas: Weight = Weight::from_parts(20_000, 0); pub MockPrecompiles: MockPrecompileSet = MockPrecompileSet; - //pub SuicideQuickClearLimit: u32 = 0; + pub SuicideQuickClearLimit: u32 = 0; } impl pallet_evm::Config for Test { @@ -194,7 +196,7 @@ impl pallet_evm::Config for Test { type OnCreate = (); type FindAuthor = FindAuthorTruncated; type GasLimitPovSizeRatio = GasLimitPovSizeRatio; - //type SuicideQuickClearLimit = SuicideQuickClearLimit; + type SuicideQuickClearLimit = SuicideQuickClearLimit; type Timestamp = Timestamp; type WeightInfo = (); } diff --git a/pallets/evm/precompile/substrate-utils/src/mock.rs b/pallets/evm/precompile/substrate-utils/src/mock.rs index 1d57777..8a36849 100644 --- a/pallets/evm/precompile/substrate-utils/src/mock.rs +++ b/pallets/evm/precompile/substrate-utils/src/mock.rs @@ -17,6 +17,7 @@ use super::*; use frame_support::{ + derive_impl, dispatch::DispatchClass, parameter_types, traits::{ConstU32, ConstU64, FindAuthor, Get}, @@ -71,6 +72,7 @@ impl Get for BlockWeights { pub type AccountId = AccountId32; +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; @@ -110,7 +112,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); - type MaxHolds = (); + type RuntimeFreezeReason = (); } parameter_types! { @@ -197,7 +199,7 @@ parameter_types! { pub const GasLimitPovSizeRatio: u64 = BLOCK_GAS_LIMIT.saturating_div(MAX_POV_SIZE); pub WeightPerGas: Weight = Weight::from_parts(20_000, 0); pub MockPrecompiles: MockPrecompileSet = MockPrecompileSet::<_>::new(); - //pub SuicideQuickClearLimit: u32 = 0; + pub SuicideQuickClearLimit: u32 = 0; } impl pallet_evm::Config for Test { @@ -219,7 +221,7 @@ impl pallet_evm::Config for Test { type OnCreate = (); type FindAuthor = FindAuthorTruncated; type GasLimitPovSizeRatio = GasLimitPovSizeRatio; - //type SuicideQuickClearLimit = SuicideQuickClearLimit; + type SuicideQuickClearLimit = SuicideQuickClearLimit; type Timestamp = Timestamp; type WeightInfo = (); } diff --git a/pallets/liquidation/src/lib.rs b/pallets/liquidation/src/lib.rs index dbfc77d..a22b8a8 100644 --- a/pallets/liquidation/src/lib.rs +++ b/pallets/liquidation/src/lib.rs @@ -275,7 +275,7 @@ pub mod pallet { continue; } - let transfer_call = pallet_balances::Call::::transfer { + let transfer_call = pallet_balances::Call::::transfer_allow_death { dest: T::Lookup::unlookup(recipient), value: amount.into(), }; diff --git a/pallets/liquidation/src/mock.rs b/pallets/liquidation/src/mock.rs index f9c22d2..bc8404d 100644 --- a/pallets/liquidation/src/mock.rs +++ b/pallets/liquidation/src/mock.rs @@ -1,7 +1,7 @@ pub(crate) use crate as pallet_liquidation; pub(crate) use crate::Event as LiquidationEvent; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, weights::{ constants::ExtrinsicBaseWeight, WeightToFeeCoefficient, WeightToFeeCoefficients, @@ -67,6 +67,7 @@ parameter_types! 
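
The liquidation hunk above tracks another upstream rename: `pallet_balances::Call::transfer` no longer exists in SDK 1.7, and the explicit `transfer_allow_death` variant is used instead (`transfer_keep_alive` remains the existential-deposit-preserving option). A generic sketch of constructing the renamed call (the helper function is illustrative):

use sp_runtime::traits::StaticLookup;

/// Build the SDK 1.7 equivalent of the removed `transfer` call.
fn make_transfer_call<T: pallet_balances::Config>(
    recipient: T::AccountId,
    amount: T::Balance,
) -> pallet_balances::Call<T> {
    pallet_balances::Call::<T>::transfer_allow_death {
        dest: T::Lookup::unlookup(recipient),
        value: amount,
    }
}
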
{ pub const OperationAccountName: &'static str = "maintenance"; } +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); @@ -109,7 +110,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = (); type FreezeIdentifier = (); - type MaxHolds = (); + type RuntimeFreezeReason = (); type MaxFreezes = (); } diff --git a/pallets/order/src/mock.rs b/pallets/order/src/mock.rs index ec1047b..26c13be 100644 --- a/pallets/order/src/mock.rs +++ b/pallets/order/src/mock.rs @@ -17,7 +17,7 @@ use crate::{self as order_pallet, OrderGasCost}; use codec::Encode; pub use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{Everything, Hooks}, }; use frame_system as system; @@ -49,6 +49,7 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); @@ -90,7 +91,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxFreezes = (); type RuntimeHoldReason = (); - type MaxHolds = (); + type RuntimeFreezeReason = (); } parameter_types! { pub const SlotWidth: u32 = 2; diff --git a/pallets/pallet-xcm/Cargo.toml b/pallets/pallet-xcm/Cargo.toml deleted file mode 100644 index 527c3b8..0000000 --- a/pallets/pallet-xcm/Cargo.toml +++ /dev/null @@ -1,78 +0,0 @@ -[package] -name = "pallet-xcm" -version = "1.0.0" -description = "A pallet for handling XCM programs." -authors.workspace = true -edition.workspace = true -license.workspace = true - -[dependencies] -bounded-collections = { workspace = true, default-features = false } -codec = { package = "parity-scale-codec", workspace = true, default-features = false, features = ["derive"] } -scale-info = { workspace = true, default-features = false, features = ["derive"] } -serde = { workspace = true, optional = true, features = ["derive"] } -log = { workspace = true, default-features = false } - -frame-support = { workspace = true, features = ["experimental"], default-features = false} -frame-system = { workspace = true, default-features = false} -sp-core = { workspace = true, default-features = false} -sp-io = { workspace = true, default-features = false} -sp-runtime = { workspace = true, default-features = false} -sp-std = { workspace = true, default-features = false} - -xcm = { workspace = true, default-features = false } -xcm-executor = { workspace = true, default-features = false } -xcm-builder = { workspace = true, default-features = false } - -# marked optional, used in benchmarking -frame-benchmarking = { workspace = true, default-features = false} -pallet-balances = { workspace = true, default-features = false} - -[dev-dependencies] -pallet-assets = { workspace = true, default-features = false} -polkadot-runtime-parachains = { workspace = true, default-features = false} -polkadot-parachain-primitives = { workspace = true, default-features = false} - -[features] -default = [ "std" ] -std = [ - "bounded-collections/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "pallet-balances/std", - "scale-info/std", - "serde", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", - 
"pallet-assets/std", - "polkadot-runtime-parachains/std", - "polkadot-parachain-primitives/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-parachains/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] -try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "pallet-assets/try-runtime", - "pallet-balances/try-runtime", - "polkadot-runtime-parachains/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/pallets/pallet-xcm/src/asset_transfer.rs b/pallets/pallet-xcm/src/asset_transfer.rs deleted file mode 100644 index a178eeb..0000000 --- a/pallets/pallet-xcm/src/asset_transfer.rs +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//Modified by Alex Wang, 2023/11 - -use frame_support::traits::ContainsPair; -use scale_info::TypeInfo; -use sp_runtime::codec::{Decode, Encode}; -use xcm::prelude::*; -use xcm_executor::traits::TransactAsset; - -/// Errors related to determining asset transfer support. -#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] -pub enum Error { - /// Invalid non-concrete asset. - NotConcrete, - /// Reserve chain could not be determined for assets. - UnknownReserve, -} - -/// Specify which type of asset transfer is required for a particular `(asset, dest)` combination. -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum TransferType { - /// should teleport `asset` to `dest` - Teleport, - /// should reserve-transfer `asset` to `dest`, using local chain as reserve - LocalReserve, - /// should reserve-transfer `asset` to `dest`, using `dest` as reserve - DestinationReserve, - /// should reserve-transfer `asset` to `dest`, using remote chain `MultiLocation` as reserve - RemoteReserve(MultiLocation), -} - -/// A trait for identifying asset transfer type based on `IsTeleporter` and `IsReserve` -/// configurations. -pub trait XcmAssetTransfers { - /// Combinations of (Asset, Location) pairs which we trust as reserves. Meaning - /// reserve-based-transfers are to be used for assets matching this filter. - type IsReserve: ContainsPair; - - /// Combinations of (Asset, Location) pairs which we trust as teleporters. Meaning teleports are - /// to be used for assets matching this filter. - type IsTeleporter: ContainsPair; - - /// How to withdraw and deposit an asset. - type AssetTransactor: TransactAsset; - - /// Determine transfer type to be used for transferring `asset` from local chain to `dest`. 
- fn determine_for(asset: &MultiAsset, dest: &MultiLocation) -> Result { - if Self::IsTeleporter::contains(asset, dest) { - // we trust destination for teleporting asset - return Ok(TransferType::Teleport); - } else if Self::IsReserve::contains(asset, dest) { - // we trust destination as asset reserve location - return Ok(TransferType::DestinationReserve); - } - - // try to determine reserve location based on asset id/location - let asset_location = match asset.id { - Concrete(location) => Ok(Self::chain_location(&location)), - _ => Err(Error::NotConcrete), - }?; - if asset_location == MultiLocation::here() - || Self::IsTeleporter::contains(asset, &asset_location) - { - // if the asset is local, then it's a local reserve - // it's also a local reserve if the asset's location is not `here` but it's a location - // where it can be teleported to `here` => local reserve - Ok(TransferType::LocalReserve) - } else if Self::IsReserve::contains(asset, &asset_location) { - // remote location that is recognized as reserve location for asset - Ok(TransferType::RemoteReserve(asset_location)) - } else { - // remote location that is not configured either as teleporter or reserve => cannot - // determine asset reserve - Err(Error::UnknownReserve) - } - } - - fn chain_location(slf: &MultiLocation) -> MultiLocation { - let mut clone = *slf; - // start popping junctions until we reach chain identifier - while let Some(j) = clone.last() { - if matches!(j, Junction::Parachain(_) | Junction::GlobalConsensus(_)) { - // return chain subsection - return clone; - } else { - (clone, _) = clone.split_last_interior(); - } - } - MultiLocation::new(clone.parents, Junctions::Here) - } -} diff --git a/pallets/pallet-xcm/src/benchmarking.rs b/pallets/pallet-xcm/src/benchmarking.rs deleted file mode 100644 index 3aca247..0000000 --- a/pallets/pallet-xcm/src/benchmarking.rs +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -use super::*; -use bounded_collections::{ConstU32, WeakBoundedVec}; -use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; -use frame_support::{traits::Currency, weights::Weight}; -use frame_system::RawOrigin; -use sp_std::prelude::*; -use xcm::{latest::prelude::*, v2}; - -type RuntimeOrigin = ::RuntimeOrigin; - -// existential deposit multiplier -const ED_MULTIPLIER: u32 = 100; - -/// Pallet we're benchmarking here. -pub struct Pallet(crate::Pallet); - -/// Trait that must be implemented by runtime to be able to benchmark pallet properly. -pub trait Config: crate::Config { - /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. - /// - /// If `None`, the benchmarks that depend on a reachable destination will be skipped. 
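
With the vendored copy gone, a runtime benchmarking the upstream pallet still has to supply these hooks; the defaults above return `None`, which skips the corresponding benchmarks. A hedged sketch of a runtime-side implementation (the destinations and asset values are examples only):

use xcm::latest::prelude::*;

impl pallet_xcm::benchmarking::Config for Runtime {
    fn reachable_dest() -> Option<MultiLocation> {
        // The relay chain is a destination every parachain can route to.
        Some(Parent.into())
    }

    fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> {
        // Relay-chain native token, teleported back up to the relay chain.
        let asset = MultiAsset { id: Concrete(Parent.into()), fun: Fungible(100) };
        Some((asset, Parent.into()))
    }

    fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> {
        // Skipped here; return an asset/destination pair to enable the benchmark.
        None
    }
}
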
- fn reachable_dest() -> Option { - None - } - - /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be - /// teleported to. Used only in benchmarks. - /// - /// Implementation should also make sure `dest` is reachable/connected. - /// - /// If `None`, the benchmarks that depend on this will be skipped. - fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - None - } - - /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be - /// reserve-transferred to. Used only in benchmarks. - /// - /// Implementation should also make sure `dest` is reachable/connected. - /// - /// If `None`, the benchmarks that depend on this will be skipped. - fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - None - } -} - -benchmarks! { - where_clause { - where - T: pallet_balances::Config, - ::Balance: From + Into, - } - send { - let send_origin = - T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - if T::SendXcmOrigin::try_origin(send_origin.clone()).is_err() { - return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) - } - let msg = Xcm(vec![ClearOrigin]); - let versioned_dest: VersionedMultiLocation = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? - .into(); - let versioned_msg = VersionedXcm::from(msg); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) - - teleport_assets { - let (asset, destination) = T::teleportable_asset_and_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )?; - - let transferred_amount = match &asset.fun { - Fungible(amount) => *amount, - _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), - }.into(); - let assets: MultiAssets = asset.into(); - - let existential_deposit = T::ExistentialDeposit::get(); - let caller = whitelisted_caller(); - - // Give some multiple of the existential deposit - let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - assert!(balance >= transferred_amount); - let _ = as Currency<_>>::make_free_balance_be(&caller, balance); - // verify initial balance - assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); - - let send_origin = RawOrigin::Signed(caller.clone()); - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) - .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmTeleportFilter::contains(&(origin_location, assets.clone().into_inner())) { - return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) - } - - let recipient = [0u8; 32]; - let versioned_dest: VersionedMultiLocation = destination.into(); - let versioned_beneficiary: VersionedMultiLocation = - AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = assets.into(); - }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) - verify { - // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) - assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); - } - - reserve_transfer_assets { - let (asset, destination) = T::reserve_transferable_asset_and_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )?; - - let transferred_amount = match &asset.fun { - 
Fungible(amount) => *amount, - _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), - }.into(); - let assets: MultiAssets = asset.into(); - - let existential_deposit = T::ExistentialDeposit::get(); - let caller = whitelisted_caller(); - - // Give some multiple of the existential deposit - let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - assert!(balance >= transferred_amount); - let _ = as Currency<_>>::make_free_balance_be(&caller, balance); - // verify initial balance - assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); - - let send_origin = RawOrigin::Signed(caller.clone()); - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) - .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmReserveTransferFilter::contains(&(origin_location, assets.clone().into_inner())) { - return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) - } - - let recipient = [0u8; 32]; - let versioned_dest: VersionedMultiLocation = destination.into(); - let versioned_beneficiary: VersionedMultiLocation = - AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = assets.into(); - }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) - verify { - // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) - assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); - } - - execute { - let execute_origin = - T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let origin_location = T::ExecuteXcmOrigin::try_origin(execute_origin.clone()) - .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - let msg = Xcm(vec![ClearOrigin]); - if !T::XcmExecuteFilter::contains(&(origin_location, msg.clone())) { - return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) - } - let versioned_msg = VersionedXcm::from(msg); - }: _>(execute_origin, Box::new(versioned_msg), Weight::zero()) - - force_xcm_version { - let loc = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )?; - let xcm_version = 2; - }: _(RawOrigin::Root, Box::new(loc), xcm_version) - - force_default_xcm_version {}: _(RawOrigin::Root, Some(2)) - - force_subscribe_version_notify { - let versioned_loc: VersionedMultiLocation = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? 
- .into(); - }: _(RawOrigin::Root, Box::new(versioned_loc)) - - force_unsubscribe_version_notify { - let loc = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )?; - let versioned_loc: VersionedMultiLocation = loc.into(); - let _ = crate::Pallet::::request_version_notify(loc); - }: _(RawOrigin::Root, Box::new(versioned_loc)) - - force_suspension {}: _(RawOrigin::Root, true) - - migrate_supported_version { - let old_version = XCM_VERSION - 1; - let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); - SupportedVersion::::insert(old_version, loc, old_version); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); - } - - migrate_version_notifiers { - let old_version = XCM_VERSION - 1; - let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); - VersionNotifiers::::insert(old_version, loc, 0); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); - } - - already_notified_target { - let loc = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads(1))), - )?; - let loc = VersionedMultiLocation::from(loc); - let current_version = T::AdvertisedXcmVersion::get(); - VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), current_version)); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); - } - - notify_current_targets { - let loc = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), - )?; - let loc = VersionedMultiLocation::from(loc); - let current_version = T::AdvertisedXcmVersion::get(); - let old_version = current_version - 1; - VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), old_version)); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); - } - - notify_target_migration_fail { - let bad_loc: v2::MultiLocation = v2::Junction::Plurality { - id: v2::BodyId::Named(WeakBoundedVec::>::try_from(vec![0; 32]) - .expect("vec has a length of 32 bits; qed")), - part: v2::BodyPart::Voice, - } - .into(); - let bad_loc = VersionedMultiLocation::from(bad_loc); - let current_version = T::AdvertisedXcmVersion::get(); - VersionNotifyTargets::::insert(current_version, bad_loc, (0, Weight::zero(), current_version)); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); - } - - migrate_version_notify_targets { - let current_version = T::AdvertisedXcmVersion::get(); - let old_version = current_version - 1; - let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); - VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), current_version)); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); - } - - migrate_and_notify_old_targets { - let loc = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), - )?; - let loc = VersionedMultiLocation::from(loc); - let old_version = T::AdvertisedXcmVersion::get() - 1; - VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), old_version)); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, 
Weight::zero()); - } - - new_query { - let responder = MultiLocation::from(Parent); - let timeout = 1u32.into(); - let match_querier = MultiLocation::from(Here); - }: { - crate::Pallet::::new_query(responder, timeout, match_querier); - } - - take_response { - let responder = MultiLocation::from(Parent); - let timeout = 1u32.into(); - let match_querier = MultiLocation::from(Here); - let query_id = crate::Pallet::::new_query(responder, timeout, match_querier); - let infos = (0 .. xcm::v3::MaxPalletsInfo::get()).map(|_| PalletInfo::new( - u32::MAX, - (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), - (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), - u32::MAX, - u32::MAX, - u32::MAX, - ).unwrap()).collect::>(); - crate::Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); - - }: { - as QueryHandler>::take_response(query_id); - } - - impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext_with_balances(Vec::new()), - crate::mock::Test - ); -} diff --git a/pallets/pallet-xcm/src/controller.rs b/pallets/pallet-xcm/src/controller.rs deleted file mode 100644 index c9545b7..0000000 --- a/pallets/pallet-xcm/src/controller.rs +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! A set of traits that define how a pallet interface with XCM. -//! Controller traits defined in this module are high-level traits that will rely on other traits -//! from `xcm-executor` to perform their tasks. - -//Modified by Alex Wang 2023/11 - -use frame_support::pallet_prelude::DispatchError; -use sp_std::boxed::Box; -use xcm::prelude::*; -pub use xcm_executor::traits::QueryHandler; - -/// Umbrella trait for all Controller traits. -pub trait Controller: - ExecuteController + SendController + QueryController -{ -} - -impl Controller for T where - T: ExecuteController - + SendController - + QueryController -{ -} - -/// Weight functions needed for [`ExecuteController`]. -pub trait ExecuteControllerWeightInfo { - /// Weight for [`ExecuteController::execute`] - fn execute() -> Weight; -} - -/// Execute an XCM locally, for a given origin. -/// -/// An implementation of that trait will handle the low-level details of the execution, such as: -/// - Validating and Converting the origin to a MultiLocation. -/// - Handling versioning. -/// - Calling the internal executor, which implements [`ExecuteXcm`]. -pub trait ExecuteController { - /// Weight information for ExecuteController functions. - type WeightInfo: ExecuteControllerWeightInfo; - - /// Attempt to execute an XCM locally, and return the outcome. - /// - /// # Parameters - /// - /// - `origin`: the origin of the call. - /// - `message`: the XCM program to be executed. - /// - `max_weight`: the maximum weight that can be consumed by the execution. 
- fn execute( - origin: Origin, - message: Box>, - max_weight: Weight, - ) -> Result; -} - -/// Weight functions needed for [`SendController`]. -pub trait SendControllerWeightInfo { - /// Weight for [`SendController::send`] - fn send() -> Weight; -} - -/// Send an XCM from a given origin. -/// -/// An implementation of that trait will handle the low-level details of dispatching an XCM, such -/// as: -/// - Validating and Converting the origin to an interior location. -/// - Handling versioning. -/// - Calling the internal router, which implements [`SendXcm`]. -pub trait SendController { - /// Weight information for SendController functions. - type WeightInfo: SendControllerWeightInfo; - - /// Send an XCM to be executed by a remote location. - /// - /// # Parameters - /// - /// - `origin`: the origin of the call. - /// - `dest`: the destination of the message. - /// - `msg`: the XCM to be sent. - fn send( - origin: Origin, - dest: Box, - message: Box>, - ) -> Result; -} - -/// Weight functions needed for [`QueryController`]. -pub trait QueryControllerWeightInfo { - /// Weight for [`QueryController::query`] - fn query() -> Weight; - - /// Weight for [`QueryHandler::take_response`] - fn take_response() -> Weight; -} - -/// Query a remote location, from a given origin. -/// -/// An implementation of that trait will handle the low-level details of querying a remote location, -/// such as: -/// - Validating and Converting the origin to an interior location. -/// - Handling versioning. -/// - Calling the [`QueryHandler`] to register the query. -pub trait QueryController: QueryHandler { - /// Weight information for QueryController functions. - type WeightInfo: QueryControllerWeightInfo; - - /// Query a remote location. - /// - /// # Parameters - /// - /// - `origin`: the origin of the call, used to determine the responder. - /// - `timeout`: the maximum block number that the query should be responded to. - /// - `match_querier`: the querier that the query should be responded to. - fn query( - origin: Origin, - timeout: Timeout, - match_querier: VersionedMultiLocation, - ) -> Result; -} - -impl ExecuteController for () { - type WeightInfo = (); - fn execute( - _origin: Origin, - _message: Box>, - _max_weight: Weight, - ) -> Result { - Ok(Outcome::Error(XcmError::Unimplemented)) - } -} - -impl ExecuteControllerWeightInfo for () { - fn execute() -> Weight { - Weight::zero() - } -} - -impl SendController for () { - type WeightInfo = (); - fn send( - _origin: Origin, - _dest: Box, - _message: Box>, - ) -> Result { - Ok(Default::default()) - } -} - -impl SendControllerWeightInfo for () { - fn send() -> Weight { - Weight::zero() - } -} - -impl QueryControllerWeightInfo for () { - fn query() -> Weight { - Weight::zero() - } - fn take_response() -> Weight { - Weight::zero() - } -} - -/* -impl QueryController for () { - type WeightInfo = (); - - fn query( - _origin: Origin, - _timeout: Timeout, - _match_querier: VersionedMultiLocation, - ) -> Result { - Ok(Default::default()) - } -} -*/ diff --git a/pallets/pallet-xcm/src/fee_handling.rs b/pallets/pallet-xcm/src/fee_handling.rs deleted file mode 100644 index c158d5d..0000000 --- a/pallets/pallet-xcm/src/fee_handling.rs +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
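
These controller traits decouple callers from the XCM pallet: a pallet that needs to dispatch XCM programs can be generic over `SendController`/`ExecuteController`, and the runtime points that type at the XCM pallet. A minimal consumer sketch against the `SendController` trait shown above (the wrapper function is illustrative):

use sp_runtime::DispatchError;
use sp_std::{boxed::Box, vec};
use xcm::prelude::*;

/// Send a trivial XCM program to `dest` through any `SendController` implementation.
fn send_ping<Origin, Sender: SendController<Origin>>(
    origin: Origin,
    dest: MultiLocation,
) -> Result<XcmHash, DispatchError> {
    let message = Xcm::<()>(vec![ClearOrigin]);
    Sender::send(
        origin,
        Box::new(VersionedMultiLocation::from(dest)),
        Box::new(VersionedXcm::from(message)),
    )
}
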
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -use core::marker::PhantomData; -use frame_support::traits::{Contains, Get}; -use xcm::prelude::*; -use xcm_executor::traits::{FeeManager, FeeReason, TransactAsset}; - -/// Handles the fees that are taken by certain XCM instructions. -pub trait HandleFee { - /// Do something with the fee which has been paid. Doing nothing here silently burns the - /// fees. - /// - /// Returns any part of the fee that wasn't consumed. - fn handle_fee(fee: MultiAssets, context: Option<&XcmContext>, reason: FeeReason) - -> MultiAssets; -} - -// Default `HandleFee` implementation that just burns the fee. -impl HandleFee for () { - fn handle_fee(_: MultiAssets, _: Option<&XcmContext>, _: FeeReason) -> MultiAssets { - MultiAssets::new() - } -} - -#[impl_trait_for_tuples::impl_for_tuples(1, 30)] -impl HandleFee for Tuple { - fn handle_fee( - fee: MultiAssets, - context: Option<&XcmContext>, - reason: FeeReason, - ) -> MultiAssets { - let mut unconsumed_fee = fee; - for_tuples!( #( - unconsumed_fee = Tuple::handle_fee(unconsumed_fee, context, reason); - if unconsumed_fee.is_none() { - return unconsumed_fee; - } - )* ); - - unconsumed_fee - } -} - -/// A `FeeManager` implementation that permits the specified `WaivedLocations` to not pay for fees -/// and that uses the provided `HandleFee` implementation otherwise. -pub struct XcmFeeManagerFromComponents( - PhantomData<(WaivedLocations, HandleFee)>, -); -impl, FeeHandler: HandleFee> FeeManager - for XcmFeeManagerFromComponents -{ - fn is_waived(origin: Option<&MultiLocation>, _: FeeReason) -> bool { - let Some(loc) = origin else { return false }; - WaivedLocations::contains(loc) - } - - fn handle_fee(fee: MultiAssets, context: Option<&XcmContext>, reason: FeeReason) { - FeeHandler::handle_fee(fee, context, reason); - } -} - -/// Try to deposit the given fee in the specified account. -/// Burns the fee in case of a failure. -pub fn deposit_or_burn_fee>( - fee: MultiAssets, - context: Option<&XcmContext>, - receiver: AccountId, -) { - let dest = AccountId32 { network: None, id: receiver.into() }.into(); - for asset in fee.into_inner() { - if let Err(e) = AssetTransactor::deposit_asset(&asset, &dest, context) { - log::trace!( - target: "xcm::fees", - "`AssetTransactor::deposit_asset` returned error: {:?}. Burning fee: {:?}. \ - They might be burned.", - e, asset, - ); - } - } -} - -/// A `HandleFee` implementation that simply deposits the fees into a specific on-chain -/// `ReceiverAccount`. -/// -/// It reuses the `AssetTransactor` configured on the XCM executor to deposit fee assets. If -/// the `AssetTransactor` returns an error while calling `deposit_asset`, then a warning will be -/// logged and the fee burned. 
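
The fee-handling pieces in this file are typically wired together in the runtime's XCM executor configuration: `XcmFeeManagerFromComponents` waives fees for privileged locations and hands everything else to a `HandleFee` implementation such as `XcmFeeToAccount`, which deposits the fees into a treasury-style account. A hedged sketch of that wiring (equivalent types ship with `xcm_builder` upstream; `WaivedLocations`, `AssetTransactor`, `AccountId` and `TreasuryAccount` are placeholders for the runtime's own types):

impl xcm_executor::Config for XcmConfig {
    // ... other associated types elided ...
    type FeeManager = XcmFeeManagerFromComponents<
        WaivedLocations,
        XcmFeeToAccount<AssetTransactor, AccountId, TreasuryAccount>,
    >;
    // ...
}
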
-pub struct XcmFeeToAccount( - PhantomData<(AssetTransactor, AccountId, ReceiverAccount)>, -); - -impl< - AssetTransactor: TransactAsset, - AccountId: Clone + Into<[u8; 32]>, - ReceiverAccount: Get, - > HandleFee for XcmFeeToAccount -{ - fn handle_fee( - fee: MultiAssets, - context: Option<&XcmContext>, - _reason: FeeReason, - ) -> MultiAssets { - deposit_or_burn_fee::(fee, context, ReceiverAccount::get()); - - MultiAssets::new() - } -} diff --git a/pallets/pallet-xcm/src/lib.rs b/pallets/pallet-xcm/src/lib.rs deleted file mode 100644 index 43db7fa..0000000 --- a/pallets/pallet-xcm/src/lib.rs +++ /dev/null @@ -1,2761 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Pallet to handle XCM messages. - -//Modified by Alex Wang 2023/11 - -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; -#[cfg(test)] -mod mock; -#[cfg(test)] -mod tests; - -pub mod asset_transfer; -pub mod controller; -pub mod migration; - -use asset_transfer::{Error as AssetTransferError, TransferType, XcmAssetTransfers}; -use controller::{ - ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo, - SendController, SendControllerWeightInfo, -}; - -use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use frame_support::{ - dispatch::GetDispatchInfo, - pallet_prelude::*, - traits::{ - Contains, ContainsPair, Currency, Defensive, EnsureOrigin, Get, LockableCurrency, - OriginTrait, WithdrawReasons, - }, - PalletId, -}; -use frame_system::pallet_prelude::{BlockNumberFor, *}; -pub use pallet::*; -use scale_info::TypeInfo; -use sp_runtime::{ - traits::{ - AccountIdConversion, BadOrigin, BlakeTwo256, BlockNumberProvider, Dispatchable, Hash, - Saturating, Zero, - }, - RuntimeDebug, -}; -use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec}; -use xcm::{latest::QueryResponseInfo, prelude::*}; -use xcm_executor::{ - traits::{ - CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin, DropAssets, MatchesFungible, - OnResponse, Properties, QueryHandler, QueryResponseStatus, TransactAsset, - VersionChangeNotifier, WeightBounds, - }, - Assets, -}; - -pub trait WeightInfo { - fn send() -> Weight; - fn teleport_assets() -> Weight; - fn reserve_transfer_assets() -> Weight; - fn execute() -> Weight; - fn force_xcm_version() -> Weight; - fn force_default_xcm_version() -> Weight; - fn force_subscribe_version_notify() -> Weight; - fn force_unsubscribe_version_notify() -> Weight; - fn force_suspension() -> Weight; - fn migrate_supported_version() -> Weight; - fn migrate_version_notifiers() -> Weight; - fn already_notified_target() -> Weight; - fn notify_current_targets() -> Weight; - fn notify_target_migration_fail() -> Weight; - fn migrate_version_notify_targets() -> Weight; - fn migrate_and_notify_old_targets() -> Weight; - fn new_query() -> Weight; - fn take_response() -> 
Weight; -} - -/// fallback implementation -pub struct TestWeightInfo; -impl WeightInfo for TestWeightInfo { - fn send() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn teleport_assets() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn reserve_transfer_assets() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn execute() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn force_xcm_version() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn force_default_xcm_version() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn force_subscribe_version_notify() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn force_unsubscribe_version_notify() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn force_suspension() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn migrate_supported_version() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn migrate_version_notifiers() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn already_notified_target() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn notify_current_targets() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn notify_target_migration_fail() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn migrate_version_notify_targets() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn migrate_and_notify_old_targets() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn new_query() -> Weight { - Weight::from_parts(100_000_000, 0) - } - - fn take_response() -> Weight { - Weight::from_parts(100_000_000, 0) - } -} - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::{ - dispatch::{GetDispatchInfo, PostDispatchInfo}, - parameter_types, - }; - use frame_system::Config as SysConfig; - use sp_core::H256; - use sp_runtime::traits::Dispatchable; - use xcm_executor::traits::{MatchesFungible, WeightBounds}; - - parameter_types! { - /// An implementation of `Get` which just returns the latest XCM version which we can - /// support. - pub const CurrentXcmVersion: u32 = XCM_VERSION; - } - - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); - - #[pallet::pallet] - #[pallet::storage_version(STORAGE_VERSION)] - #[pallet::without_storage_info] - pub struct Pallet(_); - - pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - - #[pallet::config] - /// The module configuration trait. - pub trait Config: frame_system::Config { - /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - - /// A lockable currency. - // TODO: We should really use a trait which can handle multiple currencies. - type Currency: LockableCurrency>; - - /// The `MultiAsset` matcher for `Currency`. - type CurrencyMatcher: MatchesFungible>; - - /// Required origin for sending XCM messages. If successful, it resolves to `MultiLocation` - /// which exists as an interior location within this chain's XCM context. - type SendXcmOrigin: EnsureOrigin< - ::RuntimeOrigin, - Success = MultiLocation, - >; - - /// The type used to actually dispatch an XCM to its destination. - type XcmRouter: SendXcm; - - /// Required origin for executing XCM messages, including the teleport functionality. If - /// successful, then it resolves to `MultiLocation` which exists as an interior location - /// within this chain's XCM context. 
- type ExecuteXcmOrigin: EnsureOrigin< - ::RuntimeOrigin, - Success = MultiLocation, - >; - - /// Our XCM filter which messages to be executed using `XcmExecutor` must pass. - type XcmExecuteFilter: Contains<(MultiLocation, Xcm<::RuntimeCall>)>; - - type XcmExecutorConfig: xcm_executor::Config; - - /// Something to execute an XCM message. - type XcmExecutor: ExecuteXcm<::RuntimeCall>; - - /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. - type XcmTeleportFilter: Contains<(MultiLocation, Vec)>; - - /// Our XCM filter which messages to be reserve-transferred using the dedicated extrinsic - /// must pass. - type XcmReserveTransferFilter: Contains<(MultiLocation, Vec)>; - - /// Means of measuring the weight consumed by an XCM message locally. - type Weigher: WeightBounds<::RuntimeCall>; - - /// This chain's Universal Location. - type UniversalLocation: Get; - - /// The runtime `Origin` type. - type RuntimeOrigin: From + From<::RuntimeOrigin>; - - /// The runtime `Call` type. - type RuntimeCall: Parameter - + GetDispatchInfo - + Dispatchable< - RuntimeOrigin = ::RuntimeOrigin, - PostInfo = PostDispatchInfo, - >; - - const VERSION_DISCOVERY_QUEUE_SIZE: u32; - - /// The latest supported version that we advertise. Generally just set it to - /// `pallet_xcm::CurrentXcmVersion`. - type AdvertisedXcmVersion: Get; - - /// The origin that is allowed to call privileged operations on the XCM pallet - type AdminOrigin: EnsureOrigin<::RuntimeOrigin>; - - /// The assets which we consider a given origin is trusted if they claim to have placed a - /// lock. - type TrustedLockers: ContainsPair; - - /// How to get an `AccountId` value from a `MultiLocation`, useful for handling asset locks. - type SovereignAccountOf: ConvertLocation; - - /// The maximum number of local XCM locks that a single account may have. - type MaxLockers: Get; - - /// The maximum number of consumers a single remote lock may have. - type MaxRemoteLockConsumers: Get; - - /// The ID type for local consumers of remote locks. - type RemoteLockConsumerIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; - - /// Weight information for extrinsics in this pallet. 
- type WeightInfo: WeightInfo; - } - //< - impl XcmAssetTransfers for Pallet { - type IsReserve = ::IsReserve; - type IsTeleporter = ::IsTeleporter; - type AssetTransactor = ::AssetTransactor; - } - - impl ExecuteControllerWeightInfo for Pallet { - fn execute() -> Weight { - T::WeightInfo::execute() - } - } - - impl ExecuteController, ::RuntimeCall> for Pallet { - type WeightInfo = Self; - fn execute( - origin: OriginFor, - message: Box::RuntimeCall>>, - max_weight: Weight, - ) -> Result { - let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; - let value = (origin_location, message); - ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); - let (origin_location, message) = value; - let outcome = T::XcmExecutor::execute_xcm_in_credit( - origin_location, - message, - hash, - max_weight, - max_weight, - ); - Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); - Ok(outcome) - } - } - - impl SendControllerWeightInfo for Pallet { - fn send() -> Weight { - T::WeightInfo::send() - } - } - - impl SendController> for Pallet { - type WeightInfo = Self; - fn send( - origin: OriginFor, - dest: Box, - message: Box>, - ) -> Result { - let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; - let interior: Junctions = - origin_location.try_into().map_err(|_| Error::::InvalidOrigin)?; - let dest = MultiLocation::try_from(*dest).map_err(|()| Error::::BadVersion)?; - let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; - - let message_id = - Self::send_xcm(interior, dest, message.clone()).map_err(Error::::from)?; - let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; - Self::deposit_event(e); - Ok(message_id) - } - } - - impl QueryControllerWeightInfo for Pallet { - fn query() -> Weight { - T::WeightInfo::new_query() - } - fn take_response() -> Weight { - T::WeightInfo::take_response() - } - } - - impl QueryController, BlockNumberFor> for Pallet { - type WeightInfo = Self; - - fn query( - origin: OriginFor, - timeout: BlockNumberFor, - match_querier: VersionedMultiLocation, - ) -> Result { - let responder = ::ExecuteXcmOrigin::ensure_origin(origin)?; - let query_id = ::new_query( - responder, - timeout, - MultiLocation::try_from(match_querier) - .map_err(|_| Into::::into(Error::::BadVersion))?, - ); - - Ok(query_id) - } - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// Execution of an XCM message was attempted. - Attempted { outcome: xcm::latest::Outcome }, - /// A XCM message was sent. - Sent { - origin: MultiLocation, - destination: MultiLocation, - message: Xcm<()>, - message_id: XcmHash, - }, - /// Query response received which does not match a registered query. This may be because a - /// matching query was never registered, it may be because it is a duplicate response, or - /// because the query timed out. - UnexpectedResponse { origin: MultiLocation, query_id: QueryId }, - /// Query response has been received and is ready for taking with `take_response`. There is - /// no registered notification call. - ResponseReady { query_id: QueryId, response: Response }, - /// Query response has been received and query is removed. The registered notification has - /// been dispatched and executed successfully. 
- Notified { query_id: QueryId, pallet_index: u8, call_index: u8 }, - /// Query response has been received and query is removed. The registered notification - /// could not be dispatched because the dispatch weight is greater than the maximum weight - /// originally budgeted by this runtime for the query result. - NotifyOverweight { - query_id: QueryId, - pallet_index: u8, - call_index: u8, - actual_weight: Weight, - max_budgeted_weight: Weight, - }, - /// Query response has been received and query is removed. There was a general error with - /// dispatching the notification call. - NotifyDispatchError { query_id: QueryId, pallet_index: u8, call_index: u8 }, - /// Query response has been received and query is removed. The dispatch was unable to be - /// decoded into a `Call`; this might be due to dispatch function having a signature which - /// is not `(origin, QueryId, Response)`. - NotifyDecodeFailed { query_id: QueryId, pallet_index: u8, call_index: u8 }, - /// Expected query response has been received but the origin location of the response does - /// not match that expected. The query remains registered for a later, valid, response to - /// be received and acted upon. - InvalidResponder { - origin: MultiLocation, - query_id: QueryId, - expected_location: Option, - }, - /// Expected query response has been received but the expected origin location placed in - /// storage by this runtime previously cannot be decoded. The query remains registered. - /// - /// This is unexpected (since a location placed in storage in a previously executing - /// runtime should be readable prior to query timeout) and dangerous since the possibly - /// valid response will be dropped. Manual governance intervention is probably going to be - /// needed. - InvalidResponderVersion { origin: MultiLocation, query_id: QueryId }, - /// Received query response has been read and removed. - ResponseTaken { query_id: QueryId }, - /// Some assets have been placed in an asset trap. - AssetsTrapped { hash: H256, origin: MultiLocation, assets: VersionedMultiAssets }, - /// An XCM version change notification message has been attempted to be sent. - /// - /// The cost of sending it (borne by the chain) is included. - VersionChangeNotified { - destination: MultiLocation, - result: XcmVersion, - cost: MultiAssets, - message_id: XcmHash, - }, - /// The supported version of a location has been changed. This might be through an - /// automatic notification or a manual intervention. - SupportedVersionChanged { location: MultiLocation, version: XcmVersion }, - /// A given location which had a version change subscription was dropped owing to an error - /// sending the notification to it. - NotifyTargetSendFail { location: MultiLocation, query_id: QueryId, error: XcmError }, - /// A given location which had a version change subscription was dropped owing to an error - /// migrating the location to our new XCM format. - NotifyTargetMigrationFail { location: VersionedMultiLocation, query_id: QueryId }, - /// Expected query response has been received but the expected querier location placed in - /// storage by this runtime previously cannot be decoded. The query remains registered. - /// - /// This is unexpected (since a location placed in storage in a previously executing - /// runtime should be readable prior to query timeout) and dangerous since the possibly - /// valid response will be dropped. Manual governance intervention is probably going to be - /// needed. 
- InvalidQuerierVersion { origin: MultiLocation, query_id: QueryId }, - /// Expected query response has been received but the querier location of the response does - /// not match the expected. The query remains registered for a later, valid, response to - /// be received and acted upon. - InvalidQuerier { - origin: MultiLocation, - query_id: QueryId, - expected_querier: MultiLocation, - maybe_actual_querier: Option, - }, - /// A remote has requested XCM version change notification from us and we have honored it. - /// A version information message is sent to them and its cost is included. - VersionNotifyStarted { destination: MultiLocation, cost: MultiAssets, message_id: XcmHash }, - /// We have requested that a remote chain send us XCM version change notifications. - VersionNotifyRequested { - destination: MultiLocation, - cost: MultiAssets, - message_id: XcmHash, - }, - /// We have requested that a remote chain stops sending us XCM version change - /// notifications. - VersionNotifyUnrequested { - destination: MultiLocation, - cost: MultiAssets, - message_id: XcmHash, - }, - /// Fees were paid from a location for an operation (often for using `SendXcm`). - FeesPaid { paying: MultiLocation, fees: MultiAssets }, - /// Some assets have been claimed from an asset trap - AssetsClaimed { hash: H256, origin: MultiLocation, assets: VersionedMultiAssets }, - } - - #[pallet::origin] - #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] - pub enum Origin { - /// It comes from somewhere in the XCM space wanting to transact. - Xcm(MultiLocation), - /// It comes as an expected response from an XCM location. - Response(MultiLocation), - } - impl From for Origin { - fn from(location: MultiLocation) -> Origin { - Origin::Xcm(location) - } - } - - #[pallet::error] - pub enum Error { - /// The desired destination was unreachable, generally because there is a no way of routing - /// to it. - Unreachable, - /// There was some other issue (i.e. not to do with routing) in sending the message. - /// Perhaps a lack of space for buffering the message. - SendFailure, - /// The message execution fails the filter. - Filtered, - /// The message's weight could not be determined. - UnweighableMessage, - /// The destination `MultiLocation` provided cannot be inverted. - DestinationNotInvertible, - /// The assets to be sent are empty. - Empty, - /// Could not re-anchor the assets to declare the fees for the destination chain. - CannotReanchor, - /// Too many assets have been attempted for transfer. - TooManyAssets, - /// Origin is invalid for sending. - InvalidOrigin, - /// The version of the `Versioned` value used is not able to be interpreted. - BadVersion, - /// The given location could not be used (e.g. because it cannot be expressed in the - /// desired version of XCM). - BadLocation, - /// The referenced subscription could not be found. - NoSubscription, - /// The location is invalid since it already has a subscription from us. - AlreadySubscribed, - /// Could not check-out the assets for teleportation to the destination chain. - CannotCheckOutTeleport, - /// The owner does not own (all) of the asset that they wish to do the operation on. - LowBalance, - /// The asset owner has too many locks on the asset. - TooManyLocks, - /// The given account is not an identifiable sovereign account for any location. - AccountNotSovereign, - /// The operation required fees to be paid which the initiator could not meet. 
- FeesNotMet, - /// A remote lock with the corresponding data could not be found. - LockNotFound, - /// The unlock operation cannot succeed because there are still consumers of the lock. - InUse, - /// Invalid non-concrete asset. - InvalidAssetNotConcrete, - /// Invalid asset, reserve chain could not be determined for it. - InvalidAssetUnknownReserve, - /// Invalid asset, do not support remote asset reserves with different fees reserves. - InvalidAssetUnsupportedReserve, - /// Too many assets with different reserve locations have been attempted for transfer. - TooManyReserves, - /// Local XCM execution of asset transfer incomplete. - LocalExecutionIncomplete, - } - - impl From for Error { - fn from(e: SendError) -> Self { - match e { - SendError::Fees => Error::::FeesNotMet, - SendError::NotApplicable => Error::::Unreachable, - _ => Error::::SendFailure, - } - } - } - - impl From for Error { - fn from(e: AssetTransferError) -> Self { - match e { - AssetTransferError::NotConcrete => Error::::InvalidAssetNotConcrete, - AssetTransferError::UnknownReserve => Error::::InvalidAssetUnknownReserve, - } - } - } - - /// The status of a query. - #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] - pub enum QueryStatus { - /// The query was sent but no response has yet been received. - Pending { - /// The `QueryResponse` XCM must have this origin to be considered a reply for this - /// query. - responder: VersionedMultiLocation, - /// The `QueryResponse` XCM must have this value as the `querier` field to be - /// considered a reply for this query. If `None` then the querier is ignored. - maybe_match_querier: Option, - maybe_notify: Option<(u8, u8)>, - timeout: BlockNumber, - }, - /// The query is for an ongoing version notification subscription. - VersionNotifier { origin: VersionedMultiLocation, is_active: bool }, - /// A response has been received. - Ready { response: VersionedResponse, at: BlockNumber }, - } - - #[derive(Copy, Clone)] - pub(crate) struct LatestVersionedMultiLocation<'a>(pub(crate) &'a MultiLocation); - impl<'a> EncodeLike for LatestVersionedMultiLocation<'a> {} - impl<'a> Encode for LatestVersionedMultiLocation<'a> { - fn encode(&self) -> Vec { - let mut r = VersionedMultiLocation::from(MultiLocation::default()).encode(); - r.truncate(1); - self.0.using_encoded(|d| r.extend_from_slice(d)); - r - } - } - - #[derive(Clone, Encode, Decode, Eq, PartialEq, Ord, PartialOrd, TypeInfo)] - pub enum VersionMigrationStage { - MigrateSupportedVersion, - MigrateVersionNotifiers, - NotifyCurrentTargets(Option>), - MigrateAndNotifyOldTargets, - } - - impl Default for VersionMigrationStage { - fn default() -> Self { - Self::MigrateSupportedVersion - } - } - - /// The latest available query index. - #[pallet::storage] - pub(super) type QueryCounter = StorageValue<_, QueryId, ValueQuery>; - - /// The ongoing queries. - #[pallet::storage] - #[pallet::getter(fn query)] - pub(super) type Queries = - StorageMap<_, Blake2_128Concat, QueryId, QueryStatus>, OptionQuery>; - - /// The existing asset traps. - /// - /// Key is the blake2 256 hash of (origin, versioned `MultiAssets`) pair. Value is the number of - /// times this pair has been trapped (usually just 1 if it exists at all). - #[pallet::storage] - #[pallet::getter(fn asset_trap)] - pub(super) type AssetTraps = StorageMap<_, Identity, H256, u32, ValueQuery>; - - /// Default version to encode XCM when latest version of destination is unknown. 
If `None`, - /// then the destinations whose XCM version is unknown are considered unreachable. - #[pallet::storage] - #[pallet::whitelist_storage] - pub(super) type SafeXcmVersion = StorageValue<_, XcmVersion, OptionQuery>; - - /// The Latest versions that we know various locations support. - #[pallet::storage] - pub(super) type SupportedVersion = StorageDoubleMap< - _, - Twox64Concat, - XcmVersion, - Blake2_128Concat, - VersionedMultiLocation, - XcmVersion, - OptionQuery, - >; - - /// All locations that we have requested version notifications from. - #[pallet::storage] - pub(super) type VersionNotifiers = StorageDoubleMap< - _, - Twox64Concat, - XcmVersion, - Blake2_128Concat, - VersionedMultiLocation, - QueryId, - OptionQuery, - >; - - /// The target locations that are subscribed to our version changes, as well as the most recent - /// of our versions we informed them of. - #[pallet::storage] - pub(super) type VersionNotifyTargets = StorageDoubleMap< - _, - Twox64Concat, - XcmVersion, - Blake2_128Concat, - VersionedMultiLocation, - (QueryId, Weight, XcmVersion), - OptionQuery, - >; - - pub struct VersionDiscoveryQueueSize(PhantomData); - impl Get for VersionDiscoveryQueueSize { - fn get() -> u32 { - T::VERSION_DISCOVERY_QUEUE_SIZE - } - } - - /// Destinations whose latest XCM version we would like to know. Duplicates not allowed, and - /// the `u32` counter is the number of times that a send to the destination has been attempted, - /// which is used as a prioritization. - #[pallet::storage] - #[pallet::whitelist_storage] - pub(super) type VersionDiscoveryQueue = StorageValue< - _, - BoundedVec<(VersionedMultiLocation, u32), VersionDiscoveryQueueSize>, - ValueQuery, - >; - - /// The current migration's stage, if any. - #[pallet::storage] - pub(super) type CurrentMigration = - StorageValue<_, VersionMigrationStage, OptionQuery>; - - #[derive(Clone, Encode, Decode, Eq, PartialEq, Ord, PartialOrd, TypeInfo, MaxEncodedLen)] - #[scale_info(skip_type_params(MaxConsumers))] - pub struct RemoteLockedFungibleRecord> { - /// Total amount of the asset held by the remote lock. - pub amount: u128, - /// The owner of the locked asset. - pub owner: VersionedMultiLocation, - /// The location which holds the original lock. - pub locker: VersionedMultiLocation, - /// Local consumers of the remote lock with a consumer identifier and the amount - /// of fungible asset every consumer holds. - /// Every consumer can hold up to total amount of the remote lock. - pub consumers: BoundedVec<(ConsumerIdentifier, u128), MaxConsumers>, - } - - impl> RemoteLockedFungibleRecord { - /// Amount of the remote lock in use by consumers. - /// Returns `None` if the remote lock has no consumers. - pub fn amount_held(&self) -> Option { - self.consumers.iter().max_by(|x, y| x.1.cmp(&y.1)).map(|max| max.1) - } - } - - /// Fungible assets which we know are locked on a remote chain. - #[pallet::storage] - pub(super) type RemoteLockedFungibles = StorageNMap< - _, - ( - NMapKey, - NMapKey, - NMapKey, - ), - RemoteLockedFungibleRecord, - OptionQuery, - >; - - /// Fungible assets which we know are locked on this chain. - #[pallet::storage] - pub(super) type LockedFungibles = StorageMap< - _, - Blake2_128Concat, - T::AccountId, - BoundedVec<(BalanceOf, VersionedMultiLocation), T::MaxLockers>, - OptionQuery, - >; - - /// Global suspension state of the XCM executor. 
- #[pallet::storage] - pub(super) type XcmExecutionSuspended = StorageValue<_, bool, ValueQuery>; - - #[pallet::genesis_config] - pub struct GenesisConfig { - #[serde(skip)] - pub _config: sp_std::marker::PhantomData, - /// The default version to encode outgoing XCM messages with. - pub safe_xcm_version: Option, - } - - impl Default for GenesisConfig { - fn default() -> Self { - Self { safe_xcm_version: Some(XCM_VERSION), _config: Default::default() } - } - } - - #[pallet::genesis_build] - impl BuildGenesisConfig for GenesisConfig { - fn build(&self) { - SafeXcmVersion::::set(self.safe_xcm_version); - } - } - - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_initialize(_n: BlockNumberFor) -> Weight { - let mut weight_used = Weight::zero(); - if let Some(migration) = CurrentMigration::::get() { - // Consume 10% of block at most - let max_weight = T::BlockWeights::get().max_block / 10; - let (w, maybe_migration) = Self::check_xcm_version_change(migration, max_weight); - CurrentMigration::::set(maybe_migration); - weight_used.saturating_accrue(w); - } - - // Here we aim to get one successful version negotiation request sent per block, ordered - // by the destinations being most sent to. - let mut q = VersionDiscoveryQueue::::take().into_inner(); - // TODO: correct weights. - weight_used.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - q.sort_by_key(|i| i.1); - while let Some((versioned_dest, _)) = q.pop() { - if let Ok(dest) = MultiLocation::try_from(versioned_dest) { - if Self::request_version_notify(dest).is_ok() { - // TODO: correct weights. - weight_used.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - break; - } - } - } - // Should never fail since we only removed items. But better safe than panicking as it's - // way better to drop the queue than panic on initialize. - if let Ok(q) = BoundedVec::try_from(q) { - VersionDiscoveryQueue::::put(q); - } - weight_used - } - } - - pub mod migrations { - use super::*; - use frame_support::traits::{PalletInfoAccess, StorageVersion}; - - #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] - enum QueryStatusV0 { - Pending { - responder: VersionedMultiLocation, - maybe_notify: Option<(u8, u8)>, - timeout: BlockNumber, - }, - VersionNotifier { - origin: VersionedMultiLocation, - is_active: bool, - }, - Ready { - response: VersionedResponse, - at: BlockNumber, - }, - } - impl From> for QueryStatus { - fn from(old: QueryStatusV0) -> Self { - use QueryStatusV0::*; - match old { - Pending { responder, maybe_notify, timeout } => QueryStatus::Pending { - responder, - maybe_notify, - timeout, - maybe_match_querier: Some(MultiLocation::here().into()), - }, - VersionNotifier { origin, is_active } => { - QueryStatus::VersionNotifier { origin, is_active } - }, - Ready { response, at } => QueryStatus::Ready { response, at }, - } - } - } - - pub fn migrate_to_v1( - ) -> frame_support::weights::Weight { - let on_chain_storage_version =
<P as GetStorageVersion>::on_chain_storage_version(); - log::info!( - target: "runtime::xcm", - "Running migration storage v1 for xcm with storage version {:?}", - on_chain_storage_version, - ); - - if on_chain_storage_version < 1 { - let mut count = 0; - Queries::<T>::translate::<QueryStatusV0<BlockNumberFor<T>>, _>(|_key, value| { - count += 1; - Some(value.into()) - }); - StorageVersion::new(1).put::<P>
(); - log::info!( - target: "runtime::xcm", - "Running migration storage v1 for xcm with storage version {:?} was complete", - on_chain_storage_version, - ); - // calculate and return migration weights - T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1) - } else { - log::warn!( - target: "runtime::xcm", - "Attempted to apply migration to v1 but failed because storage version is {:?}", - on_chain_storage_version, - ); - T::DbWeight::get().reads(1) - } - } - } - - #[pallet::call] - impl Pallet { - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::send())] - pub fn send( - origin: OriginFor, - dest: Box, - message: Box>, - ) -> DispatchResult { - >::send(origin, dest, message)?; - Ok(()) - } - - /// Teleport some assets from the local chain to some destination chain. - /// - /// **This function is deprecated: Use `limited_teleport_assets` instead.** - /// - /// Fee payment on the destination side is made from the asset in the `assets` vector of - /// index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited, - /// with all fees taken as needed from the asset. - /// - /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. - /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, - /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send - /// from relay to parachain. - /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will - /// generally be an `AccountId32` value. - /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to - /// pay the fee on the `dest` side. May not be empty. - /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay - /// fees. - #[pallet::call_index(1)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - let count = assets.len() as u32; - let mut message = Xcm(vec![ - WithdrawAsset(assets), - SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) }, - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] - pub fn teleport_assets( - origin: OriginFor, - dest: Box, - beneficiary: Box, - assets: Box, - fee_asset_item: u32, - ) -> DispatchResult { - Self::do_teleport_assets(origin, dest, beneficiary, assets, fee_asset_item, Unlimited) - } - - /// Transfer some assets from the local chain to the sovereign account of a destination - /// chain and forward a notification XCM. - /// - /// **This function is deprecated: Use `limited_reserve_transfer_assets` instead.** - /// - /// Fee payment on the destination side is made from the asset in the `assets` vector of - /// index `fee_asset_item`. The weight limit for fees is not provided and thus is unlimited, - /// with all fees taken as needed from the asset. - /// - /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. - /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, - /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send - /// from relay to parachain. - /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will - /// generally be an `AccountId32` value. 
- /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the - /// fee on the `dest` side. - /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay - /// fees. - #[pallet::call_index(2)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - // heaviest version of locally executed XCM program: equivalent in weight to - // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_transfer_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] - pub fn reserve_transfer_assets( - origin: OriginFor, - dest: Box, - beneficiary: Box, - assets: Box, - fee_asset_item: u32, - ) -> DispatchResult { - Self::do_reserve_transfer_assets( - origin, - dest, - beneficiary, - assets, - fee_asset_item, - Unlimited, - ) - } - - /// Execute an XCM message from a local, signed, origin. - /// - /// An event is deposited indicating whether `msg` could be executed completely or only - /// partially. - /// - /// No more than `max_weight` will be used in its attempted execution. If this is less than - /// the maximum amount of weight that the message could take to be executed, then no - /// execution attempt will be made. - /// - /// NOTE: A successful return to this does *not* imply that the `msg` was executed - /// successfully to completion; only that it was attempted. - #[pallet::call_index(3)] - #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] - pub fn execute( - origin: OriginFor, - message: Box::RuntimeCall>>, - max_weight: Weight, - ) -> DispatchResultWithPostInfo { - let outcome = >::execute(origin, message, max_weight)?; - Ok(Some(outcome.weight_used().saturating_add(T::WeightInfo::execute())).into()) - } - - /// Extoll that a particular destination can be communicated with through a particular - /// version of XCM. - /// - /// - `origin`: Must be an origin specified by AdminOrigin. - /// - `location`: The destination that is being described. - /// - `xcm_version`: The latest version of XCM that `location` supports. - #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::force_xcm_version())] - pub fn force_xcm_version( - origin: OriginFor, - location: Box, - version: XcmVersion, - ) -> DispatchResult { - T::AdminOrigin::ensure_origin(origin)?; - let location = *location; - SupportedVersion::::insert( - XCM_VERSION, - LatestVersionedMultiLocation(&location), - version, - ); - Self::deposit_event(Event::SupportedVersionChanged { location, version }); - Ok(()) - } - - /// Set a safe XCM version (the version that XCM should be encoded with if the most recent - /// version a destination can accept is unknown). - /// - /// - `origin`: Must be an origin specified by AdminOrigin. - /// - `maybe_xcm_version`: The default XCM encoding version, or `None` to disable. 
- #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::force_default_xcm_version())] - pub fn force_default_xcm_version( - origin: OriginFor, - maybe_xcm_version: Option, - ) -> DispatchResult { - T::AdminOrigin::ensure_origin(origin)?; - SafeXcmVersion::::set(maybe_xcm_version); - Ok(()) - } - - /// Ask a location to notify us regarding their XCM version and any changes to it. - /// - /// - `origin`: Must be an origin specified by AdminOrigin. - /// - `location`: The location to which we should subscribe for XCM version notifications. - #[pallet::call_index(6)] - #[pallet::weight(T::WeightInfo::force_subscribe_version_notify())] - pub fn force_subscribe_version_notify( - origin: OriginFor, - location: Box, - ) -> DispatchResult { - T::AdminOrigin::ensure_origin(origin)?; - let location: MultiLocation = - (*location).try_into().map_err(|()| Error::::BadLocation)?; - Self::request_version_notify(location).map_err(|e| { - match e { - XcmError::InvalidLocation => Error::::AlreadySubscribed, - _ => Error::::InvalidOrigin, - } - .into() - }) - } - - /// Require that a particular destination should no longer notify us regarding any XCM - /// version changes. - /// - /// - `origin`: Must be an origin specified by AdminOrigin. - /// - `location`: The location to which we are currently subscribed for XCM version - /// notifications which we no longer desire. - #[pallet::call_index(7)] - #[pallet::weight(T::WeightInfo::force_unsubscribe_version_notify())] - pub fn force_unsubscribe_version_notify( - origin: OriginFor, - location: Box, - ) -> DispatchResult { - T::AdminOrigin::ensure_origin(origin)?; - let location: MultiLocation = - (*location).try_into().map_err(|()| Error::::BadLocation)?; - Self::unrequest_version_notify(location).map_err(|e| { - match e { - XcmError::InvalidLocation => Error::::NoSubscription, - _ => Error::::InvalidOrigin, - } - .into() - }) - } - - /// Transfer some assets from the local chain to the sovereign account of a destination - /// chain and forward a notification XCM. - /// - /// Fee payment on the destination side is made from the asset in the `assets` vector of - /// index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight - /// is needed than `weight_limit`, then the operation will fail and the assets send may be - /// at risk. - /// - /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. - /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, - /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send - /// from relay to parachain. - /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will - /// generally be an `AccountId32` value. - /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the - /// fee on the `dest` side. - /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay - /// fees. - /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. 
- #[pallet::call_index(8)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - // heaviest version of locally executed XCM program: equivalent in weight to - // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_transfer_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] - pub fn limited_reserve_transfer_assets( - origin: OriginFor, - dest: Box, - beneficiary: Box, - assets: Box, - fee_asset_item: u32, - weight_limit: WeightLimit, - ) -> DispatchResult { - Self::do_reserve_transfer_assets( - origin, - dest, - beneficiary, - assets, - fee_asset_item, - weight_limit, - ) - } - - /// Teleport some assets from the local chain to some destination chain. - /// - /// Fee payment on the destination side is made from the asset in the `assets` vector of - /// index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight - /// is needed than `weight_limit`, then the operation will fail and the assets send may be - /// at risk. - /// - /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. - /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, - /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send - /// from relay to parachain. - /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will - /// generally be an `AccountId32` value. - /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to - /// pay the fee on the `dest` side. May not be empty. - /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay - /// fees. - /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. - #[pallet::call_index(9)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - let count = assets.len() as u32; - let mut message = Xcm(vec![ - WithdrawAsset(assets), - SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) }, - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] - pub fn limited_teleport_assets( - origin: OriginFor, - dest: Box, - beneficiary: Box, - assets: Box, - fee_asset_item: u32, - weight_limit: WeightLimit, - ) -> DispatchResult { - Self::do_teleport_assets( - origin, - dest, - beneficiary, - assets, - fee_asset_item, - weight_limit, - ) - } - - /// Set or unset the global suspension state of the XCM executor. - /// - /// - `origin`: Must be an origin specified by AdminOrigin. - /// - `suspended`: `true` to suspend, `false` to resume. 
- #[pallet::call_index(10)] - #[pallet::weight(T::WeightInfo::force_suspension())] - pub fn force_suspension(origin: OriginFor, suspended: bool) -> DispatchResult { - T::AdminOrigin::ensure_origin(origin)?; - XcmExecutionSuspended::::set(suspended); - Ok(()) - } - } -} - -/// The maximum number of distinct assets allowed to be transferred in a single helper extrinsic. -const MAX_ASSETS_FOR_TRANSFER: usize = 2; - -impl QueryHandler for Pallet { - type QueryId = u64; - type BlockNumber = BlockNumberFor; - type Error = XcmError; - type UniversalLocation = T::UniversalLocation; - - /// Attempt to create a new query ID and register it as a query that is yet to respond. - fn new_query( - responder: impl Into, - timeout: BlockNumberFor, - match_querier: impl Into, - ) -> Self::QueryId { - Self::do_new_query(responder, None, timeout, match_querier) - } - - /// To check the status of the query, use `fn query()` passing the resultant `QueryId` - /// value. - fn report_outcome( - message: &mut Xcm<()>, - responder: impl Into, - timeout: Self::BlockNumber, - ) -> Result { - let responder = responder.into(); - let destination = Self::UniversalLocation::get() - .invert_target(&responder) - .map_err(|()| XcmError::LocationNotInvertible)?; - let query_id = Self::new_query(responder, timeout, Here); - let response_info = QueryResponseInfo { destination, query_id, max_weight: Weight::zero() }; - let report_error = Xcm(vec![ReportError(response_info)]); - message.0.insert(0, SetAppendix(report_error)); - Ok(query_id) - } - - /// Removes response when ready and emits [Event::ResponseTaken] event. - fn take_response(query_id: Self::QueryId) -> QueryResponseStatus { - match Queries::::get(query_id) { - Some(QueryStatus::Ready { response, at }) => match response.try_into() { - Ok(response) => { - Queries::::remove(query_id); - Self::deposit_event(Event::ResponseTaken { query_id }); - QueryResponseStatus::Ready { response, at } - }, - Err(_) => QueryResponseStatus::UnexpectedVersion, - }, - Some(QueryStatus::Pending { timeout, .. }) => QueryResponseStatus::Pending { timeout }, - Some(_) => QueryResponseStatus::UnexpectedVersion, - None => QueryResponseStatus::NotFound, - } - } - - #[cfg(feature = "runtime-benchmarks")] - fn expect_response(id: Self::QueryId, response: Response) { - let response = response.into(); - Queries::::insert( - id, - QueryStatus::Ready { response, at: frame_system::Pallet::::block_number() }, - ); - } -} - -impl Pallet { - /// Validate `assets` to be reserve-transferred and return their reserve location. - fn validate_assets_and_find_reserve( - assets: &[MultiAsset], - dest: &MultiLocation, - ) -> Result> { - let mut reserve = None; - for asset in assets.iter() { - if let Fungible(x) = asset.fun { - // If fungible asset, ensure non-zero amount. - ensure!(!x.is_zero(), Error::::Empty); - } - let transfer_type = ::determine_for(&asset, dest) - .map_err(Error::::from)?; - // Ensure asset is not teleportable to `dest`. 
- ensure!(transfer_type != TransferType::Teleport, Error::::Filtered); - if let Some(reserve) = reserve.as_ref() { - // Ensure transfer for multiple assets uses same reserve location (only fee may have - // different reserve location) - ensure!(reserve == &transfer_type, Error::::TooManyReserves); - } else { - // asset reserve identified - reserve = Some(transfer_type); - } - } - reserve.ok_or(Error::::Empty) - } - - fn do_reserve_transfer_assets( - origin: OriginFor, - dest: Box, - beneficiary: Box, - assets: Box, - fee_asset_item: u32, - weight_limit: WeightLimit, - ) -> DispatchResult { - let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let dest = (*dest).try_into().map_err(|()| Error::::BadVersion)?; - let beneficiary: MultiLocation = - (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; - let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; - log::trace!( - target: "xcm::pallet_xcm::do_reserve_transfer_assets", - "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, fee-idx {:?}", - origin_location, dest, beneficiary, assets, fee_asset_item, - ); - - ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); - let value = (origin_location, assets.into_inner()); - ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); - let (origin_location, mut assets) = value; - - if fee_asset_item as usize >= assets.len() { - return Err(Error::::Empty.into()); - } - let fees = assets.swap_remove(fee_asset_item as usize); - let fees_transfer_type = - ::determine_for(&fees, &dest).map_err(Error::::from)?; - let assets_transfer_type = if assets.is_empty() { - // Single asset to transfer (one used for fees where transfer type is determined above). - ensure!(fees_transfer_type != TransferType::Teleport, Error::::Filtered); - fees_transfer_type - } else { - // Find reserve for non-fee assets. - Self::validate_assets_and_find_reserve(&assets, &dest)? - }; - - // local and remote XCM programs to potentially handle fees separately - let separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>; - if fees_transfer_type == assets_transfer_type { - // Same reserve location (fees not teleportable), we can batch together fees and assets - // in same reserve-based-transfer. - assets.push(fees.clone()); - // no need for custom fees instructions, fees are batched with assets - separate_fees_instructions = None; - } else { - // Disallow _remote reserves_ unless assets & fees have same remote reserve (covered by - // branch above). The reason for this is that we'd need to send XCMs to separate chains - // with no guarantee of delivery order on final destination; therefore we cannot - // guarantee to have fees in place on final destination chain to pay for assets - // transfer. - ensure!( - !matches!(assets_transfer_type, TransferType::RemoteReserve(_)), - Error::::InvalidAssetUnsupportedReserve - ); - let fees = fees.clone(); - let weight_limit = weight_limit.clone(); - // build fees transfer instructions to be added to assets transfers XCM programs - separate_fees_instructions = Some(match fees_transfer_type { - TransferType::LocalReserve => { - Self::local_reserve_fees_instructions(dest, fees, weight_limit)? - }, - TransferType::DestinationReserve => { - Self::destination_reserve_fees_instructions(dest, fees, weight_limit)? - }, - TransferType::Teleport => { - Self::teleport_fees_instructions(origin_location, dest, fees, weight_limit)? 
- }, - TransferType::RemoteReserve(_) => { - return Err(Error::::InvalidAssetUnsupportedReserve.into()) - }, - }); - }; - - Self::build_and_execute_xcm_transfer_type( - origin_location, - dest, - beneficiary, - assets, - assets_transfer_type, - fees, - separate_fees_instructions, - weight_limit, - ) - } - - fn do_teleport_assets( - origin: OriginFor, - dest: Box, - beneficiary: Box, - assets: Box, - fee_asset_item: u32, - weight_limit: WeightLimit, - ) -> DispatchResult { - let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let dest = (*dest).try_into().map_err(|()| Error::::BadVersion)?; - let beneficiary: MultiLocation = - (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; - let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; - - ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); - let value = (origin_location, assets.into_inner()); - ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); - let (origin_location, assets) = value; - for asset in assets.iter() { - let transfer_type = ::determine_for(asset, &dest) - .map_err(Error::::from)?; - ensure!(matches!(transfer_type, TransferType::Teleport), Error::::Filtered); - } - let fees = assets.get(fee_asset_item as usize).ok_or(Error::::Empty)?.clone(); - - Self::build_and_execute_xcm_transfer_type( - origin_location, - dest, - beneficiary, - assets, - TransferType::Teleport, - fees, - None, - weight_limit, - ) - } - - fn build_and_execute_xcm_transfer_type( - origin: MultiLocation, - dest: MultiLocation, - beneficiary: MultiLocation, - assets: Vec, - transfer_type: TransferType, - fees: MultiAsset, - separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, - weight_limit: WeightLimit, - ) -> DispatchResult { - log::trace!( - target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", - "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, transfer_type {:?}, \ - fees {:?}, fees_xcm: {:?}, weight_limit: {:?}", - origin, dest, beneficiary, assets, transfer_type, fees, separate_fees_instructions, weight_limit, - ); - let (mut local_xcm, remote_xcm) = match transfer_type { - TransferType::LocalReserve => { - let (local, remote) = Self::local_reserve_transfer_programs( - dest, - beneficiary, - assets, - fees, - separate_fees_instructions, - weight_limit, - )?; - (local, Some(remote)) - }, - TransferType::DestinationReserve => { - let (local, remote) = Self::destination_reserve_transfer_programs( - dest, - beneficiary, - assets, - fees, - separate_fees_instructions, - weight_limit, - )?; - (local, Some(remote)) - }, - TransferType::RemoteReserve(reserve) => ( - Self::remote_reserve_transfer_program( - reserve, - dest, - beneficiary, - assets, - fees, - weight_limit, - )?, - None, - ), - TransferType::Teleport => ( - Self::teleport_assets_program(dest, beneficiary, assets, fees, weight_limit)?, - None, - ), - }; - let weight = - T::Weigher::weight(&mut local_xcm).map_err(|()| Error::::UnweighableMessage)?; - let hash = local_xcm.using_encoded(sp_io::hashing::blake2_256); - let outcome = - T::XcmExecutor::execute_xcm_in_credit(origin, local_xcm, hash, weight, weight); - Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); - if let Some(remote_xcm) = remote_xcm { - outcome.ensure_complete().map_err(|_| Error::::LocalExecutionIncomplete)?; - - let (ticket, price) = validate_send::(dest, remote_xcm.clone()) - .map_err(Error::::from)?; - if origin != Here.into_location() { - Self::charge_fees(origin, price).map_err(|_| 
Error::::FeesNotMet)?; - } - let message_id = T::XcmRouter::deliver(ticket).map_err(Error::::from)?; - - let e = Event::Sent { origin, destination: dest, message: remote_xcm, message_id }; - Self::deposit_event(e); - } - Ok(()) - } - - fn local_reserve_fees_instructions( - dest: MultiLocation, - fees: MultiAsset, - weight_limit: WeightLimit, - ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { - let context = T::UniversalLocation::get(); - let reanchored_fees = fees - .clone() - .reanchored(&dest, context) - .map_err(|_| Error::::CannotReanchor)?; - - let local_execute_xcm = Xcm(vec![ - // move `fees` to `dest`s local sovereign account - TransferAsset { assets: fees.into(), beneficiary: dest }, - ]); - let xcm_on_dest = Xcm(vec![ - // let (dest) chain know `fees` are in its SA on reserve - ReserveAssetDeposited(reanchored_fees.clone().into()), - // buy exec using `fees` in holding deposited in above instruction - BuyExecution { fees: reanchored_fees, weight_limit }, - ]); - Ok((local_execute_xcm, xcm_on_dest)) - } - - fn local_reserve_transfer_programs( - dest: MultiLocation, - beneficiary: MultiLocation, - assets: Vec, - fees: MultiAsset, - separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, - weight_limit: WeightLimit, - ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { - // max assets is `assets` (+ potentially separately handled fee) - let max_assets = - assets.len() as u32 + separate_fees_instructions.as_ref().map(|_| 1).unwrap_or(0); - let assets: MultiAssets = assets.into(); - let context = T::UniversalLocation::get(); - let mut reanchored_assets = assets.clone(); - reanchored_assets - .reanchor(&dest, context) - .map_err(|_| Error::::CannotReanchor)?; - - // fees are either handled through dedicated instructions, or batched together with assets - let fees_already_handled = separate_fees_instructions.is_some(); - let (fees_local_xcm, fees_remote_xcm) = separate_fees_instructions - .map(|(local, remote)| (local.into_inner(), remote.into_inner())) - .unwrap_or_default(); - - // start off with any necessary local fees specific instructions - let mut local_execute_xcm = fees_local_xcm; - // move `assets` to `dest`s local sovereign account - local_execute_xcm.push(TransferAsset { assets, beneficiary: dest }); - - // on destination chain, start off with custom fee instructions - let mut xcm_on_dest = fees_remote_xcm; - // continue with rest of assets - xcm_on_dest.extend_from_slice(&[ - // let (dest) chain know assets are in its SA on reserve - ReserveAssetDeposited(reanchored_assets), - // following instructions are not exec'ed on behalf of origin chain anymore - ClearOrigin, - ]); - if !fees_already_handled { - // no custom fees instructions, they are batched together with `assets` transfer; - // BuyExecution happens after receiving all `assets` - let reanchored_fees = - fees.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; - // buy execution using `fees` batched together with above `reanchored_assets` - xcm_on_dest.push(BuyExecution { fees: reanchored_fees, weight_limit }); - } - // deposit all remaining assets in holding to `beneficiary` location - xcm_on_dest.push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); - - Ok((Xcm(local_execute_xcm), Xcm(xcm_on_dest))) - } - - fn destination_reserve_fees_instructions( - dest: MultiLocation, - fees: MultiAsset, - weight_limit: WeightLimit, - ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { - let context = T::UniversalLocation::get(); - let reanchored_fees = fees - .clone() - 
.reanchored(&dest, context) - .map_err(|_| Error::::CannotReanchor)?; - let fees: MultiAssets = fees.into(); - - let local_execute_xcm = Xcm(vec![ - // withdraw reserve-based fees (derivatives) - WithdrawAsset(fees.clone()), - // burn derivatives - BurnAsset(fees), - ]); - let xcm_on_dest = Xcm(vec![ - // withdraw `fees` from origin chain's sovereign account - WithdrawAsset(reanchored_fees.clone().into()), - // buy exec using `fees` in holding withdrawn in above instruction - BuyExecution { fees: reanchored_fees, weight_limit }, - ]); - Ok((local_execute_xcm, xcm_on_dest)) - } - - fn destination_reserve_transfer_programs( - dest: MultiLocation, - beneficiary: MultiLocation, - assets: Vec, - fees: MultiAsset, - separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, - weight_limit: WeightLimit, - ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { - // max assets is `assets` (+ potentially separately handled fee) - let max_assets = - assets.len() as u32 + separate_fees_instructions.as_ref().map(|_| 1).unwrap_or(0); - let assets: MultiAssets = assets.into(); - let context = T::UniversalLocation::get(); - let mut reanchored_assets = assets.clone(); - reanchored_assets - .reanchor(&dest, context) - .map_err(|_| Error::::CannotReanchor)?; - - // fees are either handled through dedicated instructions, or batched together with assets - let fees_already_handled = separate_fees_instructions.is_some(); - let (fees_local_xcm, fees_remote_xcm) = separate_fees_instructions - .map(|(local, remote)| (local.into_inner(), remote.into_inner())) - .unwrap_or_default(); - - // start off with any necessary local fees specific instructions - let mut local_execute_xcm = fees_local_xcm; - // continue with rest of assets - local_execute_xcm.extend_from_slice(&[ - // withdraw reserve-based assets - WithdrawAsset(assets.clone()), - // burn reserve-based assets - BurnAsset(assets), - ]); - - // on destination chain, start off with custom fee instructions - let mut xcm_on_dest = fees_remote_xcm; - // continue with rest of assets - xcm_on_dest.extend_from_slice(&[ - // withdraw `assets` from origin chain's sovereign account - WithdrawAsset(reanchored_assets), - // following instructions are not exec'ed on behalf of origin chain anymore - ClearOrigin, - ]); - if !fees_already_handled { - // no custom fees instructions, they are batched together with `assets` transfer; - // BuyExecution happens after receiving all `assets` - let reanchored_fees = - fees.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; - // buy execution using `fees` batched together with above `reanchored_assets` - xcm_on_dest.push(BuyExecution { fees: reanchored_fees, weight_limit }); - } - // deposit all remaining assets in holding to `beneficiary` location - xcm_on_dest.push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); - - Ok((Xcm(local_execute_xcm), Xcm(xcm_on_dest))) - } - - // function assumes fees and assets have the same remote reserve - fn remote_reserve_transfer_program( - reserve: MultiLocation, - dest: MultiLocation, - beneficiary: MultiLocation, - assets: Vec, - fees: MultiAsset, - weight_limit: WeightLimit, - ) -> Result::RuntimeCall>, Error> { - let max_assets = assets.len() as u32; - let context = T::UniversalLocation::get(); - // we spend up to half of fees for execution on reserve and other half for execution on - // destination - let (fees_half_1, fees_half_2) = Self::halve_fees(fees)?; - // identifies fee item as seen by `reserve` - to be used at reserve chain - let reserve_fees = 
fees_half_1 - .reanchored(&reserve, context) - .map_err(|_| Error::::CannotReanchor)?; - // identifies fee item as seen by `dest` - to be used at destination chain - let dest_fees = - fees_half_2.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; - // identifies `dest` as seen by `reserve` - let dest = dest.reanchored(&reserve, context).map_err(|_| Error::::CannotReanchor)?; - // xcm to be executed at dest - let xcm_on_dest = Xcm(vec![ - BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() }, - DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, - ]); - // xcm to be executed on reserve - let xcm_on_reserve = Xcm(vec![ - BuyExecution { fees: reserve_fees, weight_limit }, - DepositReserveAsset { assets: Wild(AllCounted(max_assets)), dest, xcm: xcm_on_dest }, - ]); - Ok(Xcm(vec![ - WithdrawAsset(assets.into()), - InitiateReserveWithdraw { - assets: Wild(AllCounted(max_assets)), - reserve, - xcm: xcm_on_reserve, - }, - ])) - } - - fn teleport_fees_instructions( - origin: MultiLocation, - dest: MultiLocation, - fees: MultiAsset, - weight_limit: WeightLimit, - ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { - let value = (origin, vec![fees.clone()]); - ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); - - let context = T::UniversalLocation::get(); - let reanchored_fees = fees - .clone() - .reanchored(&dest, context) - .map_err(|_| Error::::CannotReanchor)?; - - // XcmContext irrelevant in teleports checks - let dummy_context = - XcmContext { origin: None, message_id: Default::default(), topic: None }; - // We should check that the asset can actually be teleported out (for this to - // be in error, there would need to be an accounting violation by ourselves, - // so it's unlikely, but we don't want to allow that kind of bug to leak into - // a trusted chain. - ::AssetTransactor::can_check_out(&dest, &fees, &dummy_context) - .map_err(|_| Error::::CannotCheckOutTeleport)?; - ::AssetTransactor::check_out(&dest, &fees, &dummy_context); - - let fees: MultiAssets = fees.into(); - let local_execute_xcm = Xcm(vec![ - // withdraw fees - WithdrawAsset(fees.clone()), - // burn fees - BurnAsset(fees), - ]); - let xcm_on_dest = Xcm(vec![ - // (dest) chain receive teleported assets burned on origin chain - ReceiveTeleportedAsset(reanchored_fees.clone().into()), - // buy exec using `fees` in holding received in above instruction - BuyExecution { fees: reanchored_fees, weight_limit }, - ]); - Ok((local_execute_xcm, xcm_on_dest)) - } - - fn teleport_assets_program( - dest: MultiLocation, - beneficiary: MultiLocation, - assets: Vec, - mut fees: MultiAsset, - weight_limit: WeightLimit, - ) -> Result::RuntimeCall>, Error> { - let context = T::UniversalLocation::get(); - fees.reanchor(&dest, context).map_err(|_| Error::::CannotReanchor)?; - let max_assets = assets.len() as u32; - let xcm_on_dest = Xcm(vec![ - BuyExecution { fees, weight_limit }, - DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, - ]); - Ok(Xcm(vec![ - WithdrawAsset(assets.into()), - SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(max_assets)), dest, xcm: xcm_on_dest }, - ])) - } - - /// Halve `fees` fungible amount. 
- pub(crate) fn halve_fees(fees: MultiAsset) -> Result<(MultiAsset, MultiAsset), Error> { - match fees.fun { - Fungible(amount) => { - let fee1 = amount.saturating_div(2); - let fee2 = amount.saturating_sub(fee1); - ensure!(fee1 > 0, Error::::FeesNotMet); - ensure!(fee2 > 0, Error::::FeesNotMet); - Ok((MultiAsset::from((fees.id, fee1)), MultiAsset::from((fees.id, fee2)))) - }, - NonFungible(_) => Err(Error::::FeesNotMet), - } - } - - /// Will always make progress, and will do its best not to use much more than `weight_cutoff` - /// in doing so. - pub(crate) fn check_xcm_version_change( - mut stage: VersionMigrationStage, - weight_cutoff: Weight, - ) -> (Weight, Option) { - let mut weight_used = Weight::zero(); - - let sv_migrate_weight = T::WeightInfo::migrate_supported_version(); - let vn_migrate_weight = T::WeightInfo::migrate_version_notifiers(); - let vnt_already_notified_weight = T::WeightInfo::already_notified_target(); - let vnt_notify_weight = T::WeightInfo::notify_current_targets(); - let vnt_migrate_weight = T::WeightInfo::migrate_version_notify_targets(); - let vnt_migrate_fail_weight = T::WeightInfo::notify_target_migration_fail(); - let vnt_notify_migrate_weight = T::WeightInfo::migrate_and_notify_old_targets(); - - use VersionMigrationStage::*; - - if stage == MigrateSupportedVersion { - // We assume that supported XCM version only ever increases, so just cycle through lower - // XCM versioned from the current. - for v in 0..XCM_VERSION { - for (old_key, value) in SupportedVersion::::drain_prefix(v) { - if let Ok(new_key) = old_key.into_latest() { - SupportedVersion::::insert(XCM_VERSION, new_key, value); - } - weight_used.saturating_accrue(sv_migrate_weight); - if weight_used.any_gte(weight_cutoff) { - return (weight_used, Some(stage)); - } - } - } - stage = MigrateVersionNotifiers; - } - if stage == MigrateVersionNotifiers { - for v in 0..XCM_VERSION { - for (old_key, value) in VersionNotifiers::::drain_prefix(v) { - if let Ok(new_key) = old_key.into_latest() { - VersionNotifiers::::insert(XCM_VERSION, new_key, value); - } - weight_used.saturating_accrue(vn_migrate_weight); - if weight_used.any_gte(weight_cutoff) { - return (weight_used, Some(stage)); - } - } - } - stage = NotifyCurrentTargets(None); - } - - let xcm_version = T::AdvertisedXcmVersion::get(); - - if let NotifyCurrentTargets(maybe_last_raw_key) = stage { - let mut iter = match maybe_last_raw_key { - Some(k) => VersionNotifyTargets::::iter_prefix_from(XCM_VERSION, k), - None => VersionNotifyTargets::::iter_prefix(XCM_VERSION), - }; - while let Some((key, value)) = iter.next() { - let (query_id, max_weight, target_xcm_version) = value; - let new_key: MultiLocation = match key.clone().try_into() { - Ok(k) if target_xcm_version != xcm_version => k, - _ => { - // We don't early return here since we need to be certain that we - // make some progress. 
- weight_used.saturating_accrue(vnt_already_notified_weight); - continue; - }, - }; - let response = Response::Version(xcm_version); - let message = - Xcm(vec![QueryResponse { query_id, response, max_weight, querier: None }]); - let event = match send_xcm::(new_key, message) { - Ok((message_id, cost)) => { - let value = (query_id, max_weight, xcm_version); - VersionNotifyTargets::::insert(XCM_VERSION, key, value); - Event::VersionChangeNotified { - destination: new_key, - result: xcm_version, - cost, - message_id, - } - }, - Err(e) => { - VersionNotifyTargets::::remove(XCM_VERSION, key); - Event::NotifyTargetSendFail { location: new_key, query_id, error: e.into() } - }, - }; - Self::deposit_event(event); - weight_used.saturating_accrue(vnt_notify_weight); - if weight_used.any_gte(weight_cutoff) { - let last = Some(iter.last_raw_key().into()); - return (weight_used, Some(NotifyCurrentTargets(last))); - } - } - stage = MigrateAndNotifyOldTargets; - } - if stage == MigrateAndNotifyOldTargets { - for v in 0..XCM_VERSION { - for (old_key, value) in VersionNotifyTargets::::drain_prefix(v) { - let (query_id, max_weight, target_xcm_version) = value; - let new_key = match MultiLocation::try_from(old_key.clone()) { - Ok(k) => k, - Err(()) => { - Self::deposit_event(Event::NotifyTargetMigrationFail { - location: old_key, - query_id: value.0, - }); - weight_used.saturating_accrue(vnt_migrate_fail_weight); - if weight_used.any_gte(weight_cutoff) { - return (weight_used, Some(stage)); - } - continue; - }, - }; - - let versioned_key = LatestVersionedMultiLocation(&new_key); - if target_xcm_version == xcm_version { - VersionNotifyTargets::::insert(XCM_VERSION, versioned_key, value); - weight_used.saturating_accrue(vnt_migrate_weight); - } else { - // Need to notify target. - let response = Response::Version(xcm_version); - let message = Xcm(vec![QueryResponse { - query_id, - response, - max_weight, - querier: None, - }]); - let event = match send_xcm::(new_key, message) { - Ok((message_id, cost)) => { - VersionNotifyTargets::::insert( - XCM_VERSION, - versioned_key, - (query_id, max_weight, xcm_version), - ); - Event::VersionChangeNotified { - destination: new_key, - result: xcm_version, - cost, - message_id, - } - }, - Err(e) => Event::NotifyTargetSendFail { - location: new_key, - query_id, - error: e.into(), - }, - }; - Self::deposit_event(event); - weight_used.saturating_accrue(vnt_notify_migrate_weight); - } - if weight_used.any_gte(weight_cutoff) { - return (weight_used, Some(stage)); - } - } - } - } - (weight_used, None) - } - - /// Request that `dest` informs us of its version. - pub fn request_version_notify(dest: impl Into) -> XcmResult { - let dest = dest.into(); - let versioned_dest = VersionedMultiLocation::from(dest); - let already = VersionNotifiers::::contains_key(XCM_VERSION, &versioned_dest); - ensure!(!already, XcmError::InvalidLocation); - let query_id = QueryCounter::::mutate(|q| { - let r = *q; - q.saturating_inc(); - r - }); - // TODO #3735: Correct weight. 
- let instruction = SubscribeVersion { query_id, max_response_weight: Weight::zero() }; - let (message_id, cost) = send_xcm::(dest, Xcm(vec![instruction]))?; - Self::deposit_event(Event::VersionNotifyRequested { destination: dest, cost, message_id }); - VersionNotifiers::::insert(XCM_VERSION, &versioned_dest, query_id); - let query_status = - QueryStatus::VersionNotifier { origin: versioned_dest, is_active: false }; - Queries::::insert(query_id, query_status); - Ok(()) - } - - /// Request that `dest` ceases informing us of its version. - pub fn unrequest_version_notify(dest: impl Into) -> XcmResult { - let dest = dest.into(); - let versioned_dest = LatestVersionedMultiLocation(&dest); - let query_id = VersionNotifiers::::take(XCM_VERSION, versioned_dest) - .ok_or(XcmError::InvalidLocation)?; - let (message_id, cost) = send_xcm::(dest, Xcm(vec![UnsubscribeVersion]))?; - Self::deposit_event(Event::VersionNotifyUnrequested { - destination: dest, - cost, - message_id, - }); - Queries::::remove(query_id); - Ok(()) - } - - /// Relay an XCM `message` from a given `interior` location in this context to a given `dest` - /// location. The `fee_payer` is charged for the delivery unless `None` in which case fees - /// are not charged (and instead borne by the chain). - pub fn send_xcm( - interior: impl Into, - dest: impl Into, - mut message: Xcm<()>, - ) -> Result { - let interior = interior.into(); - let dest = dest.into(); - let maybe_fee_payer = if interior != Junctions::Here { - message.0.insert(0, DescendOrigin(interior)); - Some(interior.into()) - } else { - None - }; - log::debug!(target: "xcm::send_xcm", "dest: {:?}, message: {:?}", &dest, &message); - let (ticket, price) = validate_send::(dest, message)?; - if let Some(fee_payer) = maybe_fee_payer { - Self::charge_fees(fee_payer, price).map_err(|_| SendError::Fees)?; - } - T::XcmRouter::deliver(ticket) - } - - pub fn check_account() -> T::AccountId { - const ID: PalletId = PalletId(*b"py/xcmch"); - AccountIdConversion::::into_account_truncating(&ID) - } - - /// Create a new expectation of a query response with the querier being here. - fn do_new_query( - responder: impl Into, - maybe_notify: Option<(u8, u8)>, - timeout: BlockNumberFor, - match_querier: impl Into, - ) -> u64 { - QueryCounter::::mutate(|q| { - let r = *q; - q.saturating_inc(); - Queries::::insert( - r, - QueryStatus::Pending { - responder: responder.into().into(), - maybe_match_querier: Some(match_querier.into().into()), - maybe_notify, - timeout, - }, - ); - r - }) - } - - /// Consume `message` and return another which is equivalent to it except that it reports - /// back the outcome and dispatches `notify` on this chain. - /// - /// - `message`: The message whose outcome should be reported. - /// - `responder`: The origin from which a response should be expected. - /// - `notify`: A dispatchable function which will be called once the outcome of `message` is - /// known. It may be a dispatchable in any pallet of the local chain, but other than the usual - /// origin, it must accept exactly two arguments: `query_id: QueryId` and `outcome: Response`, - /// and in that order. It should expect that the origin is `Origin::Response` and will contain - /// the responder's location. - /// - `timeout`: The block number after which it is permissible for `notify` not to be called - /// even if a response is received. - /// - /// `report_outcome_notify` may return an error if the `responder` is not invertible. - /// - /// It is assumed that the querier of the response will be `Here`. 
- /// - /// NOTE: `notify` gets called as part of handling an incoming message, so it should be - /// lightweight. Its weight is estimated during this function and stored ready for - /// weighing `ReportOutcome` on the way back. If it turns out to be heavier once it returns - /// then reporting the outcome will fail. Futhermore if the estimate is too high, then it - /// may be put in the overweight queue and need to be manually executed. - pub fn report_outcome_notify( - message: &mut Xcm<()>, - responder: impl Into, - notify: impl Into<::RuntimeCall>, - timeout: BlockNumberFor, - ) -> Result<(), XcmError> { - let responder = responder.into(); - let destination = T::UniversalLocation::get() - .invert_target(&responder) - .map_err(|()| XcmError::LocationNotInvertible)?; - let notify: ::RuntimeCall = notify.into(); - let max_weight = notify.get_dispatch_info().weight; - let query_id = Self::new_notify_query(responder, notify, timeout, Here); - let response_info = QueryResponseInfo { destination, query_id, max_weight }; - let report_error = Xcm(vec![ReportError(response_info)]); - message.0.insert(0, SetAppendix(report_error)); - Ok(()) - } - - /// Attempt to create a new query ID and register it as a query that is yet to respond, and - /// which will call a dispatchable when a response happens. - pub fn new_notify_query( - responder: impl Into, - notify: impl Into<::RuntimeCall>, - timeout: BlockNumberFor, - match_querier: impl Into, - ) -> u64 { - let notify = notify.into().using_encoded(|mut bytes| Decode::decode(&mut bytes)).expect( - "decode input is output of Call encode; Call guaranteed to have two enums; qed", - ); - Self::do_new_query(responder, Some(notify), timeout, match_querier) - } - - /// Note that a particular destination to whom we would like to send a message is unknown - /// and queue it for version discovery. - fn note_unknown_version(dest: &MultiLocation) { - log::trace!( - target: "xcm::pallet_xcm::note_unknown_version", - "XCM version is unknown for destination: {:?}", - dest, - ); - let versioned_dest = VersionedMultiLocation::from(*dest); - VersionDiscoveryQueue::::mutate(|q| { - if let Some(index) = q.iter().position(|i| &i.0 == &versioned_dest) { - // exists - just bump the count. - q[index].1.saturating_inc(); - } else { - let _ = q.try_push((versioned_dest, 1)); - } - }); - } - - /// Withdraw given `assets` from the given `location` and pay as XCM fees. - /// - /// Fails if: - /// - the `assets` are not known on this chain; - /// - the `assets` cannot be withdrawn with that location as the Origin. 
- fn charge_fees(location: MultiLocation, assets: MultiAssets) -> DispatchResult { - T::XcmExecutor::charge_fees(location, assets.clone()) - .map_err(|_| Error::::FeesNotMet)?; - Self::deposit_event(Event::FeesPaid { paying: location, fees: assets }); - Ok(()) - } -} - -pub struct LockTicket { - sovereign_account: T::AccountId, - amount: BalanceOf, - unlocker: MultiLocation, - item_index: Option, -} - -impl xcm_executor::traits::Enact for LockTicket { - fn enact(self) -> Result<(), xcm_executor::traits::LockError> { - use xcm_executor::traits::LockError::UnexpectedState; - let mut locks = LockedFungibles::::get(&self.sovereign_account).unwrap_or_default(); - match self.item_index { - Some(index) => { - ensure!(locks.len() > index, UnexpectedState); - ensure!(locks[index].1.try_as::<_>() == Ok(&self.unlocker), UnexpectedState); - locks[index].0 = locks[index].0.max(self.amount); - }, - None => { - locks - .try_push((self.amount, self.unlocker.into())) - .map_err(|(_balance, _location)| UnexpectedState)?; - }, - } - LockedFungibles::::insert(&self.sovereign_account, locks); - T::Currency::extend_lock( - *b"py/xcmlk", - &self.sovereign_account, - self.amount, - WithdrawReasons::all(), - ); - Ok(()) - } -} - -pub struct UnlockTicket { - sovereign_account: T::AccountId, - amount: BalanceOf, - unlocker: MultiLocation, -} - -impl xcm_executor::traits::Enact for UnlockTicket { - fn enact(self) -> Result<(), xcm_executor::traits::LockError> { - use xcm_executor::traits::LockError::UnexpectedState; - let mut locks = - LockedFungibles::::get(&self.sovereign_account).ok_or(UnexpectedState)?; - let mut maybe_remove_index = None; - let mut locked = BalanceOf::::zero(); - let mut found = false; - // We could just as well do with with an into_iter, filter_map and collect, however this way - // avoids making an allocation. 
- for (i, x) in locks.iter_mut().enumerate() { - if x.1.try_as::<_>().defensive() == Ok(&self.unlocker) { - x.0 = x.0.saturating_sub(self.amount); - if x.0.is_zero() { - maybe_remove_index = Some(i); - } - found = true; - } - locked = locked.max(x.0); - } - ensure!(found, UnexpectedState); - if let Some(remove_index) = maybe_remove_index { - locks.swap_remove(remove_index); - } - LockedFungibles::::insert(&self.sovereign_account, locks); - let reasons = WithdrawReasons::all(); - T::Currency::set_lock(*b"py/xcmlk", &self.sovereign_account, locked, reasons); - Ok(()) - } -} - -pub struct ReduceTicket { - key: (u32, T::AccountId, VersionedAssetId), - amount: u128, - locker: VersionedMultiLocation, - owner: VersionedMultiLocation, -} - -impl xcm_executor::traits::Enact for ReduceTicket { - fn enact(self) -> Result<(), xcm_executor::traits::LockError> { - use xcm_executor::traits::LockError::UnexpectedState; - let mut record = RemoteLockedFungibles::::get(&self.key).ok_or(UnexpectedState)?; - ensure!(self.locker == record.locker && self.owner == record.owner, UnexpectedState); - let new_amount = record.amount.checked_sub(self.amount).ok_or(UnexpectedState)?; - ensure!(record.amount_held().map_or(true, |h| new_amount >= h), UnexpectedState); - if new_amount == 0 { - RemoteLockedFungibles::::remove(&self.key); - } else { - record.amount = new_amount; - RemoteLockedFungibles::::insert(&self.key, &record); - } - Ok(()) - } -} - -impl xcm_executor::traits::AssetLock for Pallet { - type LockTicket = LockTicket; - type UnlockTicket = UnlockTicket; - type ReduceTicket = ReduceTicket; - - fn prepare_lock( - unlocker: MultiLocation, - asset: MultiAsset, - owner: MultiLocation, - ) -> Result, xcm_executor::traits::LockError> { - use xcm_executor::traits::LockError::*; - let sovereign_account = T::SovereignAccountOf::convert_location(&owner).ok_or(BadOwner)?; - let amount = T::CurrencyMatcher::matches_fungible(&asset).ok_or(UnknownAsset)?; - ensure!(T::Currency::free_balance(&sovereign_account) >= amount, AssetNotOwned); - let locks = LockedFungibles::::get(&sovereign_account).unwrap_or_default(); - let item_index = locks.iter().position(|x| x.1.try_as::<_>() == Ok(&unlocker)); - ensure!(item_index.is_some() || locks.len() < T::MaxLockers::get() as usize, NoResources); - Ok(LockTicket { sovereign_account, amount, unlocker, item_index }) - } - - fn prepare_unlock( - unlocker: MultiLocation, - asset: MultiAsset, - owner: MultiLocation, - ) -> Result, xcm_executor::traits::LockError> { - use xcm_executor::traits::LockError::*; - let sovereign_account = T::SovereignAccountOf::convert_location(&owner).ok_or(BadOwner)?; - let amount = T::CurrencyMatcher::matches_fungible(&asset).ok_or(UnknownAsset)?; - ensure!(T::Currency::free_balance(&sovereign_account) >= amount, AssetNotOwned); - let locks = LockedFungibles::::get(&sovereign_account).unwrap_or_default(); - let item_index = - locks.iter().position(|x| x.1.try_as::<_>() == Ok(&unlocker)).ok_or(NotLocked)?; - ensure!(locks[item_index].0 >= amount, NotLocked); - Ok(UnlockTicket { sovereign_account, amount, unlocker }) - } - - fn note_unlockable( - locker: MultiLocation, - asset: MultiAsset, - mut owner: MultiLocation, - ) -> Result<(), xcm_executor::traits::LockError> { - use xcm_executor::traits::LockError::*; - ensure!(T::TrustedLockers::contains(&locker, &asset), NotTrusted); - let amount = match asset.fun { - Fungible(a) => a, - NonFungible(_) => return Err(Unimplemented), - }; - owner.remove_network_id(); - let account = 
T::SovereignAccountOf::convert_location(&owner).ok_or(BadOwner)?; - let locker = locker.into(); - let owner = owner.into(); - let id: VersionedAssetId = asset.id.into(); - let key = (XCM_VERSION, account, id); - let mut record = - RemoteLockedFungibleRecord { amount, owner, locker, consumers: BoundedVec::default() }; - if let Some(old) = RemoteLockedFungibles::::get(&key) { - // Make sure that the new record wouldn't clobber any old data. - ensure!(old.locker == record.locker && old.owner == record.owner, WouldClobber); - record.consumers = old.consumers; - record.amount = record.amount.max(old.amount); - } - RemoteLockedFungibles::::insert(&key, record); - Ok(()) - } - - fn prepare_reduce_unlockable( - locker: MultiLocation, - asset: MultiAsset, - mut owner: MultiLocation, - ) -> Result { - use xcm_executor::traits::LockError::*; - let amount = match asset.fun { - Fungible(a) => a, - NonFungible(_) => return Err(Unimplemented), - }; - owner.remove_network_id(); - let sovereign_account = T::SovereignAccountOf::convert_location(&owner).ok_or(BadOwner)?; - let locker = locker.into(); - let owner = owner.into(); - let id: VersionedAssetId = asset.id.into(); - let key = (XCM_VERSION, sovereign_account, id); - - let record = RemoteLockedFungibles::::get(&key).ok_or(NotLocked)?; - // Make sure that the record contains what we expect and there's enough to unlock. - ensure!(locker == record.locker && owner == record.owner, WouldClobber); - ensure!(record.amount >= amount, NotEnoughLocked); - ensure!( - record.amount_held().map_or(true, |h| record.amount.saturating_sub(amount) >= h), - InUse - ); - Ok(ReduceTicket { key, amount, locker, owner }) - } -} - -impl WrapVersion for Pallet { - fn wrap_version( - dest: &MultiLocation, - xcm: impl Into>, - ) -> Result, ()> { - SupportedVersion::::get(XCM_VERSION, LatestVersionedMultiLocation(dest)) - .or_else(|| { - Self::note_unknown_version(dest); - SafeXcmVersion::::get() - }) - .ok_or_else(|| { - log::trace!( - target: "xcm::pallet_xcm::wrap_version", - "Could not determine a version to wrap XCM for destination: {:?}", - dest, - ); - () - }) - .and_then(|v| xcm.into().into_version(v.min(XCM_VERSION))) - } -} - -impl VersionChangeNotifier for Pallet { - /// Start notifying `location` should the XCM version of this chain change. - /// - /// When it does, this type should ensure a `QueryResponse` message is sent with the given - /// `query_id` & `max_weight` and with a `response` of `Response::Version`. This should happen - /// until/unless `stop` is called with the correct `query_id`. - /// - /// If the `location` has an ongoing notification and when this function is called, then an - /// error should be returned. 
- fn start( - dest: &MultiLocation, - query_id: QueryId, - max_weight: Weight, - _context: &XcmContext, - ) -> XcmResult { - let versioned_dest = LatestVersionedMultiLocation(dest); - let already = VersionNotifyTargets::::contains_key(XCM_VERSION, versioned_dest); - ensure!(!already, XcmError::InvalidLocation); - - let xcm_version = T::AdvertisedXcmVersion::get(); - let response = Response::Version(xcm_version); - let instruction = QueryResponse { query_id, response, max_weight, querier: None }; - let (message_id, cost) = send_xcm::(*dest, Xcm(vec![instruction]))?; - Self::deposit_event(Event::::VersionNotifyStarted { - destination: *dest, - cost, - message_id, - }); - - let value = (query_id, max_weight, xcm_version); - VersionNotifyTargets::::insert(XCM_VERSION, versioned_dest, value); - Ok(()) - } - - /// Stop notifying `location` should the XCM change. This is a no-op if there was never a - /// subscription. - fn stop(dest: &MultiLocation, _context: &XcmContext) -> XcmResult { - VersionNotifyTargets::::remove(XCM_VERSION, LatestVersionedMultiLocation(dest)); - Ok(()) - } - - /// Return true if a location is subscribed to XCM version changes. - fn is_subscribed(dest: &MultiLocation) -> bool { - let versioned_dest = LatestVersionedMultiLocation(dest); - VersionNotifyTargets::::contains_key(XCM_VERSION, versioned_dest) - } -} - -impl DropAssets for Pallet { - fn drop_assets(origin: &MultiLocation, assets: Assets, _context: &XcmContext) -> Weight { - if assets.is_empty() { - return Weight::zero(); - } - let versioned = VersionedMultiAssets::from(MultiAssets::from(assets)); - let hash = BlakeTwo256::hash_of(&(&origin, &versioned)); - AssetTraps::::mutate(hash, |n| *n += 1); - Self::deposit_event(Event::AssetsTrapped { hash, origin: *origin, assets: versioned }); - // TODO #3735: Put the real weight in there. - Weight::zero() - } -} - -impl ClaimAssets for Pallet { - fn claim_assets( - origin: &MultiLocation, - ticket: &MultiLocation, - assets: &MultiAssets, - _context: &XcmContext, - ) -> bool { - let mut versioned = VersionedMultiAssets::from(assets.clone()); - match (ticket.parents, &ticket.interior) { - (0, X1(GeneralIndex(i))) => { - versioned = match versioned.into_version(*i as u32) { - Ok(v) => v, - Err(()) => return false, - } - }, - (0, Here) => (), - _ => return false, - }; - let hash = BlakeTwo256::hash_of(&(origin, versioned.clone())); - match AssetTraps::::get(hash) { - 0 => return false, - 1 => AssetTraps::::remove(hash), - n => AssetTraps::::insert(hash, n - 1), - } - Self::deposit_event(Event::AssetsClaimed { hash, origin: *origin, assets: versioned }); - return true; - } -} - -impl OnResponse for Pallet { - fn expecting_response( - origin: &MultiLocation, - query_id: QueryId, - querier: Option<&MultiLocation>, - ) -> bool { - match Queries::::get(query_id) { - Some(QueryStatus::Pending { responder, maybe_match_querier, .. }) => { - MultiLocation::try_from(responder).map_or(false, |r| origin == &r) - && maybe_match_querier.map_or(true, |match_querier| { - MultiLocation::try_from(match_querier).map_or(false, |match_querier| { - querier.map_or(false, |q| q == &match_querier) - }) - }) - }, - Some(QueryStatus::VersionNotifier { origin: r, .. 
}) => { - MultiLocation::try_from(r).map_or(false, |r| origin == &r) - }, - _ => false, - } - } - - fn on_response( - origin: &MultiLocation, - query_id: QueryId, - querier: Option<&MultiLocation>, - response: Response, - max_weight: Weight, - _context: &XcmContext, - ) -> Weight { - let origin = *origin; - match (response, Queries::::get(query_id)) { - ( - Response::Version(v), - Some(QueryStatus::VersionNotifier { origin: expected_origin, is_active }), - ) => { - let origin: MultiLocation = match expected_origin.try_into() { - Ok(o) if o == origin => o, - Ok(o) => { - Self::deposit_event(Event::InvalidResponder { - origin, - query_id, - expected_location: Some(o), - }); - return Weight::zero(); - }, - _ => { - Self::deposit_event(Event::InvalidResponder { - origin, - query_id, - expected_location: None, - }); - // TODO #3735: Correct weight for this. - return Weight::zero(); - }, - }; - // TODO #3735: Check max_weight is correct. - if !is_active { - Queries::::insert( - query_id, - QueryStatus::VersionNotifier { origin: origin.into(), is_active: true }, - ); - } - // We're being notified of a version change. - SupportedVersion::::insert( - XCM_VERSION, - LatestVersionedMultiLocation(&origin), - v, - ); - Self::deposit_event(Event::SupportedVersionChanged { - location: origin, - version: v, - }); - Weight::zero() - }, - ( - response, - Some(QueryStatus::Pending { responder, maybe_notify, maybe_match_querier, .. }), - ) => { - if let Some(match_querier) = maybe_match_querier { - let match_querier = match MultiLocation::try_from(match_querier) { - Ok(mq) => mq, - Err(_) => { - Self::deposit_event(Event::InvalidQuerierVersion { origin, query_id }); - return Weight::zero(); - }, - }; - if querier.map_or(true, |q| q != &match_querier) { - Self::deposit_event(Event::InvalidQuerier { - origin, - query_id, - expected_querier: match_querier, - maybe_actual_querier: querier.cloned(), - }); - return Weight::zero(); - } - } - let responder = match MultiLocation::try_from(responder) { - Ok(r) => r, - Err(_) => { - Self::deposit_event(Event::InvalidResponderVersion { origin, query_id }); - return Weight::zero(); - }, - }; - if origin != responder { - Self::deposit_event(Event::InvalidResponder { - origin, - query_id, - expected_location: Some(responder), - }); - return Weight::zero(); - } - return match maybe_notify { - Some((pallet_index, call_index)) => { - // This is a bit horrible, but we happen to know that the `Call` will - // be built by `(pallet_index: u8, call_index: u8, QueryId, Response)`. - // So we just encode that and then re-encode to a real Call. - let bare = (pallet_index, call_index, query_id, response); - if let Ok(call) = bare.using_encoded(|mut bytes| { - ::RuntimeCall::decode(&mut bytes) - }) { - Queries::::remove(query_id); - let weight = call.get_dispatch_info().weight; - if weight.any_gt(max_weight) { - let e = Event::NotifyOverweight { - query_id, - pallet_index, - call_index, - actual_weight: weight, - max_budgeted_weight: max_weight, - }; - Self::deposit_event(e); - return Weight::zero(); - } - let dispatch_origin = Origin::Response(origin).into(); - match call.dispatch(dispatch_origin) { - Ok(post_info) => { - let e = Event::Notified { query_id, pallet_index, call_index }; - Self::deposit_event(e); - post_info.actual_weight - }, - Err(error_and_info) => { - let e = Event::NotifyDispatchError { - query_id, - pallet_index, - call_index, - }; - Self::deposit_event(e); - // Not much to do with the result as it is. 
It's up to the - // parachain to ensure that the message makes sense. - error_and_info.post_info.actual_weight - }, - } - .unwrap_or(weight) - } else { - let e = - Event::NotifyDecodeFailed { query_id, pallet_index, call_index }; - Self::deposit_event(e); - Weight::zero() - } - }, - None => { - let e = Event::ResponseReady { query_id, response: response.clone() }; - Self::deposit_event(e); - let at = frame_system::Pallet::::current_block_number(); - let response = response.into(); - Queries::::insert(query_id, QueryStatus::Ready { response, at }); - Weight::zero() - }, - }; - }, - _ => { - let e = Event::UnexpectedResponse { origin, query_id }; - Self::deposit_event(e); - Weight::zero() - }, - } - } -} - -impl CheckSuspension for Pallet { - fn is_suspended( - _origin: &MultiLocation, - _instructions: &mut [Instruction], - _max_weight: Weight, - _properties: &mut Properties, - ) -> bool { - XcmExecutionSuspended::::get() - } -} - -/// Ensure that the origin `o` represents an XCM (`Transact`) origin. -/// -/// Returns `Ok` with the location of the XCM sender or an `Err` otherwise. -pub fn ensure_xcm(o: OuterOrigin) -> Result -where - OuterOrigin: Into>, -{ - match o.into() { - Ok(Origin::Xcm(location)) => Ok(location), - _ => Err(BadOrigin), - } -} - -/// Ensure that the origin `o` represents an XCM response origin. -/// -/// Returns `Ok` with the location of the responder or an `Err` otherwise. -pub fn ensure_response(o: OuterOrigin) -> Result -where - OuterOrigin: Into>, -{ - match o.into() { - Ok(Origin::Response(location)) => Ok(location), - _ => Err(BadOrigin), - } -} - -/// Filter for `MultiLocation` to find those which represent a strict majority approval of an -/// identified plurality. -/// -/// May reasonably be used with `EnsureXcm`. -pub struct IsMajorityOfBody(PhantomData<(Prefix, Body)>); -impl, Body: Get> Contains - for IsMajorityOfBody -{ - fn contains(l: &MultiLocation) -> bool { - let maybe_suffix = l.match_and_split(&Prefix::get()); - matches!(maybe_suffix, Some(Plurality { id, part }) if id == &Body::get() && part.is_majority()) - } -} - -/// Filter for `MultiLocation` to find those which represent a voice of an identified plurality. -/// -/// May reasonably be used with `EnsureXcm`. -pub struct IsVoiceOfBody(PhantomData<(Prefix, Body)>); -impl, Body: Get> Contains - for IsVoiceOfBody -{ - fn contains(l: &MultiLocation) -> bool { - let maybe_suffix = l.match_and_split(&Prefix::get()); - matches!(maybe_suffix, Some(Plurality { id, part }) if id == &Body::get() && part == &BodyPart::Voice) - } -} - -/// `EnsureOrigin` implementation succeeding with a `MultiLocation` value to recognize and filter -/// the `Origin::Xcm` item. -pub struct EnsureXcm(PhantomData); -impl, F: Contains> EnsureOrigin for EnsureXcm -where - O::PalletsOrigin: From + TryInto, -{ - type Success = MultiLocation; - - fn try_origin(outer: O) -> Result { - outer.try_with_caller(|caller| { - caller.try_into().and_then(|o| match o { - Origin::Xcm(location) if F::contains(&location) => Ok(location), - Origin::Xcm(location) => Err(Origin::Xcm(location).into()), - o => Err(o.into()), - }) - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Ok(O::from(Origin::Xcm(Here.into()))) - } -} - -/// `EnsureOrigin` implementation succeeding with a `MultiLocation` value to recognize and filter -/// the `Origin::Response` item. 
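A framework-free sketch of what `EnsureXcm` and `ensure_xcm` reduce to: accept an origin only when it is the XCM variant and the embedded location passes a filter, otherwise hand the origin back. The `Origin` enum and string locations below are illustrative stand-ins for the pallet's origin and `MultiLocation` types.

    // Illustrative origin type; not the pallet's real `Origin`.
    #[derive(Debug, PartialEq)]
    enum Origin {
        Xcm(String),      // stand-in for Origin::Xcm(MultiLocation)
        Response(String), // stand-in for Origin::Response(MultiLocation)
        Signed(String),
    }

    // Succeed with the location only for a filtered XCM origin.
    fn ensure_xcm<F: Fn(&str) -> bool>(origin: Origin, filter: F) -> Result<String, Origin> {
        match origin {
            Origin::Xcm(location) if filter(&location) => Ok(location),
            other => Err(other),
        }
    }

    fn main() {
        let is_parent = |l: &str| l == "parent";
        assert_eq!(ensure_xcm(Origin::Xcm("parent".into()), is_parent), Ok("parent".into()));
        assert!(ensure_xcm(Origin::Xcm("sibling".into()), is_parent).is_err());
        assert!(ensure_xcm(Origin::Signed("alice".into()), is_parent).is_err());
    }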
-pub struct EnsureResponse(PhantomData); -impl, F: Contains> EnsureOrigin - for EnsureResponse -where - O::PalletsOrigin: From + TryInto, -{ - type Success = MultiLocation; - - fn try_origin(outer: O) -> Result { - outer.try_with_caller(|caller| { - caller.try_into().and_then(|o| match o { - Origin::Response(responder) => Ok(responder), - o => Err(o.into()), - }) - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Ok(O::from(Origin::Response(Here.into()))) - } -} - -/// A simple passthrough where we reuse the `MultiLocation`-typed XCM origin as the inner value of -/// this crate's `Origin::Xcm` value. -pub struct XcmPassthrough(PhantomData); -impl> ConvertOrigin - for XcmPassthrough -{ - fn convert_origin( - origin: impl Into, - kind: OriginKind, - ) -> Result { - let origin = origin.into(); - match kind { - OriginKind::Xcm => Ok(crate::Origin::Xcm(origin).into()), - _ => Err(origin), - } - } -} diff --git a/pallets/pallet-xcm/src/migration.rs b/pallets/pallet-xcm/src/migration.rs deleted file mode 100644 index faaa4a4..0000000 --- a/pallets/pallet-xcm/src/migration.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -use crate::{Config, Pallet, VersionNotifyTargets}; -use frame_support::{ - pallet_prelude::*, - traits::{OnRuntimeUpgrade, StorageVersion}, - weights::Weight, -}; - -const DEFAULT_PROOF_SIZE: u64 = 64 * 1024; - -pub mod v1 { - use super::*; - use crate::{CurrentMigration, VersionMigrationStage}; - - /// Named with the 'VersionUnchecked'-prefix because although this implements some version - /// checking, the version checking is not complete as it will begin failing after the upgrade is - /// enacted on-chain. - /// - /// Use experimental [`MigrateToV1`] instead. - pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { - fn on_runtime_upgrade() -> Weight { - let mut weight = T::DbWeight::get().reads(1); - - if StorageVersion::get::>() != 0 { - log::warn!("skipping v1, should be removed"); - return weight; - } - - weight.saturating_accrue(T::DbWeight::get().writes(1)); - CurrentMigration::::put(VersionMigrationStage::default()); - - let translate = |pre: (u64, u64, u32)| -> Option<(u64, Weight, u32)> { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - let translated = (pre.0, Weight::from_parts(pre.1, DEFAULT_PROOF_SIZE), pre.2); - log::info!("Migrated VersionNotifyTarget {:?} to {:?}", pre, translated); - Some(translated) - }; - - VersionNotifyTargets::::translate_values(translate); - - log::info!("v1 applied successfully"); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - StorageVersion::new(1).put::>(); - weight - } - } - - /// Version checked migration to v1. 
- /// - /// Wrapped in [`frame_support::migrations::VersionedMigration`] so the pre/post checks don't - /// begin failing after the upgrade is enacted on-chain. - pub type MigrateToV1 = frame_support::migrations::VersionedMigration< - 0, - 1, - VersionUncheckedMigrateToV1, - crate::pallet::Pallet, - ::DbWeight, - >; -} diff --git a/pallets/pallet-xcm/src/migrations.rs b/pallets/pallet-xcm/src/migrations.rs deleted file mode 100644 index 86d8672..0000000 --- a/pallets/pallet-xcm/src/migrations.rs +++ /dev/null @@ -1,349 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//modified by Alex Wang 2023/11 - -use frame_support::{ - traits::{GetStorageVersion, NoStorageVersionSet, PalletInfoAccess, StorageVersion}, - weights::{RuntimeDbWeight, Weight}, -}; -use impl_trait_for_tuples::impl_for_tuples; -use sp_core::Get; -use sp_io::{hashing::twox_128, storage::clear_prefix, KillStorageResult}; -use sp_std::marker::PhantomData; - -/// Handles storage migration pallet versioning. -/// -/// [`VersionedMigration`] allows developers to write migrations without worrying about checking and -/// setting storage versions. Instead, the developer wraps their migration in this struct which -/// takes care of version handling using best practices. -/// -/// It takes 5 type parameters: -/// - `From`: The version being upgraded from. -/// - `To`: The version being upgraded to. -/// - `Inner`: An implementation of `OnRuntimeUpgrade`. -/// - `Pallet`: The Pallet being upgraded. -/// - `Weight`: The runtime's RuntimeDbWeight implementation. -/// -/// When a [`VersionedMigration`] `on_runtime_upgrade`, `pre_upgrade`, or `post_upgrade` method is -/// called, the on-chain version of the pallet is compared to `From`. If they match, the `Inner` -/// equivalent is called and the pallets on-chain version is set to `To` after the migration. -/// Otherwise, a warning is logged notifying the developer that the upgrade was a noop and should -/// probably be removed. -/// -/// ### Examples -/// ```ignore -/// // In file defining migrations -/// pub struct VersionUncheckedMigrateV5ToV6(sp_std::marker::PhantomData); -/// impl OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { -/// // OnRuntimeUpgrade implementation... -/// } -/// -/// pub type MigrateV5ToV6 = -/// VersionedMigration< -/// 5, -/// 6, -/// VersionUncheckedMigrateV5ToV6, -/// crate::pallet::Pallet, -/// ::DbWeight -/// >; -/// -/// // Migrations tuple to pass to the Executive pallet: -/// pub type Migrations = ( -/// // other migrations... -/// MigrateV5ToV6, -/// // other migrations... -/// ); -/// ``` -pub struct VersionedMigration { - _marker: PhantomData<(Inner, Pallet, Weight)>, -} - -/// A helper enum to wrap the pre_upgrade bytes like an Option before passing them to post_upgrade. -/// This enum is used rather than an Option to make the API clearer to the developer. 
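A minimal sketch of the version-gated migration pattern documented above, with an illustrative in-memory `Storage` in place of pallet storage: the inner migration runs only when the stored version equals `FROM`, the version is then bumped to `TO`, and re-running the migration is a no-op.

    // Illustrative stand-in for a pallet's on-chain storage and version.
    struct Storage {
        version: u16,
        value: u64,
    }

    // The "inner" migration: translate the stored value to its new meaning.
    fn migrate_v0_to_v1(s: &mut Storage) {
        s.value *= 1_000;
    }

    // Run `inner` only if the current version matches FROM, then bump to TO.
    fn versioned_migration<const FROM: u16, const TO: u16>(
        s: &mut Storage,
        inner: impl Fn(&mut Storage),
    ) {
        if s.version == FROM {
            inner(s);
            s.version = TO;
        } else {
            eprintln!("migration {}->{} skipped: on-chain version is {}", FROM, TO, s.version);
        }
    }

    fn main() {
        let mut s = Storage { version: 0, value: 7 };
        versioned_migration::<0, 1>(&mut s, migrate_v0_to_v1);
        assert_eq!((s.version, s.value), (1, 7_000));
        // Running the same migration again is a no-op.
        versioned_migration::<0, 1>(&mut s, migrate_v0_to_v1);
        assert_eq!((s.version, s.value), (1, 7_000));
    }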
-#[derive(codec::Encode, codec::Decode)] -pub enum VersionedPostUpgradeData { - /// The migration ran, inner vec contains pre_upgrade data. - MigrationExecuted(sp_std::vec::Vec), - /// This migration is a noop, do not run post_upgrade checks. - Noop, -} - -/// Implementation of the `OnRuntimeUpgrade` trait for `VersionedMigration`. -/// -/// Its main function is to perform the runtime upgrade in `on_runtime_upgrade` only if the on-chain -/// version of the pallets storage matches `From`, and after the upgrade set the on-chain storage to -/// `To`. If the versions do not match, it writes a log notifying the developer that the migration -/// is a noop. -impl< - const FROM: u16, - const TO: u16, - Inner: crate::traits::OnRuntimeUpgrade, - Pallet: GetStorageVersion + PalletInfoAccess, - DbWeight: Get, - > crate::traits::OnRuntimeUpgrade for VersionedMigration -{ - /// Executes pre_upgrade if the migration will run, and wraps the pre_upgrade bytes in - /// [`VersionedPostUpgradeData`] before passing them to post_upgrade, so it knows whether the - /// migration ran or not. - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - use codec::Encode; - let on_chain_version = Pallet::on_chain_storage_version(); - if on_chain_version == FROM { - Ok(VersionedPostUpgradeData::MigrationExecuted(Inner::pre_upgrade()?).encode()) - } else { - Ok(VersionedPostUpgradeData::Noop.encode()) - } - } - - /// Executes the versioned runtime upgrade. - /// - /// First checks if the pallets on-chain storage version matches the version of this upgrade. If - /// it matches, it calls `Inner::on_runtime_upgrade`, updates the on-chain version, and returns - /// the weight. If it does not match, it writes a log notifying the developer that the migration - /// is a noop. - fn on_runtime_upgrade() -> Weight { - let on_chain_version = Pallet::on_chain_storage_version(); - if on_chain_version == FROM { - log::info!( - "🚚 Pallet {:?} VersionedMigration migrating storage version from {:?} to {:?}.", - Pallet::name(), - FROM, - TO - ); - - // Execute the migration - let weight = Inner::on_runtime_upgrade(); - - // Update the on-chain version - StorageVersion::new(TO).put::(); - - weight.saturating_add(DbWeight::get().reads_writes(1, 1)) - } else { - log::warn!( - "🚚 Pallet {:?} VersionedMigration migration {}->{} can be removed; on-chain is already at {:?}.", - Pallet::name(), - FROM, - TO, - on_chain_version - ); - DbWeight::get().reads(1) - } - } - - /// Executes `Inner::post_upgrade` if the migration just ran. - /// - /// pre_upgrade passes [`VersionedPostUpgradeData::MigrationExecuted`] to post_upgrade if - /// the migration ran, and [`VersionedPostUpgradeData::Noop`] otherwise. - #[cfg(feature = "try-runtime")] - fn post_upgrade( - versioned_post_upgrade_data_bytes: sp_std::vec::Vec, - ) -> Result<(), sp_runtime::TryRuntimeError> { - use codec::DecodeAll; - match ::decode_all(&mut &versioned_post_upgrade_data_bytes[..]) - .map_err(|_| "VersionedMigration post_upgrade failed to decode PreUpgradeData")? - { - VersionedPostUpgradeData::MigrationExecuted(inner_bytes) => - Inner::post_upgrade(inner_bytes), - VersionedPostUpgradeData::Noop => Ok(()), - } - } -} - -/// Can store the current pallet version in storage. -pub trait StoreCurrentStorageVersion { - /// Write the current storage version to the storage. 
- fn store_current_storage_version(); -} - -impl + PalletInfoAccess> - StoreCurrentStorageVersion for StorageVersion -{ - fn store_current_storage_version() { - let version = ::current_storage_version(); - version.put::(); - } -} - -impl + PalletInfoAccess> - StoreCurrentStorageVersion for NoStorageVersionSet -{ - fn store_current_storage_version() { - StorageVersion::default().put::(); - } -} - -/// Trait used by [`migrate_from_pallet_version_to_storage_version`] to do the actual migration. -pub trait PalletVersionToStorageVersionHelper { - fn migrate(db_weight: &RuntimeDbWeight) -> Weight; -} - -impl PalletVersionToStorageVersionHelper for T -where - T::CurrentStorageVersion: StoreCurrentStorageVersion, -{ - fn migrate(db_weight: &RuntimeDbWeight) -> Weight { - const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; - - fn pallet_version_key(name: &str) -> [u8; 32] { - crate::storage::storage_prefix(name.as_bytes(), PALLET_VERSION_STORAGE_KEY_POSTFIX) - } - - sp_io::storage::clear(&pallet_version_key(::name())); - - >::store_current_storage_version( - ); - - db_weight.writes(2) - } -} - -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] -impl PalletVersionToStorageVersionHelper for T { - fn migrate(db_weight: &RuntimeDbWeight) -> Weight { - let mut weight = Weight::zero(); - - for_tuples!( #( weight = weight.saturating_add(T::migrate(db_weight)); )* ); - - weight - } -} - -/// Migrate from the `PalletVersion` struct to the new -/// [`StorageVersion`](crate::traits::StorageVersion) struct. -/// -/// This will remove all `PalletVersion's` from the state and insert the current storage version. -pub fn migrate_from_pallet_version_to_storage_version< - Pallets: PalletVersionToStorageVersionHelper, ->( - db_weight: &RuntimeDbWeight, -) -> Weight { - Pallets::migrate(db_weight) -} - -/// `RemovePallet` is a utility struct used to remove all storage items associated with a specific -/// pallet. -/// -/// This struct is generic over two parameters: -/// - `P` is a type that implements the `Get` trait for a static string, representing the pallet's -/// name. -/// - `DbWeight` is a type that implements the `Get` trait for `RuntimeDbWeight`, providing the -/// weight for database operations. -/// -/// On runtime upgrade, the `on_runtime_upgrade` function will clear all storage items associated -/// with the specified pallet, logging the number of keys removed. If the `try-runtime` feature is -/// enabled, the `pre_upgrade` and `post_upgrade` functions can be used to verify the storage -/// removal before and after the upgrade. -/// -/// # Examples: -/// ```ignore -/// construct_runtime! { -/// pub enum Runtime -/// { -/// System: frame_system::{Pallet, Call, Storage, Config, Event} = 0, -/// -/// SomePalletToRemove: pallet_something::{Pallet, Call, Storage, Event} = 1, -/// AnotherPalletToRemove: pallet_something_else::{Pallet, Call, Storage, Event} = 2, -/// -/// YourOtherPallets... -/// } -/// }; -/// -/// parameter_types! { -/// pub const SomePalletToRemoveStr: &'static str = "SomePalletToRemove"; -/// pub const AnotherPalletToRemoveStr: &'static str = "AnotherPalletToRemove"; -/// } -/// -/// pub type Migrations = ( -/// RemovePallet, -/// RemovePallet, -/// AnyOtherMigrations... 
-/// ); -/// -/// pub type Executive = frame_executive::Executive< -/// Runtime, -/// Block, -/// frame_system::ChainContext, -/// Runtime, -/// Migrations -/// >; -/// ``` -/// -/// WARNING: `RemovePallet` has no guard rails preventing it from bricking the chain if the -/// operation of removing storage for the given pallet would exceed the block weight limit. -/// -/// If your pallet has too many keys to be removed in a single block, it is advised to wait for -/// a multi-block scheduler currently under development which will allow for removal of storage -/// items (and performing other heavy migrations) over multiple blocks -/// (see ). -pub struct RemovePallet, DbWeight: Get>( - PhantomData<(P, DbWeight)>, -); -impl, DbWeight: Get> frame_support::traits::OnRuntimeUpgrade - for RemovePallet -{ - fn on_runtime_upgrade() -> frame_support::weights::Weight { - let hashed_prefix = twox_128(P::get().as_bytes()); - let keys_removed = match clear_prefix(&hashed_prefix, None) { - KillStorageResult::AllRemoved(value) => value, - KillStorageResult::SomeRemaining(value) => { - log::error!( - "`clear_prefix` failed to remove all keys for {}. THIS SHOULD NEVER HAPPEN! 🚨", - P::get() - ); - value - }, - } as u64; - - log::info!("Removed {} {} keys 🧹", keys_removed, P::get()); - - DbWeight::get().reads_writes(keys_removed + 1, keys_removed) - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - use crate::storage::unhashed::contains_prefixed_key; - - let hashed_prefix = twox_128(P::get().as_bytes()); - match contains_prefixed_key(&hashed_prefix) { - true => log::info!("Found {} keys pre-removal 👀", P::get()), - false => log::warn!( - "Migration RemovePallet<{}> can be removed (no keys found pre-removal).", - P::get() - ), - }; - Ok(sp_std::vec::Vec::new()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: sp_std::vec::Vec) -> Result<(), sp_runtime::TryRuntimeError> { - use crate::storage::unhashed::contains_prefixed_key; - - let hashed_prefix = twox_128(P::get().as_bytes()); - match contains_prefixed_key(&hashed_prefix) { - true => { - log::error!("{} has keys remaining post-removal ❗", P::get()); - return Err("Keys remaining post-removal, this should never happen 🚨".into()) - }, - false => log::info!("No {} keys found post-removal 🎉", P::get()), - }; - Ok(()) - } -} diff --git a/pallets/pallet-xcm/src/mock.rs b/pallets/pallet-xcm/src/mock.rs deleted file mode 100644 index abea481..0000000 --- a/pallets/pallet-xcm/src/mock.rs +++ /dev/null @@ -1,644 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . 
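A framework-free sketch of the idea behind `RemovePallet`: every storage key of a pallet shares a fixed prefix, so removing the pallet means deleting every key under that prefix (the real code derives the prefix with `twox_128` over the pallet name and calls `clear_prefix`). The `BTreeMap` below is an illustrative stand-in for the storage trie.

    use std::collections::BTreeMap;

    // Delete every key that starts with `prefix`; return how many were removed.
    fn remove_prefix(store: &mut BTreeMap<Vec<u8>, Vec<u8>>, prefix: &[u8]) -> usize {
        let doomed: Vec<Vec<u8>> = store
            .keys()
            .filter(|k| k.starts_with(prefix))
            .cloned()
            .collect();
        for k in &doomed {
            store.remove(k);
        }
        doomed.len()
    }

    fn main() {
        let mut store = BTreeMap::new();
        store.insert(b"SomePallet::Items::1".to_vec(), b"a".to_vec());
        store.insert(b"SomePallet::Items::2".to_vec(), b"b".to_vec());
        store.insert(b"System::Number".to_vec(), b"c".to_vec());
        let removed = remove_prefix(&mut store, b"SomePallet::");
        assert_eq!(removed, 2);
        assert_eq!(store.len(), 1); // unrelated pallets keep their keys
    }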
- -// Modified by Alex Wang 2023/11 - -use codec::Encode; -use frame_support::{ - construct_runtime, match_types, parameter_types, - traits::{ - AsEnsureOriginWithArg, ConstU128, ConstU32, Contains, Everything, EverythingBut, Get, - Nothing, - }, - weights::Weight, -}; -use frame_system::EnsureRoot; -use polkadot_parachain_primitives::primitives::Id as ParaId; -use polkadot_runtime_parachains::origin; -use sp_core::H256; -use sp_runtime::{traits::IdentityLookup, AccountId32, BuildStorage}; -pub use sp_std::{ - cell::RefCell, collections::btree_map::BTreeMap, fmt::Debug, marker::PhantomData, -}; -use xcm::prelude::*; -use xcm_builder::{ - AccountId32Aliases, - AllowKnownQueryResponses, - AllowSubscriptionsFrom, - AllowTopLevelPaidExecutionFrom, - Case, - ChildParachainAsNative, - ChildParachainConvertsVia, - ChildSystemParachainAsSuperuser, - CurrencyAdapter as XcmCurrencyAdapter, - DescribeAllTerminal, - FixedRateOfFungible, - FixedWeightBounds, - FungiblesAdapter, - HashedDescription, - IsConcrete, - MatchedConvertedConcreteId, - NoChecking, - SignedAccountId32AsNative, - SignedToAccountId32, - SovereignSignedViaLocation, - TakeWeightCredit, //XcmFeeManagerFromComponents, XcmFeeToAccount, -}; -use xcm_executor::{ - traits::{Identity, JustTry}, - XcmExecutor, -}; - -use crate::{self as pallet_xcm, TestWeightInfo}; - -pub type AccountId = AccountId32; -pub type Balance = u128; -type Block = frame_system::mocking::MockBlock; - -/// An implementation of [`Contains`] which contains only equal members to `T`. -pub struct Equals(PhantomData); -impl> Contains for Equals { - fn contains(t: &X) -> bool { - t == &T::get() - } -} - -#[frame_support::pallet] -pub mod pallet_test_notifier { - use crate::{ensure_response, QueryId}; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - use sp_runtime::DispatchResult; - use xcm::latest::prelude::*; - use xcm_executor::traits::QueryHandler; - - #[pallet::pallet] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config + crate::Config { - type RuntimeEvent: IsType<::RuntimeEvent> + From>; - type RuntimeOrigin: IsType<::RuntimeOrigin> - + Into::RuntimeOrigin>>; - type RuntimeCall: IsType<::RuntimeCall> + From>; - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - QueryPrepared(QueryId), - NotifyQueryPrepared(QueryId), - ResponseReceived(MultiLocation, QueryId, Response), - } - - #[pallet::error] - pub enum Error { - UnexpectedId, - BadAccountFormat, - } - - #[pallet::call] - impl Pallet { - #[pallet::call_index(0)] - #[pallet::weight(Weight::from_parts(1_000_000, 1_000_000))] - pub fn prepare_new_query(origin: OriginFor, querier: MultiLocation) -> DispatchResult { - let who = ensure_signed(origin)?; - let id = who - .using_encoded(|mut d| <[u8; 32]>::decode(&mut d)) - .map_err(|_| Error::::BadAccountFormat)?; - let qid = as QueryHandler>::new_query( - Junction::AccountId32 { network: None, id }, - 100u32.into(), - querier, - ); - Self::deposit_event(Event::::QueryPrepared(qid)); - Ok(()) - } - - #[pallet::call_index(1)] - #[pallet::weight(Weight::from_parts(1_000_000, 1_000_000))] - pub fn prepare_new_notify_query( - origin: OriginFor, - querier: MultiLocation, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - let id = who - .using_encoded(|mut d| <[u8; 32]>::decode(&mut d)) - .map_err(|_| Error::::BadAccountFormat)?; - let call = - Call::::notification_received { query_id: 0, response: Default::default() }; - let qid = 
crate::Pallet::::new_notify_query( - Junction::AccountId32 { network: None, id }, - ::RuntimeCall::from(call), - 100u32.into(), - querier, - ); - Self::deposit_event(Event::::NotifyQueryPrepared(qid)); - Ok(()) - } - - #[pallet::call_index(2)] - #[pallet::weight(Weight::from_parts(1_000_000, 1_000_000))] - pub fn notification_received( - origin: OriginFor, - query_id: QueryId, - response: Response, - ) -> DispatchResult { - let responder = ensure_response(::RuntimeOrigin::from(origin))?; - Self::deposit_event(Event::::ResponseReceived(responder, query_id, response)); - Ok(()) - } - } -} - -construct_runtime!( - pub enum Test - { - System: frame_system::{Pallet, Call, Storage, Config, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Assets: pallet_assets::{Pallet, Call, Storage, Config, Event}, - ParasOrigin: origin::{Pallet, Origin}, - XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config}, - TestNotifier: pallet_test_notifier::{Pallet, Call, Event}, - } -); - -thread_local! { - pub static SENT_XCM: RefCell)>> = RefCell::new(Vec::new()); -} -pub(crate) fn sent_xcm() -> Vec<(MultiLocation, Xcm<()>)> { - SENT_XCM.with(|q| (*q.borrow()).clone()) -} -pub(crate) fn take_sent_xcm() -> Vec<(MultiLocation, Xcm<()>)> { - SENT_XCM.with(|q| { - let mut r = Vec::new(); - std::mem::swap(&mut r, &mut *q.borrow_mut()); - r - }) -} -/// Sender that never returns error. -pub struct TestSendXcm; -impl SendXcm for TestSendXcm { - type Ticket = (MultiLocation, Xcm<()>); - fn validate( - dest: &mut Option, - msg: &mut Option>, - ) -> SendResult<(MultiLocation, Xcm<()>)> { - let pair = (dest.take().unwrap(), msg.take().unwrap()); - Ok((pair, MultiAssets::new())) - } - fn deliver(pair: (MultiLocation, Xcm<()>)) -> Result { - let hash = fake_message_hash(&pair.1); - SENT_XCM.with(|q| q.borrow_mut().push(pair)); - Ok(hash) - } -} -/// Sender that returns error if `X8` junction and stops routing -pub struct TestSendXcmErrX8; -impl SendXcm for TestSendXcmErrX8 { - type Ticket = (MultiLocation, Xcm<()>); - fn validate( - dest: &mut Option, - _: &mut Option>, - ) -> SendResult<(MultiLocation, Xcm<()>)> { - if dest.as_ref().unwrap().len() == 8 { - dest.take(); - Err(SendError::Transport("Destination location full")) - } else { - Err(SendError::NotApplicable) - } - } - fn deliver(pair: (MultiLocation, Xcm<()>)) -> Result { - let hash = fake_message_hash(&pair.1); - SENT_XCM.with(|q| q.borrow_mut().push(pair)); - Ok(hash) - } -} - -parameter_types! { - pub Para3000: u32 = 3000; - pub Para3000Location: MultiLocation = Parachain(Para3000::get()).into(); - pub Para3000PaymentAmount: u128 = 1; - pub Para3000PaymentMultiAssets: MultiAssets = MultiAssets::from(MultiAsset::from((Here, Para3000PaymentAmount::get()))); -} -/// Sender only sends to `Parachain(3000)` destination requiring payment. 
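A minimal sketch of the mock-sender pattern used in this test setup: sending is split into a fallible `validate` step and a `deliver` step, and delivered messages are pushed into a thread-local queue so tests can later assert on exactly what was sent. The names and the string message type are illustrative, not the mock's real `SendXcm` types.

    use std::cell::RefCell;

    thread_local! {
        // Recorded (destination, message) pairs, inspected by tests.
        static SENT: RefCell<Vec<(String, String)>> = RefCell::new(Vec::new());
    }

    // Fallible routing/validation step: decide whether this sender applies.
    fn validate(dest: &str, msg: &str) -> Result<(String, String), &'static str> {
        if dest.is_empty() {
            return Err("NotApplicable");
        }
        Ok((dest.to_string(), msg.to_string()))
    }

    // Delivery step: record the already-validated ticket.
    fn deliver(ticket: (String, String)) {
        SENT.with(|q| q.borrow_mut().push(ticket));
    }

    // Test helper mirroring `sent_xcm()`: snapshot of everything delivered.
    fn sent() -> Vec<(String, String)> {
        SENT.with(|q| q.borrow().clone())
    }

    fn main() {
        let ticket = validate("Parachain(3000)", "TransferAsset").expect("routable");
        deliver(ticket);
        assert_eq!(sent(), vec![("Parachain(3000)".to_string(), "TransferAsset".to_string())]);
    }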
-pub struct TestPaidForPara3000SendXcm; -impl SendXcm for TestPaidForPara3000SendXcm { - type Ticket = (MultiLocation, Xcm<()>); - fn validate( - dest: &mut Option, - msg: &mut Option>, - ) -> SendResult<(MultiLocation, Xcm<()>)> { - if let Some(dest) = dest.as_ref() { - if !dest.eq(&Para3000Location::get()) { - return Err(SendError::NotApplicable); - } - } else { - return Err(SendError::NotApplicable); - } - - let pair = (dest.take().unwrap(), msg.take().unwrap()); - Ok((pair, Para3000PaymentMultiAssets::get())) - } - fn deliver(pair: (MultiLocation, Xcm<()>)) -> Result { - let hash = fake_message_hash(&pair.1); - SENT_XCM.with(|q| q.borrow_mut().push(pair)); - Ok(hash) - } -} - -parameter_types! { - pub const BlockHashCount: u64 = 250; -} - -impl frame_system::Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type BlockWeights = (); - type BlockLength = (); - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = Everything; - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -parameter_types! { - pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; -} - -impl pallet_balances::Config for Test { - type MaxLocks = MaxLocks; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - //type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; -} - -#[cfg(feature = "runtime-benchmarks")] -/// Simple conversion of `u32` into an `AssetId` for use in benchmarking. -pub struct XcmBenchmarkHelper; -#[cfg(feature = "runtime-benchmarks")] -impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { - fn create_asset_id_parameter(id: u32) -> MultiLocation { - MultiLocation { parents: 1, interior: X1(Parachain(id)) } - } -} - -impl pallet_assets::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type AssetId = MultiLocation; - type AssetIdParameter = MultiLocation; - type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = EnsureRoot; - type AssetDeposit = ConstU128<1>; - type AssetAccountDeposit = ConstU128<10>; - type MetadataDepositBase = ConstU128<1>; - type MetadataDepositPerByte = ConstU128<1>; - type ApprovalDeposit = ConstU128<1>; - type StringLimit = ConstU32<50>; - type Freezer = (); - type WeightInfo = (); - type CallbackHandle = (); - type Extra = (); - type RemoveItemsLimit = ConstU32<5>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = XcmBenchmarkHelper; -} - -// This child parachain is a system parachain trusted to teleport native token. -pub const SOME_SYSTEM_PARA: u32 = 1001; - -// This child parachain acts as trusted reserve for its assets in tests. -// USDT allowed to teleport to/from here. 
-pub const FOREIGN_ASSET_RESERVE_PARA_ID: u32 = 2001; -// Inner junction of reserve asset on `FOREIGN_ASSET_RESERVE_PARA_ID`. -pub const FOREIGN_ASSET_INNER_JUNCTION: Junction = GeneralIndex(1234567); - -// This child parachain acts as trusted reserve for say.. USDC that can be used for fees. -pub const USDC_RESERVE_PARA_ID: u32 = 2002; -// Inner junction of reserve asset on `USDC_RESERVE_PARA_ID`. -pub const USDC_INNER_JUNCTION: Junction = PalletInstance(42); - -// This child parachain is a trusted teleporter for say.. USDT (T from Teleport :)). -// We'll use USDT in tests that teleport fees. -pub const USDT_PARA_ID: u32 = 2003; - -// This child parachain is not configured as trusted reserve or teleport location for any assets. -pub const OTHER_PARA_ID: u32 = 2009; - -// This child parachain is used for filtered/disallowed assets. -pub const FILTERED_PARA_ID: u32 = 2010; - -parameter_types! { - pub const RelayLocation: MultiLocation = Here.into_location(); - pub const NativeAsset: MultiAsset = MultiAsset { - fun: Fungible(10), - id: Concrete(Here.into_location()), - }; - pub const SystemParachainLocation: MultiLocation = MultiLocation { - parents: 0, - interior: X1(Parachain(SOME_SYSTEM_PARA)) - }; - pub const ForeignReserveLocation: MultiLocation = MultiLocation { - parents: 0, - interior: X1(Parachain(FOREIGN_ASSET_RESERVE_PARA_ID)) - }; - pub const ForeignAsset: MultiAsset = MultiAsset { - fun: Fungible(10), - id: Concrete(MultiLocation { - parents: 0, - interior: X2(Parachain(FOREIGN_ASSET_RESERVE_PARA_ID), FOREIGN_ASSET_INNER_JUNCTION), - }), - }; - pub const UsdcReserveLocation: MultiLocation = MultiLocation { - parents: 0, - interior: X1(Parachain(USDC_RESERVE_PARA_ID)) - }; - pub const Usdc: MultiAsset = MultiAsset { - fun: Fungible(10), - id: Concrete(MultiLocation { - parents: 0, - interior: X2(Parachain(USDC_RESERVE_PARA_ID), USDC_INNER_JUNCTION), - }), - }; - pub const UsdtTeleportLocation: MultiLocation = MultiLocation { - parents: 0, - interior: X1(Parachain(USDT_PARA_ID)) - }; - pub const Usdt: MultiAsset = MultiAsset { - fun: Fungible(10), - id: Concrete(MultiLocation { - parents: 0, - interior: X1(Parachain(USDT_PARA_ID)), - }), - }; - pub const FilteredTeleportLocation: MultiLocation = MultiLocation { - parents: 0, - interior: X1(Parachain(FILTERED_PARA_ID)) - }; - pub const FilteredTeleportAsset: MultiAsset = MultiAsset { - fun: Fungible(10), - id: Concrete(MultiLocation { - parents: 0, - interior: X1(Parachain(FILTERED_PARA_ID)), - }), - }; - pub const AnyNetwork: Option = None; - pub UniversalLocation: InteriorMultiLocation = Here; - pub UnitWeightCost: u64 = 1_000; - pub CheckingAccount: AccountId = XcmPallet::check_account(); -} - -pub type SovereignAccountOf = ( - ChildParachainConvertsVia, - AccountId32Aliases, - HashedDescription, -); - -pub type ForeignAssetsConvertedConcreteId = MatchedConvertedConcreteId< - MultiLocation, - Balance, - // Excludes relay/parent chain currency - EverythingBut<(Equals,)>, - Identity, - JustTry, ->; - -pub type AssetTransactors = ( - XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>, - FungiblesAdapter< - Assets, - ForeignAssetsConvertedConcreteId, - SovereignAccountOf, - AccountId, - NoChecking, - CheckingAccount, - >, -); - -type LocalOriginConverter = ( - SovereignSignedViaLocation, - ChildParachainAsNative, - SignedAccountId32AsNative, - ChildSystemParachainAsSuperuser, -); - -parameter_types! 
{ - pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); - pub CurrencyPerSecondPerByte: (AssetId, u128, u128) = (Concrete(RelayLocation::get()), 1, 1); - pub TrustedLocal: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); - pub TrustedSystemPara: (MultiAssetFilter, MultiLocation) = (NativeAsset::get().into(), SystemParachainLocation::get()); - pub TrustedUsdt: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), UsdtTeleportLocation::get()); - pub TrustedFilteredTeleport: (MultiAssetFilter, MultiLocation) = (FilteredTeleportAsset::get().into(), FilteredTeleportLocation::get()); - pub TeleportUsdtToForeign: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), ForeignReserveLocation::get()); - pub TrustedForeign: (MultiAssetFilter, MultiLocation) = (ForeignAsset::get().into(), ForeignReserveLocation::get()); - pub TrustedUsdc: (MultiAssetFilter, MultiLocation) = (Usdc::get().into(), UsdcReserveLocation::get()); - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; - pub XcmFeesTargetAccount: AccountId = AccountId::new([167u8; 32]); -} - -pub const XCM_FEES_NOT_WAIVED_USER_ACCOUNT: [u8; 32] = [37u8; 32]; -match_types! { - pub type XcmFeesNotWaivedLocations: impl Contains = { - MultiLocation { parents: 0, interior: X1(Junction::AccountId32 {network: None, id: XCM_FEES_NOT_WAIVED_USER_ACCOUNT})} - }; -} - -pub type Barrier = ( - TakeWeightCredit, - AllowTopLevelPaidExecutionFrom, - AllowKnownQueryResponses, - AllowSubscriptionsFrom, -); - -pub type XcmRouter = (TestPaidForPara3000SendXcm, TestSendXcmErrX8, TestSendXcm); - -pub struct XcmConfig; -impl xcm_executor::Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - type AssetTransactor = AssetTransactors; - type OriginConverter = LocalOriginConverter; - type IsReserve = (Case, Case); - type IsTeleporter = ( - Case, - Case, - Case, - Case, - Case, - ); - type UniversalLocation = UniversalLocation; - type Barrier = Barrier; - type Weigher = FixedWeightBounds; - type Trader = FixedRateOfFungible; - type ResponseHandler = XcmPallet; - type AssetTrap = XcmPallet; - type AssetLocker = (); - type AssetExchanger = (); - type AssetClaims = XcmPallet; - type SubscriptionService = XcmPallet; - type PalletInstancesInfo = AllPalletsWithSystem; - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type FeeManager = (); - //type FeeManager = XcmFeeManagerFromComponents< - // EverythingBut, - // XcmFeeToAccount, - //>; - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = RuntimeCall; - type SafeCallFilter = Everything; - type Aliasers = Nothing; -} - -pub type LocalOriginToLocation = SignedToAccountId32; - -parameter_types! 
{ - pub static AdvertisedXcmVersion: pallet_xcm::XcmVersion = 3; -} - -pub struct XcmTeleportFiltered; -impl Contains<(MultiLocation, Vec)> for XcmTeleportFiltered { - fn contains(t: &(MultiLocation, Vec)) -> bool { - let filtered = FilteredTeleportAsset::get(); - t.1.iter().any(|asset| asset == &filtered) - } -} - -impl pallet_xcm::Config for Test { - type RuntimeEvent = RuntimeEvent; - type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; - type XcmRouter = XcmRouter; - type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; - type XcmExecuteFilter = Everything; - type XcmExecutorConfig = XcmConfig; - type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = EverythingBut; - type XcmReserveTransferFilter = Everything; - type Weigher = FixedWeightBounds; - type UniversalLocation = UniversalLocation; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = AdvertisedXcmVersion; - type AdminOrigin = EnsureRoot; - type TrustedLockers = (); - type SovereignAccountOf = AccountId32Aliases<(), AccountId32>; - type Currency = Balances; - type CurrencyMatcher = IsConcrete; - type MaxLockers = frame_support::traits::ConstU32<8>; - type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; - type RemoteLockConsumerIdentifier = (); - type WeightInfo = TestWeightInfo; -} - -impl origin::Config for Test {} - -impl pallet_test_notifier::Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; -} - -#[cfg(feature = "runtime-benchmarks")] -impl super::benchmarking::Config for Test { - fn reachable_dest() -> Option { - Some(Parachain(1000).into()) - } - - fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - Some((NativeAsset::get(), SystemParachainLocation::get())) - } - - fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Here.into_location()) }, - Parachain(OTHER_PARA_ID).into(), - )) - } -} - -pub(crate) fn last_event() -> RuntimeEvent { - System::events().pop().expect("RuntimeEvent expected").event -} - -pub(crate) fn last_events(n: usize) -> Vec { - System::events().into_iter().map(|e| e.event).rev().take(n).rev().collect() -} - -pub(crate) fn buy_execution(fees: impl Into) -> Instruction { - use xcm::latest::prelude::*; - BuyExecution { fees: fees.into(), weight_limit: Unlimited } -} - -pub(crate) fn buy_limited_execution( - fees: impl Into, - weight_limit: WeightLimit, -) -> Instruction { - use xcm::latest::prelude::*; - BuyExecution { fees: fees.into(), weight_limit } -} - -pub(crate) fn new_test_ext_with_balances( - balances: Vec<(AccountId, Balance)>, -) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - - pallet_balances::GenesisConfig:: { balances } - .assimilate_storage(&mut t) - .unwrap(); - - pallet_xcm::GenesisConfig:: { safe_xcm_version: Some(2), ..Default::default() } - .assimilate_storage(&mut t) - .unwrap(); - - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext -} - -pub(crate) fn fake_message_hash(message: &Xcm) -> XcmHash { - message.using_encoded(sp_io::hashing::blake2_256) -} diff --git a/pallets/pallet-xcm/src/tests/assets_transfer.rs b/pallets/pallet-xcm/src/tests/assets_transfer.rs deleted file mode 100644 index 5101b4b..0000000 --- a/pallets/pallet-xcm/src/tests/assets_transfer.rs +++ 
/dev/null @@ -1,1381 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -// Modified by Alex Wang 2023/11 - -#![cfg(test)] - -use crate::{ - mock::*, - tests::{ALICE, BOB, FEE_AMOUNT, INITIAL_BALANCE, SEND_AMOUNT}, -}; -use frame_support::{ - assert_ok, - traits::{tokens::fungibles::Inspect, Currency}, - weights::Weight, -}; -use polkadot_parachain_primitives::primitives::Id as ParaId; -use sp_runtime::{traits::AccountIdConversion, DispatchError, ModuleError}; -use xcm::prelude::*; -use xcm_executor::traits::ConvertLocation; - -// Helper function to deduplicate testing different teleport types. -fn do_test_and_verify_teleport_assets( - expected_beneficiary: MultiLocation, - call: Call, - expected_weight_limit: WeightLimit, -) { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - // call extrinsic - call(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_limited_execution((Here, SEND_AMOUNT), expected_weight_limit), - DepositAsset { - assets: AllCounted(1).into(), - beneficiary: expected_beneficiary - }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. -#[test] -fn teleport_assets_works() { - let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - do_test_and_verify_teleport_assets( - beneficiary, - || { - assert_ok!(XcmPallet::teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(beneficiary.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - }, - Unlimited, - ); -} - -/// Test `limited_teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. 
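A framework-free sketch of the accounting these teleport tests assert on: a teleport burns the amount on the sending chain and expects the destination to mint it, so locally only the sender's balance and total issuance drop while the outgoing message carries the full amount. `Chain` is an illustrative stand-in for the mock runtime above.

    use std::collections::HashMap;

    // Illustrative single-chain view of balances and issuance.
    struct Chain {
        balances: HashMap<&'static str, u128>,
        total_issuance: u128,
    }

    impl Chain {
        // Burn locally; the returned amount is what the outgoing
        // `ReceiveTeleportedAsset` would instruct the destination to mint.
        fn teleport_out(&mut self, who: &'static str, amount: u128) -> Result<u128, &'static str> {
            let bal = self.balances.get_mut(who).ok_or("unknown account")?;
            if *bal < amount {
                return Err("insufficient balance");
            }
            *bal -= amount;
            self.total_issuance -= amount; // burned here, minted at destination
            Ok(amount)
        }
    }

    fn main() {
        let mut chain = Chain { balances: HashMap::from([("alice", 1_000u128)]), total_issuance: 1_000 };
        let sent = chain.teleport_out("alice", 100).unwrap();
        assert_eq!(sent, 100);
        assert_eq!(chain.balances["alice"], 900);
        assert_eq!(chain.total_issuance, 900);
    }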
-#[test] -fn limited_teleport_assets_works() { - let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); - let expected_weight_limit = weight_limit.clone(); - do_test_and_verify_teleport_assets( - beneficiary, - || { - assert_ok!(XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(beneficiary.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - weight_limit, - )); - }, - expected_weight_limit, - ); -} - -/// `limited_teleport_assets` should fail for filtered assets -#[test] -fn limited_teleport_filtered_assets_disallowed() { - let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - new_test_ext_with_balances(vec![(ALICE, INITIAL_BALANCE)]).execute_with(|| { - let result = XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(FilteredTeleportLocation::get().into()), - Box::new(beneficiary.into()), - Box::new(FilteredTeleportAsset::get().into()), - 0, - Unlimited, - ); - assert_eq!( - result, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered") - })) - ); - }); -} - -fn set_up_foreign_asset( - reserve_para_id: u32, - inner_junction: Option, - initial_amount: u128, - is_sufficient: bool, -) -> (MultiLocation, AccountId, MultiLocation) { - let reserve_location = - RelayLocation::get().pushed_with_interior(Parachain(reserve_para_id)).unwrap(); - let reserve_sovereign_account = - SovereignAccountOf::convert_location(&reserve_location).unwrap(); - - let foreign_asset_id_multilocation = if let Some(junction) = inner_junction { - reserve_location.pushed_with_interior(junction).unwrap() - } else { - reserve_location - }; - - // create sufficient (to be used as fees as well) foreign asset (0 total issuance) - assert_ok!(Assets::force_create( - RuntimeOrigin::root(), - foreign_asset_id_multilocation, - BOB, - is_sufficient, - 1 - )); - // this asset should have been teleported/reserve-transferred in, but for this test we just - // mint it locally. - assert_ok!(Assets::mint( - RuntimeOrigin::signed(BOB), - foreign_asset_id_multilocation, - ALICE, - initial_amount - )); - - (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) -} - -// Helper function that provides correct `fee_index` after `sort()` done by -// `vec![MultiAsset, MultiAsset].into()`. -fn into_multiassets_checked( - fee_asset: MultiAsset, - transfer_asset: MultiAsset, -) -> (MultiAssets, usize, MultiAsset, MultiAsset) { - let assets: MultiAssets = vec![fee_asset.clone(), transfer_asset.clone()].into(); - let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; - (assets, fee_index, fee_asset, transfer_asset) -} - -/// Test `limited_reserve_transfer_assets` with local asset reserve and local fee reserve. -/// -/// Transferring native asset (local reserve) to some `OTHER_PARA_ID` (no teleport trust). -/// Using native asset for fees as well. -/// -/// ```nocompile -/// Here (source) OTHER_PARA_ID (destination) -/// | `assets` reserve -/// | `fees` reserve -/// | -/// | 1. 
execute `TransferReserveAsset(assets_and_fees_batched_together)` -/// | \--> sends `ReserveAssetDeposited(both), ClearOrigin, BuyExecution(fees), DepositAsset` -/// \------------------------------------------> -/// ``` -#[test] -fn limited_reserve_transfer_assets_with_local_asset_reserve_and_local_fee_reserve_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); - let expected_weight_limit = weight_limit.clone(); - let expected_beneficiary = beneficiary; - let dest: MultiLocation = Parachain(OTHER_PARA_ID).into(); - - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - // call extrinsic - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - weight_limit, - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(OTHER_PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - dest, - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_limited_execution((Parent, SEND_AMOUNT), expected_weight_limit), - DepositAsset { - assets: AllCounted(1).into(), - beneficiary: expected_beneficiary - }, - ]), - )] - ); - let mut last_events = last_events(3).into_iter(); - assert_eq!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - assert_eq!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::FeesPaid { - paying: expected_beneficiary, - fees: MultiAssets::new(), - }) - ); - assert!(matches!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) - )); - }); -} - -/// Test `reserve_transfer_assets` with destination asset reserve and local fee reserve. -/// -/// Transferring foreign asset (`FOREIGN_ASSET_RESERVE_PARA_ID` reserve) to -/// `FOREIGN_ASSET_RESERVE_PARA_ID` (no teleport trust). -/// Using native asset (local reserve) for fees. -/// -/// ```nocompile -/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) -/// | `fees` reserve `assets` reserve -/// | -/// | 1. execute `TransferReserveAsset(fees)` -/// | \-> sends `ReserveAssetDeposited(fees), ClearOrigin, BuyExecution(fees), DepositAsset` -/// | 2. execute `InitiateReserveWithdraw(assets)` -/// | \--> sends `WithdrawAsset(assets), ClearOrigin, BuyExecution(fees), DepositAsset` -/// \------------------------------------------> -/// ``` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. 
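A framework-free sketch of the bookkeeping behind the reserve-transfer tests: rather than burning, the reserve chain moves the amount into the destination's sovereign account and instructs the destination to mint a derivative (`ReserveAssetDeposited`), so total issuance on the reserve chain is unchanged. The account names below are illustrative.

    use std::collections::HashMap;

    // Move `amount` from the sender to the destination chain's sovereign
    // account on the reserve chain; nothing is burned.
    fn reserve_transfer(
        balances: &mut HashMap<&'static str, u128>,
        from: &'static str,
        dest_sovereign: &'static str,
        amount: u128,
    ) -> Result<(), &'static str> {
        let from_bal = balances.get_mut(from).ok_or("unknown sender")?;
        if *from_bal < amount {
            return Err("insufficient balance");
        }
        *from_bal -= amount;
        *balances.entry(dest_sovereign).or_insert(0) += amount;
        Ok(())
    }

    fn main() {
        let mut balances = HashMap::from([("alice", 1_000u128), ("para(2009) sovereign", 1_000)]);
        reserve_transfer(&mut balances, "alice", "para(2009) sovereign", 100).unwrap();
        assert_eq!(balances["alice"], 900);
        assert_eq!(balances["para(2009) sovereign"], 1_100);
        assert_eq!(balances.values().sum::<u128>(), 2_000); // nothing burned
    }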
-#[test] -fn reserve_transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_works() { - let weight = BaseXcmWeight::get() * 3; - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - new_test_ext_with_balances(balances).execute_with(|| { - // create non-sufficient foreign asset BLA (0 total issuance) - let foreign_initial_amount = 142; - let (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) = - set_up_foreign_asset( - FOREIGN_ASSET_RESERVE_PARA_ID, - Some(FOREIGN_ASSET_INNER_JUNCTION), - foreign_initial_amount, - false, - ); - - // transfer destination is reserve location (no teleport trust) - let dest = reserve_location; - - let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( - // native asset for fee - local reserve - (MultiLocation::here(), FEE_AMOUNT).into(), - // foreign asset to transfer - destination reserve - (foreign_asset_id_multilocation, SEND_AMOUNT).into(), - ); - - // reanchor according to test-case - let context = UniversalLocation::get(); - let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); - let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); - - // balances checks before - assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // do the transfer - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - )); - - let mut last_events = last_events(3).into_iter(); - assert_eq!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - - // Alice spent (transferred) amount - assert_eq!( - Assets::balance(foreign_asset_id_multilocation, ALICE), - foreign_initial_amount - SEND_AMOUNT - ); - // Alice used native asset for fees - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - FEE_AMOUNT); - // Destination account (parachain account) added native reserve used as fee to balances - assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), FEE_AMOUNT); - assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); - // Verify total and active issuance of foreign BLA have decreased (burned on - // reserve-withdraw) - let expected_issuance = foreign_initial_amount - SEND_AMOUNT; - assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); - assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); - - // Verify sent XCM program - assert_eq!( - sent_xcm(), - vec![( - dest, - // `fees` are being sent through local-reserve transfer because fee reserve is - // local chain; `assets` are burned on source and withdrawn from SA here - Xcm(vec![ - ReserveAssetDeposited((Parent, FEE_AMOUNT).into()), - buy_limited_execution(expected_fee, Unlimited), - WithdrawAsset(expected_asset.into()), - ClearOrigin, - DepositAsset { assets: AllCounted(2).into(), beneficiary }, - ]) - )] - ); - assert_eq!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::FeesPaid { - paying: beneficiary, - fees: MultiAssets::new(), - }) - ); - assert!(matches!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) - )); - }); -} - -/// Test `reserve_transfer_assets` with remote asset reserve and local fee reserve. -/// -/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to `OTHER_PARA_ID`. -/// Using native (local reserve) as fee should be disallowed. -#[test] -fn reserve_transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - new_test_ext_with_balances(balances).execute_with(|| { - // create non-sufficient foreign asset BLA (0 total issuance) - let foreign_initial_amount = 142; - let (_, _, foreign_asset_id_multilocation) = set_up_foreign_asset( - FOREIGN_ASSET_RESERVE_PARA_ID, - Some(FOREIGN_ASSET_INNER_JUNCTION), - foreign_initial_amount, - false, - ); - - // transfer destination is OTHER_PARA_ID (foreign asset needs to go through its reserve - // chain) - let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); - - let (assets, fee_index, _, _) = into_multiassets_checked( - // native asset for fee - local reserve - (MultiLocation::here(), FEE_AMOUNT).into(), - // foreign asset to transfer - remote reserve - (foreign_asset_id_multilocation, SEND_AMOUNT).into(), - ); - - // balances checks before - assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // try the transfer - let result = XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - ); - assert_eq!( - result, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve") - })) - ); - - // Alice transferred nothing - assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); - // Alice spent native asset for fees - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - // Verify total and active issuance of foreign BLA asset have decreased (burned on - // reserve-withdraw) - let expected_issuance = foreign_initial_amount; - assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); - assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); - }); -} - -/// Test `reserve_transfer_assets` with local asset reserve and destination fee reserve. -/// -/// Transferring native asset (local reserve) to `USDC_RESERVE_PARA_ID` (no teleport trust). Using -/// foreign asset (`USDC_RESERVE_PARA_ID` reserve) for fees. -/// -/// ```nocompile -/// Here (source) USDC_RESERVE_PARA_ID (destination) -/// | `assets` reserve `fees` reserve -/// | -/// | 1. execute `InitiateReserveWithdraw(fees)` -/// | \--> sends `WithdrawAsset(fees), ClearOrigin, BuyExecution(fees), DepositAsset` -/// | 2. 
execute `TransferReserveAsset(assets)` -/// | \-> sends `ReserveAssetDeposited(assets), ClearOrigin, BuyExecution(fees), DepositAsset` -/// \------------------------------------------> -/// ``` -#[test] -fn reserve_transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_works() { - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - new_test_ext_with_balances(balances).execute_with(|| { - // create sufficient foreign asset USDC (0 total issuance) - let usdc_initial_local_amount = 142; - let (usdc_reserve_location, usdc_chain_sovereign_account, usdc_id_multilocation) = - set_up_foreign_asset( - USDC_RESERVE_PARA_ID, - Some(USDC_INNER_JUNCTION), - usdc_initial_local_amount, - true, - ); - - // native assets transfer to fee reserve location (no teleport trust) - let dest = usdc_reserve_location; - - let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( - // usdc for fees (is sufficient on local chain too) - destination reserve - (usdc_id_multilocation, FEE_AMOUNT).into(), - // native asset to transfer (not used for fees) - local reserve - (MultiLocation::here(), SEND_AMOUNT).into(), - ); - - // reanchor according to test-case - let context = UniversalLocation::get(); - let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); - let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); - - // balances checks before - assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // do the transfer - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - )); - let weight = BaseXcmWeight::get() * 3; - let mut last_events = last_events(3).into_iter(); - assert_eq!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - assert_eq!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::FeesPaid { - paying: beneficiary, - fees: MultiAssets::new(), - }) - ); - assert!(matches!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) - )); - - // Alice spent (fees) amount - assert_eq!( - Assets::balance(usdc_id_multilocation, ALICE), - usdc_initial_local_amount - FEE_AMOUNT - ); - // Alice used native asset for transfer - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Sovereign account of dest parachain holds `SEND_AMOUNT` native asset in local reserve - assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), SEND_AMOUNT); - assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); - // Verify total and active issuance of USDC have decreased (burned on reserve-withdraw) - let expected_issuance = usdc_initial_local_amount - FEE_AMOUNT; - assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_issuance); - assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_issuance); - - // Verify sent XCM program - assert_eq!( - sent_xcm(), - vec![( - dest, - Xcm(vec![ - // fees are being sent through destination-reserve transfer because fee reserve - // is destination chain - WithdrawAsset(expected_fee.clone().into()), - buy_limited_execution(expected_fee, Unlimited), - // transfer is through local-reserve transfer because `assets` (native asset) - // have local reserve - ReserveAssetDeposited(expected_asset.into()), - ClearOrigin, - DepositAsset { assets: AllCounted(2).into(), beneficiary }, - ]) - )] - ); - }); -} - -/// Test `reserve_transfer_assets` with destination asset reserve and destination fee reserve. -/// -/// ```nocompile -/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) -/// | `fees` reserve -/// | `assets` reserve -/// | -/// | 1. execute `InitiateReserveWithdraw(assets_and_fees_batched_together)` -/// | \--> sends `WithdrawAsset(batch), ClearOrigin, BuyExecution(fees), DepositAsset` -/// \------------------------------------------> -/// ``` -#[test] -fn reserve_transfer_assets_with_destination_asset_reserve_and_destination_fee_reserve_works() { - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - new_test_ext_with_balances(balances).execute_with(|| { - // we'll send just this foreign asset back to its reserve location and use it for fees as - // well - let foreign_initial_amount = 142; - let (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) = - set_up_foreign_asset( - FOREIGN_ASSET_RESERVE_PARA_ID, - Some(FOREIGN_ASSET_INNER_JUNCTION), - foreign_initial_amount, - true, - ); - - // transfer destination is reserve location - let dest = reserve_location; - let assets: MultiAssets = vec![(foreign_asset_id_multilocation, SEND_AMOUNT).into()].into(); - let fee_index = 0; - - // reanchor according to test-case - let mut expected_assets = assets.clone(); - expected_assets.reanchor(&dest, UniversalLocation::get()).unwrap(); - - // balances checks before - assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // do the transfer - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index, - Unlimited, - )); - - let weight = BaseXcmWeight::get() * 2; - let mut last_events = last_events(3).into_iter(); - assert_eq!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - assert_eq!( - 
last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::FeesPaid { - paying: beneficiary, - fees: MultiAssets::new(), - }) - ); - assert!(matches!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) - )); - - // Alice spent (transferred) amount - assert_eq!( - Assets::balance(foreign_asset_id_multilocation, ALICE), - foreign_initial_amount - SEND_AMOUNT - ); - // Alice's native asset balance is untouched - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - // Reserve sovereign account has same balances - assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), 0); - assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); - // Verify total and active issuance of foreign BLA have decreased (burned on - // reserve-withdraw) - let expected_issuance = foreign_initial_amount - SEND_AMOUNT; - assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); - assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); - - // Verify sent XCM program - assert_eq!( - sent_xcm(), - vec![( - Parachain(FOREIGN_ASSET_RESERVE_PARA_ID).into(), - Xcm(vec![ - WithdrawAsset(expected_assets.clone()), - ClearOrigin, - buy_limited_execution(expected_assets.get(0).unwrap().clone(), Unlimited), - DepositAsset { assets: AllCounted(1).into(), beneficiary }, - ]), - )] - ); - }); -} - -/// Test `reserve_transfer_assets` with remote asset reserve and destination fee reserve is -/// disallowed. -/// -/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to -/// `USDC_RESERVE_PARA_ID`. Using USDC (destination reserve) as fee. -#[test] -fn reserve_transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - new_test_ext_with_balances(balances).execute_with(|| { - // create sufficient foreign asset USDC (0 total issuance) - let usdc_initial_local_amount = 42; - let (usdc_chain, _, usdc_id_multilocation) = set_up_foreign_asset( - USDC_RESERVE_PARA_ID, - Some(USDC_INNER_JUNCTION), - usdc_initial_local_amount, - true, - ); - - // create non-sufficient foreign asset BLA (0 total issuance) - let foreign_initial_amount = 142; - let (_, _, foreign_asset_id_multilocation) = set_up_foreign_asset( - FOREIGN_ASSET_RESERVE_PARA_ID, - Some(FOREIGN_ASSET_INNER_JUNCTION), - foreign_initial_amount, - false, - ); - - // transfer destination is USDC chain (foreign asset BLA needs to go through its separate - // reserve chain) - let dest = usdc_chain; - - let (assets, fee_index, _, _) = into_multiassets_checked( - // USDC for fees (is sufficient on local chain too) - destination reserve - (usdc_id_multilocation, FEE_AMOUNT).into(), - // foreign asset to transfer (not used for fees) - remote reserve - (foreign_asset_id_multilocation, SEND_AMOUNT).into(), - ); - - // balances checks before - assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); - assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // do the transfer - let result = XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - ); - assert_eq!( - result, - 
Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve") - })) - ); - - // Alice native asset untouched - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); - assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); - let expected_usdc_issuance = usdc_initial_local_amount; - assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); - assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); - let expected_bla_issuance = foreign_initial_amount; - assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); - assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); - }); -} - -/// Test `reserve_transfer_assets` with local asset reserve and remote fee reserve is disallowed. -/// -/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). Using foreign -/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. -#[test] -fn reserve_transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - new_test_ext_with_balances(balances).execute_with(|| { - // create sufficient foreign asset USDC (0 total issuance) - let usdc_initial_local_amount = 142; - let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( - USDC_RESERVE_PARA_ID, - Some(USDC_INNER_JUNCTION), - usdc_initial_local_amount, - true, - ); - - // transfer destination is some other parachain != fee reserve location (no teleport trust) - let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); - let dest_sovereign_account = SovereignAccountOf::convert_location(&dest).unwrap(); - - let (assets, fee_index, _, _) = into_multiassets_checked( - // USDC for fees (is sufficient on local chain too) - remote reserve - (usdc_id_multilocation, FEE_AMOUNT).into(), - // native asset to transfer (not used for fees) - local reserve - (MultiLocation::here(), SEND_AMOUNT).into(), - ); - - // balances checks before - assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // do the transfer - let result = XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - ); - assert_eq!( - result, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve") - })) - ); - assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - // Sovereign account of reserve parachain is unchanged - assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); - assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); - assert_eq!(Balances::free_balance(dest_sovereign_account), 0); - let expected_usdc_issuance = usdc_initial_local_amount; - assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); - assert_eq!(Assets::active_issuance(usdc_id_multilocation), 
expected_usdc_issuance); - }); -} - -/// Test `reserve_transfer_assets` with destination asset reserve and remote fee reserve is -/// disallowed. -/// -/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). Using foreign -/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. -#[test] -fn reserve_transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - new_test_ext_with_balances(balances).execute_with(|| { - // create sufficient foreign asset USDC (0 total issuance) - let usdc_initial_local_amount = 42; - let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( - USDC_RESERVE_PARA_ID, - Some(USDC_INNER_JUNCTION), - usdc_initial_local_amount, - true, - ); - - // create non-sufficient foreign asset BLA (0 total issuance) - let foreign_initial_amount = 142; - let (reserve_location, foreign_sovereign_account, foreign_asset_id_multilocation) = - set_up_foreign_asset( - FOREIGN_ASSET_RESERVE_PARA_ID, - Some(FOREIGN_ASSET_INNER_JUNCTION), - foreign_initial_amount, - false, - ); - - // transfer destination is asset reserve location - let dest = reserve_location; - let dest_sovereign_account = foreign_sovereign_account; - - let (assets, fee_index, _, _) = into_multiassets_checked( - // USDC for fees (is sufficient on local chain too) - remote reserve - (usdc_id_multilocation, FEE_AMOUNT).into(), - // foreign asset to transfer (not used for fees) - destination reserve - (foreign_asset_id_multilocation, SEND_AMOUNT).into(), - ); - - // balances checks before - assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // do the transfer - let result = XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - ); - assert_eq!( - result, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve") - })) - ); - // Alice native asset untouched - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); - assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); - assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); - assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); - assert_eq!(Balances::free_balance(dest_sovereign_account.clone()), 0); - assert_eq!(Assets::balance(foreign_asset_id_multilocation, dest_sovereign_account), 0); - let expected_usdc_issuance = usdc_initial_local_amount; - assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); - assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); - let expected_bla_issuance = foreign_initial_amount; - assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); - assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); - }); -} - -/// Test `reserve_transfer_assets` with remote asset reserve and (same) remote fee reserve. -/// -/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). 
Using foreign -/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. -/// -/// ```nocompile -/// | chain `A` | chain `C` | chain `B` -/// | Here (source) | USDC_RESERVE_PARA_ID | OTHER_PARA_ID (destination) -/// | | `fees` reserve | -/// | | `assets` reserve | -/// | -/// | 1. `A` executes `InitiateReserveWithdraw(both)` dest `C` -/// | -----------------> `C` executes `DepositReserveAsset(both)` dest `B` -/// | --------------------------> `DepositAsset(both)` -/// ``` -#[test] -fn reserve_transfer_assets_with_remote_asset_reserve_and_remote_fee_reserve_works() { - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - new_test_ext_with_balances(balances).execute_with(|| { - // create sufficient foreign asset USDC (0 total issuance) - let usdc_initial_local_amount = 142; - let (usdc_chain, usdc_chain_sovereign_account, usdc_id_multilocation) = - set_up_foreign_asset( - USDC_RESERVE_PARA_ID, - Some(USDC_INNER_JUNCTION), - usdc_initial_local_amount, - true, - ); - - // transfer destination is some other parachain - let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); - - let assets: MultiAssets = vec![(usdc_id_multilocation, SEND_AMOUNT).into()].into(); - let fee_index = 0; - - // reanchor according to test-case - let context = UniversalLocation::get(); - let expected_dest_on_reserve = dest.reanchored(&usdc_chain, context).unwrap(); - let fees = assets.get(fee_index).unwrap().clone(); - let (fees_half_1, fees_half_2) = XcmPallet::halve_fees(fees).unwrap(); - let mut expected_assets_on_reserve = assets.clone(); - expected_assets_on_reserve.reanchor(&usdc_chain, context).unwrap(); - let expected_fee_on_reserve = fees_half_1.reanchored(&usdc_chain, context).unwrap(); - let expected_fee_on_dest = fees_half_2.reanchored(&dest, context).unwrap(); - - // balances checks before - assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // do the transfer - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - )); - assert!(matches!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(_) }) - )); - - // Alice spent (transferred) amount - assert_eq!( - Assets::balance(usdc_id_multilocation, ALICE), - usdc_initial_local_amount - SEND_AMOUNT - ); - // Alice's native asset balance is untouched - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - // Destination account (parachain account) has expected (same) balances - assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); - assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); - // Verify total and active issuance of USDC have decreased (burned on reserve-withdraw) - let expected_usdc_issuance = usdc_initial_local_amount - SEND_AMOUNT; - assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); - assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); - - // Verify sent XCM program - assert_eq!( - sent_xcm(), - vec![( - // first message sent to reserve chain - usdc_chain, - Xcm(vec![ - WithdrawAsset(expected_assets_on_reserve), - ClearOrigin, - BuyExecution { fees: expected_fee_on_reserve, weight_limit: Unlimited }, - 
DepositReserveAsset { - assets: Wild(AllCounted(1)), - // final destination is `dest` as seen by `reserve` - dest: expected_dest_on_reserve, - // message sent onward to `dest` - xcm: Xcm(vec![ - buy_limited_execution(expected_fee_on_dest, Unlimited), - DepositAsset { assets: AllCounted(1).into(), beneficiary } - ]) - } - ]) - )], - ); - }); -} - -/// Test `reserve_transfer_assets` with local asset reserve and teleported fee. -/// -/// Transferring native asset (local reserve) to `USDT_PARA_ID`. Using teleport-trusted USDT for -/// fees. -/// -/// ```nocompile -/// Here (source) USDT_PARA_ID (destination) -/// | `assets` reserve `fees` teleport-trust -/// | -/// | 1. execute `InitiateTeleport(fees)` -/// | \--> sends `ReceiveTeleportedAsset(fees), .., DepositAsset(fees)` -/// | 2. execute `TransferReserveAsset(assets)` -/// | \-> sends `ReserveAssetDeposited(assets), ClearOrigin, BuyExecution(fees), DepositAsset` -/// \------------------------------------------> -/// ``` -#[test] -fn reserve_transfer_assets_with_local_asset_reserve_and_teleported_fee_works() { - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - new_test_ext_with_balances(balances).execute_with(|| { - // create sufficient foreign asset USDT (0 total issuance) - let usdt_initial_local_amount = 42; - let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = - set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); - - // native assets transfer destination is USDT chain (teleport trust only for USDT) - let dest = usdt_chain; - - let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( - // USDT for fees (is sufficient on local chain too) - teleported - (usdt_id_multilocation, FEE_AMOUNT).into(), - // native asset to transfer (not used for fees) - local reserve - (MultiLocation::here(), SEND_AMOUNT).into(), - ); - - // reanchor according to test-case - let context = UniversalLocation::get(); - let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); - let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); - - // balances checks before - assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // do the transfer - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - )); - let weight = BaseXcmWeight::get() * 3; - let mut last_events = last_events(3).into_iter(); - assert_eq!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - assert_eq!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::FeesPaid { - paying: beneficiary, - fees: MultiAssets::new(), - }) - ); - assert!(matches!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) - )); - // Alice spent (fees) amount - assert_eq!( - Assets::balance(usdt_id_multilocation, ALICE), - usdt_initial_local_amount - FEE_AMOUNT - ); - // Alice used native asset for transfer - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Sovereign account of dest parachain holds `SEND_AMOUNT` native asset in local reserve - assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), SEND_AMOUNT); - assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); - // Verify total and active issuance have decreased (teleported) - let expected_usdt_issuance = usdt_initial_local_amount - FEE_AMOUNT; - assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); - assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); - - // Verify sent XCM program - assert_eq!( - sent_xcm(), - vec![( - dest, - Xcm(vec![ - // fees are teleported to destination chain - ReceiveTeleportedAsset(expected_fee.clone().into()), - buy_limited_execution(expected_fee, Unlimited), - // transfer is through local-reserve transfer because `assets` (native - // asset) have local reserve - ReserveAssetDeposited(expected_asset.into()), - ClearOrigin, - DepositAsset { assets: AllCounted(2).into(), beneficiary }, - ]) - )] - ); - }); -} - -/// Test `reserve_transfer_assets` with destination asset reserve and teleported fee. -/// -/// Transferring foreign asset (destination reserve) to `FOREIGN_ASSET_RESERVE_PARA_ID`. Using -/// teleport-trusted USDT for fees. -/// -/// ```nocompile -/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) -/// | `fees` (USDT) teleport-trust -/// | `assets` reserve -/// | -/// | 1. execute `InitiateTeleport(fees)` -/// | \--> sends `ReceiveTeleportedAsset(fees), .., DepositAsset(fees)` -/// | 2. 
execute `InitiateReserveWithdraw(assets)` -/// | \--> sends `WithdrawAsset(asset), ClearOrigin, BuyExecution(fees), DepositAsset` -/// \------------------------------------------> -/// ``` -#[test] -fn reserve_transfer_assets_with_destination_asset_reserve_and_teleported_fee_works() { - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - new_test_ext_with_balances(balances).execute_with(|| { - // create sufficient foreign asset USDT (0 total issuance) - let usdt_initial_local_amount = 42; - let (_, usdt_chain_sovereign_account, usdt_id_multilocation) = - set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); - - // create non-sufficient foreign asset BLA (0 total issuance) - let foreign_initial_amount = 142; - let (reserve_location, foreign_sovereign_account, foreign_asset_id_multilocation) = - set_up_foreign_asset( - FOREIGN_ASSET_RESERVE_PARA_ID, - Some(FOREIGN_ASSET_INNER_JUNCTION), - foreign_initial_amount, - false, - ); - - // transfer destination is asset reserve location - let dest = reserve_location; - let dest_sovereign_account = foreign_sovereign_account; - - let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( - // USDT for fees (is sufficient on local chain too) - teleported - (usdt_id_multilocation, FEE_AMOUNT).into(), - // foreign asset to transfer (not used for fees) - destination reserve - (foreign_asset_id_multilocation, SEND_AMOUNT).into(), - ); - - // reanchor according to test-case - let context = UniversalLocation::get(); - let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); - let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); - - // balances checks before - assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // do the transfer - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - )); - let weight = BaseXcmWeight::get() * 4; - let mut last_events = last_events(3).into_iter(); - assert_eq!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - assert_eq!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::FeesPaid { - paying: beneficiary, - fees: MultiAssets::new(), - }) - ); - assert!(matches!( - last_events.next().unwrap(), - RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) - )); - // Alice native asset untouched - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - // Alice spent USDT for fees - assert_eq!( - Assets::balance(usdt_id_multilocation, ALICE), - usdt_initial_local_amount - FEE_AMOUNT - ); - // Alice transferred BLA - assert_eq!( - Assets::balance(foreign_asset_id_multilocation, ALICE), - foreign_initial_amount - SEND_AMOUNT - ); - // Verify balances of USDT reserve parachain - assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), 0); - assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); - // Verify balances of transferred-asset reserve parachain - assert_eq!(Balances::free_balance(dest_sovereign_account.clone()), 0); - assert_eq!(Assets::balance(foreign_asset_id_multilocation, dest_sovereign_account), 0); - // Verify total and active issuance of USDT have decreased (teleported) - let expected_usdt_issuance = usdt_initial_local_amount - FEE_AMOUNT; - assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); - assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); - // Verify total and active issuance of foreign BLA asset have decreased (burned on - // reserve-withdraw) - let expected_bla_issuance = foreign_initial_amount - SEND_AMOUNT; - assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); - assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); - - // Verify sent XCM program - assert_eq!( - sent_xcm(), - vec![( - dest, - Xcm(vec![ - // fees are teleported to destination chain - ReceiveTeleportedAsset(expected_fee.clone().into()), - buy_limited_execution(expected_fee, Unlimited), - // assets are withdrawn from origin's local SA - WithdrawAsset(expected_asset.into()), - ClearOrigin, - DepositAsset { assets: AllCounted(2).into(), beneficiary }, - ]) - )] - ); - }); -} - -/// Test `reserve_transfer_assets` with remote asset reserve and teleported fee is disallowed. -/// -/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to `USDT_PARA_ID`. -/// Using teleport-trusted USDT for fees. 
-#[test] -fn reserve_transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - new_test_ext_with_balances(balances).execute_with(|| { - // create sufficient foreign asset USDT (0 total issuance) - let usdt_initial_local_amount = 42; - let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = - set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); - - // create non-sufficient foreign asset BLA (0 total issuance) - let foreign_initial_amount = 142; - let (_, reserve_sovereign_account, foreign_asset_id_multilocation) = set_up_foreign_asset( - FOREIGN_ASSET_RESERVE_PARA_ID, - Some(FOREIGN_ASSET_INNER_JUNCTION), - foreign_initial_amount, - false, - ); - - // transfer destination is USDT chain (foreign asset needs to go through its reserve chain) - let dest = usdt_chain; - - let (assets, fee_index, _, _) = into_multiassets_checked( - // USDT for fees (is sufficient on local chain too) - teleported - (usdt_id_multilocation, FEE_AMOUNT).into(), - // foreign asset to transfer (not used for fees) - remote reserve - (foreign_asset_id_multilocation, SEND_AMOUNT).into(), - ); - - // balances checks before - assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // do the transfer - let result = XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - ); - assert_eq!( - result, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve") - })) - ); - // Alice native asset untouched - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); - assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); - assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), 0); - assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); - assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), 0); - assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); - let expected_usdt_issuance = usdt_initial_local_amount; - assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); - assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); - let expected_bla_issuance = foreign_initial_amount; - assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); - assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); - }); -} - -/// Test `reserve_transfer_assets` single asset which is teleportable - should fail. -/// -/// Attempting to reserve-transfer teleport-trusted USDT to `USDT_PARA_ID` should fail. 
-#[test] -fn reserve_transfer_assets_with_teleportable_asset_fails() { - let balances = vec![(ALICE, INITIAL_BALANCE)]; - let beneficiary: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - - new_test_ext_with_balances(balances).execute_with(|| { - // create sufficient foreign asset USDT (0 total issuance) - let usdt_initial_local_amount = 42; - let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = - set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); - - // transfer destination is USDT chain (foreign asset needs to go through its reserve chain) - let dest = usdt_chain; - let assets: MultiAssets = vec![(usdt_id_multilocation, FEE_AMOUNT).into()].into(); - let fee_index = 0; - - // balances checks before - assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - - // do the transfer - let res = XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(dest.into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - ); - assert_eq!( - res, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered") - })) - ); - // Alice native asset is still same - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); - // Alice USDT balance is still same - assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); - // No USDT moved to sovereign account of reserve parachain - assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); - // Verify total and active issuance of USDT are still the same - assert_eq!(Assets::total_issuance(usdt_id_multilocation), usdt_initial_local_amount); - assert_eq!(Assets::active_issuance(usdt_id_multilocation), usdt_initial_local_amount); - }); -} - -/// Test `reserve_transfer_assets` with teleportable fee that is filtered - should fail. -#[test] -fn reserve_transfer_assets_with_filtered_teleported_fee_disallowed() { - let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - new_test_ext_with_balances(vec![(ALICE, INITIAL_BALANCE)]).execute_with(|| { - let (assets, fee_index, _, _) = into_multiassets_checked( - // FilteredTeleportAsset for fees - teleportable but filtered - FilteredTeleportAsset::get().into(), - // native asset to transfer (not used for fees) - local reserve - (MultiLocation::here(), SEND_AMOUNT).into(), - ); - let result = XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(FilteredTeleportLocation::get().into()), - Box::new(beneficiary.into()), - Box::new(assets.into()), - fee_index as u32, - Unlimited, - ); - assert_eq!( - result, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered") - })) - ); - }); -} diff --git a/pallets/pallet-xcm/src/tests/mod.rs b/pallets/pallet-xcm/src/tests/mod.rs deleted file mode 100644 index 72814e5..0000000 --- a/pallets/pallet-xcm/src/tests/mod.rs +++ /dev/null @@ -1,951 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -#![cfg(test)] - -mod assets_transfer; - -use crate::{ - mock::*, AssetTraps, CurrentMigration, Error, LatestVersionedMultiLocation, Queries, - QueryStatus, VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, - VersionNotifyTargets, -}; -use frame_support::{ - assert_noop, assert_ok, - traits::{Currency, Hooks}, - weights::Weight, -}; -use polkadot_parachain_primitives::primitives::Id as ParaId; -use sp_runtime::traits::{AccountIdConversion, BlakeTwo256, Hash}; -use xcm::{latest::QueryResponseInfo, prelude::*}; -use xcm_builder::AllowKnownQueryResponses; -use xcm_executor::{ - traits::{Properties, QueryHandler, QueryResponseStatus, ShouldExecute}, - XcmExecutor, -}; - -const ALICE: AccountId = AccountId::new([0u8; 32]); -const BOB: AccountId = AccountId::new([1u8; 32]); -const INITIAL_BALANCE: u128 = 100; -const SEND_AMOUNT: u128 = 10; -const FEE_AMOUNT: u128 = 2; - -#[test] -fn report_outcome_notify_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); - let mut message = - Xcm(vec![TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }]); - let call = pallet_test_notifier::Call::notification_received { - query_id: 0, - response: Default::default(), - }; - let notify = RuntimeCall::TestNotifier(call); - new_test_ext_with_balances(balances).execute_with(|| { - XcmPallet::report_outcome_notify( - &mut message, - Parachain(OTHER_PARA_ID).into_location(), - notify, - 100, - ) - .unwrap(); - assert_eq!( - message, - Xcm(vec![ - SetAppendix(Xcm(vec![ReportError(QueryResponseInfo { - destination: Parent.into(), - query_id: 0, - max_weight: Weight::from_parts(1_000_000, 1_000_000), - })])), - TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }, - ]) - ); - let querier: MultiLocation = Here.into(); - let status = QueryStatus::Pending { - responder: MultiLocation::from(Parachain(OTHER_PARA_ID)).into(), - maybe_notify: Some((5, 2)), - timeout: 100, - maybe_match_querier: Some(querier.into()), - }; - assert_eq!(crate::Queries::::iter().collect::>(), vec![(0, status)]); - - let message = Xcm(vec![QueryResponse { - query_id: 0, - response: Response::ExecutionResult(None), - max_weight: Weight::from_parts(1_000_000, 1_000_000), - querier: Some(querier), - }]); - let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm( - Parachain(OTHER_PARA_ID), - message, - hash, - Weight::from_parts(1_000_000_000, 1_000_000_000), - ); - assert_eq!(r, Outcome::Complete(Weight::from_parts(1_000, 1_000))); - assert_eq!( - last_events(2), - vec![ - RuntimeEvent::TestNotifier(pallet_test_notifier::Event::ResponseReceived( - Parachain(OTHER_PARA_ID).into(), - 0, - Response::ExecutionResult(None), - )), - RuntimeEvent::XcmPallet(crate::Event::Notified { - query_id: 0, - pallet_index: 5, - call_index: 2 - }), - ] - ); - assert_eq!(crate::Queries::::iter().collect::>(), vec![]); - }); -} - -#[test] -fn report_outcome_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - 
(ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); - let mut message = - Xcm(vec![TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }]); - new_test_ext_with_balances(balances).execute_with(|| { - XcmPallet::report_outcome(&mut message, Parachain(OTHER_PARA_ID).into_location(), 100) - .unwrap(); - assert_eq!( - message, - Xcm(vec![ - SetAppendix(Xcm(vec![ReportError(QueryResponseInfo { - destination: Parent.into(), - query_id: 0, - max_weight: Weight::zero(), - })])), - TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }, - ]) - ); - let querier: MultiLocation = Here.into(); - let status = QueryStatus::Pending { - responder: MultiLocation::from(Parachain(OTHER_PARA_ID)).into(), - maybe_notify: None, - timeout: 100, - maybe_match_querier: Some(querier.into()), - }; - assert_eq!(crate::Queries::::iter().collect::>(), vec![(0, status)]); - - let message = Xcm(vec![QueryResponse { - query_id: 0, - response: Response::ExecutionResult(None), - max_weight: Weight::zero(), - querier: Some(querier), - }]); - let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm( - Parachain(OTHER_PARA_ID), - message, - hash, - Weight::from_parts(1_000_000_000, 1_000_000_000), - ); - assert_eq!(r, Outcome::Complete(Weight::from_parts(1_000, 1_000))); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::ResponseReady { - query_id: 0, - response: Response::ExecutionResult(None), - }) - ); - - let response = - QueryResponseStatus::Ready { response: Response::ExecutionResult(None), at: 1 }; - assert_eq!(XcmPallet::take_response(0), response); - }); -} - -#[test] -fn custom_querier_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let querier: MultiLocation = - (Parent, AccountId32 { network: None, id: ALICE.into() }).into(); - - let r = TestNotifier::prepare_new_query(RuntimeOrigin::signed(ALICE), querier); - assert_eq!(r, Ok(())); - let status = QueryStatus::Pending { - responder: MultiLocation::from(AccountId32 { network: None, id: ALICE.into() }).into(), - maybe_notify: None, - timeout: 100, - maybe_match_querier: Some(querier.into()), - }; - assert_eq!(crate::Queries::::iter().collect::>(), vec![(0, status)]); - - // Supplying no querier when one is expected will fail - let message = Xcm(vec![QueryResponse { - query_id: 0, - response: Response::ExecutionResult(None), - max_weight: Weight::zero(), - querier: None, - }]); - let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm_in_credit( - AccountId32 { network: None, id: ALICE.into() }, - message, - hash, - Weight::from_parts(1_000_000_000, 1_000_000_000), - Weight::from_parts(1_000, 1_000), - ); - assert_eq!(r, Outcome::Complete(Weight::from_parts(1_000, 1_000))); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::InvalidQuerier { - origin: AccountId32 { network: None, id: ALICE.into() }.into(), - query_id: 0, - expected_querier: querier, - maybe_actual_querier: None, - }), - ); - - // Supplying the wrong querier will also fail - let message = Xcm(vec![QueryResponse { - query_id: 0, - response: Response::ExecutionResult(None), - max_weight: Weight::zero(), - querier: Some(MultiLocation::here()), - }]); - let hash = fake_message_hash(&message); - let r = 
XcmExecutor::::execute_xcm_in_credit( - AccountId32 { network: None, id: ALICE.into() }, - message, - hash, - Weight::from_parts(1_000_000_000, 1_000_000_000), - Weight::from_parts(1_000, 1_000), - ); - assert_eq!(r, Outcome::Complete(Weight::from_parts(1_000, 1_000))); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::InvalidQuerier { - origin: AccountId32 { network: None, id: ALICE.into() }.into(), - query_id: 0, - expected_querier: querier, - maybe_actual_querier: Some(MultiLocation::here()), - }), - ); - - // Multiple failures should not have changed the query state - let message = Xcm(vec![QueryResponse { - query_id: 0, - response: Response::ExecutionResult(None), - max_weight: Weight::zero(), - querier: Some(querier), - }]); - let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm( - AccountId32 { network: None, id: ALICE.into() }, - message, - hash, - Weight::from_parts(1_000_000_000, 1_000_000_000), - ); - assert_eq!(r, Outcome::Complete(Weight::from_parts(1_000, 1_000))); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::ResponseReady { - query_id: 0, - response: Response::ExecutionResult(None), - }) - ); - - let response = - QueryResponseStatus::Ready { response: Response::ExecutionResult(None), at: 1 }; - assert_eq!(XcmPallet::take_response(0), response); - }); -} - -/// Test sending an `XCM` message (`XCM::ReserveAssetDeposit`) -/// -/// Asserts that the expected message is sent and the event is emitted -#[test] -fn send_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); - let message = Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: sender }, - ]); - - let versioned_dest = Box::new(RelayLocation::get().into()); - let versioned_message = Box::new(VersionedXcm::from(message.clone())); - assert_ok!(XcmPallet::send( - RuntimeOrigin::signed(ALICE), - versioned_dest, - versioned_message - )); - let sent_message = Xcm(Some(DescendOrigin(sender.try_into().unwrap())) - .into_iter() - .chain(message.0.clone().into_iter()) - .collect()); - let id = fake_message_hash(&sent_message); - assert_eq!(sent_xcm(), vec![(Here.into(), sent_message)]); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Sent { - origin: sender, - destination: RelayLocation::get(), - message, - message_id: id, - }) - ); - }); -} - -/// Test that sending an `XCM` message fails when the `XcmRouter` blocks the -/// matching message format -/// -/// Asserts that `send` fails with `Error::SendFailure` -#[test] -fn send_fails_when_xcm_router_blocks() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let sender: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - let message = Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: sender }, - ]); - assert_noop!( - XcmPallet::send( - RuntimeOrigin::signed(ALICE), - Box::new(MultiLocation::ancestor(8).into()), - 
Box::new(VersionedXcm::from(message.clone())), - ), - crate::Error::::SendFailure - ); - }); -} - -/// Test local execution of XCM -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the expected event is emitted. -#[test] -fn execute_withdraw_to_deposit_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: BOB.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::execute( - RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ - WithdrawAsset((Here, SEND_AMOUNT).into()), - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]))), - weight - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!(Balances::total_balance(&BOB), SEND_AMOUNT); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test drop/claim assets. -#[test] -fn trapped_assets_can_be_claimed() { - let balances = vec![(ALICE, INITIAL_BALANCE), (BOB, INITIAL_BALANCE)]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 6; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: BOB.into() }.into(); - - assert_ok!(XcmPallet::execute( - RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ - WithdrawAsset((Here, SEND_AMOUNT).into()), - buy_execution((Here, SEND_AMOUNT)), - // Don't propagated the error into the result. - SetErrorHandler(Xcm(vec![ClearError])), - // This will make an error. - Trap(0), - // This would succeed, but we never get to it. 
- DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]))), - weight - )); - let source: MultiLocation = - Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - let trapped = AssetTraps::::iter().collect::>(); - let vma = VersionedMultiAssets::from(MultiAssets::from((Here, SEND_AMOUNT))); - let hash = BlakeTwo256::hash_of(&(source, vma.clone())); - assert_eq!( - last_events(2), - vec![ - RuntimeEvent::XcmPallet(crate::Event::AssetsTrapped { - hash, - origin: source, - assets: vma - }), - RuntimeEvent::XcmPallet(crate::Event::Attempted { - outcome: Outcome::Complete(BaseXcmWeight::get() * 5) - }), - ] - ); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!(Balances::total_balance(&BOB), INITIAL_BALANCE); - - let expected = vec![(hash, 1u32)]; - assert_eq!(trapped, expected); - - let weight = BaseXcmWeight::get() * 3; - assert_ok!(XcmPallet::execute( - RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ - ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]))), - weight - )); - - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!(Balances::total_balance(&BOB), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!(AssetTraps::::iter().collect::>(), vec![]); - - let weight = BaseXcmWeight::get() * 3; - assert_ok!(XcmPallet::execute( - RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ - ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]))), - weight - )); - let outcome = Outcome::Incomplete(BaseXcmWeight::get(), XcmError::UnknownClaim); - assert_eq!(last_event(), RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome })); - }); -} - -#[test] -fn fake_latest_versioned_multilocation_works() { - use codec::Encode; - let remote: MultiLocation = Parachain(1000).into(); - let versioned_remote = LatestVersionedMultiLocation(&remote); - assert_eq!(versioned_remote.encode(), remote.into_versioned().encode()); -} - -#[test] -fn basic_subscription_works() { - new_test_ext_with_balances(vec![]).execute_with(|| { - let remote: MultiLocation = Parachain(1000).into(); - assert_ok!(XcmPallet::force_subscribe_version_notify( - RuntimeOrigin::root(), - Box::new(remote.into()), - )); - - assert_eq!( - Queries::::iter().collect::>(), - vec![(0, QueryStatus::VersionNotifier { origin: remote.into(), is_active: false })] - ); - assert_eq!( - VersionNotifiers::::iter().collect::>(), - vec![(XCM_VERSION, remote.into(), 0)] - ); - - assert_eq!( - take_sent_xcm(), - vec![( - remote, - Xcm(vec![SubscribeVersion { query_id: 0, max_response_weight: Weight::zero() }]), - ),] - ); - - let weight = BaseXcmWeight::get(); - let mut message = Xcm::<()>(vec![ - // Remote supports XCM v2 - QueryResponse { - query_id: 0, - max_weight: Weight::zero(), - response: Response::Version(1), - querier: None, - }, - ]); - assert_ok!(AllowKnownQueryResponses::::should_execute( - &remote, - message.inner_mut(), - weight, - &mut Properties { weight_credit: Weight::zero(), message_id: None }, - )); - }); -} - -#[test] -fn subscriptions_increment_id() { - new_test_ext_with_balances(vec![]).execute_with(|| { - let remote: MultiLocation = Parachain(1000).into(); - assert_ok!(XcmPallet::force_subscribe_version_notify( - RuntimeOrigin::root(), - 
Box::new(remote.into()), - )); - - let remote2: MultiLocation = Parachain(1001).into(); - assert_ok!(XcmPallet::force_subscribe_version_notify( - RuntimeOrigin::root(), - Box::new(remote2.into()), - )); - - assert_eq!( - take_sent_xcm(), - vec![ - ( - remote, - Xcm(vec![SubscribeVersion { - query_id: 0, - max_response_weight: Weight::zero() - }]), - ), - ( - remote2, - Xcm(vec![SubscribeVersion { - query_id: 1, - max_response_weight: Weight::zero() - }]), - ), - ] - ); - }); -} - -#[test] -fn double_subscription_fails() { - new_test_ext_with_balances(vec![]).execute_with(|| { - let remote: MultiLocation = Parachain(1000).into(); - assert_ok!(XcmPallet::force_subscribe_version_notify( - RuntimeOrigin::root(), - Box::new(remote.into()), - )); - assert_noop!( - XcmPallet::force_subscribe_version_notify( - RuntimeOrigin::root(), - Box::new(remote.into()) - ), - Error::::AlreadySubscribed, - ); - }) -} - -#[test] -fn unsubscribe_works() { - new_test_ext_with_balances(vec![]).execute_with(|| { - let remote: MultiLocation = Parachain(1000).into(); - assert_ok!(XcmPallet::force_subscribe_version_notify( - RuntimeOrigin::root(), - Box::new(remote.into()), - )); - assert_ok!(XcmPallet::force_unsubscribe_version_notify( - RuntimeOrigin::root(), - Box::new(remote.into()) - )); - assert_noop!( - XcmPallet::force_unsubscribe_version_notify( - RuntimeOrigin::root(), - Box::new(remote.into()) - ), - Error::::NoSubscription, - ); - - assert_eq!( - take_sent_xcm(), - vec![ - ( - remote, - Xcm(vec![SubscribeVersion { - query_id: 0, - max_response_weight: Weight::zero() - }]), - ), - (remote, Xcm(vec![UnsubscribeVersion]),), - ] - ); - }); -} - -/// Parachain 1000 is asking us for a version subscription. -#[test] -fn subscription_side_works() { - new_test_ext_with_balances(vec![]).execute_with(|| { - AdvertisedXcmVersion::set(1); - - let remote: MultiLocation = Parachain(1000).into(); - let weight = BaseXcmWeight::get(); - let message = - Xcm(vec![SubscribeVersion { query_id: 0, max_response_weight: Weight::zero() }]); - let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm(remote, message, hash, weight); - assert_eq!(r, Outcome::Complete(weight)); - - let instr = QueryResponse { - query_id: 0, - max_weight: Weight::zero(), - response: Response::Version(1), - querier: None, - }; - assert_eq!(take_sent_xcm(), vec![(remote, Xcm(vec![instr]))]); - - // A runtime upgrade which doesn't alter the version sends no notifications. - CurrentMigration::::put(VersionMigrationStage::default()); - XcmPallet::on_initialize(1); - assert_eq!(take_sent_xcm(), vec![]); - - // New version. - AdvertisedXcmVersion::set(2); - - // A runtime upgrade which alters the version does send notifications. - CurrentMigration::::put(VersionMigrationStage::default()); - XcmPallet::on_initialize(2); - let instr = QueryResponse { - query_id: 0, - max_weight: Weight::zero(), - response: Response::Version(2), - querier: None, - }; - assert_eq!(take_sent_xcm(), vec![(remote, Xcm(vec![instr]))]); - }); -} - -#[test] -fn subscription_side_upgrades_work_with_notify() { - new_test_ext_with_balances(vec![]).execute_with(|| { - AdvertisedXcmVersion::set(1); - - // An entry from a previous runtime with v2 XCM. - let v2_location = VersionedMultiLocation::V2(xcm::v2::Junction::Parachain(1001).into()); - VersionNotifyTargets::::insert(1, v2_location, (70, Weight::zero(), 2)); - let v3_location = Parachain(1003).into_versioned(); - VersionNotifyTargets::::insert(3, v3_location, (72, Weight::zero(), 2)); - - // New version. 
- AdvertisedXcmVersion::set(3); - - // A runtime upgrade which alters the version does send notifications. - CurrentMigration::::put(VersionMigrationStage::default()); - XcmPallet::on_initialize(1); - - let instr1 = QueryResponse { - query_id: 70, - max_weight: Weight::zero(), - response: Response::Version(3), - querier: None, - }; - let instr3 = QueryResponse { - query_id: 72, - max_weight: Weight::zero(), - response: Response::Version(3), - querier: None, - }; - let mut sent = take_sent_xcm(); - sent.sort_by_key(|k| match (k.1).0[0] { - QueryResponse { query_id: q, .. } => q, - _ => 0, - }); - assert_eq!( - sent, - vec![ - (Parachain(1001).into(), Xcm(vec![instr1])), - (Parachain(1003).into(), Xcm(vec![instr3])), - ] - ); - - let mut contents = VersionNotifyTargets::::iter().collect::>(); - contents.sort_by_key(|k| k.2 .0); - assert_eq!( - contents, - vec![ - (XCM_VERSION, Parachain(1001).into_versioned(), (70, Weight::zero(), 3)), - (XCM_VERSION, Parachain(1003).into_versioned(), (72, Weight::zero(), 3)), - ] - ); - }); -} - -#[test] -fn subscription_side_upgrades_work_without_notify() { - new_test_ext_with_balances(vec![]).execute_with(|| { - // An entry from a previous runtime with v2 XCM. - let v2_location = VersionedMultiLocation::V2(xcm::v2::Junction::Parachain(1001).into()); - VersionNotifyTargets::::insert(1, v2_location, (70, Weight::zero(), 2)); - let v3_location = Parachain(1003).into_versioned(); - VersionNotifyTargets::::insert(3, v3_location, (72, Weight::zero(), 2)); - - // A runtime upgrade which alters the version does send notifications. - CurrentMigration::::put(VersionMigrationStage::default()); - XcmPallet::on_initialize(1); - - let mut contents = VersionNotifyTargets::::iter().collect::>(); - contents.sort_by_key(|k| k.2 .0); - assert_eq!( - contents, - vec![ - (XCM_VERSION, Parachain(1001).into_versioned(), (70, Weight::zero(), 3)), - (XCM_VERSION, Parachain(1003).into_versioned(), (72, Weight::zero(), 3)), - ] - ); - }); -} - -#[test] -fn subscriber_side_subscription_works() { - new_test_ext_with_balances(vec![]).execute_with(|| { - let remote: MultiLocation = Parachain(1000).into(); - assert_ok!(XcmPallet::force_subscribe_version_notify( - RuntimeOrigin::root(), - Box::new(remote.into()), - )); - take_sent_xcm(); - - // Assume subscription target is working ok. - - let weight = BaseXcmWeight::get(); - let message = Xcm(vec![ - // Remote supports XCM v2 - QueryResponse { - query_id: 0, - max_weight: Weight::zero(), - response: Response::Version(1), - querier: None, - }, - ]); - let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm(remote, message, hash, weight); - assert_eq!(r, Outcome::Complete(weight)); - assert_eq!(take_sent_xcm(), vec![]); - - // This message cannot be sent to a v2 remote. - let v2_msg = xcm::v2::Xcm::<()>(vec![xcm::v2::Instruction::Trap(0)]); - assert_eq!(XcmPallet::wrap_version(&remote, v2_msg.clone()), Err(())); - - let message = Xcm(vec![ - // Remote upgraded to XCM v2 - QueryResponse { - query_id: 0, - max_weight: Weight::zero(), - response: Response::Version(2), - querier: None, - }, - ]); - let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm(remote, message, hash, weight); - assert_eq!(r, Outcome::Complete(weight)); - - // This message can now be sent to remote as it's v2. - assert_eq!( - XcmPallet::wrap_version(&remote, v2_msg.clone()), - Ok(VersionedXcm::from(v2_msg)) - ); - }); -} - -/// We should auto-subscribe when we don't know the remote's version. 
-#[test] -fn auto_subscription_works() { - new_test_ext_with_balances(vec![]).execute_with(|| { - let remote_v2: MultiLocation = Parachain(1000).into(); - let remote_v3: MultiLocation = Parachain(1001).into(); - - assert_ok!(XcmPallet::force_default_xcm_version(RuntimeOrigin::root(), Some(2))); - - // Wrapping a version for a destination we don't know elicits a subscription. - let msg_v2 = xcm::v2::Xcm::<()>(vec![xcm::v2::Instruction::Trap(0)]); - let msg_v3 = xcm::v3::Xcm::<()>(vec![xcm::v3::Instruction::ClearTopic]); - assert_eq!( - XcmPallet::wrap_version(&remote_v2, msg_v2.clone()), - Ok(VersionedXcm::from(msg_v2.clone())), - ); - assert_eq!(XcmPallet::wrap_version(&remote_v2, msg_v3.clone()), Err(())); - - let expected = vec![(remote_v2.into(), 2)]; - assert_eq!(VersionDiscoveryQueue::::get().into_inner(), expected); - - assert_eq!( - XcmPallet::wrap_version(&remote_v3, msg_v2.clone()), - Ok(VersionedXcm::from(msg_v2.clone())), - ); - assert_eq!(XcmPallet::wrap_version(&remote_v3, msg_v3.clone()), Err(())); - - let expected = vec![(remote_v2.into(), 2), (remote_v3.into(), 2)]; - assert_eq!(VersionDiscoveryQueue::::get().into_inner(), expected); - - XcmPallet::on_initialize(1); - assert_eq!( - take_sent_xcm(), - vec![( - remote_v3, - Xcm(vec![SubscribeVersion { query_id: 0, max_response_weight: Weight::zero() }]), - )] - ); - - // Assume remote_v3 is working ok and XCM version 3. - - let weight = BaseXcmWeight::get(); - let message = Xcm(vec![ - // Remote supports XCM v3 - QueryResponse { - query_id: 0, - max_weight: Weight::zero(), - response: Response::Version(3), - querier: None, - }, - ]); - let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm(remote_v3, message, hash, weight); - assert_eq!(r, Outcome::Complete(weight)); - - // V2 messages can be sent to remote_v3 under XCM v3. - assert_eq!( - XcmPallet::wrap_version(&remote_v3, msg_v2.clone()), - Ok(VersionedXcm::from(msg_v2.clone()).into_version(3).unwrap()), - ); - // This message can now be sent to remote_v3 as it's v3. - assert_eq!( - XcmPallet::wrap_version(&remote_v3, msg_v3.clone()), - Ok(VersionedXcm::from(msg_v3.clone())) - ); - - XcmPallet::on_initialize(2); - assert_eq!( - take_sent_xcm(), - vec![( - remote_v2, - Xcm(vec![SubscribeVersion { query_id: 1, max_response_weight: Weight::zero() }]), - )] - ); - - // Assume remote_v2 is working ok and XCM version 2. - - let weight = BaseXcmWeight::get(); - let message = Xcm(vec![ - // Remote supports XCM v2 - QueryResponse { - query_id: 1, - max_weight: Weight::zero(), - response: Response::Version(2), - querier: None, - }, - ]); - let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm(remote_v2, message, hash, weight); - assert_eq!(r, Outcome::Complete(weight)); - - // v3 messages cannot be sent to remote_v2... - assert_eq!( - XcmPallet::wrap_version(&remote_v2, msg_v2.clone()), - Ok(VersionedXcm::V2(msg_v2)) - ); - assert_eq!(XcmPallet::wrap_version(&remote_v2, msg_v3.clone()), Err(())); - }) -} - -#[test] -fn subscription_side_upgrades_work_with_multistage_notify() { - new_test_ext_with_balances(vec![]).execute_with(|| { - AdvertisedXcmVersion::set(1); - - // An entry from a previous runtime with v0 XCM. 
-		let v2_location = VersionedMultiLocation::V2(xcm::v2::Junction::Parachain(1001).into());
-		VersionNotifyTargets::<Test>::insert(1, v2_location, (70, Weight::zero(), 1));
-		let v2_location = VersionedMultiLocation::V2(xcm::v2::Junction::Parachain(1002).into());
-		VersionNotifyTargets::<Test>::insert(2, v2_location, (71, Weight::zero(), 1));
-		let v3_location = Parachain(1003).into_versioned();
-		VersionNotifyTargets::<Test>::insert(3, v3_location, (72, Weight::zero(), 1));
-
-		// New version.
-		AdvertisedXcmVersion::set(3);
-
-		// A runtime upgrade which alters the version does send notifications.
-		CurrentMigration::<Test>::put(VersionMigrationStage::default());
-		let mut maybe_migration = CurrentMigration::<Test>::take();
-		let mut counter = 0;
-		while let Some(migration) = maybe_migration.take() {
-			counter += 1;
-			let (_, m) = XcmPallet::check_xcm_version_change(migration, Weight::zero());
-			maybe_migration = m;
-		}
-		assert_eq!(counter, 4);
-
-		let instr1 = QueryResponse {
-			query_id: 70,
-			max_weight: Weight::zero(),
-			response: Response::Version(3),
-			querier: None,
-		};
-		let instr2 = QueryResponse {
-			query_id: 71,
-			max_weight: Weight::zero(),
-			response: Response::Version(3),
-			querier: None,
-		};
-		let instr3 = QueryResponse {
-			query_id: 72,
-			max_weight: Weight::zero(),
-			response: Response::Version(3),
-			querier: None,
-		};
-		let mut sent = take_sent_xcm();
-		sent.sort_by_key(|k| match (k.1).0[0] {
-			QueryResponse { query_id: q, .. } => q,
-			_ => 0,
-		});
-		assert_eq!(
-			sent,
-			vec![
-				(Parachain(1001).into(), Xcm(vec![instr1])),
-				(Parachain(1002).into(), Xcm(vec![instr2])),
-				(Parachain(1003).into(), Xcm(vec![instr3])),
-			]
-		);
-
-		let mut contents = VersionNotifyTargets::<Test>::iter().collect::<Vec<_>>();
-		contents.sort_by_key(|k| k.2 .0);
-		assert_eq!(
-			contents,
-			vec![
-				(XCM_VERSION, Parachain(1001).into_versioned(), (70, Weight::zero(), 3)),
-				(XCM_VERSION, Parachain(1002).into_versioned(), (71, Weight::zero(), 3)),
-				(XCM_VERSION, Parachain(1003).into_versioned(), (72, Weight::zero(), 3)),
-			]
-		);
-	});
-}
diff --git a/pallets/pot/rpc/src/lib.rs b/pallets/pot/rpc/src/lib.rs
index 5e09abf..41503b3 100644
--- a/pallets/pot/rpc/src/lib.rs
+++ b/pallets/pot/rpc/src/lib.rs
@@ -19,7 +19,7 @@
 use jsonrpsee::{
 	core::RpcResult,
 	proc_macros::rpc,
-	types::error::{CallError, ErrorObject},
+	types::{error::ErrorObject, ErrorObjectOwned},
 };
 use std::sync::Arc;
@@ -101,6 +101,7 @@ where
 	}
 }
 
-fn map_err(error: impl ToString, desc: &'static str) -> CallError {
-	CallError::Custom(ErrorObject::owned(Error::RuntimeError.into(), desc, Some(error.to_string())))
+fn map_err(error: impl ToString, desc: &'static str) -> ErrorObjectOwned {
+	//CallError::Custom(ErrorObject::owned(Error::RuntimeError.into(), desc, Some(error.to_string())))
+	ErrorObject::owned(Error::RuntimeError.into(), desc, Some(error.to_string()))
 }
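The pot RPC hunk above tracks the jsonrpsee bump that comes with Polkadot SDK 1.7.0: `CallError` no longer exists, and with the jsonrpsee release pulled in here `RpcResult<T>` carries an `ErrorObjectOwned` directly, so the helper builds and returns that object instead of wrapping an `ErrorObject` in `CallError::Custom`. Below is a minimal, self-contained sketch of the new-style helper; the `RUNTIME_ERROR` constant and the sample messages are illustrative stand-ins, not the pot RPC crate's actual `Error::RuntimeError` value.

use jsonrpsee::types::{error::ErrorObject, ErrorObjectOwned};

// Illustrative stand-in for the RPC crate's runtime-error code; the real value
// comes from the pot RPC `Error` enum.
const RUNTIME_ERROR: i32 = 1;

// Map any displayable error into the owned error object that jsonrpsee's
// `RpcResult` now carries directly.
fn map_err(error: impl ToString, desc: &'static str) -> ErrorObjectOwned {
	ErrorObject::owned(RUNTIME_ERROR, desc, Some(error.to_string()))
}

fn main() {
	let err = map_err("state query failed", "Unable to query pot details");
	// The owned object exposes the code and message it was built from.
	assert_eq!(err.code(), RUNTIME_ERROR);
	assert_eq!(err.message(), "Unable to query pot details");
}

Call sites that previously did `.map_err(|e| map_err(e, "…"))` inside RPC methods should keep working unchanged, since only the error type produced by the helper changes.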
diff --git a/pallets/pot/runtime-api/src/lib.rs b/pallets/pot/runtime-api/src/lib.rs
index 8bb2717..345180a 100644
--- a/pallets/pot/runtime-api/src/lib.rs
+++ b/pallets/pot/runtime-api/src/lib.rs
@@ -15,7 +15,6 @@
 // along with Magnet. If not, see .
 
 #![cfg_attr(not(feature = "std"), no_std)]
-#![deny(unused_crate_dependencies)]
 
 use scale_info::prelude::string::String;
diff --git a/pallets/pot/src/mock.rs b/pallets/pot/src/mock.rs
index ac7f4b4..1001511 100644
--- a/pallets/pot/src/mock.rs
+++ b/pallets/pot/src/mock.rs
@@ -17,6 +17,7 @@
 use super::*;
 use frame_support::{
+	derive_impl,
 	dispatch::DispatchClass,
 	parameter_types,
 	traits::{ConstU32, ConstU64, Get},
@@ -64,6 +65,7 @@ impl Get for BlockWeights {
 
 pub type AccountId = AccountId32;
 
+#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)]
 impl frame_system::Config for Test {
 	type BaseCallFilter = frame_support::traits::Everything;
 	type BlockWeights = BlockWeights;
@@ -103,7 +105,7 @@ impl pallet_balances::Config for Test {
 	type FreezeIdentifier = ();
 	type MaxFreezes = ();
 	type RuntimeHoldReason = ();
-	type MaxHolds = ();
+	type RuntimeFreezeReason = ();
 }
 
 parameter_types! {
diff --git a/pallets/preimage/Cargo.toml b/pallets/preimage/Cargo.toml
deleted file mode 100644
index 017495a..0000000
--- a/pallets/preimage/Cargo.toml
+++ /dev/null
@@ -1,56 +0,0 @@
-[package]
-name = "pallet-preimage"
-version = "4.0.0-dev"
-authors.workspace = true
-edition.workspace = true
-license = "Apache-2.0"
-homepage = "https://substrate.io"
-repository.workspace = true
-description = "FRAME pallet for storing preimages of hashes"
-
-[dependencies]
-codec = { package = "parity-scale-codec", workspace = true, default-features = false, features = ["derive"] }
-scale-info = { workspace = true, default-features = false, features = ["derive"] }
-impl-trait-for-tuples = { workspace = true }
-frame-benchmarking = { workspace = true, default-features = false, optional = true}
-frame-support = { workspace = true, default-features = false}
-frame-system = { workspace = true, default-features = false}
-sp-core = { workspace = true, default-features = false}
-sp-io = { workspace = true, default-features = false}
-sp-runtime = { workspace = true, default-features = false}
-sp-std = { workspace = true, default-features = false}
-log = { workspace = true, default-features = false }
-
-[dev-dependencies]
-pallet-balances = { workspace = true, default-features = false}
-sp-core = { workspace = true, default-features = false}
-
-[features]
-default = [ "std" ]
-runtime-benchmarks = [
-	"frame-benchmarking",
-	"frame-benchmarking/runtime-benchmarks",
-	"frame-support/runtime-benchmarks",
-	"frame-system/runtime-benchmarks",
-	"pallet-balances/runtime-benchmarks",
-	"sp-runtime/runtime-benchmarks",
-]
-std = [
-	"codec/std",
-	"frame-benchmarking?/std",
-	"frame-support/std",
-	"frame-system/std",
-	"log/std",
-	"pallet-balances/std",
-	"scale-info/std",
-	"sp-core/std",
-	"sp-io/std",
-	"sp-runtime/std",
-	"sp-std/std",
-]
-try-runtime = [
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
-	"pallet-balances/try-runtime",
-	"sp-runtime/try-runtime",
-]
diff --git a/pallets/preimage/src/benchmarking.rs b/pallets/preimage/src/benchmarking.rs
deleted file mode 100644
index d0c3404..0000000
--- a/pallets/preimage/src/benchmarking.rs
+++ /dev/null
@@ -1,248 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Preimage pallet benchmarking. - -use super::*; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; -use frame_support::assert_ok; -use frame_system::RawOrigin; -use sp_runtime::traits::Bounded; -use sp_std::{prelude::*, vec}; - -use crate::Pallet as Preimage; - -fn funded_account() -> T::AccountId { - let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); - caller -} - -fn preimage_and_hash() -> (Vec, T::Hash) { - sized_preimage_and_hash::(MAX_SIZE) -} - -fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { - let mut preimage = vec![]; - preimage.resize(size as usize, 0); - let hash = ::Hashing::hash(&preimage[..]); - (preimage, hash) -} - -benchmarks! { - // Expensive note - will reserve. - note_preimage { - let s in 0 .. MAX_SIZE; - let caller = funded_account::(); - let (preimage, hash) = sized_preimage_and_hash::(s); - }: _(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); - } - // Cheap note - will not reserve since it was requested. - note_requested_preimage { - let s in 0 .. MAX_SIZE; - let caller = funded_account::(); - let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: note_preimage(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); - } - // Cheap note - will not reserve since it's the manager. - note_no_deposit_preimage { - let s in 0 .. MAX_SIZE; - let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: note_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - preimage - ) verify { - assert!(Preimage::::have_preimage(&hash)); - } - - // Expensive unnote - will unreserve. - unnote_preimage { - let caller = funded_account::(); - let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); - }: _(RawOrigin::Signed(caller), hash) - verify { - assert!(!Preimage::::have_preimage(&hash)); - } - // Cheap unnote - will not unreserve since there's no deposit held. - unnote_no_deposit_preimage { - let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: unnote_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - assert!(!Preimage::::have_preimage(&hash)); - } - - // Expensive request - will unreserve the noter's deposit. 
- request_preimage { - let (preimage, hash) = preimage_and_hash::(); - let noter = funded_account::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); - let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) }; - assert_eq!(RequestStatusFor::::get(&hash), Some(s)); - } - // Cheap request - would unreserve the deposit but none was held. - request_no_deposit_preimage { - let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; - assert_eq!(RequestStatusFor::::get(&hash), Some(s)); - } - // Cheap request - the preimage is not yet noted, so deposit to unreserve. - request_unnoted_preimage { - let (_, hash) = preimage_and_hash::(); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; - assert_eq!(RequestStatusFor::::get(&hash), Some(s)); - } - // Cheap request - the preimage is already requested, so just a counter bump. - request_requested_preimage { - let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: None }; - assert_eq!(RequestStatusFor::::get(&hash), Some(s)); - } - - // Expensive unrequest - last reference and it's noted, so will destroy the preimage. - unrequest_preimage { - let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - assert_eq!(RequestStatusFor::::get(&hash), None); - } - // Cheap unrequest - last reference, but it's not noted. - unrequest_unnoted_preimage { - let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - assert_eq!(RequestStatusFor::::get(&hash), None); - } - // Cheap unrequest - not the last reference. 
- unrequest_multi_referenced_preimage { - let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; - assert_eq!(RequestStatusFor::::get(&hash), Some(s)); - } - - ensure_updated { - let n in 1..MAX_HASH_UPGRADE_BULK_COUNT; - - let caller = funded_account::(); - let hashes = (0..n).map(|i| insert_old_unrequested::(i)).collect::>(); - }: _(RawOrigin::Signed(caller), hashes) - verify { - assert_eq!(RequestStatusFor::::iter_keys().count(), n as usize); - #[allow(deprecated)] - let c = StatusFor::::iter_keys().count(); - assert_eq!(c, 0); - } - - impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test); -} - -fn insert_old_unrequested(s: u32) -> ::Hash { - let acc = account("old", s, 0); - T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); - - // The preimage size does not matter here as it is not touched. - let preimage = s.to_le_bytes(); - let hash = ::Hashing::hash(&preimage[..]); - - #[allow(deprecated)] - StatusFor::::insert( - &hash, - OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, - ); - hash -} diff --git a/pallets/preimage/src/lib.rs b/pallets/preimage/src/lib.rs deleted file mode 100644 index 0f8b693..0000000 --- a/pallets/preimage/src/lib.rs +++ /dev/null @@ -1,591 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Preimage Pallet -//! -//! - [`Config`] -//! - [`Call`] -//! -//! ## Overview -//! -//! The Preimage pallet allows for the users and the runtime to store the preimage -//! of a hash on chain. This can be used by other pallets for storing and managing -//! large byte-blobs. 
- -//Modified by Alex Wang 2023/11 - -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; -pub mod migration; -#[cfg(test)] -mod mock; -#[cfg(test)] -mod tests; -pub mod weights; - -pub mod storage; - -use storage::{Consideration, Footprint}; - -use sp_runtime::{ - traits::{BadOrigin, Hash, Saturating}, - Perbill, -}; -use sp_std::{borrow::Cow, prelude::*}; - -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::{ - dispatch::Pays, - ensure, - pallet_prelude::Get, - traits::{ - Currency, Defensive, FetchResult, Hash as PreimageHash, PreimageProvider, - PreimageRecipient, QueryPreimage, ReservableCurrency, StorePreimage, - }, - BoundedSlice, BoundedVec, -}; - -use scale_info::TypeInfo; -pub use weights::WeightInfo; - -use frame_support::pallet_prelude::*; -use frame_system::pallet_prelude::*; - -pub use pallet::*; - -/// A type to note whether a preimage is owned by a user or the system. -#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] -pub enum OldRequestStatus { - /// The associated preimage has not yet been requested by the system. The given deposit (if - /// some) is being held until either it becomes requested or the user retracts the preimage. - Unrequested { deposit: (AccountId, Balance), len: u32 }, - /// There are a non-zero number of outstanding requests for this hash by this chain. If there - /// is a preimage registered, then `len` is `Some` and it may be removed iff this counter - /// becomes zero. - Requested { deposit: Option<(AccountId, Balance)>, count: u32, len: Option }, -} - -/// A type to note whether a preimage is owned by a user or the system. -#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] -pub enum RequestStatus { - /// The associated preimage has not yet been requested by the system. The given deposit (if - /// some) is being held until either it becomes requested or the user retracts the preimage. - Unrequested { ticket: (AccountId, Ticket), len: u32 }, - /// There are a non-zero number of outstanding requests for this hash by this chain. If there - /// is a preimage registered, then `len` is `Some` and it may be removed iff this counter - /// becomes zero. - Requested { maybe_ticket: Option<(AccountId, Ticket)>, count: u32, maybe_len: Option }, -} - -type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; -type TicketOf = ::Consideration; - -/// Maximum size of preimage we can store is 4mb. -const MAX_SIZE: u32 = 4 * 1024 * 1024; -/// Hard-limit on the number of hashes that can be passed to `ensure_updated`. -/// -/// Exists only for benchmarking purposes. -pub const MAX_HASH_UPGRADE_BULK_COUNT: u32 = 1024; - -#[frame_support::pallet] -#[allow(deprecated)] -pub mod pallet { - use super::*; - - /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - - /// The Weight information for this pallet. - type WeightInfo: weights::WeightInfo; - - /// Currency type for this pallet. - // TODO#1569: Remove. - type Currency: ReservableCurrency; - - /// An origin that can request a preimage be placed on-chain without a deposit or fee, or - /// manage existing preimages. - type ManagerOrigin: EnsureOrigin; - - /// A means of providing some cost while data is stored on-chain. 
- type Consideration: Consideration; - } - - #[pallet::pallet] - #[pallet::storage_version(STORAGE_VERSION)] - pub struct Pallet(_); - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// A preimage has been noted. - Noted { hash: T::Hash }, - /// A preimage has been requested. - Requested { hash: T::Hash }, - /// A preimage has ben cleared. - Cleared { hash: T::Hash }, - } - - #[pallet::error] - pub enum Error { - /// Preimage is too large to store on-chain. - TooBig, - /// Preimage has already been noted on-chain. - AlreadyNoted, - /// The user is not authorized to perform this action. - NotAuthorized, - /// The preimage cannot be removed since it has not yet been noted. - NotNoted, - /// A preimage may not be removed when there are outstanding requests. - Requested, - /// The preimage request cannot be removed since no outstanding requests exist. - NotRequested, - /// More than `MAX_HASH_UPGRADE_BULK_COUNT` hashes were requested to be upgraded at once. - TooMany, - /// Too few hashes were requested to be upgraded (i.e. zero). - TooFew, - } - - /// A reason for this pallet placing a hold on funds. - #[pallet::composite_enum] - pub enum HoldReason { - /// The funds are held as storage deposit for a preimage. - Preimage, - } - - /// The request status of a given hash. - #[deprecated = "RequestStatusFor"] - #[pallet::storage] - pub(super) type StatusFor = - StorageMap<_, Identity, T::Hash, OldRequestStatus>>; - - /// The request status of a given hash. - #[pallet::storage] - pub(super) type RequestStatusFor = - StorageMap<_, Identity, T::Hash, RequestStatus>>; - - #[pallet::storage] - pub(super) type PreimageFor = - StorageMap<_, Identity, (T::Hash, u32), BoundedVec>>; - - #[pallet::call(weight = T::WeightInfo)] - impl Pallet { - /// Register a preimage on-chain. - /// - /// If the preimage was previously requested, no fees or deposits are taken for providing - /// the preimage. Otherwise, a deposit is taken proportional to the size of the preimage. - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::note_preimage(bytes.len() as u32))] - pub fn note_preimage(origin: OriginFor, bytes: Vec) -> DispatchResultWithPostInfo { - // We accept a signed origin which will pay a deposit, or a root origin where a deposit - // is not taken. - let maybe_sender = Self::ensure_signed_or_manager(origin)?; - let (system_requested, _) = Self::note_bytes(bytes.into(), maybe_sender.as_ref())?; - if system_requested || maybe_sender.is_none() { - Ok(Pays::No.into()) - } else { - Ok(().into()) - } - } - - /// Clear an unrequested preimage from the runtime storage. - /// - /// If `len` is provided, then it will be a much cheaper operation. - /// - /// - `hash`: The hash of the preimage to be removed from the store. - /// - `len`: The length of the preimage of `hash`. - #[pallet::call_index(1)] - pub fn unnote_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { - let maybe_sender = Self::ensure_signed_or_manager(origin)?; - Self::do_unnote_preimage(&hash, maybe_sender) - } - - /// Request a preimage be uploaded to the chain without paying any fees or deposits. - /// - /// If the preimage requests has already been provided on-chain, we unreserve any deposit - /// a user may have paid, and take the control of the preimage out of their hands. 
- #[pallet::call_index(2)] - pub fn request_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { - T::ManagerOrigin::ensure_origin(origin)?; - Self::do_request_preimage(&hash); - Ok(()) - } - - /// Clear a previously made request for a preimage. - /// - /// NOTE: THIS MUST NOT BE CALLED ON `hash` MORE TIMES THAN `request_preimage`. - #[pallet::call_index(3)] - pub fn unrequest_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { - T::ManagerOrigin::ensure_origin(origin)?; - Self::do_unrequest_preimage(&hash) - } - - /// Ensure that the a bulk of pre-images is upgraded. - /// - /// The caller pays no fee if at least 90% of pre-images were successfully updated. - #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::ensure_updated(hashes.len() as u32))] - pub fn ensure_updated( - origin: OriginFor, - hashes: Vec, - ) -> DispatchResultWithPostInfo { - ensure_signed(origin)?; - ensure!(hashes.len() > 0, Error::::TooFew); - ensure!(hashes.len() <= MAX_HASH_UPGRADE_BULK_COUNT as usize, Error::::TooMany); - - let updated = hashes.iter().map(Self::do_ensure_updated).filter(|b| *b).count() as u32; - let ratio = Perbill::from_rational(updated, hashes.len() as u32); - - let pays: Pays = if ratio < Perbill::from_percent(90) { Pays::Yes } else { Pays::No }; - Ok(pays.into()) - } - } -} - -impl Pallet { - fn do_ensure_updated(h: &T::Hash) -> bool { - #[allow(deprecated)] - let r = match StatusFor::::take(h) { - Some(r) => r, - None => return false, - }; - let n = match r { - OldRequestStatus::Unrequested { deposit: (who, amount), len } => { - // unreserve deposit - T::Currency::unreserve(&who, amount); - // take consideration - let Ok(ticket) = - T::Consideration::new(&who, Footprint::from_parts(1, len as usize)) - .defensive_proof("Unexpected inability to take deposit after unreserved") - else { - return true; - }; - RequestStatus::Unrequested { ticket: (who, ticket), len } - }, - OldRequestStatus::Requested { deposit: maybe_deposit, count, len: maybe_len } => { - let maybe_ticket = if let Some((who, deposit)) = maybe_deposit { - // unreserve deposit - T::Currency::unreserve(&who, deposit); - // take consideration - if let Some(len) = maybe_len { - let Ok(ticket) = - T::Consideration::new(&who, Footprint::from_parts(1, len as usize)) - .defensive_proof( - "Unexpected inability to take deposit after unreserved", - ) - else { - return true; - }; - Some((who, ticket)) - } else { - None - } - } else { - None - }; - RequestStatus::Requested { maybe_ticket, count, maybe_len } - }, - }; - RequestStatusFor::::insert(h, n); - true - } - - /// Ensure that the origin is either the `ManagerOrigin` or a signed origin. - fn ensure_signed_or_manager( - origin: T::RuntimeOrigin, - ) -> Result, BadOrigin> { - if T::ManagerOrigin::ensure_origin(origin.clone()).is_ok() { - return Ok(None); - } - let who = ensure_signed(origin)?; - Ok(Some(who)) - } - - /// Store some preimage on chain. - /// - /// If `maybe_depositor` is `None` then it is also requested. If `Some`, then it is not. - /// - /// We verify that the preimage is within the bounds of what the pallet supports. - /// - /// If the preimage was requested to be uploaded, then the user pays no deposits or tx fees. 
- fn note_bytes( - preimage: Cow<[u8]>, - maybe_depositor: Option<&T::AccountId>, - ) -> Result<(bool, T::Hash), DispatchError> { - let hash = T::Hashing::hash(&preimage); - let len = preimage.len() as u32; - ensure!(len <= MAX_SIZE, Error::::TooBig); - - Self::do_ensure_updated(&hash); - // We take a deposit only if there is a provided depositor and the preimage was not - // previously requested. This also allows the tx to pay no fee. - let status = match (RequestStatusFor::::get(hash), maybe_depositor) { - (Some(RequestStatus::Requested { maybe_ticket, count, .. }), _) => { - RequestStatus::Requested { maybe_ticket, count, maybe_len: Some(len) } - }, - (Some(RequestStatus::Unrequested { .. }), Some(_)) => { - return Err(Error::::AlreadyNoted.into()) - }, - (Some(RequestStatus::Unrequested { ticket, len }), None) => RequestStatus::Requested { - maybe_ticket: Some(ticket), - count: 1, - maybe_len: Some(len), - }, - (None, None) => { - RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: Some(len) } - }, - (None, Some(depositor)) => { - let ticket = - T::Consideration::new(depositor, Footprint::from_parts(1, len as usize))?; - RequestStatus::Unrequested { ticket: (depositor.clone(), ticket), len } - }, - }; - let was_requested = matches!(status, RequestStatus::Requested { .. }); - RequestStatusFor::::insert(hash, status); - - let _ = Self::insert(&hash, preimage) - .defensive_proof("Unable to insert. Logic error in `note_bytes`?"); - - Self::deposit_event(Event::Noted { hash }); - - Ok((was_requested, hash)) - } - - // This function will add a hash to the list of requested preimages. - // - // If the preimage already exists before the request is made, the deposit for the preimage is - // returned to the user, and removed from their management. - fn do_request_preimage(hash: &T::Hash) { - Self::do_ensure_updated(&hash); - let (count, maybe_len, maybe_ticket) = - RequestStatusFor::::get(hash).map_or((1, None, None), |x| match x { - RequestStatus::Requested { maybe_ticket, mut count, maybe_len } => { - count.saturating_inc(); - (count, maybe_len, maybe_ticket) - }, - RequestStatus::Unrequested { ticket, len } => (1, Some(len), Some(ticket)), - }); - RequestStatusFor::::insert( - hash, - RequestStatus::Requested { maybe_ticket, count, maybe_len }, - ); - if count == 1 { - Self::deposit_event(Event::Requested { hash: *hash }); - } - } - - // Clear a preimage from the storage of the chain, returning any deposit that may be reserved. - // - // If `len` is provided, it will be a much cheaper operation. - // - // If `maybe_owner` is provided, we verify that it is the correct owner before clearing the - // data. - fn do_unnote_preimage( - hash: &T::Hash, - maybe_check_owner: Option, - ) -> DispatchResult { - Self::do_ensure_updated(&hash); - match RequestStatusFor::::get(hash).ok_or(Error::::NotNoted)? { - RequestStatus::Requested { maybe_ticket: Some((owner, ticket)), count, maybe_len } => { - ensure!(maybe_check_owner.map_or(true, |c| c == owner), Error::::NotAuthorized); - let _ = ticket.drop(&owner); - RequestStatusFor::::insert( - hash, - RequestStatus::Requested { maybe_ticket: None, count, maybe_len }, - ); - Ok(()) - }, - RequestStatus::Requested { maybe_ticket: None, .. 
} => { - ensure!(maybe_check_owner.is_none(), Error::::NotAuthorized); - Self::do_unrequest_preimage(hash) - }, - RequestStatus::Unrequested { ticket: (owner, ticket), len } => { - ensure!(maybe_check_owner.map_or(true, |c| c == owner), Error::::NotAuthorized); - let _ = ticket.drop(&owner); - RequestStatusFor::::remove(hash); - - Self::remove(hash, len); - Self::deposit_event(Event::Cleared { hash: *hash }); - Ok(()) - }, - } - } - - /// Clear a preimage request. - fn do_unrequest_preimage(hash: &T::Hash) -> DispatchResult { - Self::do_ensure_updated(&hash); - match RequestStatusFor::::get(hash).ok_or(Error::::NotRequested)? { - RequestStatus::Requested { mut count, maybe_len, maybe_ticket } if count > 1 => { - count.saturating_dec(); - RequestStatusFor::::insert( - hash, - RequestStatus::Requested { maybe_ticket, count, maybe_len }, - ); - }, - RequestStatus::Requested { count, maybe_len, maybe_ticket } => { - debug_assert!(count == 1, "preimage request counter at zero?"); - match (maybe_len, maybe_ticket) { - // Preimage was never noted. - (None, _) => RequestStatusFor::::remove(hash), - // Preimage was noted without owner - just remove it. - (Some(len), None) => { - Self::remove(hash, len); - RequestStatusFor::::remove(hash); - Self::deposit_event(Event::Cleared { hash: *hash }); - }, - // Preimage was noted with owner - move to unrequested so they can get refund. - (Some(len), Some(ticket)) => { - RequestStatusFor::::insert( - hash, - RequestStatus::Unrequested { ticket, len }, - ); - }, - } - }, - RequestStatus::Unrequested { .. } => return Err(Error::::NotRequested.into()), - } - Ok(()) - } - - fn insert(hash: &T::Hash, preimage: Cow<[u8]>) -> Result<(), ()> { - BoundedSlice::>::try_from(preimage.as_ref()) - .map_err(|_| ()) - .map(|s| PreimageFor::::insert((hash, s.len() as u32), s)) - } - - fn remove(hash: &T::Hash, len: u32) { - PreimageFor::::remove((hash, len)) - } - - fn have(hash: &T::Hash) -> bool { - Self::len(hash).is_some() - } - - fn len(hash: &T::Hash) -> Option { - use RequestStatus::*; - Self::do_ensure_updated(&hash); - match RequestStatusFor::::get(hash) { - Some(Requested { maybe_len: Some(len), .. }) | Some(Unrequested { len, .. }) => { - Some(len) - }, - _ => None, - } - } - - fn fetch(hash: &T::Hash, len: Option) -> FetchResult { - let len = len.or_else(|| Self::len(hash)).ok_or(DispatchError::Unavailable)?; - PreimageFor::::get((hash, len)) - .map(|p| p.into_inner()) - .map(Into::into) - .ok_or(DispatchError::Unavailable) - } -} - -impl PreimageProvider for Pallet { - fn have_preimage(hash: &T::Hash) -> bool { - Self::have(hash) - } - - fn preimage_requested(hash: &T::Hash) -> bool { - Self::do_ensure_updated(hash); - matches!(RequestStatusFor::::get(hash), Some(RequestStatus::Requested { .. })) - } - - fn get_preimage(hash: &T::Hash) -> Option> { - Self::fetch(hash, None).ok().map(Cow::into_owned) - } - - fn request_preimage(hash: &T::Hash) { - Self::do_request_preimage(hash) - } - - fn unrequest_preimage(hash: &T::Hash) { - let res = Self::do_unrequest_preimage(hash); - debug_assert!(res.is_ok(), "do_unrequest_preimage failed - counter underflow?"); - } -} - -impl PreimageRecipient for Pallet { - type MaxSize = ConstU32; // 2**22 - - fn note_preimage(bytes: BoundedVec) { - // We don't really care if this fails, since that's only the case if someone else has - // already noted it. - let _ = Self::note_bytes(bytes.into_inner().into(), None); - } - - fn unnote_preimage(hash: &T::Hash) { - // Should never fail if authorization check is skipped. 
- let res = Self::do_unrequest_preimage(hash); - debug_assert!(res.is_ok(), "unnote_preimage failed - request outstanding?"); - } -} - -impl> QueryPreimage for Pallet { - //type H = T::Hashing; - - fn len(hash: &T::Hash) -> Option { - Pallet::::len(hash) - } - - fn fetch(hash: &T::Hash, len: Option) -> FetchResult { - Pallet::::fetch(hash, len) - } - - fn is_requested(hash: &T::Hash) -> bool { - Self::do_ensure_updated(&hash); - matches!(RequestStatusFor::::get(hash), Some(RequestStatus::Requested { .. })) - } - - fn request(hash: &T::Hash) { - Self::do_request_preimage(hash) - } - - fn unrequest(hash: &T::Hash) { - let res = Self::do_unrequest_preimage(hash); - debug_assert!(res.is_ok(), "do_unrequest_preimage failed - counter underflow?"); - } -} - -impl> StorePreimage for Pallet { - const MAX_LENGTH: usize = MAX_SIZE as usize; - - fn note(bytes: Cow<[u8]>) -> Result { - // We don't really care if this fails, since that's only the case if someone else has - // already noted it. - let maybe_hash = Self::note_bytes(bytes, None).map(|(_, h)| h); - // Map to the correct trait error. - if maybe_hash == Err(DispatchError::from(Error::::TooBig)) { - Err(DispatchError::Exhausted) - } else { - maybe_hash - } - } - - fn unnote(hash: &T::Hash) { - // Should never fail if authorization check is skipped. - let res = Self::do_unnote_preimage(hash, None); - debug_assert!(res.is_ok(), "unnote_preimage failed - request outstanding?"); - } -} diff --git a/pallets/preimage/src/migration.rs b/pallets/preimage/src/migration.rs deleted file mode 100644 index d8f7af6..0000000 --- a/pallets/preimage/src/migration.rs +++ /dev/null @@ -1,273 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Storage migrations for the preimage pallet. - -use super::*; -use frame_support::{ - storage_alias, - traits::{ConstU32, OnRuntimeUpgrade}, -}; -use sp_std::collections::btree_map::BTreeMap; - -#[cfg(feature = "try-runtime")] -use frame_support::ensure; -#[cfg(feature = "try-runtime")] -use sp_runtime::TryRuntimeError; - -/// The log target. -const TARGET: &'static str = "runtime::preimage::migration::v1"; - -/// The original data layout of the preimage pallet without a specific version number. -mod v0 { - use super::*; - - #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] - pub enum OldRequestStatus { - Unrequested(Option<(AccountId, Balance)>), - Requested(u32), - } - - #[storage_alias] - pub type PreimageFor = StorageMap< - Pallet, - Identity, - ::Hash, - BoundedVec>, - >; - - #[storage_alias] - pub type StatusFor = StorageMap< - Pallet, - Identity, - ::Hash, - OldRequestStatus<::AccountId, BalanceOf>, - >; - - /// Returns the number of images or `None` if the storage is corrupted. 
- #[cfg(feature = "try-runtime")] - pub fn image_count() -> Option { - let images = v0::PreimageFor::::iter_values().count() as u32; - let status = v0::StatusFor::::iter_values().count() as u32; - - if images == status { - Some(images) - } else { - None - } - } -} - -pub mod v1 { - use super::*; - - /// Migration for moving preimage from V0 to V1 storage. - /// - /// Note: This needs to be run with the same hashing algorithm as before - /// since it is not re-hashing the preimages. - pub struct Migration(sp_std::marker::PhantomData); - - impl OnRuntimeUpgrade for Migration { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, TryRuntimeError> { - ensure!(StorageVersion::get::>() == 0, "can only upgrade from version 0"); - - let images = v0::image_count::().expect("v0 storage corrupted"); - log::info!(target: TARGET, "Migrating {} images", &images); - Ok((images as u32).encode()) - } - - fn on_runtime_upgrade() -> Weight { - let mut weight = T::DbWeight::get().reads(1); - if StorageVersion::get::>() != 0 { - log::warn!( - target: TARGET, - "skipping MovePreimagesIntoBuckets: executed on wrong storage version.\ - Expected version 0" - ); - return weight; - } - - let status = v0::StatusFor::::drain().collect::>(); - weight.saturating_accrue(T::DbWeight::get().reads(status.len() as u64)); - - let preimages = v0::PreimageFor::::drain().collect::>(); - weight.saturating_accrue(T::DbWeight::get().reads(preimages.len() as u64)); - - for (hash, status) in status.into_iter() { - let preimage = if let Some(preimage) = preimages.get(&hash) { - preimage - } else { - log::error!(target: TARGET, "preimage not found for hash {:?}", &hash); - continue; - }; - let len = preimage.len() as u32; - if len > MAX_SIZE { - log::error!( - target: TARGET, - "preimage too large for hash {:?}, len: {}", - &hash, - len - ); - continue; - } - - let status = match status { - v0::OldRequestStatus::Unrequested(deposit) => match deposit { - Some(deposit) => OldRequestStatus::Unrequested { deposit, len }, - // `None` depositor becomes system-requested. - None => { - OldRequestStatus::Requested { deposit: None, count: 1, len: Some(len) } - }, - }, - v0::OldRequestStatus::Requested(0) => { - log::error!(target: TARGET, "preimage has counter of zero: {:?}", hash); - continue; - }, - v0::OldRequestStatus::Requested(count) => { - OldRequestStatus::Requested { deposit: None, count, len: Some(len) } - }, - }; - log::trace!(target: TARGET, "Moving preimage {:?} with len {}", hash, len); - - #[allow(deprecated)] - crate::StatusFor::::insert(hash, status); - crate::PreimageFor::::insert(&(hash, len), preimage); - - weight.saturating_accrue(T::DbWeight::get().writes(2)); - } - StorageVersion::new(1).put::>(); - - weight.saturating_add(T::DbWeight::get().writes(1)) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> DispatchResult { - let old_images: u32 = - Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); - let new_images = image_count::().expect("V1 storage corrupted"); - - if new_images != old_images { - log::error!( - target: TARGET, - "migrated {} images, expected {}", - new_images, - old_images - ); - } - ensure!(StorageVersion::get::>() == 1, "must upgrade"); - Ok(()) - } - } - - /// Returns the number of images or `None` if the storage is corrupted. - #[cfg(feature = "try-runtime")] - pub fn image_count() -> Option { - // Use iter_values() to ensure that the values are decodable. 
- let images = crate::PreimageFor::::iter_values().count() as u32; - #[allow(deprecated)] - let status = crate::StatusFor::::iter_values().count() as u32; - - if images == status { - Some(images) - } else { - None - } - } -} - -#[cfg(test)] -#[cfg(feature = "try-runtime")] -mod test { - #![allow(deprecated)] - use super::*; - use crate::mock::{Test as T, *}; - - use sp_runtime::bounded_vec; - - #[test] - fn migration_works() { - new_test_ext().execute_with(|| { - assert_eq!(StorageVersion::get::>(), 0); - // Insert some preimages into the v0 storage: - - // Case 1: Unrequested without deposit - let (p, h) = preimage::(128); - v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::OldRequestStatus::Unrequested(None)); - // Case 2: Unrequested with deposit - let (p, h) = preimage::(1024); - v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::OldRequestStatus::Unrequested(Some((1, 1)))); - // Case 3: Requested by 0 (invalid) - let (p, h) = preimage::(8192); - v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::OldRequestStatus::Requested(0)); - // Case 4: Requested by 10 - let (p, h) = preimage::(65536); - v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::OldRequestStatus::Requested(10)); - - assert_eq!(v0::image_count::(), Some(4)); - assert_eq!(v1::image_count::(), None, "V1 storage should be corrupted"); - - let state = v1::Migration::::pre_upgrade().unwrap(); - let _w = v1::Migration::::on_runtime_upgrade(); - v1::Migration::::post_upgrade(state).unwrap(); - - // V0 and V1 share the same prefix, so `iter_values` still counts the same. - assert_eq!(v0::image_count::(), Some(3)); - assert_eq!(v1::image_count::(), Some(3)); // One gets skipped therefore 3. - assert_eq!(StorageVersion::get::>(), 1); - - // Case 1: Unrequested without deposit becomes system-requested - let (p, h) = preimage::(128); - assert_eq!(crate::PreimageFor::::get(&(h, 128)), Some(p)); - assert_eq!( - crate::StatusFor::::get(h), - Some(OldRequestStatus::Requested { deposit: None, count: 1, len: Some(128) }) - ); - // Case 2: Unrequested with deposit becomes unrequested - let (p, h) = preimage::(1024); - assert_eq!(crate::PreimageFor::::get(&(h, 1024)), Some(p)); - assert_eq!( - crate::StatusFor::::get(h), - Some(OldRequestStatus::Unrequested { deposit: (1, 1), len: 1024 }) - ); - // Case 3: Requested by 0 should be skipped - let (_, h) = preimage::(8192); - assert_eq!(crate::PreimageFor::::get(&(h, 8192)), None); - assert_eq!(crate::StatusFor::::get(h), None); - // Case 4: Requested by 10 becomes requested by 10 - let (p, h) = preimage::(65536); - assert_eq!(crate::PreimageFor::::get(&(h, 65536)), Some(p)); - assert_eq!( - crate::StatusFor::::get(h), - Some(OldRequestStatus::Requested { deposit: None, count: 10, len: Some(65536) }) - ); - }); - } - - /// Returns a preimage with a given size and its hash. - fn preimage( - len: usize, - ) -> (BoundedVec>, ::Hash) { - let p = bounded_vec![1; len]; - let h = ::Hashing::hash_of(&p); - (p, h) - } -} diff --git a/pallets/preimage/src/mock.rs b/pallets/preimage/src/mock.rs deleted file mode 100644 index 16f1fef..0000000 --- a/pallets/preimage/src/mock.rs +++ /dev/null @@ -1,197 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Preimage test environment. - -// Modified by Alex Wang 2023/11 - -use super::*; - -use crate as pallet_preimage; -use frame_support::{ - ord_parameter_types, - traits::{ConstU32, ConstU64, Everything}, - weights::constants::RocksDbWeight, -}; -use frame_system::EnsureSignedBy; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, Convert, IdentityLookup}, - BuildStorage, -}; - -mod hold { - use super::*; - - use frame_support::pallet_prelude::*; - use frame_support::traits::{ - fungible::MutateHold, - tokens::{Fortitude::Force, Precision::BestEffort}, - }; - use frame_support::{CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; - use pallet_preimage::storage::{Consideration, Footprint}; - use sp_runtime::traits::Convert; - /// Consideration method using a `fungible` balance frozen as the cost exacted for the footprint. - #[derive( - CloneNoBound, - EqNoBound, - PartialEqNoBound, - Encode, - Decode, - TypeInfo, - MaxEncodedLen, - RuntimeDebugNoBound, - )] - #[scale_info(skip_type_params(A, F, R, D))] - #[codec(mel_bound())] - pub struct HoldConsideration(F::Balance, PhantomData (A, R, D)>) - where - F: MutateHold; - impl< - A: 'static, - F: 'static + MutateHold, - R: 'static + Get, - D: 'static + Convert, - > Consideration for HoldConsideration - where - F::Balance: Send + Sync, - //>::Balance: Send + Sync, - { - fn new(who: &A, footprint: Footprint) -> Result { - let new = D::convert(footprint); - F::hold(&R::get(), who, new)?; - Ok(Self(new, PhantomData)) - } - fn update(self, who: &A, footprint: Footprint) -> Result { - let new = D::convert(footprint); - if self.0 > new { - F::release(&R::get(), who, self.0 - new, BestEffort)?; - } else if new > self.0 { - F::hold(&R::get(), who, new - self.0)?; - } - Ok(Self(new, PhantomData)) - } - fn drop(self, who: &A) -> Result<(), DispatchError> { - F::release(&R::get(), who, self.0, BestEffort).map(|_| ()) - } - fn burn(self, who: &A) { - let _ = F::burn_held(&R::get(), who, self.0, BestEffort, Force); - } - } -} - -type Block = frame_system::mocking::MockBlock; - -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system, - Balances: pallet_balances, - Preimage: pallet_preimage, - } -); - -impl frame_system::Config for Test { - type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; -} - -impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<5>; - type 
AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = (); - type MaxHolds = ConstU32<2>; -} - -ord_parameter_types! { - pub const One: u64 = 1; -} - -pub struct ConvertDeposit; -impl Convert for ConvertDeposit { - fn convert(a: Footprint) -> u64 { - a.count * 2 + a.size - } -} - -impl Config for Test { - type WeightInfo = (); - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type ManagerOrigin = EnsureSignedBy; - type Consideration = hold::HoldConsideration; -} - -pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let balances = pallet_balances::GenesisConfig:: { - balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)], - }; - balances.assimilate_storage(&mut t).unwrap(); - t.into() -} - -pub fn hashed(data: impl AsRef<[u8]>) -> H256 { - BlakeTwo256::hash(data.as_ref()) -} - -/// Insert an un-migrated preimage. -pub fn insert_old_unrequested( - s: u32, - acc: T::AccountId, -) -> ::Hash { - // The preimage size does not matter here as it is not touched. - let preimage = s.to_le_bytes(); - let hash = ::Hashing::hash(&preimage[..]); - - #[allow(deprecated)] - StatusFor::::insert( - &hash, - OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, - ); - hash -} diff --git a/pallets/preimage/src/preimages.rs b/pallets/preimage/src/preimages.rs deleted file mode 100644 index 532691d..0000000 --- a/pallets/preimage/src/preimages.rs +++ /dev/null @@ -1,341 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Stuff for dealing with hashed preimages. - -use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use scale_info::TypeInfo; -use sp_core::RuntimeDebug; -use sp_runtime::{ - traits::{ConstU32, Hash}, - DispatchError, -}; -use sp_std::borrow::Cow; - -pub type BoundedInline = crate::BoundedVec>; - -/// The maximum we expect a single legacy hash lookup to be. -const MAX_LEGACY_LEN: u32 = 1_000_000; - -#[derive(Encode, Decode, MaxEncodedLen, Clone, Eq, PartialEq, TypeInfo, RuntimeDebug)] -#[codec(mel_bound())] -pub enum Bounded { - /// A hash with no preimage length. We do not support creation of this except - /// for transitioning from legacy state. In the future we will make this a pure - /// `Dummy` item storing only the final `dummy` field. - Legacy { hash: H::Output, dummy: sp_std::marker::PhantomData }, - /// A an bounded `Call`. Its encoding must be at most 128 bytes. - Inline(BoundedInline), - /// A hash of the call together with an upper limit for its size.` - Lookup { hash: H::Output, len: u32 }, -} - -impl Bounded { - /// Casts the wrapped type into something that encodes alike. 
- /// - /// # Examples - /// ``` - /// use sp_runtime::traits::BlakeTwo256; - /// use preimages::Bounded; - /// - /// // Transmute from `String` to `&str`. - /// let x: Bounded = Bounded::Inline(Default::default()); - /// let _: Bounded<&str, BlakeTwo256> = x.transmute(); - /// ``` - pub fn transmute(self) -> Bounded - where - T: Encode + EncodeLike, - { - use Bounded::*; - match self { - Legacy { hash, .. } => Legacy { hash, dummy: sp_std::marker::PhantomData }, - Inline(x) => Inline(x), - Lookup { hash, len } => Lookup { hash, len }, - } - } - - /// Returns the hash of the preimage. - /// - /// The hash is re-calculated every time if the preimage is inlined. - pub fn hash(&self) -> H::Output { - use Bounded::*; - match self { - Lookup { hash, .. } | Legacy { hash, .. } => *hash, - Inline(x) => ::hash(x.as_ref()), - } - } - - /// Returns the hash to lookup the preimage. - /// - /// If this is a `Bounded::Inline`, `None` is returned as no lookup is required. - pub fn lookup_hash(&self) -> Option { - use Bounded::*; - match self { - Lookup { hash, .. } | Legacy { hash, .. } => Some(*hash), - Inline(_) => None, - } - } - - /// Returns the length of the preimage or `None` if the length is unknown. - pub fn len(&self) -> Option { - match self { - Self::Legacy { .. } => None, - Self::Inline(i) => Some(i.len() as u32), - Self::Lookup { len, .. } => Some(*len), - } - } - - /// Returns whether the image will require a lookup to be peeked. - pub fn lookup_needed(&self) -> bool { - match self { - Self::Inline(..) => false, - Self::Legacy { .. } | Self::Lookup { .. } => true, - } - } - - /// The maximum length of the lookup that is needed to peek `Self`. - pub fn lookup_len(&self) -> Option { - match self { - Self::Inline(..) => None, - Self::Legacy { .. } => Some(MAX_LEGACY_LEN), - Self::Lookup { len, .. } => Some(*len), - } - } - - /// Constructs a `Lookup` bounded item. - pub fn unrequested(hash: H::Output, len: u32) -> Self { - Self::Lookup { hash, len } - } - - /// Constructs a `Legacy` bounded item. - #[deprecated = "This API is only for transitioning to Scheduler v3 API"] - pub fn from_legacy_hash(hash: impl Into) -> Self { - Self::Legacy { hash: hash.into(), dummy: sp_std::marker::PhantomData } - } -} - -pub type FetchResult = Result, DispatchError>; - -/// A interface for looking up preimages from their hash on chain. -pub trait QueryPreimage { - /// The hasher used in the runtime. - type H: Hash; - - /// Returns whether a preimage exists for a given hash and if so its length. - fn len(hash: &::Out) -> Option; - - /// Returns the preimage for a given hash. If given, `len` must be the size of the preimage. - fn fetch(hash: &::Out, len: Option) -> FetchResult; - - /// Returns whether a preimage request exists for a given hash. - fn is_requested(hash: &::Out) -> bool; - - /// Request that someone report a preimage. Providers use this to optimise the economics for - /// preimage reporting. - fn request(hash: &::Out); - - /// Cancel a previous preimage request. - fn unrequest(hash: &::Out); - - /// Request that the data required for decoding the given `bounded` value is made available. - fn hold(bounded: &Bounded) { - use Bounded::*; - match bounded { - Inline(..) => {}, - Legacy { hash, .. } | Lookup { hash, .. } => Self::request(hash), - } - } - - /// No longer request that the data required for decoding the given `bounded` value is made - /// available. - fn drop(bounded: &Bounded) { - use Bounded::*; - match bounded { - Inline(..) => {}, - Legacy { hash, .. } | Lookup { hash, .. 
} => Self::unrequest(hash), - } - } - - /// Check to see if all data required for the given `bounded` value is available for its - /// decoding. - fn have(bounded: &Bounded) -> bool { - use Bounded::*; - match bounded { - Inline(..) => true, - Legacy { hash, .. } | Lookup { hash, .. } => Self::len(hash).is_some(), - } - } - - /// Create a `Bounded` instance based on the `hash` and `len` of the encoded value. - /// - /// It also directly requests the given `hash` using [`Self::request`]. - /// - /// This may not be `peek`-able or `realize`-able. - fn pick(hash: ::Out, len: u32) -> Bounded { - Self::request(&hash); - Bounded::Lookup { hash, len } - } - - /// Convert the given `bounded` instance back into its original instance, also returning the - /// exact size of its encoded form if it needed to be looked-up from a stored preimage). - /// - /// NOTE: This does not remove any data needed for realization. If you will no longer use the - /// `bounded`, call `realize` instead or call `drop` afterwards. - fn peek(bounded: &Bounded) -> Result<(T, Option), DispatchError> { - use Bounded::*; - match bounded { - Inline(data) => T::decode(&mut &data[..]).ok().map(|x| (x, None)), - Lookup { hash, len } => { - let data = Self::fetch(hash, Some(*len))?; - T::decode(&mut &data[..]).ok().map(|x| (x, Some(data.len() as u32))) - }, - Legacy { hash, .. } => { - let data = Self::fetch(hash, None)?; - T::decode(&mut &data[..]).ok().map(|x| (x, Some(data.len() as u32))) - }, - } - .ok_or(DispatchError::Corruption) - } - - /// Convert the given `bounded` value back into its original instance. If successful, - /// `drop` any data backing it. This will not break the realisability of independently - /// created instances of `Bounded` which happen to have identical data. - fn realize( - bounded: &Bounded, - ) -> Result<(T, Option), DispatchError> { - let r = Self::peek(bounded)?; - Self::drop(bounded); - Ok(r) - } -} - -/// A interface for managing preimages to hashes on chain. -/// -/// Note that this API does not assume any underlying user is calling, and thus -/// does not handle any preimage ownership or fees. Other system level logic that -/// uses this API should implement that on their own side. -pub trait StorePreimage: QueryPreimage { - /// The maximum length of preimage we can store. - /// - /// This is the maximum length of the *encoded* value that can be passed to `bound`. - const MAX_LENGTH: usize; - - /// Request and attempt to store the bytes of a preimage on chain. - /// - /// May return `DispatchError::Exhausted` if the preimage is just too big. - fn note(bytes: Cow<[u8]>) -> Result<::Out, DispatchError>; - - /// Attempt to clear a previously noted preimage. Exactly the same as `unrequest` but is - /// provided for symmetry. - fn unnote(hash: &::Out) { - Self::unrequest(hash) - } - - /// Convert an otherwise unbounded or large value into a type ready for placing in storage. - /// - /// The result is a type whose `MaxEncodedLen` is 131 bytes. - /// - /// NOTE: Once this API is used, you should use either `drop` or `realize`. - /// The value is also noted using [`Self::note`]. 
- fn bound(t: T) -> Result, DispatchError> { - let data = t.encode(); - let len = data.len() as u32; - Ok(match BoundedInline::try_from(data) { - Ok(bounded) => Bounded::Inline(bounded), - Err(unbounded) => Bounded::Lookup { hash: Self::note(unbounded.into())?, len }, - }) - } -} - -impl QueryPreimage for () { - type H = sp_runtime::traits::BlakeTwo256; - - fn len(_: &sp_core::H256) -> Option { - None - } - fn fetch(_: &sp_core::H256, _: Option) -> FetchResult { - Err(DispatchError::Unavailable) - } - fn is_requested(_: &sp_core::H256) -> bool { - false - } - fn request(_: &sp_core::H256) {} - fn unrequest(_: &sp_core::H256) {} -} - -impl StorePreimage for () { - const MAX_LENGTH: usize = 0; - fn note(_: Cow<[u8]>) -> Result { - Err(DispatchError::Exhausted) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::BoundedVec; - use sp_runtime::{bounded_vec, traits::BlakeTwo256}; - - #[test] - fn bounded_size_is_correct() { - assert_eq!(, BlakeTwo256> as MaxEncodedLen>::max_encoded_len(), 131); - } - - #[test] - fn bounded_basic_works() { - let data: BoundedVec = bounded_vec![b'a', b'b', b'c']; - let len = data.len() as u32; - let hash = BlakeTwo256::hash(&data).into(); - - // Inline works - { - let bound: Bounded, BlakeTwo256> = Bounded::Inline(data.clone()); - assert_eq!(bound.hash(), hash); - assert_eq!(bound.len(), Some(len)); - assert!(!bound.lookup_needed()); - assert_eq!(bound.lookup_len(), None); - } - // Legacy works - { - let bound: Bounded, BlakeTwo256> = - Bounded::Legacy { hash, dummy: Default::default() }; - assert_eq!(bound.hash(), hash); - assert_eq!(bound.len(), None); - assert!(bound.lookup_needed()); - assert_eq!(bound.lookup_len(), Some(1_000_000)); - } - // Lookup works - { - let bound: Bounded, BlakeTwo256> = - Bounded::Lookup { hash, len: data.len() as u32 }; - assert_eq!(bound.hash(), hash); - assert_eq!(bound.len(), Some(len)); - assert!(bound.lookup_needed()); - assert_eq!(bound.lookup_len(), Some(len)); - } - } - - #[test] - fn bounded_transmuting_works() { - let data: BoundedVec = bounded_vec![b'a', b'b', b'c']; - - // Transmute a `String` into a `&str`. - let x: Bounded = Bounded::Inline(data.clone()); - let y: Bounded<&str, BlakeTwo256> = x.transmute(); - assert_eq!(y, Bounded::Inline(data)); - } -} diff --git a/pallets/preimage/src/storage.rs b/pallets/preimage/src/storage.rs deleted file mode 100644 index fe1b9bf..0000000 --- a/pallets/preimage/src/storage.rs +++ /dev/null @@ -1,306 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Traits for encoding data related to pallet's storage items. 
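-
- // For orientation, a minimal standalone sketch of the pricing formula that the
- // `LinearStoragePrice` converter defined further down in this removed file
- // implements: price = base + slope * (count * size), using saturating `u64`
- // arithmetic. The free function `linear_storage_price` and the `main` harness
- // below are illustrative names only (they do not exist in the pallet); the
- // expected values mirror the `linear_storage_price_works` test at the end of
- // this file (Base = 7, Slope = 3).
- //
- // fn linear_storage_price(base: u64, slope: u64, count: u64, size: u64) -> u64 {
- //     count
- //         .saturating_mul(size)
- //         .saturating_mul(slope)
- //         .saturating_add(base)
- // }
- //
- // fn main() {
- //     assert_eq!(linear_storage_price(7, 3, 0, 0), 7);
- //     assert_eq!(linear_storage_price(7, 3, 1, 1), 10);
- //     assert_eq!(linear_storage_price(7, 3, 8, 1), 31);
- //     assert_eq!(linear_storage_price(7, 3, u64::MAX, u64::MAX), u64::MAX);
- // }
-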
- -use codec::{Encode, FullCodec, MaxEncodedLen}; -use core::marker::PhantomData; -use impl_trait_for_tuples::impl_for_tuples; -use scale_info::TypeInfo; -pub use sp_core::storage::TrackedStorageKey; -use sp_core::Get; -use sp_runtime::{ - traits::{Convert, Member, Saturating}, - DispatchError, RuntimeDebug, -}; -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; - -/// An instance of a pallet in the storage. -/// -/// It is required that these instances are unique, to support multiple instances per pallet in the -/// same runtime! -/// -/// E.g. for module MyModule default instance will have prefix "MyModule" and other instances -/// "InstanceNMyModule". -pub trait Instance: 'static { - /// Unique module prefix. E.g. "InstanceNMyModule" or "MyModule" - const PREFIX: &'static str; - /// Unique numerical identifier for an instance. - const INDEX: u8; -} - -// Dummy implementation for `()`. -impl Instance for () { - const PREFIX: &'static str = ""; - const INDEX: u8 = 0; -} - -/// An instance of a storage in a pallet. -/// -/// Define an instance for an individual storage inside a pallet. -/// The pallet prefix is used to isolate the storage between pallets, and the storage prefix is -/// used to isolate storages inside a pallet. -/// -/// NOTE: These information can be used to define storages in pallet such as a `StorageMap` which -/// can use keys after `twox_128(pallet_prefix())++twox_128(STORAGE_PREFIX)` -pub trait StorageInstance { - /// Prefix of a pallet to isolate it from other pallets. - fn pallet_prefix() -> &'static str; - - /// Return the prefix hash of pallet instance. - /// - /// NOTE: This hash must be `twox_128(pallet_prefix())`. - /// Should not impl this function by hand. Only use the default or macro generated impls. - fn pallet_prefix_hash() -> [u8; 16] { - sp_io::hashing::twox_128(Self::pallet_prefix().as_bytes()) - } - - /// Prefix given to a storage to isolate from other storages in the pallet. - const STORAGE_PREFIX: &'static str; - - /// Return the prefix hash of storage instance. - /// - /// NOTE: This hash must be `twox_128(STORAGE_PREFIX)`. - fn storage_prefix_hash() -> [u8; 16] { - sp_io::hashing::twox_128(Self::STORAGE_PREFIX.as_bytes()) - } - - /// Return the prefix hash of instance. - /// - /// NOTE: This hash must be `twox_128(pallet_prefix())++twox_128(STORAGE_PREFIX)`. - /// Should not impl this function by hand. Only use the default or macro generated impls. - fn prefix_hash() -> [u8; 32] { - let mut final_key = [0u8; 32]; - final_key[..16].copy_from_slice(&Self::pallet_prefix_hash()); - final_key[16..].copy_from_slice(&Self::storage_prefix_hash()); - - final_key - } -} - -/// Metadata about storage from the runtime. -#[derive( - codec::Encode, codec::Decode, RuntimeDebug, Eq, PartialEq, Clone, scale_info::TypeInfo, -)] -pub struct StorageInfo { - /// Encoded string of pallet name. - pub pallet_name: Vec, - /// Encoded string of storage name. - pub storage_name: Vec, - /// The prefix of the storage. All keys after the prefix are considered part of this storage. - pub prefix: Vec, - /// The maximum number of values in the storage, or none if no maximum specified. - pub max_values: Option, - /// The maximum size of key/values in the storage, or none if no maximum specified. - pub max_size: Option, -} - -/// A trait to give information about storage. -/// -/// It can be used to calculate PoV worst case size. 
-pub trait StorageInfoTrait { - fn storage_info() -> Vec; -} - -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] -impl StorageInfoTrait for Tuple { - fn storage_info() -> Vec { - let mut res = vec![]; - for_tuples!( #( res.extend_from_slice(&Tuple::storage_info()); )* ); - res - } -} - -/// Similar to [`StorageInfoTrait`], a trait to give partial information about storage. -/// -/// This is useful when a type can give some partial information with its generic parameter doesn't -/// implement some bounds. -pub trait PartialStorageInfoTrait { - fn partial_storage_info() -> Vec; -} - -/// Allows a pallet to specify storage keys to whitelist during benchmarking. -/// This means those keys will be excluded from the benchmarking performance -/// calculation. -pub trait WhitelistedStorageKeys { - /// Returns a [`Vec`] indicating the storage keys that - /// should be whitelisted during benchmarking. This means that those keys - /// will be excluded from the benchmarking performance calculation. - fn whitelisted_storage_keys() -> Vec; -} - -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] -impl WhitelistedStorageKeys for Tuple { - fn whitelisted_storage_keys() -> Vec { - // de-duplicate the storage keys - let mut combined_keys: BTreeSet = BTreeSet::new(); - for_tuples!( #( - for storage_key in Tuple::whitelisted_storage_keys() { - combined_keys.insert(storage_key); - } - )* ); - combined_keys.into_iter().collect::>() - } -} - -/// The resource footprint of a bunch of blobs. We assume only the number of blobs and their total -/// size in bytes matter. -#[derive(Default, Copy, Clone, Eq, PartialEq, RuntimeDebug)] -pub struct Footprint { - /// The number of blobs. - pub count: u64, - /// The total size of the blobs in bytes. - pub size: u64, -} - -impl Footprint { - pub fn from_parts(items: usize, len: usize) -> Self { - Self { count: items as u64, size: len as u64 } - } - - pub fn from_encodable(e: impl Encode) -> Self { - Self::from_parts(1, e.encoded_size()) - } -} - -/// A storage price that increases linearly with the number of elements and their size. -pub struct LinearStoragePrice(PhantomData<(Base, Slope, Balance)>); -impl Convert for LinearStoragePrice -where - Base: Get, - Slope: Get, - Balance: From + sp_runtime::Saturating, -{ - fn convert(a: Footprint) -> Balance { - let s: Balance = (a.count.saturating_mul(a.size)).into(); - s.saturating_mul(Slope::get()).saturating_add(Base::get()) - } -} - -/// Some sort of cost taken from account temporarily in order to offset the cost to the chain of -/// holding some data [`Footprint`] in state. -/// -/// The cost may be increased, reduced or dropped entirely as the footprint changes. -/// -/// A single ticket corresponding to some particular datum held in storage. This is an opaque -/// type, but must itself be stored and generally it should be placed alongside whatever data -/// the ticket was created for. -/// -/// While not technically a linear type owing to the need for `FullCodec`, *this should be -/// treated as one*. Don't type to duplicate it, and remember to drop it when you're done with -/// it. 
-#[must_use] -pub trait Consideration: Member + FullCodec + TypeInfo + MaxEncodedLen { - /// Create a ticket for the `new` footprint attributable to `who`. This ticket *must* ultimately - /// be consumed through `update` or `drop` once the footprint changes or is removed. - fn new(who: &AccountId, new: Footprint) -> Result; - - /// Optionally consume an old ticket and alter the footprint, enforcing the new cost to `who` - /// and returning the new ticket (or an error if there was an issue). - /// - /// For creating tickets and dropping them, you can use the simpler `new` and `drop` instead. - fn update(self, who: &AccountId, new: Footprint) -> Result; - - /// Consume a ticket for some `old` footprint attributable to `who` which should now been freed. - fn drop(self, who: &AccountId) -> Result<(), DispatchError>; - - /// Consume a ticket for some `old` footprint attributable to `who` which should be sacrificed. - /// - /// This is infallible. In the general case (and it is left unimplemented), then it is - /// equivalent to the consideration never being dropped. Cases which can handle this properly - /// should implement, but it *MUST* rely on the loss of the consideration to the owner. - fn burn(self, _: &AccountId) { - let _ = self; - } -} - -impl Consideration for () { - fn new(_: &A, _: Footprint) -> Result { - Ok(()) - } - fn update(self, _: &A, _: Footprint) -> Result<(), DispatchError> { - Ok(()) - } - fn drop(self, _: &A) -> Result<(), DispatchError> { - Ok(()) - } -} - -macro_rules! impl_incrementable { - ($($type:ty),+) => { - $( - impl Incrementable for $type { - fn increment(&self) -> Option { - let mut val = self.clone(); - val.saturating_inc(); - Some(val) - } - - fn initial_value() -> Option { - Some(0) - } - } - )+ - }; -} - -/// A trait representing an incrementable type. -/// -/// The `increment` and `initial_value` functions are fallible. -/// They should either both return `Some` with a valid value, or `None`. -pub trait Incrementable -where - Self: Sized, -{ - /// Increments the value. - /// - /// Returns `Some` with the incremented value if it is possible, or `None` if it is not. - fn increment(&self) -> Option; - - /// Returns the initial value. - /// - /// Returns `Some` with the initial value if it is available, or `None` if it is not. - fn initial_value() -> Option; -} - -impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); - -#[cfg(test)] -mod tests { - use super::*; - use sp_core::ConstU64; - - #[test] - fn linear_storage_price_works() { - type Linear = LinearStoragePrice, ConstU64<3>, u64>; - let p = |count, size| Linear::convert(Footprint { count, size }); - - assert_eq!(p(0, 0), 7); - assert_eq!(p(0, 1), 7); - assert_eq!(p(1, 0), 7); - - assert_eq!(p(1, 1), 10); - assert_eq!(p(8, 1), 31); - assert_eq!(p(1, 8), 31); - - assert_eq!(p(u64::MAX, u64::MAX), u64::MAX); - } -} diff --git a/pallets/preimage/src/tests.rs b/pallets/preimage/src/tests.rs deleted file mode 100644 index eeaf511..0000000 --- a/pallets/preimage/src/tests.rs +++ /dev/null @@ -1,525 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Scheduler tests. - -//Modified by Alex Wang 2023/11 - -#![cfg(test)] - -use super::*; -use crate::mock::*; - -use frame_support::{ - assert_err, assert_noop, assert_ok, assert_storage_noop, - dispatch::Pays, - traits::{fungible::InspectHold, Bounded, BoundedInline}, - StorageNoopGuard, -}; -use sp_runtime::{bounded_vec, TokenError}; - -/// Returns one `Inline`, `Lookup` and `Legacy` item each with different data and hash. -pub fn make_bounded_values() -> ( - //Bounded, ::Hashing>, - //Bounded, ::Hashing>, - //Bounded, ::Hashing>, - Bounded>, - Bounded>, - Bounded>, -) { - let data: BoundedInline = bounded_vec![1]; - let inline = Bounded::>::Inline(data); - - let data = vec![1, 2]; - let hash = ::Hashing::hash(&data[..]).into(); - let len = data.len() as u32; - let lookup = Bounded::>::unrequested(hash, len); - - let data = vec![1, 2, 3]; - let hash = ::Hashing::hash(&data[..]).into(); - let legacy = Bounded::>::Legacy { hash, dummy: Default::default() }; - - (inline, lookup, legacy) -} - -#[test] -fn user_note_preimage_works() { - new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - assert_eq!(Balances::balance_on_hold(&(), &2), 3); - assert_eq!(Balances::free_balance(2), 97); - - let h = hashed([1]); - assert!(Preimage::have_preimage(&h)); - assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); - - assert_noop!( - Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1]), - Error::::AlreadyNoted, - ); - assert_noop!( - Preimage::note_preimage(RuntimeOrigin::signed(0), vec![2]), - TokenError::CannotCreateHold, - ); - }); -} - -#[test] -fn manager_note_preimage_works() { - new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::free_balance(1), 100); - - let h = hashed([1]); - assert!(Preimage::have_preimage(&h)); - assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); - - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); - }); -} - -#[test] -fn user_unnote_preimage_works() { - new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(3), hashed([1])), - Error::::NotAuthorized - ); - assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([2])), - Error::::NotNoted - ); - - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); - assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1])), - Error::::NotNoted - ); - - let h = hashed([1]); - assert!(!Preimage::have_preimage(&h)); - assert_eq!(Preimage::get_preimage(&h), None); - }); -} - -#[test] -fn manager_unnote_preimage_works() { - new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1])), - Error::::NotNoted - ); - - let h = 
hashed([1]); - assert!(!Preimage::have_preimage(&h)); - assert_eq!(Preimage::get_preimage(&h), None); - }); -} - -#[test] -fn manager_unnote_user_preimage_works() { - new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(3), hashed([1])), - Error::::NotAuthorized - ); - assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([2])), - Error::::NotNoted - ); - - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1]))); - - let h = hashed([1]); - assert!(!Preimage::have_preimage(&h)); - assert_eq!(Preimage::get_preimage(&h), None); - }); -} - -#[test] -fn requested_then_noted_preimage_cannot_be_unnoted() { - new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1]))); - // it's still here. - - let h = hashed([1]); - assert!(Preimage::have_preimage(&h)); - assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); - - // now it's gone - assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert!(!Preimage::have_preimage(&hashed([1]))); - }); -} - -#[test] -fn request_note_order_makes_no_difference() { - let one_way = new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - ( - RequestStatusFor::::iter().collect::>(), - PreimageFor::::iter().collect::>(), - ) - }); - new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); - let other_way = ( - RequestStatusFor::::iter().collect::>(), - PreimageFor::::iter().collect::>(), - ); - assert_eq!(one_way, other_way); - }); -} - -#[test] -fn requested_then_user_noted_preimage_is_free() { - new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - assert_eq!(Balances::reserved_balance(2), 0); - assert_eq!(Balances::free_balance(2), 100); - - let h = hashed([1]); - assert!(Preimage::have_preimage(&h)); - assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); - }); -} - -#[test] -fn request_user_note_order_makes_no_difference() { - let one_way = new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - ( - RequestStatusFor::::iter().collect::>(), - PreimageFor::::iter().collect::>(), - ) - }); - new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); - let other_way = ( - RequestStatusFor::::iter().collect::>(), - PreimageFor::::iter().collect::>(), - ); - assert_eq!(one_way, other_way); - }); -} - -#[test] -fn unrequest_preimage_works() { - new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - 
assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - assert_noop!( - Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([2])), - Error::::NotRequested - ); - - assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert!(Preimage::have_preimage(&hashed([1]))); - - assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_noop!( - Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1])), - Error::::NotRequested - ); - }); -} - -#[test] -fn user_noted_then_requested_preimage_is_refunded_once_only() { - new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1; 3])); - assert_eq!(Balances::balance_on_hold(&(), &2), 5); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - assert_eq!(Balances::balance_on_hold(&(), &2), 8); - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); - // Still have hold from `vec[1; 3]`. - assert_eq!(Balances::balance_on_hold(&(), &2), 5); - }); -} - -#[test] -fn noted_preimage_use_correct_map() { - new_test_ext().execute_with(|| { - // Add one preimage per bucket... - for i in 0..7 { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![0; 128 << (i * 2)])); - } - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![0; MAX_SIZE as usize])); - assert_eq!(PreimageFor::::iter().count(), 8); - - // All are present - assert_eq!(RequestStatusFor::::iter().count(), 8); - - // Now start removing them again... - for i in 0..7 { - assert_ok!(Preimage::unnote_preimage( - RuntimeOrigin::signed(1), - hashed(vec![0; 128 << (i * 2)]) - )); - } - assert_eq!(PreimageFor::::iter().count(), 1); - assert_ok!(Preimage::unnote_preimage( - RuntimeOrigin::signed(1), - hashed(vec![0; MAX_SIZE as usize]) - )); - assert_eq!(PreimageFor::::iter().count(), 0); - - // All are gone - assert_eq!(RequestStatusFor::::iter().count(), 0); - }); -} - -/// The `StorePreimage` and `QueryPreimage` traits work together. -#[test] -fn query_and_store_preimage_workflow() { - new_test_ext().execute_with(|| { - let _guard = StorageNoopGuard::default(); - let data: Vec = vec![1; 512]; - let encoded = data.encode(); - - // Bound an unbound value. - let bound = Preimage::bound(data.clone()).unwrap(); - let (len, hash) = (bound.len().unwrap(), bound.hash()); - - assert_eq!(hash, ::Hashing::hash(&encoded).into()); - assert_eq!(bound.len(), Some(len)); - assert!(bound.lookup_needed(), "Should not be Inlined"); - assert_eq!(bound.lookup_len(), Some(len)); - - // The value is requested and available. - assert!(Preimage::is_requested(&hash)); - assert!(::have(&bound)); - assert_eq!(Preimage::len(&hash), Some(len)); - - // It can be fetched with length. - assert_eq!(Preimage::fetch(&hash, Some(len)).unwrap(), encoded); - // ... and without length. - assert_eq!(Preimage::fetch(&hash, None).unwrap(), encoded); - // ... but not with wrong length. - assert_err!(Preimage::fetch(&hash, Some(0)), DispatchError::Unavailable); - - // It can be peeked and decoded correctly. - assert_eq!(Preimage::peek::>(&bound).unwrap(), (data.clone(), Some(len))); - // Request it two more times. 
- assert_eq!(Preimage::pick::>(hash, len), bound); - Preimage::request(&hash); - // It is requested thrice. - assert!(matches!( - RequestStatusFor::::get(&hash).unwrap(), - RequestStatus::Requested { count: 3, .. } - )); - - // It can be realized and decoded correctly. - assert_eq!(Preimage::realize::>(&bound).unwrap(), (data.clone(), Some(len))); - assert!(matches!( - RequestStatusFor::::get(&hash).unwrap(), - RequestStatus::Requested { count: 2, .. } - )); - // Dropping should unrequest. - Preimage::drop(&bound); - assert!(matches!( - RequestStatusFor::::get(&hash).unwrap(), - RequestStatus::Requested { count: 1, .. } - )); - - // Is still available. - assert!(::have(&bound)); - // Manually unnote it. - Preimage::unnote(&hash); - // Is not available anymore. - assert!(!::have(&bound)); - assert_err!(Preimage::fetch(&hash, Some(len)), DispatchError::Unavailable); - // And not requested since the traits assume permissioned origin. - assert!(!Preimage::is_requested(&hash)); - - // No storage changes remain. Checked by `StorageNoopGuard`. - }); -} - -/// The request function behaves as expected. -#[test] -fn query_preimage_request_works() { - new_test_ext().execute_with(|| { - let _guard = StorageNoopGuard::default(); - let data: Vec = vec![1; 10]; - let hash = ::Hashing::hash(&data[..]).into(); - - // Request the preimage. - ::request(&hash); - - // The preimage is requested with unknown length and cannot be fetched. - assert!(::is_requested(&hash)); - assert!(::len(&hash).is_none()); - assert_noop!(::fetch(&hash, None), DispatchError::Unavailable); - - // Request again. - ::request(&hash); - // The preimage is still requested. - assert!(::is_requested(&hash)); - assert!(::len(&hash).is_none()); - assert_noop!(::fetch(&hash, None), DispatchError::Unavailable); - // But there is only one entry in the map. - assert_eq!(RequestStatusFor::::iter().count(), 1); - - // Un-request the preimage. - ::unrequest(&hash); - // It is still requested. - assert!(::is_requested(&hash)); - // Un-request twice. - ::unrequest(&hash); - // It is not requested anymore. - assert!(!::is_requested(&hash)); - // And there is no entry in the map. - assert_eq!(RequestStatusFor::::iter().count(), 0); - }); -} - -/// The `QueryPreimage` functions can be used together with `Bounded` values. -#[test] -fn query_preimage_hold_and_drop_work() { - new_test_ext().execute_with(|| { - let _guard = StorageNoopGuard::default(); - let (inline, lookup, legacy) = make_bounded_values(); - - // `hold` does nothing for `Inline` values. - assert_storage_noop!(::hold(&inline)); - // `hold` requests `Lookup` values. - ::hold(&lookup); - assert!(::is_requested(&lookup.hash())); - // `hold` requests `Legacy` values. - ::hold(&legacy); - assert!(::is_requested(&legacy.hash())); - - // There are two values requested in total. - assert_eq!(RequestStatusFor::::iter().count(), 2); - - // Cleanup by dropping both. - ::drop(&lookup); - assert!(!::is_requested(&lookup.hash())); - ::drop(&legacy); - assert!(!::is_requested(&legacy.hash())); - - // There are no values requested anymore. - assert_eq!(RequestStatusFor::::iter().count(), 0); - }); -} - -/// The `StorePreimage` trait works as expected. -#[test] -fn store_preimage_basic_works() { - new_test_ext().execute_with(|| { - let _guard = StorageNoopGuard::default(); - let data: Vec = vec![1; 512]; // Too large to inline. - let encoded = Cow::from(data.encode()); - - // Bound the data. - let bound = ::bound(data.clone()).unwrap(); - // The preimage can be peeked. 
- assert_ok!(::peek(&bound)); - // Un-note the preimage. - ::unnote(&bound.hash()); - // The preimage cannot be peeked anymore. - assert_err!(::peek(&bound), DispatchError::Unavailable); - // Noting the wrong pre-image does not make it peek-able. - assert_ok!(::note(Cow::Borrowed(&data))); - assert_err!(::peek(&bound), DispatchError::Unavailable); - - // Manually note the preimage makes it peek-able again. - assert_ok!(::note(encoded.clone())); - // Noting again works. - assert_ok!(::note(encoded)); - assert_ok!(::peek(&bound)); - - // Cleanup. - ::unnote(&bound.hash()); - let data_hash = ::Hashing::hash(&data); - ::unnote(&data_hash.into()); - - // No storage changes remain. Checked by `StorageNoopGuard`. - }); -} - -#[test] -fn store_preimage_note_too_large_errors() { - new_test_ext().execute_with(|| { - // Works with `MAX_LENGTH`. - let len = ::MAX_LENGTH; - let data = vec![0u8; len]; - assert_ok!(::note(data.into())); - - // Errors with `MAX_LENGTH+1`. - let data = vec![0u8; len + 1]; - assert_err!(::note(data.into()), DispatchError::Exhausted); - }); -} - -#[test] -fn store_preimage_bound_too_large_errors() { - new_test_ext().execute_with(|| { - // Using `MAX_LENGTH` number of bytes in a vector does not work - // since SCALE prepends the length. - let len = ::MAX_LENGTH; - let data: Vec = vec![0; len]; - assert_err!(::bound(data.clone()), DispatchError::Exhausted); - - // Works with `MAX_LENGTH-4`. - let data: Vec = vec![0; len - 4]; - assert_ok!(::bound(data.clone())); - }); -} - -#[test] -fn ensure_updated_works() { - #![allow(deprecated)] - new_test_ext().execute_with(|| { - let alice = 2; - - for i in 0..100 { - let hashes = - (0..100).map(|j| insert_old_unrequested::(j, alice)).collect::>(); - let old = hashes.iter().take(i).cloned().collect::>(); - let bad = vec![hashed([0; 32]); 100 - i]; - - let hashes = [old.as_slice(), bad.as_slice()].concat(); - let res = Preimage::ensure_updated(RuntimeOrigin::signed(alice), hashes).unwrap(); - - // Alice pays a fee when less than 90% of the hashes are new. - let p90 = if i < 90 { Pays::Yes } else { Pays::No }; - assert_eq!(res.pays_fee, p90); - - assert_eq!(RequestStatusFor::::iter().count(), i); - assert_eq!(StatusFor::::iter().count(), 100 - i); - } - }); -} diff --git a/pallets/preimage/src/weights.rs b/pallets/preimage/src/weights.rs deleted file mode 100644 index c11ab74..0000000 --- a/pallets/preimage/src/weights.rs +++ /dev/null @@ -1,470 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_preimage` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-mia4uyug-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` - -// Executed Command: -// target/production/substrate-node -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_preimage -// --chain=dev -// --header=./substrate/HEADER-APACHE2 -// --output=./substrate/frame/preimage/src/weights.rs -// --template=./substrate/.maintain/frame-weight-template.hbs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_preimage`. -pub trait WeightInfo { - fn note_preimage(s: u32, ) -> Weight; - fn note_requested_preimage(s: u32, ) -> Weight; - fn note_no_deposit_preimage(s: u32, ) -> Weight; - fn unnote_preimage() -> Weight; - fn unnote_no_deposit_preimage() -> Weight; - fn request_preimage() -> Weight; - fn request_no_deposit_preimage() -> Weight; - fn request_unnoted_preimage() -> Weight; - fn request_requested_preimage() -> Weight; - fn unrequest_preimage() -> Weight; - fn unrequest_unnoted_preimage() -> Weight; - fn unrequest_multi_referenced_preimage() -> Weight; - fn ensure_updated(n: u32, ) -> Weight; -} - -/// Weights for `pallet_preimage` using the Substrate node and recommended hardware. -pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 4194304]`. - fn note_preimage(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3556` - // Minimum execution time: 15_936_000 picoseconds. - Weight::from_parts(16_271_000, 3556) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_916, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 4194304]`. - fn note_requested_preimage(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3556` - // Minimum execution time: 16_468_000 picoseconds. 
- Weight::from_parts(17_031_000, 3556) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_948, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 4194304]`. - fn note_no_deposit_preimage(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3556` - // Minimum execution time: 16_342_000 picoseconds. - Weight::from_parts(16_535_000, 3556) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_906, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - fn unnote_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `3556` - // Minimum execution time: 31_047_000 picoseconds. - Weight::from_parts(34_099_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - fn unnote_no_deposit_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `144` - // Estimated: `3556` - // Minimum execution time: 32_559_000 picoseconds. - Weight::from_parts(36_677_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn request_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `3556` - // Minimum execution time: 27_887_000 picoseconds. 
- Weight::from_parts(30_303_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn request_no_deposit_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `144` - // Estimated: `3556` - // Minimum execution time: 17_256_000 picoseconds. - Weight::from_parts(19_481_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn request_unnoted_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3556` - // Minimum execution time: 22_344_000 picoseconds. - Weight::from_parts(23_868_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn request_requested_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3556` - // Minimum execution time: 10_542_000 picoseconds. - Weight::from_parts(11_571_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - fn unrequest_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `144` - // Estimated: `3556` - // Minimum execution time: 29_054_000 picoseconds. - Weight::from_parts(32_996_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn unrequest_unnoted_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3556` - // Minimum execution time: 10_775_000 picoseconds. 
- Weight::from_parts(11_937_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn unrequest_multi_referenced_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3556` - // Minimum execution time: 10_696_000 picoseconds. - Weight::from_parts(11_717_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1024 w:1024) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:0 w:1024) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1024]`. - fn ensure_updated(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `193 + n * (91 ±0)` - // Estimated: `3593 + n * (2566 ±0)` - // Minimum execution time: 2_452_000 picoseconds. - Weight::from_parts(2_641_000, 3593) - // Standard Error: 19_797 - .saturating_add(Weight::from_parts(15_620_946, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) - } -} - -// For backwards compatibility and tests. -impl WeightInfo for () { - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 4194304]`. - fn note_preimage(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3556` - // Minimum execution time: 15_936_000 picoseconds. 
- Weight::from_parts(16_271_000, 3556) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_916, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 4194304]`. - fn note_requested_preimage(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3556` - // Minimum execution time: 16_468_000 picoseconds. - Weight::from_parts(17_031_000, 3556) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_948, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 4194304]`. - fn note_no_deposit_preimage(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3556` - // Minimum execution time: 16_342_000 picoseconds. - Weight::from_parts(16_535_000, 3556) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_906, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - fn unnote_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `3556` - // Minimum execution time: 31_047_000 picoseconds. 
- Weight::from_parts(34_099_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - fn unnote_no_deposit_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `144` - // Estimated: `3556` - // Minimum execution time: 32_559_000 picoseconds. - Weight::from_parts(36_677_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn request_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `3556` - // Minimum execution time: 27_887_000 picoseconds. - Weight::from_parts(30_303_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn request_no_deposit_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `144` - // Estimated: `3556` - // Minimum execution time: 17_256_000 picoseconds. - Weight::from_parts(19_481_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn request_unnoted_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3556` - // Minimum execution time: 22_344_000 picoseconds. - Weight::from_parts(23_868_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn request_requested_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3556` - // Minimum execution time: 10_542_000 picoseconds. 
- Weight::from_parts(11_571_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - fn unrequest_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `144` - // Estimated: `3556` - // Minimum execution time: 29_054_000 picoseconds. - Weight::from_parts(32_996_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn unrequest_unnoted_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3556` - // Minimum execution time: 10_775_000 picoseconds. - Weight::from_parts(11_937_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - fn unrequest_multi_referenced_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3556` - // Minimum execution time: 10_696_000 picoseconds. - Weight::from_parts(11_717_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Preimage::StatusFor` (r:1024 w:1024) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:0 w:1024) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1024]`. - fn ensure_updated(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `193 + n * (91 ±0)` - // Estimated: `3593 + n * (2566 ±0)` - // Minimum execution time: 2_452_000 picoseconds. 
- Weight::from_parts(2_641_000, 3593) - // Standard Error: 19_797 - .saturating_add(Weight::from_parts(15_620_946, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) - } -} diff --git a/primitives/system/src/lib.rs b/primitives/system/src/lib.rs index a2ef37a..fb52ae5 100644 --- a/primitives/system/src/lib.rs +++ b/primitives/system/src/lib.rs @@ -15,7 +15,6 @@ // along with Magnet. If not, see . #![cfg_attr(not(feature = "std"), no_std)] -#![deny(unused_crate_dependencies)] use frame_support::weights::Weight; use sp_core::crypto::AccountId32; diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index d778a3a..61c8cc8 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parachain-magnet-runtime" -version = "0.5.3" +version = "0.5.6" authors = ["Magnet"] description = "A scalable evm smart contract platform runtime, utilizing DOT as the gas fee." license = "Apache License 2.0" @@ -31,9 +31,7 @@ magnet-primitives-order = { path = "../primitives/order", default-features = fal pallet-pot = { path = "../pallets/pot", default-features = false } pallet-pot-runtime-api = { path = "../pallets/pot/runtime-api", default-features = false } pallet-assurance = { path = "../pallets/assurance", default-features = false } -pallet-xcm = { path = "../pallets/pallet-xcm", default-features = false} pallet-liquidation = {path = "../pallets/liquidation", default-features = false} -pallet-preimage = { path = "../pallets/preimage", default-features = false} # Substrate frame-benchmarking = { workspace = true, default-features = false, optional = true} @@ -47,6 +45,7 @@ pallet-aura = { workspace = true, default-features = false} pallet-authorship = { workspace = true, default-features = false} pallet-balances = { workspace = true, default-features = false} pallet-assets = { workspace = true, default-features = false} +pallet-preimage = { workspace = true, default-features = false} pallet-session = { workspace = true, default-features = false} pallet-sudo = { workspace = true, default-features = false} pallet-collective = { workspace = true, default-features = false} @@ -74,10 +73,11 @@ pallet-conviction-voting = { workspace = true, default-features = false} pallet-ranked-collective = { workspace = true, default-features = false} pallet-scheduler = { workspace = true, default-features = false} pallet-insecure-randomness-collective-flip = { workspace = true, default-features = false} -pallet-contracts-primitives = { workspace = true, default-features = false} pallet-contracts = { workspace = true, default-features = false} +pallet-message-queue = { workspace = true, default-features = false } # Polkadot +pallet-xcm = { workspace = true, default-features = false} polkadot-parachain-primitives = { workspace = true, default-features = false} polkadot-primitives = { workspace = true, default-features = false} polkadot-runtime-common = { workspace = true, default-features = false} @@ -87,7 +87,6 @@ xcm-executor = { workspace = true, default-features = false} # Cumulus cumulus-pallet-aura-ext = { workspace = true, default-features = false } -cumulus-pallet-dmp-queue = { workspace = true, default-features = false } cumulus-pallet-parachain-system = { workspace = true, 
default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-session-benchmarking = { workspace = true, default-features = false} cumulus-pallet-xcm = { workspace = true, default-features = false } @@ -123,7 +122,6 @@ with-paritydb-weights = [] std = [ "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", @@ -142,6 +140,7 @@ std = [ "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", + "pallet-message-queue/std", "pallet-session/std", "pallet-sudo/std", "pallet-timestamp/std", @@ -203,7 +202,6 @@ std = [ "pallet-ranked-collective/std", "pallet-scheduler/std", "pallet-insecure-randomness-collective-flip/std", - "pallet-contracts-primitives/std", "pallet-contracts/std", ] @@ -218,6 +216,7 @@ runtime-benchmarks = [ "hex-literal", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", @@ -249,7 +248,6 @@ runtime-benchmarks = [ try-runtime = [ "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -262,6 +260,7 @@ try-runtime = [ "pallet-authorship/try-runtime", "pallet-balances/try-runtime", "pallet-collator-selection/try-runtime", + "pallet-message-queue/try-runtime", "pallet-session/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", diff --git a/runtime/src/contracts_config.rs b/runtime/src/contracts_config.rs index 864a324..1d02118 100644 --- a/runtime/src/contracts_config.rs +++ b/runtime/src/contracts_config.rs @@ -76,4 +76,5 @@ impl pallet_contracts::Config for Runtime { type Environment = (); type Debug = (); type Migrations = (); + type Xcm = (); } diff --git a/runtime/src/governance/fellowship.rs b/runtime/src/governance/fellowship.rs index 5d02109..e53d3c1 100644 --- a/runtime/src/governance/fellowship.rs +++ b/runtime/src/governance/fellowship.rs @@ -339,7 +339,15 @@ impl pallet_ranked_collective::Config for Runtime TryMapSuccess>>, >, >; + // Exchange is by any of: + // - Root can exchange arbitrarily. 
+ // - the Fellows origin; + type ExchangeOrigin = + EitherOf>, Fellows>; type Polls = FellowshipReferenda; type MinRankOfClass = sp_runtime::traits::Identity; + type MemberSwappedHandler = (); type VoteWeight = pallet_ranked_collective::Geometric; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkSetup = (); } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 2ddebbf..3d5e174 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -44,13 +44,13 @@ use frame_support::weights::constants::ParityDbWeight as RuntimeDbWeight; use frame_support::weights::constants::RocksDbWeight as RuntimeDbWeight; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, parameter_types, traits::{ - AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, Currency, - EitherOf, EitherOfDiverse, Everything, FindAuthor, Imbalance, OnFinalize, OnUnbalanced, - PrivilegeCmp, + fungible::HoldConsideration, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, + ConstU64, ConstU8, Currency, EitherOf, EitherOfDiverse, Everything, FindAuthor, Imbalance, + LinearStoragePrice, OnFinalize, OnUnbalanced, PrivilegeCmp, TransformOrigin, }, weights::{ constants::WEIGHT_REF_TIME_PER_SECOND, ConstantMultiplier, Weight, WeightToFeeCoefficient, @@ -58,6 +58,7 @@ use frame_support::{ }, PalletId, }; + use frame_system::{ limits::{BlockLength, BlockWeights}, pallet_prelude::BlockNumberFor, @@ -74,19 +75,22 @@ pub use sp_runtime::BuildStorage; // Cumulus imports pub use parachains_common::impls::{AccountIdOf, DealWithFees}; +pub use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; // Polkadot imports +use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight}; // XCM Imports -use xcm::latest::prelude::BodyId; -use xcm::opaque::lts::{InteriorMultiLocation, Junction::PalletInstance}; -use xcm_executor::XcmExecutor; - -use cumulus_primitives_core::{ParaId, PersistedValidationData}; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId, PersistedValidationData}; pub use pallet_order::{self, OrderGasCost}; +use xcm::latest::prelude::{ + Asset as MultiAsset, BodyId, InteriorLocation as InteriorMultiLocation, + Junction::PalletInstance, Location as MultiLocation, +}; +use xcm_executor::XcmExecutor; use fp_evm::weight_per_gas; use fp_rpc::TransactionStatus; @@ -103,11 +107,6 @@ use precompiles::FrontierPrecompiles; use pallet_pot::PotNameBtreemap; -/// Constant values used within the runtime. -use parachains_common::kusama::currency::*; - -use pallet_preimage::storage::LinearStoragePrice; - // Governance and configurations. pub mod governance; use governance::{pallet_custom_origins, AuctionAdmin}; @@ -349,6 +348,10 @@ pub const DAYS: BlockNumber = HOURS * 24; pub const UNIT: Balance = 1_000_000_000_000_000_000; pub const MILLIUNIT: Balance = 1_000_000_000_000_000; pub const MICROUNIT: Balance = 1_000_000_000_000; +pub const QUID: Balance = UNIT / 30; +pub const CENTS: Balance = QUID / 100; +pub const GRAND: Balance = QUID * 1_000; +pub const MILLICENTS: Balance = CENTS / 1_000; /// The existential deposit. Set to 1/10 of the Connected Relay Chain. pub const EXISTENTIAL_DEPOSIT: Balance = MILLIUNIT; @@ -419,6 +422,10 @@ parameter_types! { // Configure FRAME pallets to include in runtime. 
+/// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from +/// [`ParaChainDefaultConfig`](`struct@frame_system::config_preludes::ParaChainDefaultConfig`), +/// but overridden as needed. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; @@ -497,8 +504,8 @@ impl pallet_balances::Config for Runtime { type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); - type MaxHolds = ConstU32<2>; type MaxFreezes = ConstU32<1>; } @@ -559,14 +566,16 @@ impl pallet_sudo::Config for Runtime { parameter_types! { pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = (); type RuntimeEvent = RuntimeEvent; type OnSystemEvent = (); type SelfParaId = parachain_info::Pallet; type OutboundXcmpMessageSource = XcmpQueue; - type DmpMessageHandler = DmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = XcmpQueue; type ReservedXcmpWeight = ReservedXcmpWeight; @@ -581,24 +590,45 @@ impl cumulus_pallet_parachain_system::Config for Runtime { impl parachain_info::Config for Runtime {} +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + impl cumulus_pallet_aura_ext::Config for Runtime {} impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; + // Enqueue XCMP messages from siblings for later processing. + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; type ChannelInfo = ParachainSystem; type VersionWrapper = (); - type ExecuteOverweightOrigin = EnsureRoot; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = (); - type PriceForSiblingDelivery = (); -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; - type ExecuteOverweightOrigin = EnsureRoot; + type PriceForSiblingDelivery = NoPriceForMessageDelivery; } parameter_types! { @@ -743,6 +773,7 @@ parameter_types! 
{ pub const GasLimitPovSizeRatio: u64 = BLOCK_GAS_LIMIT.saturating_div(MAX_POV_SIZE); pub PrecompilesValue: FrontierPrecompiles = FrontierPrecompiles::<_>::new(); pub WeightPerGas: Weight = Weight::from_parts(weight_per_gas(BLOCK_GAS_LIMIT, NORMAL_DISPATCH_RATIO, WEIGHT_MILLISECS_PER_BLOCK), 0); + pub SuicideQuickClearLimit: u32 = 0; } impl pallet_evm::Config for Runtime { @@ -764,6 +795,7 @@ impl pallet_evm::Config for Runtime { type OnCreate = (); type FindAuthor = FindAuthorTruncated; type GasLimitPovSizeRatio = GasLimitPovSizeRatio; + type SuicideQuickClearLimit = SuicideQuickClearLimit; type Timestamp = Timestamp; type WeightInfo = pallet_evm::weights::SubstrateWeight; } @@ -930,66 +962,6 @@ impl pallet_liquidation::Config for Runtime { type ProfitDistributionCycle = ProfitDistributionCycle; } -mod hold { - use super::*; - - use frame_support::pallet_prelude::*; - use frame_support::traits::{ - fungible::MutateHold, - tokens::{Fortitude::Force, Precision::BestEffort}, - }; - use frame_support::{CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; - use pallet_preimage::storage::{Consideration, Footprint}; - use sp_runtime::traits::Convert; - /// Consideration method using a `fungible` balance frozen as the cost exacted for the footprint. - #[derive( - CloneNoBound, - EqNoBound, - PartialEqNoBound, - Encode, - Decode, - TypeInfo, - MaxEncodedLen, - RuntimeDebugNoBound, - )] - #[scale_info(skip_type_params(A, F, R, D))] - #[codec(mel_bound())] - pub struct HoldConsideration(F::Balance, PhantomData (A, R, D)>) - where - F: MutateHold; - impl< - A: 'static, - F: 'static + MutateHold, - R: 'static + Get, - D: 'static + Convert, - > Consideration for HoldConsideration - where - F::Balance: Send + Sync, - //>::Balance: Send + Sync, - { - fn new(who: &A, footprint: Footprint) -> Result { - let new = D::convert(footprint); - F::hold(&R::get(), who, new)?; - Ok(Self(new, PhantomData)) - } - fn update(self, who: &A, footprint: Footprint) -> Result { - let new = D::convert(footprint); - if self.0 > new { - F::release(&R::get(), who, self.0 - new, BestEffort)?; - } else if new > self.0 { - F::hold(&R::get(), who, new - self.0)?; - } - Ok(Self(new, PhantomData)) - } - fn drop(self, who: &A) -> Result<(), DispatchError> { - F::release(&R::get(), who, self.0, BestEffort).map(|_| ()) - } - fn burn(self, who: &A) { - let _ = F::burn_held(&R::get(), who, self.0, BestEffort, Force); - } - } -} - parameter_types! { pub const PreimageBaseDeposit: Balance = deposit(2, 64); pub const PreimageByteDeposit: Balance = deposit(0, 1); @@ -1001,14 +973,12 @@ impl pallet_preimage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type Consideration = crate::hold::HoldConsideration< + type Consideration = HoldConsideration< AccountId, Balances, PreimageHoldReason, LinearStoragePrice, >; - //type BaseDeposit = PreimageBaseDeposit; - //type ByteDeposit = PreimageByteDeposit; } parameter_types! { @@ -1166,7 +1136,7 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue = 30, PolkadotXcm: pallet_xcm = 31, CumulusXcm: cumulus_pallet_xcm = 32, - DmpQueue: cumulus_pallet_dmp_queue = 33, + MessageQueue: pallet_message_queue = 33, //Frontier EVMChainId: pallet_evm_chain_id = 40, @@ -1606,7 +1576,7 @@ impl_runtime_apis! 
{ gas_limit: Option, storage_deposit_limit: Option, input_data: Vec, - ) -> pallet_contracts_primitives::ContractExecResult { + ) -> pallet_contracts::ContractExecResult { let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); Contracts::bare_call( origin, @@ -1626,10 +1596,10 @@ impl_runtime_apis! { value: Balance, gas_limit: Option, storage_deposit_limit: Option, - code: pallet_contracts_primitives::Code, + code: pallet_contracts::Code, data: Vec, salt: Vec, - ) -> pallet_contracts_primitives::ContractInstantiateResult + ) -> pallet_contracts::ContractInstantiateResult { let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); Contracts::bare_instantiate( @@ -1650,7 +1620,7 @@ impl_runtime_apis! { code: Vec, storage_deposit_limit: Option, determinism: pallet_contracts::Determinism, - ) -> pallet_contracts_primitives::CodeUploadResult + ) -> pallet_contracts::CodeUploadResult { Contracts::bare_upload_code(origin, code, storage_deposit_limit, determinism) } @@ -1658,7 +1628,7 @@ impl_runtime_apis! { fn get_storage( address: AccountId, key: Vec, - ) -> pallet_contracts_primitives::GetStorageResult { + ) -> pallet_contracts::GetStorageResult { Contracts::get_storage(address, key) } } diff --git a/runtime/src/weights/pallet_ranked_collective.rs b/runtime/src/weights/pallet_ranked_collective.rs index 8a556c3..ce9d5fc 100644 --- a/runtime/src/weights/pallet_ranked_collective.rs +++ b/runtime/src/weights/pallet_ranked_collective.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_ranked_collective` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -29,14 +29,13 @@ // --steps=50 // --repeat=20 // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_ranked_collective // --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -61,8 +60,8 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `42` // Estimated: `3507` - // Minimum execution time: 17_632_000 picoseconds. - Weight::from_parts(18_252_000, 0) + // Minimum execution time: 13_480_000 picoseconds. 
+ Weight::from_parts(13_786_000, 0) .saturating_add(Weight::from_parts(0, 3507)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) @@ -71,24 +70,24 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `FellowshipCollective::MemberCount` (r:11 w:11) /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IdToIndex` (r:11 w:11) + /// Storage: `FellowshipCollective::IdToIndex` (r:11 w:22) /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IndexToId` (r:11 w:11) + /// Storage: `FellowshipCollective::IndexToId` (r:11 w:22) /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn remove_member(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `517 + r * (281 ±0)` + // Measured: `516 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 27_960_000 picoseconds. - Weight::from_parts(30_632_408, 0) + // Minimum execution time: 28_771_000 picoseconds. + Weight::from_parts(29_256_825, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 22_806 - .saturating_add(Weight::from_parts(13_000_901, 0).saturating_mul(r.into())) + // Standard Error: 21_594 + .saturating_add(Weight::from_parts(14_649_527, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) } /// Storage: `FellowshipCollective::Members` (r:1 w:1) @@ -104,11 +103,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `214 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 19_900_000 picoseconds. - Weight::from_parts(20_908_316, 0) + // Minimum execution time: 16_117_000 picoseconds. 
+ Weight::from_parts(16_978_453, 0) .saturating_add(Weight::from_parts(0, 3507)) - // Standard Error: 4_878 - .saturating_add(Weight::from_parts(330_385, 0).saturating_mul(r.into())) + // Standard Error: 4_511 + .saturating_add(Weight::from_parts(324_261, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -116,22 +115,22 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:1) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:2) /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IndexToId` (r:1 w:1) + /// Storage: `FellowshipCollective::IndexToId` (r:1 w:2) /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn demote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `532 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 27_697_000 picoseconds. - Weight::from_parts(30_341_815, 0) + // Minimum execution time: 28_995_000 picoseconds. + Weight::from_parts(31_343_215, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 17_010 - .saturating_add(Weight::from_parts(642_213, 0).saturating_mul(r.into())) + // Standard Error: 16_438 + .saturating_add(Weight::from_parts(637_462, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `FellowshipCollective::Members` (r:1 w:0) /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) @@ -143,10 +142,10 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn vote() -> Weight { // Proof Size summary in bytes: - // Measured: `638` + // Measured: `603` // Estimated: `83866` - // Minimum execution time: 48_275_000 picoseconds. - Weight::from_parts(49_326_000, 0) + // Minimum execution time: 38_820_000 picoseconds. + Weight::from_parts(40_240_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -160,16 +159,34 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// The range of component `n` is `[0, 100]`. fn cleanup_poll(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `434 + n * (50 ±0)` + // Measured: `400 + n * (50 ±0)` // Estimated: `4365 + n * (2540 ±0)` - // Minimum execution time: 15_506_000 picoseconds. - Weight::from_parts(17_634_029, 0) + // Minimum execution time: 12_972_000 picoseconds. 
+ Weight::from_parts(15_829_333, 0) .saturating_add(Weight::from_parts(0, 4365)) - // Standard Error: 2_117 - .saturating_add(Weight::from_parts(1_126_879, 0).saturating_mul(n.into())) + // Standard Error: 1_754 + .saturating_add(Weight::from_parts(1_116_520, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(n.into())) } + /// Storage: `FellowshipCollective::Members` (r:2 w:2) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:2 w:2) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:2 w:4) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:2) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn exchange_member() -> Weight { + // Proof Size summary in bytes: + // Measured: `337` + // Estimated: `6048` + // Minimum execution time: 44_601_000 picoseconds. + Weight::from_parts(45_714_000, 0) + .saturating_add(Weight::from_parts(0, 6048)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(10)) + } } diff --git a/runtime/src/xcm_config.rs b/runtime/src/xcm_config.rs index 433391e..c955ec9 100644 --- a/runtime/src/xcm_config.rs +++ b/runtime/src/xcm_config.rs @@ -8,19 +8,23 @@ use crate::{ }; use frame_support::{ match_types, parameter_types, - traits::{ConstU32, Everything, Nothing}, + traits::{ConstU32, Contains, Everything, Nothing}, weights::Weight, }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; use polkadot_parachain_primitives::primitives::Sibling; use xcm::latest::prelude::*; +use xcm::latest::prelude::{ + Asset as MultiAsset, InteriorLocation as InteriorMultiLocation, Location as MultiLocation, +}; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowTopLevelPaidExecutionFrom, - AllowUnpaidExecutionFrom, CurrencyAdapter, EnsureXcmOrigin, FixedWeightBounds, NativeAsset, - ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - WithComputedOrigin, WithUniqueTopic, + AllowUnpaidExecutionFrom, CurrencyAdapter, EnsureXcmOrigin, FixedWeightBounds, + FrameTransactionalProcessor, NativeAsset, ParentIsPreset, RelayChainAsNative, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, WithComputedOrigin, + WithUniqueTopic, }; use xcm_executor::XcmExecutor; @@ -91,11 +95,11 @@ parameter_types! { pub const MaxAssetsIntoHolding: u32 = 64; } -match_types! { - pub type ParentOrParentsExecutivePlurality: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(Plurality { id: BodyId::Executive, .. 
}) } - }; +pub struct ParentOrParentsExecutivePlurality; +impl Contains for ParentOrParentsExecutivePlurality { + fn contains(location: &Location) -> bool { + matches!(location.unpack(), (1, []) | (1, [Plurality { id: BodyId::Executive, .. }])) + } } pub type Barrier = ( @@ -147,6 +151,7 @@ impl xcm_executor::Config for XcmConfig { type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; type Aliasers = Nothing; + type TransactionalProcessor = FrameTransactionalProcessor; } /// No local origins on this chain are allowed to dispatch XCM sends/executions. @@ -174,8 +179,7 @@ impl pallet_xcm::Config for Runtime { type XcmExecuteFilter = Nothing; // ^ Disable dispatchable execute on the XCM pallet. // Needs to be `Everything` for local testing. - type XcmExecutorConfig = XcmConfig; - type XcmExecutor = XcmExecutor; + type XcmExecutor = XcmExecutor; type XcmTeleportFilter = Everything; type XcmReserveTransferFilter = Everything; type Weigher = FixedWeightBounds; diff --git a/runtime/src/xcms/matches_token_ex.rs b/runtime/src/xcms/matches_token_ex.rs index fae3878..bd8788e 100644 --- a/runtime/src/xcms/matches_token_ex.rs +++ b/runtime/src/xcms/matches_token_ex.rs @@ -17,94 +17,24 @@ //! Various implementations for the `MatchesFungible` trait. // Modified by Alex Wang for extending xcm transmit currency and token -// with different precision. +// with different precision, 2024/03 use frame_support::traits::Get; use sp_std::{collections::btree_map::BTreeMap, marker::PhantomData}; use xcm::latest::{ - AssetId::{Abstract, Concrete}, - AssetInstance, + Asset, AssetId, AssetInstance, Fungibility::{Fungible, NonFungible}, - MultiAsset, MultiLocation, + Location, }; use xcm_executor::traits::{MatchesFungible, MatchesNonFungible}; -/// Converts a `MultiAsset` into balance `B` if it is a concrete fungible with an id equal to that -/// given by `T`'s `Get`. -/// -/// # Example -/// -/// ``` -/// use xcm::latest::{MultiLocation, Parent}; -/// use xcm_builder::IsConcrete; -/// use xcm_executor::traits::MatchesFungible; -/// -/// frame_support::parameter_types! { -/// pub TargetLocation: MultiLocation = Parent.into(); -/// } -/// -/// # fn main() { -/// let asset = (Parent, 999).into(); -/// // match `asset` if it is a concrete asset in `TargetLocation`. -/// assert_eq!( as MatchesFungible>::matches_fungible(&asset), Some(999)); -/// # } -/// ``` pub struct IsConcreteEx(PhantomData, PhantomData); -impl, M: Get>, B: TryFrom> - MatchesFungible for IsConcreteEx -{ - fn matches_fungible(a: &MultiAsset) -> Option { - match (&a.id, &a.fun) { - (Concrete(ref id), Fungible(ref amount)) if id == &T::get() => { - let precision_multiplier = if let Some(v) = M::get().get(id) { *v } else { 1u64 }; - (*amount * u128::from(precision_multiplier)).try_into().ok() - }, - _ => None, - } - } -} -impl, I: TryFrom, M> MatchesNonFungible +impl, M: Get>, B: TryFrom> MatchesFungible for IsConcreteEx { - fn matches_nonfungible(a: &MultiAsset) -> Option { - match (&a.id, &a.fun) { - (Concrete(id), NonFungible(instance)) if id == &T::get() => (*instance).try_into().ok(), - _ => None, - } - } -} - -/// Same as [`IsConcrete`] but for a fungible with abstract location. -/// -/// # Example -/// -/// ``` -/// use xcm::latest::prelude::*; -/// use xcm_builder::IsAbstract; -/// use xcm_executor::traits::{MatchesFungible, MatchesNonFungible}; -/// -/// frame_support::parameter_types! 
{ -/// pub TargetLocation: [u8; 32] = [7u8; 32]; -/// } -/// -/// # fn main() { -/// let asset = ([7u8; 32], 999u128).into(); -/// // match `asset` if it is an abstract asset in `TargetLocation`. -/// assert_eq!( as MatchesFungible>::matches_fungible(&asset), Some(999)); -/// let nft = ([7u8; 32], [42u8; 4]).into(); -/// assert_eq!( -/// as MatchesNonFungible<[u8; 4]>>::matches_nonfungible(&nft), -/// Some([42u8; 4]) -/// ); -/// # } -/// ``` -pub struct IsAbstractEx(PhantomData, PhantomData); -impl, M: Get>, B: TryFrom> MatchesFungible - for IsAbstractEx -{ - fn matches_fungible(a: &MultiAsset) -> Option { + fn matches_fungible(a: &Asset) -> Option { match (&a.id, &a.fun) { - (Abstract(ref id), Fungible(ref amount)) if id == &T::get() => { + (AssetId(ref id), Fungible(ref amount)) if id == &T::get() => { let precision_multiplier = if let Some(v) = M::get().get(id) { *v } else { 1u64 }; (*amount * u128::from(precision_multiplier)).try_into().ok() }, @@ -112,10 +42,10 @@ impl, M: Get>, B: TryFrom> Matche } } } -impl, B: TryFrom, M> MatchesNonFungible for IsAbstractEx { - fn matches_nonfungible(a: &MultiAsset) -> Option { +impl, I: TryFrom, M> MatchesNonFungible for IsConcreteEx { + fn matches_nonfungible(a: &Asset) -> Option { match (&a.id, &a.fun) { - (Abstract(id), NonFungible(instance)) if id == &T::get() => (*instance).try_into().ok(), + (AssetId(id), NonFungible(instance)) if id == &T::get() => (*instance).try_into().ok(), _ => None, } } diff --git a/runtime/src/xcms/xcm_weight.rs b/runtime/src/xcms/xcm_weight.rs index 934c6e8..2e49a16 100644 --- a/runtime/src/xcms/xcm_weight.rs +++ b/runtime/src/xcms/xcm_weight.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -// Modified by Alex Wang 2023/12 +// Modified by Alex Wang 2024/03 +// from polkadot-sdk/polkadot/xcm/xcm-builder/src/weight.rs V1.7.0 use codec::Decode; use frame_support::{ @@ -27,10 +28,10 @@ use frame_support::{ }; use sp_runtime::traits::{SaturatedConversion, Saturating, Zero}; use sp_std::{marker::PhantomData, result::Result}; -use xcm::latest::{prelude::*, Weight}; +use xcm::latest::{prelude::*, GetWeight, Weight}; use xcm_executor::{ traits::{WeightBounds, WeightTrader}, - Assets, + AssetsInHolding, }; pub struct FixedWeightBounds(PhantomData<(T, C, M)>); @@ -116,16 +117,16 @@ where } /// Function trait for handling some revenue. Similar to a negative imbalance (credit) handler, but -/// for a `MultiAsset`. Sensible implementations will deposit the asset in some known treasury or +/// for a `Asset`. Sensible implementations will deposit the asset in some known treasury or /// block-author account. pub trait TakeRevenue { - /// Do something with the given `revenue`, which is a single non-wildcard `MultiAsset`. - fn take_revenue(revenue: MultiAsset); + /// Do something with the given `revenue`, which is a single non-wildcard `Asset`. + fn take_revenue(revenue: Asset); } /// Null implementation just burns the revenue. impl TakeRevenue for () { - fn take_revenue(_revenue: MultiAsset) {} + fn take_revenue(_revenue: Asset) {} } /// Simple fee calculator that requires payment in a single fungible at a fixed rate. 
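For readers following the matcher change above, here is a minimal standalone sketch (illustrative only, not part of the patch; the string keys and the concrete multiplier value are assumptions) of the scaling rule the reworked IsConcreteEx applies: if the matched asset id has an entry in the precision-multiplier map supplied through the `M` parameter, the incoming relay-chain amount is multiplied up into the parachain's native precision, and ids without an entry fall back to a multiplier of 1.

use std::collections::BTreeMap;

// Stand-in for the `M: Get<BTreeMap<...>>` parameter of IsConcreteEx; the asset id is
// simplified to a string so the snippet compiles on its own.
fn scale_to_native(
    multipliers: &BTreeMap<&'static str, u64>,
    asset_id: &str,
    amount: u128,
) -> Option<u128> {
    // Mirror the matcher: use the registered multiplier, or 1 when none is configured.
    let m = multipliers.get(asset_id).copied().unwrap_or(1);
    // The matcher multiplies directly; checked_mul is used here only to keep the example safe.
    amount.checked_mul(u128::from(m))
}

fn main() {
    let mut multipliers = BTreeMap::new();
    // Assumed example: a 10-decimal relay-chain asset mapped into an 18-decimal native balance.
    multipliers.insert("parent", 100_000_000u64);
    assert_eq!(scale_to_native(&multipliers, "parent", 5), Some(500_000_000));
    // Unregistered ids keep their amount unchanged.
    assert_eq!(scale_to_native(&multipliers, "sibling", 5), Some(5));
}

The multiplier map is presumably what lets the fungible transactor configured with this matcher credit a lower-precision relay asset at the parachain's full native precision.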
@@ -145,9 +146,9 @@ impl, R: TakeRevenue> WeightTrader for FixedRateOf fn buy_weight( &mut self, weight: Weight, - payment: Assets, + payment: AssetsInHolding, context: &XcmContext, - ) -> Result { + ) -> Result { log::trace!( target: "xcm::weight", "FixedRateOfFungible::buy_weight weight: {:?}, payment: {:?}, context: {:?}", @@ -167,7 +168,7 @@ impl, R: TakeRevenue> WeightTrader for FixedRateOf Ok(unused) } - fn refund_weight(&mut self, weight: Weight, context: &XcmContext) -> Option { + fn refund_weight(&mut self, weight: Weight, context: &XcmContext) -> Option { log::trace!(target: "xcm::weight", "FixedRateOfFungible::refund_weight weight: {:?}, context: {:?}", weight, context); let (id, units_per_second, units_per_mb) = T::get(); let weight = weight.min(self.0); @@ -196,22 +197,22 @@ impl, R: TakeRevenue> Drop for FixedRateOfFungible /// places any weight bought into the right account. pub struct UsingComponents< WeightToFee: WeightToFeeT, - AssetId: Get, + AssetIdValue: Get, AccountId, Currency: CurrencyT, OnUnbalanced: OnUnbalancedT, >( Weight, Currency::Balance, - PhantomData<(WeightToFee, AssetId, AccountId, Currency, OnUnbalanced)>, + PhantomData<(WeightToFee, AssetIdValue, AccountId, Currency, OnUnbalanced)>, ); impl< WeightToFee: WeightToFeeT, - AssetId: Get, + AssetIdValue: Get, AccountId, Currency: CurrencyT, OnUnbalanced: OnUnbalancedT, - > WeightTrader for UsingComponents + > WeightTrader for UsingComponents { fn new() -> Self { Self(Weight::zero(), Zero::zero(), PhantomData) @@ -220,28 +221,29 @@ impl< fn buy_weight( &mut self, weight: Weight, - payment: Assets, + payment: AssetsInHolding, context: &XcmContext, - ) -> Result { + ) -> Result { log::trace!(target: "xcm::weight", "UsingComponents::buy_weight weight: {:?}, payment: {:?}, context: {:?}", weight, payment, context); let amount = WeightToFee::weight_to_fee(&weight); let u128_amount: u128 = amount.try_into().map_err(|_| XcmError::Overflow)?; - let required = (Concrete(AssetId::get()), u128_amount).into(); + let required = (AssetId(AssetIdValue::get()), u128_amount).into(); let unused = payment.checked_sub(required).map_err(|_| XcmError::TooExpensive)?; self.0 = self.0.saturating_add(weight); self.1 = self.1.saturating_add(amount); Ok(unused) } - fn refund_weight(&mut self, weight: Weight, context: &XcmContext) -> Option { - log::trace!(target: "xcm::weight", "UsingComponents::refund_weight weight: {:?}, context: {:?}", weight, context); + fn refund_weight(&mut self, weight: Weight, context: &XcmContext) -> Option { + log::trace!(target: "xcm::weight", "UsingComponents::refund_weight weight: {:?}, context: {:?}, available weight: {:?}, available amount: {:?}", weight, context, self.0, self.1); let weight = weight.min(self.0); let amount = WeightToFee::weight_to_fee(&weight); self.0 -= weight; self.1 = self.1.saturating_sub(amount); let amount: u128 = amount.saturated_into(); + log::trace!(target: "xcm::weight", "UsingComponents::refund_weight amount to refund: {:?}", amount); if amount > 0 { - Some((AssetId::get(), amount).into()) + Some((AssetIdValue::get(), amount).into()) } else { None } @@ -249,7 +251,7 @@ impl< } impl< WeightToFee: WeightToFeeT, - AssetId: Get, + AssetId: Get, AccountId, Currency: CurrencyT, OnUnbalanced: OnUnbalancedT, @@ -260,30 +262,30 @@ impl< } } -// Weight trader which uses the configured `WeightToFee` to set the right price for weight and then -// places any weight bought into the right account. 
+/// Weight trader which uses the configured `WeightToFee` to set the right price for weight and then +/// places any weight bought into the right account. // use PrecisionMultiplier for different precision between relaychain and Magnet use frame_support::traits::Imbalance; const PRECISION_MULTIPLIER: u128 = 1_000_000; pub struct UsingComponentsEx< WeightToFee: WeightToFeeT, - AssetId: Get, + AssetIdValue: Get, AccountId, Currency: CurrencyT, OnUnbalanced: OnUnbalancedT, >( Weight, Currency::Balance, - PhantomData<(WeightToFee, AssetId, AccountId, Currency, OnUnbalanced)>, + PhantomData<(WeightToFee, AssetIdValue, AccountId, Currency, OnUnbalanced)>, ); impl< WeightToFee: WeightToFeeT, - AssetId: Get, + AssetIdValue: Get, AccountId, Currency: CurrencyT, OnUnbalanced: OnUnbalancedT, - > WeightTrader for UsingComponentsEx + > WeightTrader for UsingComponentsEx { fn new() -> Self { Self(Weight::zero(), Zero::zero(), PhantomData) } @@ -292,33 +294,28 @@ impl< fn buy_weight( &mut self, weight: Weight, - payment: Assets, + payment: AssetsInHolding, context: &XcmContext, - ) -> Result { - log::trace!(target: "runtime::xcm_weight", "UsingComponentsEx::buy_weight weight: {:?}, payment: {:?}, context: {:?}", weight, payment, context); + ) -> Result { + log::trace!(target: "xcm::weight", "UsingComponentsEx::buy_weight weight: {:?}, payment: {:?}, context: {:?}", weight, payment, context); let amount = WeightToFee::weight_to_fee(&weight); - - let mut u128_amount: u128 = amount.saturated_into(); + let mut u128_amount: u128 = amount.try_into().map_err(|_| XcmError::Overflow)?; let is_radix: bool = u128_amount % PRECISION_MULTIPLIER > 0; u128_amount = u128_amount / PRECISION_MULTIPLIER; if is_radix { u128_amount = u128_amount + 1; } - let required = (Concrete(AssetId::get()), u128_amount).into(); + let required = (AssetId(AssetIdValue::get()), u128_amount).into(); let unused = payment.checked_sub(required).map_err(|_| XcmError::TooExpensive)?; - - let amount: Currency::Balance = u128_amount.saturated_into(); self.0 = self.0.saturating_add(weight); self.1 = self.1.saturating_add(amount); - Ok(unused) } - fn refund_weight(&mut self, weight: Weight, context: &XcmContext) -> Option { - log::trace!(target: "runtime::xcm_weight", "UsingComponentsEx::refund_weight weight: {:?}, context: {:?}", weight, context); + fn refund_weight(&mut self, weight: Weight, context: &XcmContext) -> Option { + log::trace!(target: "xcm::weight", "UsingComponentsEx::refund_weight weight: {:?}, context: {:?}, available weight: {:?}, available amount: {:?}", weight, context, self.0, self.1); let weight = weight.min(self.0); let amount = WeightToFee::weight_to_fee(&weight); - let mut u128_amount: u128 = amount.saturated_into(); u128_amount = u128_amount / PRECISION_MULTIPLIER; @@ -326,8 +323,9 @@ impl< self.0 -= weight; self.1 = self.1.saturating_sub(amount); + log::trace!(target: "xcm::weight", "UsingComponentsEx::refund_weight amount to refund: {:?}", amount); if u128_amount > 0 { - Some((AssetId::get(), u128_amount).into()) + Some((AssetIdValue::get(), u128_amount).into()) } else { None } @@ -335,7 +333,7 @@ impl< } impl< WeightToFee: WeightToFeeT, - AssetId: Get, + AssetId: Get, AccountId, Currency: CurrencyT, OnUnbalanced: OnUnbalancedT,
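To make the precision handling in UsingComponentsEx above easier to follow, here is a small standalone sketch (illustrative only, not part of the patch) of the rounding around PRECISION_MULTIPLIER: when weight is bought, the fee computed in native 18-decimal precision is divided down to relay-chain units and rounded up (the `is_radix` branch), while refunds use plain integer division and round down, so the conversion never under-charges and never refunds more than was paid.

const PRECISION_MULTIPLIER: u128 = 1_000_000;

// Units charged when buying weight: divide and round any remainder up.
fn units_to_charge(native_fee: u128) -> u128 {
    let mut units = native_fee / PRECISION_MULTIPLIER;
    if native_fee % PRECISION_MULTIPLIER > 0 {
        units += 1;
    }
    units
}

// Units returned on refund: divide and round down.
fn units_to_refund(native_fee: u128) -> u128 {
    native_fee / PRECISION_MULTIPLIER
}

fn main() {
    // A fee of 2.5 relay units expressed in native precision charges 3 units but would refund only 2.
    assert_eq!(units_to_charge(2_500_000), 3);
    assert_eq!(units_to_refund(2_500_000), 2);
    // Exact multiples convert identically in both directions.
    assert_eq!(units_to_charge(4_000_000), 4);
    assert_eq!(units_to_refund(4_000_000), 4);
}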